-rw-r--r--Documentation/arch/riscv/vm-layout.rst16
-rw-r--r--Documentation/devicetree/bindings/display/panel/anbernic,rg35xx-plus-panel.yaml (renamed from Documentation/devicetree/bindings/display/panel/wl-355608-a8.yaml)15
-rw-r--r--Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml22
-rw-r--r--Documentation/devicetree/bindings/net/microchip,lan8650.yaml74
-rw-r--r--Documentation/devicetree/bindings/net/nxp,tja11xx.yaml62
-rw-r--r--Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.yaml2
-rw-r--r--Documentation/devicetree/bindings/soc/rockchip/grf.yaml10
-rw-r--r--Documentation/netlink/specs/mptcp_pm.yaml1
-rw-r--r--Documentation/netlink/specs/netdev.yaml61
-rw-r--r--Documentation/networking/device_drivers/ethernet/amazon/ena.rst5
-rw-r--r--Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst16
-rw-r--r--Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst3
-rw-r--r--Documentation/networking/devmem.rst269
-rw-r--r--Documentation/networking/index.rst2
-rw-r--r--Documentation/networking/mptcp-sysctl.rst11
-rw-r--r--Documentation/networking/oa-tc6-framework.rst497
-rw-r--r--Documentation/networking/timestamping.rst20
-rw-r--r--Documentation/virt/hyperv/coco.rst260
-rw-r--r--Documentation/virt/hyperv/index.rst1
-rw-r--r--MAINTAINERS18
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/include/uapi/asm/socket.h6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi36
-rw-r--r--arch/arm64/boot/dts/rockchip/rk356x.dtsi7
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-base.dtsi6
-rw-r--r--arch/arm64/kernel/stacktrace.c4
-rw-r--r--arch/mips/include/uapi/asm/socket.h6
-rw-r--r--arch/parisc/include/uapi/asm/socket.h6
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgtable.h4
-rw-r--r--arch/powerpc/include/asm/pgtable-types.h12
-rw-r--r--arch/powerpc/kernel/vdso/vdso32.lds.S4
-rw-r--r--arch/powerpc/kernel/vdso/vdso64.lds.S4
-rw-r--r--arch/powerpc/lib/qspinlock.c10
-rw-r--r--arch/powerpc/mm/nohash/tlb_64e.c2
-rw-r--r--arch/riscv/Kconfig4
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110-common.dtsi6
-rw-r--r--arch/riscv/include/asm/processor.h26
-rw-r--r--arch/riscv/include/asm/sbi.h20
-rw-r--r--arch/riscv/kernel/Makefile6
-rw-r--r--arch/riscv/kernel/sbi.c63
-rw-r--r--arch/riscv/kernel/sbi_ecall.c48
-rw-r--r--arch/riscv/kernel/traps_misaligned.c4
-rw-r--r--arch/riscv/mm/init.c2
-rw-r--r--arch/sparc/include/uapi/asm/socket.h6
-rw-r--r--arch/x86/hyperv/hv_init.c5
-rw-r--r--arch/x86/include/asm/mshyperv.h1
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c21
-rw-r--r--arch/x86/kvm/Kconfig7
-rw-r--r--arch/x86/kvm/mmu/mmu.c4
-rw-r--r--arch/x86/kvm/mmu/spte.c6
-rw-r--r--arch/x86/kvm/mmu/spte.h2
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c8
-rw-r--r--arch/x86/kvm/svm/svm.c15
-rw-r--r--arch/x86/kvm/x86.c6
-rw-r--r--block/bio-integrity.c4
-rw-r--r--drivers/android/binder.c1
-rw-r--r--drivers/block/ublk_drv.c2
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c25
-rw-r--r--drivers/clk/qcom/clk-rcg.h1
-rw-r--r--drivers/clk/qcom/clk-rcg2.c30
-rw-r--r--drivers/clk/qcom/gcc-ipq9574.c12
-rw-r--r--drivers/clk/qcom/gcc-sc8280xp.c48
-rw-r--r--drivers/clk/qcom/gcc-sm8550.c54
-rw-r--r--drivers/clk/qcom/gcc-sm8650.c56
-rw-r--r--drivers/clk/qcom/gcc-x1e80100.c52
-rw-r--r--drivers/clk/starfive/clk-starfive-jh7110-sys.c31
-rw-r--r--drivers/clk/starfive/clk-starfive-jh71x0.h2
-rw-r--r--drivers/clocksource/hyperv_timer.c16
-rw-r--r--drivers/clocksource/timer-imx-tpm.c16
-rw-r--r--drivers/clocksource/timer-of.c17
-rw-r--r--drivers/clocksource/timer-of.h1
-rw-r--r--drivers/cpufreq/amd-pstate.c34
-rw-r--r--drivers/firmware/qcom/qcom_qseecom_uefisecapp.c4
-rw-r--r--drivers/gpio/gpio-rockchip.c1
-rw-r--r--drivers/gpio/gpio-zynqmp-modepin.c1
-rw-r--r--drivers/gpu/drm/Makefile1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.c9
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c6
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.c10
-rw-r--r--drivers/gpu/drm/bridge/Kconfig1
-rw-r--r--drivers/gpu/drm/display/Kconfig24
-rw-r--r--drivers/gpu/drm/display/Makefile2
-rw-r--r--drivers/gpu/drm/display/drm_bridge_connector.c (renamed from drivers/gpu/drm/drm_bridge_connector.c)13
-rw-r--r--drivers/gpu/drm/drm_fbdev_dma.c83
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c31
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c68
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.h6
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h5
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c8
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm.c4
-rw-r--r--drivers/gpu/drm/imx/dcss/Kconfig2
-rw-r--r--drivers/gpu/drm/imx/lcdc/Kconfig2
-rw-r--r--drivers/gpu/drm/ingenic/Kconfig2
-rw-r--r--drivers/gpu/drm/kmb/Kconfig2
-rw-r--r--drivers/gpu/drm/mediatek/Kconfig2
-rw-r--r--drivers/gpu/drm/meson/Kconfig2
-rw-r--r--drivers/gpu/drm/msm/Kconfig1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c2
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig2
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3052c.c2
-rw-r--r--drivers/gpu/drm/panthor/panthor_drv.c23
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.c8
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.c21
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.h1
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c2
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/Kconfig2
-rw-r--r--drivers/gpu/drm/renesas/rz-du/Kconfig2
-rw-r--r--drivers/gpu/drm/renesas/shmobile/Kconfig2
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig4
-rw-r--r--drivers/gpu/drm/tegra/Kconfig1
-rw-r--r--drivers/gpu/drm/tidss/Kconfig2
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h8
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h7
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c23
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_gsc.c12
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c7
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c2
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c4
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c104
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.h16
-rw-r--r--drivers/gpu/drm/xe/xe_tile.c3
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.h9
-rw-r--r--drivers/gpu/drm/xe/xe_vram_freq.c6
-rw-r--r--drivers/gpu/drm/xlnx/Kconfig1
-rw-r--r--drivers/hv/hv.c6
-rw-r--r--drivers/hv/hyperv_vmbus.h6
-rw-r--r--drivers/hv/vmbus_drv.c5
-rw-r--r--drivers/iio/adc/ad7124.c30
-rw-r--r--drivers/iio/adc/ad7173.c13
-rw-r--r--drivers/iio/adc/ad7606.c28
-rw-r--r--drivers/iio/adc/ad7606.h2
-rw-r--r--drivers/iio/adc/ad7606_par.c48
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c2
-rw-r--r--drivers/iio/adc/ti-ads1119.c2
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c13
-rw-r--r--drivers/iio/inkern.c8
-rw-r--r--drivers/md/dm-integrity.c4
-rw-r--r--drivers/misc/fastrpc.c5
-rw-r--r--drivers/misc/keba/cp500.c14
-rw-r--r--drivers/misc/vmw_vmci/vmci_resource.c3
-rw-r--r--drivers/mmc/core/quirks.h22
-rw-r--r--drivers/mmc/core/sd.c4
-rw-r--r--drivers/mmc/host/cqhci-core.c2
-rw-r--r--drivers/mmc/host/dw_mmc.c4
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed.c1
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c11
-rw-r--r--drivers/net/ethernet/Kconfig11
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h72
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c173
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h68
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c163
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c27
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h2
-rw-r--r--drivers/net/ethernet/apple/bmac.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c19
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h6
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.h2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c9
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c3
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c46
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.h1
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink_port.c506
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink_port.h46
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c111
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c65
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c66
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c211
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_eth.c329
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_eth.h33
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c21
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c2
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_dev.c2
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c110
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c395
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h92
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_vf_dev.c2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c315
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c95
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qos.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c92
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h926
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c2604
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h307
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c149
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c997
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c1300
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h361
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c260
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c480
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c2146
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h834
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c1216
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h107
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c579
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c640
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h151
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h514
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c780
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h84
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c1209
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h270
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c493
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h13
-rw-r--r--drivers/net/ethernet/microchip/Kconfig6
-rw-r--r--drivers/net/ethernet/microchip/Makefile1
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c123
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c646
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h4
-rw-r--r--drivers/net/ethernet/microchip/lan865x/Kconfig19
-rw-r--r--drivers/net/ethernet/microchip/lan865x/Makefile6
-rw-r--r--drivers/net/ethernet/microchip/lan865x/lan865x.c429
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c5
-rw-r--r--drivers/net/ethernet/oa_tc6.c1361
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c12
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c4
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase_main.c6
-rw-r--r--drivers/net/ethernet/sfc/ef10.c127
-rw-r--r--drivers/net/ethernet/sfc/efx.c4
-rw-r--r--drivers/net/ethernet/sfc/nic.h2
-rw-r--r--drivers/net/ethernet/sfc/nic_common.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c96
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.h12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c96
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c273
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c153
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c75
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c388
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h39
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c287
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h62
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h6
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h5
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c22
-rw-r--r--drivers/net/netkit.c1
-rw-r--r--drivers/net/phy/dp83822.c35
-rw-r--r--drivers/net/phy/microchip_t1.c413
-rw-r--r--drivers/net/phy/microchip_t1s.c30
-rw-r--r--drivers/net/phy/phylink.c42
-rw-r--r--drivers/net/virtio_net.c95
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h8
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h23
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h8
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h12
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c54
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c7
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.h5
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/libertas_tf.h3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c66
-rw-r--r--drivers/net/wireless/mediatek/mt76/mcu.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/testmode.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c35
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c154
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c56
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c36
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c89
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c37
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c62
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c4
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c10
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c12
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c38
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c30
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c11
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c12
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c508
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c107
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h118
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c180
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c93
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h67
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c26
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c49
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h6
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h7
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c21
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c138
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c34
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c292
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h17
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c23
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c29
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.h24
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c211
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h20
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c31
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c188
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h20
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c21
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c260
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h17
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c124
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h59
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_debugfs.h1
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c2
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c2
-rw-r--r--drivers/nvme/host/core.c3
-rw-r--r--drivers/nvme/host/multipath.c4
-rw-r--r--drivers/nvme/host/pci.c17
-rw-r--r--drivers/nvme/target/admin-cmd.c10
-rw-r--r--drivers/nvme/target/debugfs.c2
-rw-r--r--drivers/nvme/target/tcp.c4
-rw-r--r--drivers/nvmem/core.c6
-rw-r--r--drivers/nvmem/u-boot-env.c7
-rw-r--r--drivers/opp/core.c56
-rw-r--r--drivers/pci/pwrctl/core.c26
-rw-r--r--drivers/pci/pwrctl/pci-pwrctl-pwrseq.c2
-rw-r--r--drivers/pci/remove.c18
-rw-r--r--drivers/pinctrl/qcom/pinctrl-x1e80100.c4
-rw-r--r--drivers/platform/cznic/Kconfig2
-rw-r--r--drivers/platform/x86/asus-wmi.c10
-rw-r--r--drivers/platform/x86/panasonic-laptop.c58
-rw-r--r--drivers/pwm/pwm-stm32.c2
-rw-r--r--drivers/staging/iio/frequency/ad9834.c2
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_frac.h26
-rw-r--r--drivers/ufs/host/ufs-mediatek.c3
-rw-r--r--drivers/uio/uio_hv_generic.c11
-rw-r--r--drivers/usb/dwc3/core.c15
-rw-r--r--drivers/usb/dwc3/core.h2
-rw-r--r--drivers/usb/dwc3/gadget.c41
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-gadget.c12
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-gadget.h9
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c80
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h1
-rw-r--r--fs/bcachefs/alloc_background.c24
-rw-r--r--fs/bcachefs/btree_journal_iter.c2
-rw-r--r--fs/bcachefs/buckets.c15
-rw-r--r--fs/bcachefs/ec.h4
-rw-r--r--fs/bcachefs/extents.c26
-rw-r--r--fs/bcachefs/extents.h23
-rw-r--r--fs/bcachefs/fs.c8
-rw-r--r--fs/bcachefs/fs.h7
-rw-r--r--fs/bcachefs/fsck.c18
-rw-r--r--fs/bcachefs/replicas.c2
-rw-r--r--fs/bcachefs/sysfs.c2
-rw-r--r--fs/libfs.c6
-rw-r--r--fs/netfs/io.c2
-rw-r--r--fs/smb/client/cifssmb.c54
-rw-r--r--fs/smb/client/connect.c14
-rw-r--r--fs/smb/client/inode.c2
-rw-r--r--fs/smb/client/smb2inode.c3
-rw-r--r--fs/smb/client/smb2ops.c8
-rw-r--r--include/kunit/test.h1
-rw-r--r--include/linux/context_tracking.h6
-rw-r--r--include/linux/kvm_host.h10
-rw-r--r--include/linux/mlx5/device.h1
-rw-r--r--include/linux/mlx5/fs.h3
-rw-r--r--include/linux/mlx5/mlx5_ifc.h210
-rw-r--r--include/linux/mlx5/qp.h1
-rw-r--r--include/linux/netdevice.h15
-rw-r--r--include/linux/oa_tc6.h24
-rw-r--r--include/linux/pci-pwrctl.h3
-rw-r--r--include/linux/phylink.h2
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h1
-rw-r--r--include/linux/skbuff.h62
-rw-r--r--include/linux/skbuff_ref.h9
-rw-r--r--include/linux/socket.h1
-rw-r--r--include/linux/stmmac.h28
-rw-r--r--include/linux/virtio_net.h3
-rw-r--r--include/net/cfg80211.h25
-rw-r--r--include/net/libeth/tx.h129
-rw-r--r--include/net/libeth/types.h25
-rw-r--r--include/net/mac80211.h5
-rw-r--r--include/net/mptcp.h4
-rw-r--r--include/net/netdev_rx_queue.h7
-rw-r--r--include/net/netmem.h132
-rw-r--r--include/net/page_pool/helpers.h39
-rw-r--r--include/net/page_pool/types.h23
-rw-r--r--include/net/sock.h2
-rw-r--r--include/net/tcp.h3
-rw-r--r--include/net/xfrm.h45
-rw-r--r--include/sound/sof/topology.h2
-rw-r--r--include/trace/events/page_pool.h12
-rw-r--r--include/trace/events/tcp.h12
-rw-r--r--include/uapi/asm-generic/socket.h6
-rw-r--r--include/uapi/drm/panthor_drm.h6
-rw-r--r--include/uapi/linux/libc-compat.h36
-rw-r--r--include/uapi/linux/mdio.h1
-rw-r--r--include/uapi/linux/net_tstamp.h3
-rw-r--r--include/uapi/linux/netdev.h13
-rw-r--r--include/uapi/linux/uio.h18
-rw-r--r--include/uapi/sound/sof/abi.h2
-rw-r--r--kernel/bpf/btf.c9
-rw-r--r--kernel/bpf/cpumap.c6
-rw-r--r--kernel/bpf/verifier.c36
-rw-r--r--kernel/events/core.c18
-rw-r--r--kernel/events/internal.h1
-rw-r--r--kernel/events/ring_buffer.c2
-rw-r--r--kernel/events/uprobes.c3
-rw-r--r--kernel/printk/printk.c1
-rw-r--r--kernel/trace/trace.c4
-rw-r--r--kernel/trace/trace_osnoise.c10
-rw-r--r--mm/memory.c27
-rw-r--r--net/Kconfig6
-rw-r--r--net/caif/chnl_net.c2
-rw-r--r--net/core/Makefile2
-rw-r--r--net/core/datagram.c6
-rw-r--r--net/core/dev.c33
-rw-r--r--net/core/devmem.c389
-rw-r--r--net/core/devmem.h180
-rw-r--r--net/core/filter.c3
-rw-r--r--net/core/gro.c3
-rw-r--r--net/core/mp_dmabuf_devmem.h44
-rw-r--r--net/core/netdev-genl-gen.c23
-rw-r--r--net/core/netdev-genl-gen.h6
-rw-r--r--net/core/netdev-genl.c139
-rw-r--r--net/core/netdev_rx_queue.c81
-rw-r--r--net/core/netmem_priv.h31
-rw-r--r--net/core/page_pool.c119
-rw-r--r--net/core/page_pool_priv.h46
-rw-r--r--net/core/page_pool_user.c32
-rw-r--r--net/core/skbuff.c112
-rw-r--r--net/core/skmsg.c2
-rw-r--r--net/core/sock.c68
-rw-r--r--net/core/sock_map.c1
-rw-r--r--net/dsa/tag_ksz.c5
-rw-r--r--net/ethtool/common.c9
-rw-r--r--net/ethtool/phy.c2
-rw-r--r--net/hsr/hsr_device.c5
-rw-r--r--net/hsr/hsr_main.h1
-rw-r--r--net/hsr/hsr_slave.c11
-rw-r--r--net/ipv4/esp4.c3
-rw-r--r--net/ipv4/fou_core.c4
-rw-r--r--net/ipv4/tcp.c272
-rw-r--r--net/ipv4/tcp_bpf.c4
-rw-r--r--net/ipv4/tcp_input.c13
-rw-r--r--net/ipv4/tcp_ipv4.c16
-rw-r--r--net/ipv4/tcp_minisocks.c2
-rw-r--r--net/ipv4/tcp_output.c5
-rw-r--r--net/ipv4/tcp_timer.c1
-rw-r--r--net/ipv6/esp6.c3
-rw-r--r--net/mac80211/airtime.c140
-rw-r--r--net/mac80211/cfg.c49
-rw-r--r--net/mac80211/chan.c1
-rw-r--r--net/mac80211/ieee80211_i.h8
-rw-r--r--net/mac80211/iface.c25
-rw-r--r--net/mac80211/link.c12
-rw-r--r--net/mac80211/mlme.c13
-rw-r--r--net/mac80211/pm.c2
-rw-r--r--net/mac80211/scan.c6
-rw-r--r--net/mac80211/util.c80
-rw-r--r--net/mptcp/ctrl.c133
-rw-r--r--net/mptcp/mib.c3
-rw-r--r--net/mptcp/mib.h3
-rw-r--r--net/mptcp/pm_netlink.c13
-rw-r--r--net/mptcp/protocol.c18
-rw-r--r--net/mptcp/protocol.h16
-rw-r--r--net/mptcp/subflow.c4
-rw-r--r--net/netfilter/nf_flow_table_core.c6
-rw-r--r--net/netfilter/nf_flow_table_inet.c2
-rw-r--r--net/netfilter/nft_socket.c48
-rw-r--r--net/packet/af_packet.c4
-rw-r--r--net/sched/sch_cake.c53
-rw-r--r--net/socket.c10
-rw-r--r--net/wireless/core.h8
-rw-r--r--net/wireless/ibss.c2
-rw-r--r--net/wireless/mesh.c2
-rw-r--r--net/wireless/mlme.c20
-rw-r--r--net/wireless/nl80211.c62
-rw-r--r--net/wireless/rdev-ops.h13
-rw-r--r--net/wireless/reg.c19
-rw-r--r--net/wireless/scan.c45
-rw-r--r--net/wireless/sme.c3
-rw-r--r--net/wireless/trace.h40
-rw-r--r--net/xdp/xsk_buff_pool.c15
-rw-r--r--net/xdp/xsk_queue.h5
-rw-r--r--net/xfrm/xfrm_device.c6
-rw-r--r--net/xfrm/xfrm_policy.c222
-rw-r--r--sound/pci/hda/patch_conexant.c11
-rw-r--r--sound/pci/hda/patch_hdmi.c1
-rw-r--r--sound/pci/hda/patch_realtek.c22
-rw-r--r--sound/soc/amd/yc/acp6x-mach.c7
-rw-r--r--sound/soc/codecs/chv3-codec.c1
-rw-r--r--sound/soc/codecs/lpass-va-macro.c11
-rw-r--r--sound/soc/codecs/tda7419.c1
-rw-r--r--sound/soc/google/chv3-i2s.c1
-rw-r--r--sound/soc/intel/boards/bxt_rt298.c2
-rw-r--r--sound/soc/intel/boards/bytcht_cx2072x.c2
-rw-r--r--sound/soc/intel/boards/bytcht_da7213.c2
-rw-r--r--sound/soc/intel/boards/bytcht_es8316.c2
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c2
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c2
-rw-r--r--sound/soc/intel/boards/bytcr_wm5102.c2
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c2
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cht-match.c1
-rw-r--r--sound/soc/intel/keembay/kmb_platform.c1
-rw-r--r--sound/soc/mediatek/mt8188/mt8188-mt6359.c17
-rw-r--r--sound/soc/soc-dapm.c1
-rw-r--r--sound/soc/sof/topology.c2
-rw-r--r--sound/soc/sunxi/sun4i-i2s.c143
-rw-r--r--sound/soc/tegra/tegra210_ahub.c10
-rw-r--r--tools/hv/Makefile2
-rwxr-xr-x[-rw-r--r--]tools/hv/lsvmbus2
-rw-r--r--tools/include/uapi/linux/netdev.h13
-rw-r--r--tools/net/ynl/lib/.gitignore1
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h6
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf.c34
-rw-r--r--tools/testing/selftests/bpf/prog_tests/dynptr.c37
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tp_btf_nullable.c14
-rw-r--r--tools/testing/selftests/bpf/progs/dynptr_fail.c25
-rw-r--r--tools/testing/selftests/bpf/progs/dynptr_success.c23
-rw-r--r--tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c24
-rw-r--r--tools/testing/selftests/bpf/xskxceiver.c43
-rw-r--r--tools/testing/selftests/bpf/xskxceiver.h1
-rw-r--r--tools/testing/selftests/net/.gitignore1
-rw-r--r--tools/testing/selftests/net/Makefile11
-rw-r--r--tools/testing/selftests/net/lib/csum.c16
-rw-r--r--tools/testing/selftests/net/mptcp/Makefile2
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh4
-rw-r--r--tools/testing/selftests/net/ncdevmem.c570
-rw-r--r--tools/testing/selftests/net/packetdrill/Makefile1
-rw-r--r--tools/testing/selftests/net/packetdrill/config6
-rwxr-xr-xtools/testing/selftests/net/packetdrill/ksft_runner.sh4
-rwxr-xr-xtools/testing/selftests/net/packetdrill/set_sysctls.py38
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-1pkt.pkt56
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-2pkt-send-5pkt.pkt33
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-2pkt-send-6pkt.pkt34
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-2pkt.pkt42
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-4pkt.pkt35
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-after-idle.pkt39
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-after-win-update.pkt50
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-app-limited-9-packets-out.pkt38
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-app-limited.pkt36
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-fq-ack-per-2pkt.pkt63
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_basic.pkt55
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_batch.pkt41
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_client.pkt30
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_closed.pkt44
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_edge.pkt61
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_exclusive.pkt63
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_oneshot.pkt66
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-client.pkt56
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-server.pkt44
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_maxfrags.pkt118
-rw-r--r--tools/testing/selftests/net/packetdrill/tcp_zerocopy_small.pkt57
-rw-r--r--tools/testing/selftests/net/rxtimestamp.c18
-rwxr-xr-xtools/testing/selftests/net/xfrm_policy_add_speed.sh83
-rw-r--r--tools/testing/selftests/riscv/mm/mmap_bottomup.c2
-rw-r--r--tools/testing/selftests/riscv/mm/mmap_default.c2
-rw-r--r--tools/testing/selftests/riscv/mm/mmap_test.h67
694 files changed, 34943 insertions, 4720 deletions
diff --git a/Documentation/arch/riscv/vm-layout.rst b/Documentation/arch/riscv/vm-layout.rst
index 077b968dcc81..eabec99b5852 100644
--- a/Documentation/arch/riscv/vm-layout.rst
+++ b/Documentation/arch/riscv/vm-layout.rst
@@ -134,19 +134,3 @@ RISC-V Linux Kernel SV57
ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | modules, BPF
ffffffff80000000 | -2 GB | ffffffffffffffff | 2 GB | kernel
__________________|____________|__________________|_________|____________________________________________________________
-
-
-Userspace VAs
---------------------
-To maintain compatibility with software that relies on the VA space with a
-maximum of 48 bits the kernel will, by default, return virtual addresses to
-userspace from a 48-bit range (sv48). This default behavior is achieved by
-passing 0 into the hint address parameter of mmap. On CPUs with an address space
-smaller than sv48, the CPU maximum supported address space will be the default.
-
-Software can "opt-in" to receiving VAs from another VA space by providing
-a hint address to mmap. When a hint address is passed to mmap, the returned
-address will never use more bits than the hint address. For example, if a hint
-address of `1 << 40` is passed to mmap, a valid returned address will never use
-bits 41 through 63. If no mappable addresses are available in that range, mmap
-will return `MAP_FAILED`.
diff --git a/Documentation/devicetree/bindings/display/panel/wl-355608-a8.yaml b/Documentation/devicetree/bindings/display/panel/anbernic,rg35xx-plus-panel.yaml
index e552d01b52b9..1d67492ebd3b 100644
--- a/Documentation/devicetree/bindings/display/panel/wl-355608-a8.yaml
+++ b/Documentation/devicetree/bindings/display/panel/anbernic,rg35xx-plus-panel.yaml
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/display/panel/wl-355608-a8.yaml#
+$id: http://devicetree.org/schemas/display/panel/anbernic,rg35xx-plus-panel.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: WL-355608-A8 3.5" (640x480 pixels) 24-bit IPS LCD panel
+title: Anbernic RG35XX series (WL-355608-A8) 3.5" 640x480 24-bit IPS LCD panel
maintainers:
- Ryan Walklin <[email protected]>
@@ -15,7 +15,14 @@ allOf:
properties:
compatible:
- const: wl-355608-a8
+ oneOf:
+ - const: anbernic,rg35xx-plus-panel
+ - items:
+ - enum:
+ - anbernic,rg35xx-2024-panel
+ - anbernic,rg35xx-h-panel
+ - anbernic,rg35xx-sp-panel
+ - const: anbernic,rg35xx-plus-panel
reg:
maxItems: 1
@@ -40,7 +47,7 @@ examples:
#size-cells = <0>;
panel@0 {
- compatible = "wl-355608-a8";
+ compatible = "anbernic,rg35xx-plus-panel";
reg = <0>;
spi-3wire;
diff --git a/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml b/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
index ee7a65b528cd..d1e2bca3c503 100644
--- a/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
@@ -58,18 +58,18 @@ allOf:
- const: timing-adjustment
amlogic,tx-delay-ns:
- $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 2, 4, 6]
+ default: 2
description:
- The internal RGMII TX clock delay (provided by this driver) in
- nanoseconds. Allowed values are 0ns, 2ns, 4ns, 6ns.
- When phy-mode is set to "rgmii" then the TX delay should be
- explicitly configured. When not configured a fallback of 2ns is
- used. When the phy-mode is set to either "rgmii-id" or "rgmii-txid"
- the TX clock delay is already provided by the PHY. In that case
- this property should be set to 0ns (which disables the TX clock
- delay in the MAC to prevent the clock from going off because both
- PHY and MAC are adding a delay).
- Any configuration is ignored when the phy-mode is set to "rmii".
+ The internal RGMII TX clock delay (provided by this driver)
+ in nanoseconds. When phy-mode is set to "rgmii" then the TX
+ delay should be explicitly configured. When the phy-mode is
+ set to either "rgmii-id" or "rgmii-txid" the TX clock delay
+ is already provided by the PHY. In that case this property
+ should be set to 0ns (which disables the TX clock delay in
+ the MAC to prevent the clock from going off because both
+ PHY and MAC are adding a delay). Any configuration is
+ ignored when the phy-mode is set to "rmii".
amlogic,rx-delay-ns:
deprecated: true
diff --git a/Documentation/devicetree/bindings/net/microchip,lan8650.yaml b/Documentation/devicetree/bindings/net/microchip,lan8650.yaml
new file mode 100644
index 000000000000..61e11d4a07c4
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/microchip,lan8650.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/microchip,lan8650.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip LAN8650/1 10BASE-T1S MACPHY Ethernet Controllers
+
+maintainers:
+ - Parthiban Veerasooran <[email protected]>
+
+description:
+ The LAN8650/1 combines a Media Access Controller (MAC) and an Ethernet
+ PHY to enable 10BASE-T1S networks. The Ethernet Media Access Controller
+ (MAC) module implements a 10 Mbps half duplex Ethernet MAC, compatible
+ with the IEEE 802.3 standard and a 10BASE-T1S physical layer transceiver
+ integrated into the LAN8650/1. The communication between the Host and
+ the MAC-PHY is specified in the OPEN Alliance 10BASE-T1x MACPHY Serial
+ Interface (TC6).
+
+allOf:
+ - $ref: /schemas/net/ethernet-controller.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - const: microchip,lan8650
+ - items:
+ - const: microchip,lan8651
+ - const: microchip,lan8650
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description:
+ Interrupt from MAC-PHY asserted in the event of Receive Chunks
+ Available, Transmit Chunk Credits Available and Extended Status
+ Event.
+ maxItems: 1
+
+ spi-max-frequency:
+ minimum: 15000000
+ maximum: 25000000
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - spi-max-frequency
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet@0 {
+ compatible = "microchip,lan8651", "microchip,lan8650";
+ reg = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&eth0_pins>;
+ interrupt-parent = <&gpio>;
+ interrupts = <6 IRQ_TYPE_EDGE_FALLING>;
+ local-mac-address = [04 05 06 01 02 03];
+ spi-max-frequency = <15000000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/nxp,tja11xx.yaml b/Documentation/devicetree/bindings/net/nxp,tja11xx.yaml
index 85bfa45f5122..a754a61adc2d 100644
--- a/Documentation/devicetree/bindings/net/nxp,tja11xx.yaml
+++ b/Documentation/devicetree/bindings/net/nxp,tja11xx.yaml
@@ -14,8 +14,53 @@ maintainers:
description:
Bindings for NXP TJA11xx automotive PHYs
+properties:
+ compatible:
+ enum:
+ - ethernet-phy-id0180.dc40
+ - ethernet-phy-id0180.dc41
+ - ethernet-phy-id0180.dc48
+ - ethernet-phy-id0180.dd00
+ - ethernet-phy-id0180.dd01
+ - ethernet-phy-id0180.dd02
+ - ethernet-phy-id0180.dc80
+ - ethernet-phy-id0180.dc82
+ - ethernet-phy-id001b.b010
+ - ethernet-phy-id001b.b013
+ - ethernet-phy-id001b.b030
+ - ethernet-phy-id001b.b031
+
allOf:
- $ref: ethernet-phy.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - ethernet-phy-id0180.dc40
+ - ethernet-phy-id0180.dc41
+ - ethernet-phy-id0180.dc48
+ - ethernet-phy-id0180.dd00
+ - ethernet-phy-id0180.dd01
+ - ethernet-phy-id0180.dd02
+
+ then:
+ properties:
+ nxp,rmii-refclk-in:
+ type: boolean
+ description: |
+ The REF_CLK is provided for both transmitted and received data
+ in RMII mode. This clock signal is provided by the PHY and is
+ typically derived from an external 25MHz crystal. Alternatively,
+ a 50MHz clock signal generated by an external oscillator can be
+ connected to pin REF_CLK. A third option is to connect a 25MHz
+ clock to pin CLK_IN_OUT. So, the REF_CLK should be configured
+ as input or output according to the actual circuit connection.
+ If present, indicates that the REF_CLK will be configured as
+ interface reference clock input when RMII mode enabled.
+ If not present, the REF_CLK will be configured as interface
+ reference clock output when RMII mode enabled.
+ Only supported on TJA1100 and TJA1101.
patternProperties:
"^ethernet-phy@[0-9a-f]+$":
@@ -32,22 +77,6 @@ patternProperties:
description:
The ID number for the child PHY. Should be +1 of parent PHY.
- nxp,rmii-refclk-in:
- type: boolean
- description: |
- The REF_CLK is provided for both transmitted and received data
- in RMII mode. This clock signal is provided by the PHY and is
- typically derived from an external 25MHz crystal. Alternatively,
- a 50MHz clock signal generated by an external oscillator can be
- connected to pin REF_CLK. A third option is to connect a 25MHz
- clock to pin CLK_IN_OUT. So, the REF_CLK should be configured
- as input or output according to the actual circuit connection.
- If present, indicates that the REF_CLK will be configured as
- interface reference clock input when RMII mode enabled.
- If not present, the REF_CLK will be configured as interface
- reference clock output when RMII mode enabled.
- Only supported on TJA1100 and TJA1101.
-
required:
- reg
@@ -60,6 +89,7 @@ examples:
#size-cells = <0>;
tja1101_phy0: ethernet-phy@4 {
+ compatible = "ethernet-phy-id0180.dc40";
reg = <0x4>;
nxp,rmii-refclk-in;
};
diff --git a/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.yaml b/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.yaml
index 917c40d5c382..1cbe44ab23b1 100644
--- a/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.yaml
+++ b/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.yaml
@@ -28,7 +28,7 @@ unevaluatedProperties: false
examples:
- |
- nvmem {
+ soc-nvmem {
compatible = "xlnx,zynqmp-nvmem-fw";
nvmem-layout {
compatible = "fixed-layout";
diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
index 78c6d5b64138..35b20e53b513 100644
--- a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
+++ b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
@@ -31,11 +31,17 @@ properties:
- rockchip,rk3588-pcie3-pipe-grf
- rockchip,rk3588-usb-grf
- rockchip,rk3588-usbdpphy-grf
- - rockchip,rk3588-vo-grf
+ - rockchip,rk3588-vo0-grf
+ - rockchip,rk3588-vo1-grf
- rockchip,rk3588-vop-grf
- rockchip,rv1108-usbgrf
- const: syscon
- items:
+ - const: rockchip,rk3588-vo-grf
+ - const: syscon
+ deprecated: true
+ description: Use rockchip,rk3588-vo{0,1}-grf instead.
+ - items:
- enum:
- rockchip,px30-grf
- rockchip,px30-pmugrf
@@ -262,6 +268,8 @@ allOf:
contains:
enum:
- rockchip,rk3588-vo-grf
+ - rockchip,rk3588-vo0-grf
+ - rockchip,rk3588-vo1-grf
then:
required:
diff --git a/Documentation/netlink/specs/mptcp_pm.yaml b/Documentation/netlink/specs/mptcp_pm.yaml
index af525ed29792..30d8342cacc8 100644
--- a/Documentation/netlink/specs/mptcp_pm.yaml
+++ b/Documentation/netlink/specs/mptcp_pm.yaml
@@ -109,7 +109,6 @@ attribute-sets:
-
name: port
type: u16
- byte-order: big-endian
-
name: flags
type: u32
diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml
index 959755be4d7f..08412c279297 100644
--- a/Documentation/netlink/specs/netdev.yaml
+++ b/Documentation/netlink/specs/netdev.yaml
@@ -167,6 +167,10 @@ attribute-sets:
"re-attached", they are just waiting to disappear.
Attribute is absent if Page Pool has not been detached, and
can still be used to allocate new memory.
+ -
+ name: dmabuf
+ doc: ID of the dmabuf this page-pool is attached to.
+ type: u32
-
name: page-pool-info
subset-of: page-pool
@@ -268,6 +272,10 @@ attribute-sets:
name: napi-id
doc: ID of the NAPI instance which services this queue.
type: u32
+ -
+ name: dmabuf
+ doc: ID of the dmabuf attached to this queue, if any.
+ type: u32
-
name: qstats
@@ -457,6 +465,39 @@ attribute-sets:
Number of times driver re-started accepting send
requests to this queue from the stack.
type: uint
+ -
+ name: queue-id
+ subset-of: queue
+ attributes:
+ -
+ name: id
+ -
+ name: type
+ -
+ name: dmabuf
+ attributes:
+ -
+ name: ifindex
+ doc: netdev ifindex to bind the dmabuf to.
+ type: u32
+ checks:
+ min: 1
+ -
+ name: queues
+ doc: receive queues to bind the dmabuf to.
+ type: nest
+ nested-attributes: queue-id
+ multi-attr: true
+ -
+ name: fd
+ doc: dmabuf file descriptor to bind.
+ type: u32
+ -
+ name: id
+ doc: id of the dmabuf binding
+ type: u32
+ checks:
+ min: 1
operations:
list:
@@ -510,6 +551,7 @@ operations:
- inflight
- inflight-mem
- detach-time
+ - dmabuf
dump:
reply: *pp-reply
config-cond: page-pool
@@ -574,6 +616,7 @@ operations:
- type
- napi-id
- ifindex
+ - dmabuf
dump:
request:
attributes:
@@ -619,6 +662,24 @@ operations:
- rx-bytes
- tx-packets
- tx-bytes
+ -
+ name: bind-rx
+ doc: Bind dmabuf to netdev
+ attribute-set: dmabuf
+ flags: [ admin-perm ]
+ do:
+ request:
+ attributes:
+ - ifindex
+ - fd
+ - queues
+ reply:
+ attributes:
+ - id
+
+kernel-family:
+ headers: [ "linux/list.h"]
+ sock-priv: struct list_head
mcast-groups:
list:
diff --git a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
index a4c7d0c65fd7..4561e8ab9e08 100644
--- a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
+++ b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
@@ -230,6 +230,11 @@ per-queue stats) from the device.
In addition the driver logs the stats to syslog upon device reset.
+On supported instance types, the statistics will also include the
+ENA Express data (fields prefixed with `ena_srd`). For complete
+documentation of ENA Express data, refer to
+https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ena-express.html#ena-express-monitor
+
MTU
===
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
index 3bd72577af9a..99d95be4d159 100644
--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
+++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
@@ -218,6 +218,22 @@ the software port.
[#accel]_.
- Informative
+ * - `rx[i]_hds_nosplit_packets`
+ - Number of packets that were not split in header/data split mode. A
+ packet will not get split when the hardware does not support its
+ protocol splitting. An example of such a protocol is ICMPv4/v6. Currently
+ TCP and UDP with IPv4/IPv6 are supported for header/data split
+ [#accel]_.
+ - Informative
+
+ * - `rx[i]_hds_nosplit_bytes`
+ - Number of bytes for packets that were not split in header/data split
+ mode. A packet will not get split when the hardware does not support its
+ protocol splitting. An example of such a protocol is ICMPv4/v6. Currently
+ TCP and UDP with IPv4/IPv6 are supported for header/data split
+ [#accel]_.
+ - Informative
+
* - `rx[i]_lro_packets`
- The number of LRO packets received on ring i [#accel]_.
- Acceleration
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst
index 20d3b7e87049..34e911480108 100644
--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst
+++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/kconfig.rst
@@ -130,6 +130,9 @@ Enabling the driver and kconfig options
| Build support for software-managed steering in the NIC.
+**CONFIG_MLX5_HW_STEERING=(y/n)**
+
+| Build support for hardware-managed steering in the NIC.
**CONFIG_MLX5_TC_CT=(y/n)**
diff --git a/Documentation/networking/devmem.rst b/Documentation/networking/devmem.rst
new file mode 100644
index 000000000000..a55bf21f671c
--- /dev/null
+++ b/Documentation/networking/devmem.rst
@@ -0,0 +1,269 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================
+Device Memory TCP
+=================
+
+
+Intro
+=====
+
+Device memory TCP (devmem TCP) enables receiving data directly into device
+memory (dmabuf). The feature is currently implemented for TCP sockets.
+
+
+Opportunity
+-----------
+
+A large number of data transfers have device memory as the source and/or
+destination. Accelerators drastically increased the prevalence of such
+transfers. Some examples include:
+
+- Distributed training, where ML accelerators, such as GPUs on different hosts,
+ exchange data.
+
+- Distributed raw block storage applications transfer large amounts of data with
+ remote SSDs. Much of this data does not require host processing.
+
+Typically the Device-to-Device data transfers in the network are implemented as
+the following low-level operations: Device-to-Host copy, Host-to-Host network
+transfer, and Host-to-Device copy.
+
+The flow involving host copies is suboptimal, especially for bulk data transfers,
+and can put significant strains on system resources such as host memory
+bandwidth and PCIe bandwidth.
+
+Devmem TCP optimizes this use case by implementing socket APIs that enable
+the user to receive incoming network packets directly into device memory.
+
+Packet payloads go directly from the NIC to device memory.
+
+Packet headers go to host memory and are processed by the TCP/IP stack
+normally. The NIC must support header split to achieve this.
+
+Advantages:
+
+- Alleviate host memory bandwidth pressure, compared to existing
+ network-transfer + device-copy semantics.
+
+- Alleviate PCIe bandwidth pressure, by limiting data transfer to the lowest
+ level of the PCIe tree, compared to the traditional path which sends data
+ through the root complex.
+
+
+More Info
+---------
+
+ slides, video
+ https://netdevconf.org/0x17/sessions/talk/device-memory-tcp.html
+
+ patchset
+ [PATCH net-next v24 00/13] Device Memory TCP
+ https://lore.kernel.org/netdev/[email protected]/
+
+
+Interface
+=========
+
+
+Example
+-------
+
+tools/testing/selftests/net/ncdevmem.c:do_server shows an example of setting up
+the RX path of this API.
+
+
+NIC Setup
+---------
+
+Header split, flow steering, & RSS are required features for devmem TCP.
+
+Header split is used to split incoming packets into a header buffer in host
+memory, and a payload buffer in device memory.
+
+Flow steering & RSS are used to ensure that only flows targeting devmem land on
+an RX queue bound to devmem.
+
+Enable header split & flow steering::
+
+ # enable header split
+ ethtool -G eth1 tcp-data-split on
+
+
+ # enable flow steering
+ ethtool -K eth1 ntuple on
+
+Configure RSS to steer all traffic away from the target RX queue (queue 15 in
+this example)::
+
+ ethtool --set-rxfh-indir eth1 equal 15
+
+
+The user must bind a dmabuf to any number of RX queues on a given NIC using
+the netlink API::
+
+ /* Bind dmabuf to NIC RX queue 15 */
+ struct netdev_queue *queues;
+ queues = malloc(sizeof(*queues) * 1);
+
+ queues[0]._present.type = 1;
+ queues[0]._present.idx = 1;
+ queues[0].type = NETDEV_RX_QUEUE_TYPE_RX;
+ queues[0].idx = 15;
+
+ *ys = ynl_sock_create(&ynl_netdev_family, &yerr);
+
+ req = netdev_bind_rx_req_alloc();
+ netdev_bind_rx_req_set_ifindex(req, 1 /* ifindex */);
+ netdev_bind_rx_req_set_dmabuf_fd(req, dmabuf_fd);
+ __netdev_bind_rx_req_set_queues(req, queues, n_queue_index);
+
+ rsp = netdev_bind_rx(*ys, req);
+
+ dmabuf_id = rsp->dmabuf_id;
+
+
+The netlink API returns a dmabuf_id: a unique ID that refers to this dmabuf
+that has been bound.
+
+The user can unbind the dmabuf from the netdevice by closing the netlink socket
+that established the binding. We do this so that the binding is automatically
+unbound even if the userspace process crashes.
+
+Note that any reasonably well-behaved dmabuf from any exporter should work with
+devmem TCP, even if the dmabuf is not actually backed by devmem. An example of
+this is udmabuf, which wraps user memory (non-devmem) in a dmabuf.
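+
+Such a udmabuf is convenient for exercising devmem TCP on hosts without real
+device memory. A minimal sketch of creating one from a memfd via /dev/udmabuf
+might look like the following (error handling and page-size alignment of size
+are omitted, the udmabuf driver must be enabled, and create_udmabuf() is just
+an illustrative helper, not part of this API)::
+
+  /* build with -D_GNU_SOURCE for memfd_create() and F_ADD_SEALS */
+  #include <fcntl.h>
+  #include <unistd.h>
+  #include <sys/ioctl.h>
+  #include <sys/mman.h>
+  #include <linux/udmabuf.h>
+
+  int create_udmabuf(size_t size)
+  {
+          struct udmabuf_create create = { 0 };
+          int devfd, memfd, buf;
+
+          /* back the dmabuf with sealed anonymous memory */
+          memfd = memfd_create("devmem-test", MFD_ALLOW_SEALING);
+          ftruncate(memfd, size);
+          fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
+
+          devfd = open("/dev/udmabuf", O_RDWR);
+          create.memfd = memfd;
+          create.offset = 0;
+          create.size = size;
+
+          buf = ioctl(devfd, UDMABUF_CREATE, &create);
+          close(devfd);
+          close(memfd);
+
+          /* on success, the dmabuf fd to pass as dmabuf_fd above */
+          return buf;
+  }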
+
+
+Socket Setup
+------------
+
+The socket must be flow steered to the dmabuf bound RX queue::
+
+ ethtool -N eth1 flow-type tcp4 ... queue 15
+
+
+Receiving data
+--------------
+
+The user application must signal to the kernel that it is capable of receiving
+devmem data by passing the MSG_SOCK_DEVMEM flag to recvmsg::
+
+ ret = recvmsg(fd, &msg, MSG_SOCK_DEVMEM);
+
+Applications that do not specify the MSG_SOCK_DEVMEM flag will receive an EFAULT
+on devmem data.
+
+Devmem data is received directly into the dmabuf bound to the NIC in 'NIC
+Setup', and the kernel signals such to the user via the SCM_DEVMEM_* cmsgs::
+
+ for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
+ if (cm->cmsg_level != SOL_SOCKET ||
+ (cm->cmsg_type != SCM_DEVMEM_DMABUF &&
+ cm->cmsg_type != SCM_DEVMEM_LINEAR))
+ continue;
+
+ dmabuf_cmsg = (struct dmabuf_cmsg *)CMSG_DATA(cm);
+
+ if (cm->cmsg_type == SCM_DEVMEM_DMABUF) {
+ /* Frag landed in dmabuf.
+ *
+ * dmabuf_cmsg->dmabuf_id is the dmabuf the
+ * frag landed on.
+ *
+ * dmabuf_cmsg->frag_offset is the offset into
+ * the dmabuf where the frag starts.
+ *
+ * dmabuf_cmsg->frag_size is the size of the
+ * frag.
+ *
+ * dmabuf_cmsg->frag_token is a token used to
+ * refer to this frag for later freeing.
+ */
+
+ struct dmabuf_token token;
+ token.token_start = dmabuf_cmsg->frag_token;
+ token.token_count = 1;
+ continue;
+ }
+
+ if (cm->cmsg_type == SCM_DEVMEM_LINEAR)
+ /* Frag landed in linear buffer.
+ *
+ * dmabuf_cmsg->frag_size is the size of the
+ * frag.
+ */
+ continue;
+
+ }
+
+Applications may receive two types of cmsgs:
+
+- SCM_DEVMEM_DMABUF: this indicates the fragment landed in the dmabuf indicated
+ by dmabuf_id.
+
+- SCM_DEVMEM_LINEAR: this indicates the fragment landed in the linear buffer.
+  This typically happens when the NIC is unable to split the packet at the
+  header boundary, such that part (or all) of the payload ends up in host
+  memory.
+
+Applications may receive no SCM_DEVMEM_* cmsgs. That indicates non-devmem,
+regular TCP data that landed on an RX queue not bound to a dmabuf.
+
+
+Freeing frags
+-------------
+
+Frags received via SCM_DEVMEM_DMABUF are pinned by the kernel while the user
+processes the frag. The user must return the frag to the kernel via
+SO_DEVMEM_DONTNEED::
+
+ ret = setsockopt(client_fd, SOL_SOCKET, SO_DEVMEM_DONTNEED, &token,
+ sizeof(token));
+
+The user must ensure the tokens are returned to the kernel in a timely manner.
+Failure to do so will exhaust the limited dmabuf memory bound to the RX queue
+and lead to packet drops.
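+
+Tokens can also be returned in batches by passing an array of tokens. A sketch
+of such a batched return (the batch size of 128 here is an arbitrary
+illustrative choice)::
+
+  struct dmabuf_token tokens[128];
+  int n = 0;
+
+  /* ... fill tokens[0..n-1] from received SCM_DEVMEM_DMABUF cmsgs ... */
+
+  ret = setsockopt(client_fd, SOL_SOCKET, SO_DEVMEM_DONTNEED, tokens,
+                   sizeof(tokens[0]) * n);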
+
+
+Implementation & Caveats
+========================
+
+Unreadable skbs
+---------------
+
+Devmem payloads are inaccessible to the kernel processing the packets. This
+results in a few quirks for payloads of devmem skbs:
+
+- Loopback is not functional. Loopback relies on copying the payload, which is
+ not possible with devmem skbs.
+
+- Software checksum calculation fails.
+
+- tcpdump and BPF cannot access devmem packet payloads.
+
+
+Testing
+=======
+
+More realistic example code can be found in the kernel source under
+``tools/testing/selftests/net/ncdevmem.c``
+
+ncdevmem is a devmem TCP netcat. It works very similarly to netcat, but
+receives data directly into a udmabuf.
+
+To run ncdevmem, run it as a server on the machine under test, and run netcat
+on a peer machine to provide the TX data.
+
+ncdevmem also has a validation mode that expects a repeating pattern in the
+incoming data and validates it. For example, you can launch ncdevmem on the
+server with::
+
+ ncdevmem -s <server IP> -c <client IP> -f eth1 -d 3 -n 0000:06:00.0 -l \
+ -p 5201 -v 7
+
+On the client side, use regular netcat to send TX data to the ncdevmem
+process on the server::
+
+ yes $(echo -e \\x01\\x02\\x03\\x04\\x05\\x06) | \
+ tr \\n \\0 | head -c 5G | nc <server IP> 5201 -p 5201
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index c71b87346178..803dfc1efb75 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -49,6 +49,7 @@ Contents:
cdc_mbim
dccp
dctcp
+ devmem
dns_resolver
driver
eql
@@ -87,6 +88,7 @@ Contents:
nexthop-group-resilient
nf_conntrack-sysctl
nf_flowtable
+ oa-tc6-framework
openvswitch
operstates
packet_mmap
diff --git a/Documentation/networking/mptcp-sysctl.rst b/Documentation/networking/mptcp-sysctl.rst
index fd514bba8c43..95598c21fc8e 100644
--- a/Documentation/networking/mptcp-sysctl.rst
+++ b/Documentation/networking/mptcp-sysctl.rst
@@ -34,6 +34,17 @@ available_schedulers - STRING
Shows the available schedulers choices that are registered. More packet
schedulers may be available, but not loaded.
+blackhole_timeout - INTEGER (seconds)
+ Initial time period in seconds to disable MPTCP on active MPTCP sockets
+ when an MPTCP firewall blackhole issue is detected. This time period
+ grows exponentially when more blackhole issues are detected right after
+ MPTCP is re-enabled, and resets to the initial value when the blackhole
+ issue goes away.
+
+ 0 to disable the blackhole detection.
+
+ Default: 3600
+
checksum_enabled - BOOLEAN
Control whether DSS checksum can be enabled.
diff --git a/Documentation/networking/oa-tc6-framework.rst b/Documentation/networking/oa-tc6-framework.rst
new file mode 100644
index 000000000000..fe2aabde923a
--- /dev/null
+++ b/Documentation/networking/oa-tc6-framework.rst
@@ -0,0 +1,497 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+=========================================================================
+OPEN Alliance 10BASE-T1x MAC-PHY Serial Interface (TC6) Framework Support
+=========================================================================
+
+Introduction
+------------
+
+The IEEE 802.3cg project defines two 10 Mbit/s PHYs operating over a
+single pair of conductors. The 10BASE-T1L (Clause 146) is a long reach
+PHY supporting full duplex point-to-point operation over 1 km of single
+balanced pair of conductors. The 10BASE-T1S (Clause 147) is a short reach
+PHY supporting full / half duplex point-to-point operation over 15 m of
+single balanced pair of conductors, or half duplex multidrop bus
+operation over 25 m of single balanced pair of conductors.
+
+Furthermore, the IEEE 802.3cg project defines the new Physical Layer
+Collision Avoidance (PLCA) Reconciliation Sublayer (Clause 148) meant to
+provide improved determinism to the CSMA/CD media access method. PLCA
+works in conjunction with the 10BASE-T1S PHY operating in multidrop mode.
+
+The aforementioned PHYs are intended to cover the low-speed / low-cost
+applications in industrial and automotive environment. The large number
+of pins (16) required by the MII interface, which is specified by the
+IEEE 802.3 in Clause 22, is one of the major cost factors that need to be
+addressed to fulfil this objective.
+
+The MAC-PHY solution integrates an IEEE Clause 4 MAC and a 10BASE-T1x PHY
+exposing a low pin count Serial Peripheral Interface (SPI) to the host
+microcontroller. This also enables the addition of Ethernet functionality
+to existing low-end microcontrollers which do not integrate a MAC
+controller.
+
+Overview
+--------
+
+The MAC-PHY is specified to carry both data (Ethernet frames) and control
+(register access) transactions over a single full-duplex serial peripheral
+interface.
+
+Protocol Overview
+-----------------
+
+Two types of transactions are defined in the protocol: data transactions
+for Ethernet frame transfers and control transactions for register
+read/write transfers. A chunk is the basic element of data transactions
+and is composed of 4 bytes of overhead plus 64 bytes of payload size for
+each chunk. Ethernet frames are transferred over one or more data chunks.
+Control transactions consist of one or more register read/write control
+commands.
+
+SPI transactions are initiated by the SPI host by asserting CSn low to the
+MAC-PHY and end with the deassertion of CSn (driving it high). In between
+SPI transactions, the SPI host may need time for additional processing and
+to set up the next SPI data or control transaction.
+
+SPI data transactions consist of an equal number of transmit (TX) and
+receive (RX) chunks. Chunks in both transmit and receive directions may
+or may not contain valid frame data independent from each other, allowing
+for the simultaneous transmission and reception of different length
+frames.
+
+Each transmit data chunk begins with a 32-bit data header followed by a
+data chunk payload on MOSI. The data header indicates whether transmit
+frame data is present and provides the information to determine which
+bytes of the payload contain valid frame data.
+
+In parallel, receive data chunks are received on MISO. Each receive data
+chunk consists of a data chunk payload ending with a 32-bit data footer.
+The data footer indicates if there is receive frame data present within
+the payload or not and provides the information to determine which bytes
+of the payload contain valid frame data.
+
+Reference
+---------
+
+10BASE-T1x MAC-PHY Serial Interface Specification,
+
+Link: https://opensig.org/download/document/OPEN_Alliance_10BASET1x_MAC-PHY_Serial_Interface_V1.1.pdf
+
+Hardware Architecture
+---------------------
+
+.. code-block:: none
+
+ +----------+ +-------------------------------------+
+ | | | MAC-PHY |
+ | |<---->| +-----------+ +-------+ +-------+ |
+ | SPI Host | | | SPI Slave | | MAC | | PHY | |
+ | | | +-----------+ +-------+ +-------+ |
+ +----------+ +-------------------------------------+
+
+Software Architecture
+---------------------
+
+.. code-block:: none
+
+ +----------------------------------------------------------+
+ | Networking Subsystem |
+ +----------------------------------------------------------+
+ / \ / \
+ | |
+ | |
+ \ / |
+ +----------------------+ +-----------------------------+
+ | MAC Driver |<--->| OPEN Alliance TC6 Framework |
+ +----------------------+ +-----------------------------+
+ / \ / \
+ | |
+ | |
+ | \ /
+ +----------------------------------------------------------+
+ | SPI Subsystem |
+ +----------------------------------------------------------+
+ / \
+ |
+ |
+ \ /
+ +----------------------------------------------------------+
+ | 10BASE-T1x MAC-PHY Device |
+ +----------------------------------------------------------+
+
+Implementation
+--------------
+
+MAC Driver
+~~~~~~~~~~
+
+- Probed by the SPI subsystem.
+
+- Initializes the OA TC6 framework for the MAC-PHY.
+
+- Registers and configures the network device.
+
+- Sends the TX Ethernet frames from the networking subsystem to the OA TC6
+  framework.
+
+OPEN Alliance TC6 Framework
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Initializes the PHYLIB interface.
+
+- Registers the MAC-PHY interrupt.
+
+- Performs MAC-PHY register read/write operations using the control
+  transaction protocol specified in the OPEN Alliance 10BASE-T1x MAC-PHY
+  Serial Interface specification.
+
+- Performs Ethernet frame transactions using the data transaction protocol
+  for Ethernet frames specified in the OPEN Alliance 10BASE-T1x MAC-PHY
+  Serial Interface specification.
+
+- Forwards the received Ethernet frames from the 10BASE-T1x MAC-PHY to the
+  networking subsystem.
+
+Data Transaction
+~~~~~~~~~~~~~~~~
+
+Ethernet frames transferred from the SPI host to the MAC-PHY are converted
+into multiple transmit data chunks. Each transmit data chunk has a 4-byte
+header which contains the information needed to determine the validity and
+the location of the transmit frame data within the 64-byte data chunk
+payload.
+
+.. code-block:: none
+
+ +---------------------------------------------------+
+ | Tx Chunk |
+ | +---------------------------+ +----------------+ | MOSI
+ | | 64 bytes chunk payload | | 4 bytes header | |------------>
+ | +---------------------------+ +----------------+ |
+ +---------------------------------------------------+
+
+The 4-byte header contains the following fields:
+
+DNC (Bit 31) - Data-Not-Control flag. This flag specifies the type of SPI
+ transaction. For TX data chunks, this bit shall be ’1’.
+ 0 - Control command
+ 1 - Data chunk
+
+SEQ (Bit 30) - Data Chunk Sequence. This bit is used to indicate an
+ even/odd transmit data chunk sequence to the MAC-PHY.
+
+NORX (Bit 29) - No Receive flag. The SPI host may set this bit to prevent
+ the MAC-PHY from conveying RX data on the MISO for the
+ current chunk (DV = 0 in the footer), indicating that the
+ host would not process it. Typically, the SPI host should
+ set NORX = 0 indicating that it will accept and process
+ any receive frame data within the current chunk.
+
+RSVD (Bit 28..24) - Reserved: All reserved bits shall be ‘0’.
+
+VS (Bit 23..22) - Vendor Specific. These bits are implementation specific.
+ If the MAC-PHY does not implement these bits, the host
+ shall set them to ‘0’.
+
+DV (Bit 21) - Data Valid flag. The SPI host uses this bit to indicate
+ whether the current chunk contains valid transmit frame data
+ (DV = 1) or not (DV = 0). When ‘0’, the MAC-PHY ignores the
+ chunk payload. Note that the receive path is unaffected by
+ the setting of the DV bit in the data header.
+
+SV (Bit 20) - Start Valid flag. The SPI host shall set this bit when the
+ beginning of an Ethernet frame is present in the current
+ transmit data chunk payload. Otherwise, this bit shall be
+ zero. This bit is not to be confused with the Start-of-Frame
+ Delimiter (SFD) byte described in IEEE 802.3 [2].
+
+SWO (Bit 19..16) - Start Word Offset. When SV = 1, this field shall
+ contain the 32-bit word offset into the transmit data
+ chunk payload that points to the start of a new
+ Ethernet frame to be transmitted. The host shall write
+ this field as zero when SV = 0.
+
+RSVD (Bit 15) - Reserved: All reserved bits shall be ‘0’.
+
+EV (Bit 14) - End Valid flag. The SPI host shall set this bit when the end
+ of an Ethernet frame is present in the current transmit data
+ chunk payload. Otherwise, this bit shall be zero.
+
+EBO (Bit 13..8) - End Byte Offset. When EV = 1, this field shall contain
+ the byte offset into the transmit data chunk payload
+ that points to the last byte of the Ethernet frame to
+ transmit. This field shall be zero when EV = 0.
+
+TSC (Bit 7..6) - Timestamp Capture. Request a timestamp capture when the
+ frame is transmitted onto the network.
+ 00 - Do not capture a timestamp
+ 01 - Capture timestamp into timestamp capture register A
+ 10 - Capture timestamp into timestamp capture register B
+ 11 - Capture timestamp into timestamp capture register C
+
+RSVD (Bit 5..1) - Reserved: All reserved bits shall be ‘0’.
+
+P (Bit 0) - Parity. Parity bit calculated over the transmit data header.
+ Method used is odd parity.
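+
+As an illustration only (this helper is not part of the framework API), a
+transmit data chunk header with the above fields could be assembled and its
+odd parity bit set as follows:
+
+.. code-block:: c
+
+  /* Illustrative sketch: compose a 32-bit TX data chunk header. */
+  static u32 tc6_tx_header(u8 seq, u8 dv, u8 sv, u8 swo, u8 ev, u8 ebo)
+  {
+          u32 hdr = 0;
+
+          hdr |= 1U << 31;                  /* DNC = 1: data chunk */
+          hdr |= (u32)(seq & 0x1) << 30;    /* SEQ */
+          hdr |= (u32)(dv & 0x1) << 21;     /* DV */
+          hdr |= (u32)(sv & 0x1) << 20;     /* SV */
+          hdr |= (u32)(swo & 0xf) << 16;    /* SWO, meaningful when SV = 1 */
+          hdr |= (u32)(ev & 0x1) << 14;     /* EV */
+          hdr |= (u32)(ebo & 0x3f) << 8;    /* EBO, meaningful when EV = 1 */
+
+          /* Odd parity: set P (bit 0) so the total number of ones is odd. */
+          if (!(hweight32(hdr) & 0x1))
+                  hdr |= 0x1;
+
+          return hdr;
+  }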
+
+The number of buffers available in the MAC-PHY to store the incoming
+transmit data chunk payloads is represented as transmit credits. The
+available transmit credits can be read either from the Buffer Status
+Register or from the footer (see below for the footer description)
+received from the MAC-PHY. The SPI host should not write more data chunks
+than the available transmit credits, as this would lead to a transmit
+buffer overflow error.
+
+If the previous data footer indicated no transmit credits available, the
+MAC-PHY asserts its interrupt to the SPI host once transmit credits become
+available again for transmitting transmit data chunks. On reception of the
+first data header this interrupt is deasserted, and the footer received for
+the first data chunk carries the updated transmit credits available
+information.
+
+Ethernet frames transferred from the MAC-PHY to the SPI host are sent as
+multiple receive data chunks. Each receive data chunk has a 64-byte data
+chunk payload followed by a 4-byte footer which contains the information
+needed to determine the validity and the location of the receive frame data
+within the 64-byte data chunk payload.
+
+.. code-block:: none
+
+ +---------------------------------------------------+
+ | Rx Chunk |
+ | +----------------+ +---------------------------+ | MISO
+ | | 4 bytes footer | | 64 bytes chunk payload | |------------>
+ | +----------------+ +---------------------------+ |
+ +---------------------------------------------------+
+
+The 4-byte footer contains the following fields:
+
+EXST (Bit 31) - Extended Status. This bit is set when any bit in the
+ STATUS0 or STATUS1 registers are set and not masked.
+
+HDRB (Bit 30) - Received Header Bad. When set, indicates that the MAC-PHY
+ received a control or data header with a parity error.
+
+SYNC (Bit 29) - Configuration Synchronized flag. This bit reflects the
+ state of the SYNC bit in the CONFIG0 configuration
+ register (see Table 12). A zero indicates that the MAC-PHY
+ configuration may not be as expected by the SPI host.
+ Following configuration, the SPI host sets the
+ corresponding bit in the configuration register which is
+ reflected in this field.
+
+RCA (Bit 28..24) - Receive Chunks Available. The RCA field indicates to
+ the SPI host the minimum number of additional receive
+ data chunks of frame data that are available for
+ reading beyond the current receive data chunk. This
+ field is zero when there is no receive frame data
+ pending in the MAC-PHY’s buffer for reading.
+
+VS (Bit 23..22) - Vendor Specific. These bits are implementation specific.
+ If not implemented, the MAC-PHY shall set these bits to
+ ‘0’.
+
+DV (Bit 21) - Data Valid flag. The MAC-PHY uses this bit to indicate
+ whether the current receive data chunk contains valid
+ receive frame data (DV = 1) or not (DV = 0). When ‘0’, the
+ SPI host shall ignore the chunk payload.
+
+SV (Bit 20) - Start Valid flag. The MAC-PHY sets this bit when the current
+ chunk payload contains the start of an Ethernet frame.
+ Otherwise, this bit is zero. The SV bit is not to be
+ confused with the Start-of-Frame Delimiter (SFD) byte
+ described in IEEE 802.3 [2].
+
+SWO (Bit 19..16) - Start Word Offset. When SV = 1, this field contains the
+ 32-bit word offset into the receive data chunk payload
+ containing the first byte of a new received Ethernet
+ frame. When a receive timestamp has been added to the
+ beginning of the received Ethernet frame (RTSA = 1)
+ then SWO points to the most significant byte of the
+ timestamp. This field will be zero when SV = 0.
+
+FD (Bit 15) - Frame Drop. When set, this bit indicates that the MAC has
+ detected a condition for which the SPI host should drop the
+ received Ethernet frame. This bit is only valid at the end
+ of a received Ethernet frame (EV = 1) and shall be zero at
+ all other times.
+
+EV (Bit 14) - End Valid flag. The MAC-PHY sets this bit when the end of a
+ received Ethernet frame is present in this receive data
+ chunk payload.
+
+EBO (Bit 13..8) - End Byte Offset: When EV = 1, this field contains the
+ byte offset into the receive data chunk payload that
+ locates the last byte of the received Ethernet frame.
+ This field is zero when EV = 0.
+
+RTSA (Bit 7) - Receive Timestamp Added. This bit is set when a 32-bit or
+ 64-bit timestamp has been added to the beginning of the
+ received Ethernet frame. The MAC-PHY shall set this bit to
+ zero when SV = 0.
+
+RTSP (Bit 6) - Receive Timestamp Parity. Parity bit calculated over the
+ 32-bit/64-bit timestamp added to the beginning of the
+ received Ethernet frame. Method used is odd parity. The
+ MAC-PHY shall set this bit to zero when RTSA = 0.
+
+TXC (Bit 5..1) - Transmit Credits. This field contains the minimum number
+ of transmit data chunks of frame data that the SPI host
+ can write in a single transaction without incurring a
+ transmit buffer overflow error.
+
+P (Bit 0) - Parity. Parity bit calculated over the receive data footer.
+ Method used is odd parity.
+
+The SPI host initiates the data receive transaction based on the receive
+chunks available in the MAC-PHY, which is provided in the receive chunk
+footer (RCA - Receive Chunks Available). The SPI host sends data invalid
+transmit data chunks (empty chunks), or data valid transmit data chunks if
+there are valid Ethernet frames to transmit to the MAC-PHY. The receive
+chunks available in the MAC-PHY can be read either from the Buffer Status
+Register or from the footer.
+
+If the previous data footer indicated no receive data chunks available, the
+MAC-PHY asserts its interrupt to the SPI host once receive data chunks
+become available again for reading. On reception of the first data header
+this interrupt is deasserted, and the footer received for the first data
+chunk carries the updated receive chunks available information.
+
+MAC-PHY Interrupt
+~~~~~~~~~~~~~~~~~
+
+The MAC-PHY interrupt is asserted when any of the following conditions
+occur:
+
+Receive chunks available - This interrupt is asserted when the previous
+data footer indicated no receive data chunks available and receive data
+chunks have since become available for reading. On reception of the first
+data header this interrupt is deasserted.
+
+Transmit chunk credits available - This interrupt is asserted when the
+previous data footer indicated no transmit credits available and transmit
+credits have since become available for transmitting transmit data chunks.
+On reception of the first data header this interrupt is deasserted.
+
+Extended status event - This interrupt is asserted when the previous data
+footer indicated no extended status and an extended status event has since
+become available. In this case the host should read the STATUS0 register
+to identify the corresponding error/event. On reception of the first data
+header this interrupt is deasserted.
+
+Control Transaction
+~~~~~~~~~~~~~~~~~~~
+
+The 4-byte control header contains the following fields:
+
+DNC (Bit 31) - Data-Not-Control flag. This flag specifies the type of SPI
+ transaction. For control commands, this bit shall be ‘0’.
+ 0 - Control command
+ 1 - Data chunk
+
+HDRB (Bit 30) - Received Header Bad. When set by the MAC-PHY, indicates
+ that a header was received with a parity error. The SPI
+ host should always clear this bit. The MAC-PHY ignores the
+ HDRB value sent by the SPI host on MOSI.
+
+WNR (Bit 29) - Write-Not-Read. This bit indicates if data is to be written
+ to registers (when set) or read from registers
+ (when clear).
+
+AID (Bit 28) - Address Increment Disable. When clear, the address will be
+ automatically post-incremented by one following each
+ register read or write. When set, address auto increment is
+ disabled allowing successive reads and writes to occur at
+ the same register address.
+
+MMS (Bit 27..24) - Memory Map Selector. This field selects the specific
+ register memory map to access.
+
+ADDR (Bit 23..8) - Address. Address of the first register within the
+ selected memory map to access.
+
+LEN (Bit 7..1) - Length. Specifies the number of registers to read/write.
+ This field is interpreted as the number of registers
+ minus 1 allowing for up to 128 consecutive registers read
+ or written starting at the address specified in ADDR. A
+ length of zero shall read or write a single register.
+
+P (Bit 0) - Parity. Parity bit calculated over the control command header.
+ Method used is odd parity.
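+
+As an illustration only (again, not part of the framework API), a control
+command header could be composed from the above fields in the same way:
+
+.. code-block:: c
+
+  /* Illustrative sketch: compose a 32-bit control command header. */
+  static u32 tc6_ctrl_header(bool wnr, bool aid, u8 mms, u16 addr, u8 len)
+  {
+          u32 hdr = 0;
+
+          /* DNC (bit 31) = 0: control command */
+          hdr |= (u32)wnr << 29;            /* WNR: 1 = write, 0 = read */
+          hdr |= (u32)aid << 28;            /* AID: disable address increment */
+          hdr |= (u32)(mms & 0xf) << 24;    /* MMS: memory map selector */
+          hdr |= (u32)addr << 8;            /* ADDR: first register address */
+          hdr |= (u32)(len & 0x7f) << 1;    /* LEN: number of registers - 1 */
+
+          /* Odd parity: set P (bit 0) so the total number of ones is odd. */
+          if (!(hweight32(hdr) & 0x1))
+                  hdr |= 0x1;
+
+          return hdr;
+  }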
+
+Control transactions consist of one or more control commands. Control
+commands are used by the SPI host to read and write registers within the
+MAC-PHY. Each control command is composed of a 4-byte control command
+header, followed by register write data in the case of a control write
+command.
+
+The MAC-PHY ignores the final 4 bytes of data from the SPI host at the end
+of the control write command. The control write command is also echoed
+from the MAC-PHY back to the SPI host to identify which register write
+failed in case of any bus errors. The echoed control write command consists
+of 4 bytes of unused data, to be ignored by the SPI host, followed by the
+4-byte echoed control header and the echoed register write data. Control
+write commands can write either a single register or
+multiple consecutive registers. When multiple consecutive registers are
+written, the address is automatically post-incremented by the MAC-PHY.
+Writing to any unimplemented or undefined registers shall be ignored and
+yield no effect.
+
+The MAC-PHY ignores all data from the SPI host following the control
+header for the remainder of the control read command. The control read
+command is also echoed from the MAC-PHY back to the SPI host to identify
+which register read failed in case of any bus errors. The echoed control
+read command consists of 4 bytes of unused data, to be ignored by the SPI
+host, followed by the 4-byte echoed control header and the register read
+data. Control read commands can read either a single
+register or multiple consecutive registers. When multiple consecutive
+registers are read, the address is automatically post-incremented by the
+MAC-PHY. Reading any unimplemented or undefined registers shall return
+zero.
+
+Device drivers API
+==================
+
+The include/linux/oa_tc6.h header file defines the following functions:
+
+.. c:function:: struct oa_tc6 *oa_tc6_init(struct spi_device *spi, \
+ struct net_device *netdev)
+
+Initialize the OA TC6 library.
+
+.. c:function:: void oa_tc6_exit(struct oa_tc6 *tc6)
+
+Free the allocated OA TC6 library.
+
+.. c:function:: int oa_tc6_write_register(struct oa_tc6 *tc6, u32 address, \
+ u32 value)
+
+Write a single register in the MAC-PHY.
+
+.. c:function:: int oa_tc6_write_registers(struct oa_tc6 *tc6, u32 address, \
+ u32 value[], u8 length)
+
+Write multiple consecutive registers starting from @address in the MAC-PHY.
+A maximum of 128 consecutive registers can be written starting at @address.
+
+.. c:function:: int oa_tc6_read_register(struct oa_tc6 *tc6, u32 address, \
+ u32 *value)
+
+Read a single register in the MAC-PHY.
+
+.. c:function:: int oa_tc6_read_registers(struct oa_tc6 *tc6, u32 address, \
+ u32 value[], u8 length)
+
+Read multiple consecutive registers starting from @address in the MAC-PHY.
+A maximum of 128 consecutive registers can be read starting at @address.
+
+.. c:function:: netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, \
+ struct sk_buff *skb);
+
+Transmit (or queue for transmission) the Ethernet frame in the skb through
+the MAC-PHY.
+
+.. c:function:: int oa_tc6_zero_align_receive_frame_enable(struct oa_tc6 *tc6);
+
+Enable the zero-align receive frame feature, which aligns all received
+Ethernet frame data to start at the beginning of a receive data chunk
+payload, i.e. with a start word offset (SWO) of zero.
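+
+A rough sketch of how a MAC driver might wire these entry points together.
+The driver structure, register name and netdev_ops here are purely
+hypothetical and error handling is omitted:
+
+.. code-block:: c
+
+  /* Hypothetical MAC driver glue around the OA TC6 framework. */
+  struct my_macphy_priv {
+          struct oa_tc6 *tc6;
+  };
+
+  static netdev_tx_t my_macphy_start_xmit(struct sk_buff *skb,
+                                          struct net_device *netdev)
+  {
+          struct my_macphy_priv *priv = netdev_priv(netdev);
+
+          return oa_tc6_start_xmit(priv->tc6, skb);
+  }
+
+  static const struct net_device_ops my_macphy_netdev_ops = {
+          .ndo_start_xmit = my_macphy_start_xmit,
+  };
+
+  static int my_macphy_probe(struct spi_device *spi)
+  {
+          struct my_macphy_priv *priv;
+          struct net_device *netdev;
+
+          netdev = devm_alloc_etherdev(&spi->dev, sizeof(*priv));
+          priv = netdev_priv(netdev);
+
+          priv->tc6 = oa_tc6_init(spi, netdev);
+          if (!priv->tc6)
+                  return -ENODEV;
+
+          /* MAC specific configuration via a control write, e.g.: */
+          oa_tc6_write_register(priv->tc6, MY_MACPHY_MAC_CFG_REG, 0x1);
+
+          netdev->netdev_ops = &my_macphy_netdev_ops;
+          return register_netdev(netdev);
+  }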
diff --git a/Documentation/networking/timestamping.rst b/Documentation/networking/timestamping.rst
index 5e93cd71f99f..8199e6917671 100644
--- a/Documentation/networking/timestamping.rst
+++ b/Documentation/networking/timestamping.rst
@@ -158,7 +158,8 @@ SOF_TIMESTAMPING_SYS_HARDWARE:
SOF_TIMESTAMPING_RAW_HARDWARE:
Report hardware timestamps as generated by
- SOF_TIMESTAMPING_TX_HARDWARE when available.
+ SOF_TIMESTAMPING_TX_HARDWARE or SOF_TIMESTAMPING_RX_HARDWARE
+ when available.
1.3.3 Timestamp Options
@@ -266,6 +267,23 @@ SOF_TIMESTAMPING_OPT_TX_SWHW:
two separate messages will be looped to the socket's error queue,
each containing just one timestamp.
+SOF_TIMESTAMPING_OPT_RX_FILTER:
+ Filter out spurious receive timestamps: report a receive timestamp
+ only if the matching timestamp generation flag is enabled.
+
+ Receive timestamps are generated early in the ingress path, before a
+ packet's destination socket is known. If any socket enables receive
+ timestamps, packets for all sockets will be timestamped, including
+ sockets that request timestamp reporting with SOF_TIMESTAMPING_SOFTWARE
+ and/or SOF_TIMESTAMPING_RAW_HARDWARE but do not request receive
+ timestamp generation. This can happen when
+
+ Receiving spurious timestamps is generally benign. A process can
+ ignore the unexpected non-zero value. But it makes behavior subtly
+ dependent on other sockets. This flag isolates the socket for more
+ deterministic behavior.
+
New applications are encouraged to pass SOF_TIMESTAMPING_OPT_ID to
disambiguate timestamps and SOF_TIMESTAMPING_OPT_TSONLY to operate
regardless of the setting of sysctl net.core.tstamp_allow_data.
diff --git a/Documentation/virt/hyperv/coco.rst b/Documentation/virt/hyperv/coco.rst
new file mode 100644
index 000000000000..c15d6fe34b4e
--- /dev/null
+++ b/Documentation/virt/hyperv/coco.rst
@@ -0,0 +1,260 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Confidential Computing VMs
+==========================
+Hyper-V can create and run Linux guests that are Confidential Computing
+(CoCo) VMs. Such VMs cooperate with the physical processor to better protect
+the confidentiality and integrity of data in the VM's memory, even in the
+face of a hypervisor/VMM that has been compromised and may behave maliciously.
+CoCo VMs on Hyper-V share the generic CoCo VM threat model and security
+objectives described in Documentation/security/snp-tdx-threat-model.rst. Note
+that Hyper-V specific code in Linux refers to CoCo VMs as "isolated VMs" or
+"isolation VMs".
+
+A Linux CoCo VM on Hyper-V requires the cooperation and interaction of the
+following:
+
+* Physical hardware with a processor that supports CoCo VMs
+
+* The hardware runs a version of Windows/Hyper-V with support for CoCo VMs
+
+* The VM runs a version of Linux that supports being a CoCo VM
+
+The physical hardware requirements are as follows:
+
+* AMD processor with SEV-SNP. Hyper-V does not run guest VMs with AMD SME,
+ SEV, or SEV-ES encryption, and such encryption is not sufficient for a CoCo
+ VM on Hyper-V.
+
+* Intel processor with TDX
+
+To create a CoCo VM, the "Isolated VM" attribute must be specified to Hyper-V
+when the VM is created. A VM cannot be changed from a CoCo VM to a normal VM,
+or vice versa, after it is created.
+
+Operational Modes
+-----------------
+Hyper-V CoCo VMs can run in two modes. The mode is selected when the VM is
+created and cannot be changed during the life of the VM.
+
+* Fully-enlightened mode. In this mode, the guest operating system is
+ enlightened to understand and manage all aspects of running as a CoCo VM.
+
+* Paravisor mode. In this mode, a paravisor layer between the guest and the
+ host provides some operations needed to run as a CoCo VM. The guest operating
+ system can have fewer CoCo enlightenments than are required in the
+ fully-enlightened case.
+
+Conceptually, fully-enlightened mode and paravisor mode may be treated as
+points on a spectrum spanning the degree of guest enlightenment needed to run
+as a CoCo VM. Fully-enlightened mode is one end of the spectrum. A full
+implementation of paravisor mode is the other end of the spectrum, where all
+aspects of running as a CoCo VM are handled by the paravisor, and a normal
+guest OS with no knowledge of memory encryption or other aspects of CoCo VMs
+can run successfully. However, the Hyper-V implementation of paravisor mode
+does not go this far, and is somewhere in the middle of the spectrum. Some
+aspects of CoCo VMs are handled by the Hyper-V paravisor while the guest OS
+must be enlightened for other aspects. Unfortunately, there is no
+standardized enumeration of feature/functions that might be provided in the
+paravisor, and there is no standardized mechanism for a guest OS to query the
+paravisor for the feature/functions it provides. The understanding of what
+the paravisor provides is hard-coded in the guest OS.
+
+Paravisor mode has similarities to the `Coconut project`_, which aims to provide
+a limited paravisor that offers services to the guest such as a virtual TPM.
+However, the Hyper-V paravisor generally handles more aspects of CoCo VMs
+than is currently envisioned for Coconut, and so is further toward the "no
+guest enlightenments required" end of the spectrum.
+
+.. _Coconut project: https://github.com/coconut-svsm/svsm
+
+In the CoCo VM threat model, the paravisor is in the guest security domain
+and must be trusted by the guest OS. By implication, the hypervisor/VMM must
+protect itself against a potentially malicious paravisor just like it
+protects against a potentially malicious guest.
+
+The hardware architectural approach to fully-enlightened vs. paravisor mode
+varies depending on the underlying processor.
+
+* With AMD SEV-SNP processors, in fully-enlightened mode the guest OS runs in
+ VMPL 0 and has full control of the guest context. In paravisor mode, the
+ guest OS runs in VMPL 2 and the paravisor runs in VMPL 0. The paravisor
+ running in VMPL 0 has privileges that the guest OS in VMPL 2 does not have.
+ Certain operations require the guest to invoke the paravisor. Furthermore, in
+ paravisor mode the guest OS operates in "virtual Top Of Memory" (vTOM) mode
+ as defined by the SEV-SNP architecture. This mode simplifies guest management
+ of memory encryption when a paravisor is used.
+
+* With Intel TDX processors, in fully-enlightened mode the guest OS runs in an
+ L1 VM. In paravisor mode, TD partitioning is used. The paravisor runs in the
+ L1 VM, and the guest OS runs in a nested L2 VM.
+
+Hyper-V exposes a synthetic MSR to guests that describes the CoCo mode. This
+MSR indicates if the underlying processor uses AMD SEV-SNP or Intel TDX, and
+whether a paravisor is being used. It is straightforward to build a single
+kernel image that can boot and run properly on either architecture, and in
+either mode.
+
+Paravisor Effects
+-----------------
+Running in paravisor mode affects the following areas of generic Linux kernel
+CoCo VM functionality:
+
+* Initial guest memory setup. When a new VM is created in paravisor mode, the
+ paravisor runs first and sets up the guest physical memory as encrypted. The
+ guest Linux does normal memory initialization, except for explicitly marking
+ appropriate ranges as decrypted (shared). In paravisor mode, Linux does not
+ perform the early boot memory setup steps that are particularly tricky with
+ AMD SEV-SNP in fully-enlightened mode.
+
+* #VC/#VE exception handling. In paravisor mode, Hyper-V configures the guest
+ CoCo VM to route #VC and #VE exceptions to VMPL 0 and the L1 VM,
+ respectively, and not the guest Linux. Consequently, these exception handlers
+ do not run in the guest Linux and are not a required enlightenment for a
+ Linux guest in paravisor mode.
+
+* CPUID flags. Both AMD SEV-SNP and Intel TDX provide a CPUID flag in the
+ guest indicating that the VM is operating with the respective hardware
+ support. While these CPUID flags are visible in fully-enlightened CoCo VMs,
+ the paravisor filters out these flags and the guest Linux does not see them.
+ Throughout the Linux kernel, explicitly testing these flags has mostly been
+ eliminated in favor of the cc_platform_has() function, with the goal of
+ abstracting the differences between SEV-SNP and TDX. But the
+ cc_platform_has() abstraction also allows the Hyper-V paravisor configuration
+ to selectively enable aspects of CoCo VM functionality even when the CPUID
+ flags are not set. The exception is early boot memory setup on SEV-SNP, which
+ tests the CPUID SEV-SNP flag. But not having the flag in a Hyper-V paravisor
+ mode VM achieves the desired effect of not running the SEV-SNP specific early
+ boot memory setup.
+
+* Device emulation. In paravisor mode, the Hyper-V paravisor provides
+ emulation of devices such as the IO-APIC and TPM. Because the emulation
+ happens in the paravisor in the guest context (instead of the hypervisor/VMM
+ context), MMIO accesses to these devices must be encrypted references instead
+ of the decrypted references that would be used in a fully-enlightened CoCo
+ VM. The __ioremap_caller() function has been enhanced to make a callback to
+ check whether a particular address range should be treated as encrypted
+ (private). See the "is_private_mmio" callback.
+
+* Encrypt/decrypt memory transitions. In a CoCo VM, transitioning guest
+ memory between encrypted and decrypted requires coordinating with the
+ hypervisor/VMM. This is done via callbacks invoked from
+ __set_memory_enc_pgtable(). In fully-enlightened mode, the normal SEV-SNP and
+ TDX implementations of these callbacks are used. In paravisor mode, a Hyper-V
+ specific set of callbacks is used. These callbacks invoke the paravisor so
+ that the paravisor can coordinate the transitions and inform the hypervisor
+ as necessary. See hv_vtom_init() where these callbacks are set up.
+
+* Interrupt injection. In fully enlightened mode, a malicious hypervisor
+ could inject interrupts into the guest OS at times that violate x86/x64
+ architectural rules. For full protection, the guest OS should include
+ enlightenments that use the interrupt injection management features provided
+ by CoCo-capable processors. In paravisor mode, the paravisor mediates
+ interrupt injection into the guest OS, and ensures that the guest OS only
+ sees interrupts that are "legal". The paravisor uses the interrupt injection
+ management features provided by the CoCo-capable physical processor, thereby
+ masking these complexities from the guest OS.
+
+Hyper-V Hypercalls
+------------------
+When in fully-enlightened mode, hypercalls made by the Linux guest are routed
+directly to the hypervisor, just as in a non-CoCo VM. But in paravisor mode,
+normal hypercalls trap to the paravisor first, which may in turn invoke the
+hypervisor. But the paravisor is idiosyncratic in this regard, and a few
+hypercalls made by the Linux guest must always be routed directly to the
+hypervisor. These hypercall sites test for a paravisor being present, and use
+a special invocation sequence. See hv_post_message(), for example.
+
+Guest communication with Hyper-V
+--------------------------------
+Separate from the generic Linux kernel handling of memory encryption in Linux
+CoCo VMs, Hyper-V has VMBus and VMBus devices that communicate using memory
+shared between the Linux guest and the host. This shared memory must be
+marked decrypted to enable communication. Furthermore, since the threat model
+includes a compromised and potentially malicious host, the guest must guard
+against leaking any unintended data to the host through this shared memory.
+
+These Hyper-V and VMBus memory pages are marked as decrypted:
+
+* VMBus monitor pages
+
+* Synthetic interrupt controller (synic) related pages (unless supplied by
+ the paravisor)
+
+* Per-cpu hypercall input and output pages (unless running with a paravisor)
+
+* VMBus ring buffers. The direct mapping is marked decrypted in
+ __vmbus_establish_gpadl(). The secondary mapping created in
+ hv_ringbuffer_init() must also include the "decrypted" attribute.
+
+When the guest writes data to memory that is shared with the host, it must
+ensure that only the intended data is written. Padding or unused fields must
+be initialized to zeros before copying into the shared memory so that random
+kernel data is not inadvertently given to the host.
+
+Similarly, when the guest reads memory that is shared with the host, it must
+validate the data before acting on it so that a malicious host cannot induce
+the guest to expose unintended data. Doing such validation can be tricky
+because the host can modify the shared memory areas even while or after
+validation is performed. For messages passed from the host to the guest in a
+VMBus ring buffer, the length of the message is validated, and the message is
+copied into a temporary (encrypted) buffer for further validation and
+processing. The copying adds a small amount of overhead, but is the only way
+to protect against a malicious host. See hv_pkt_iter_first().
+
+Many drivers for VMBus devices have been "hardened" by adding code to fully
+validate messages received over VMBus, instead of assuming that Hyper-V is
+acting cooperatively. Such drivers are marked as "allowed_in_isolated" in the
+vmbus_devs[] table. Other drivers for VMBus devices that are not needed in a
+CoCo VM have not been hardened, and they are not allowed to load in a CoCo
+VM. See vmbus_is_valid_offer() where such devices are excluded.
+
+Two VMBus devices depend on the Hyper-V host to do DMA data transfers:
+storvsc for disk I/O and netvsc for network I/O. storvsc uses the normal
+Linux kernel DMA APIs, and so bounce buffering through decrypted swiotlb
+memory is done implicitly. netvsc has two modes for data transfers. The first
+mode goes through send and receive buffer space that is explicitly allocated
+by the netvsc driver, and is used for most smaller packets. These send and
+receive buffers are marked decrypted by __vmbus_establish_gpadl(). Because
+the netvsc driver explicitly copies packets to/from these buffers, the
+equivalent of bounce buffering between encrypted and decrypted memory is
+already part of the data path. The second mode uses the normal Linux kernel
+DMA APIs, and is bounce buffered through swiotlb memory implicitly like in
+storvsc.
+
+Finally, the VMBus virtual PCI driver needs special handling in a CoCo VM.
+Linux PCI device drivers access PCI config space using standard APIs provided
+by the Linux PCI subsystem. On Hyper-V, these functions directly access MMIO
+space, and the access traps to Hyper-V for emulation. But in CoCo VMs, memory
+encryption prevents Hyper-V from reading the guest instruction stream to
+emulate the access. So in a CoCo VM, these functions must make a hypercall
+with arguments explicitly describing the access. See
+_hv_pcifront_read_config() and _hv_pcifront_write_config() and the
+"use_calls" flag indicating to use hypercalls.
+
+load_unaligned_zeropad()
+------------------------
+When transitioning memory between encrypted and decrypted, the caller of
+set_memory_encrypted() or set_memory_decrypted() is responsible for ensuring
+the memory isn't in use and isn't referenced while the transition is in
+progress. The transition has multiple steps, and includes interaction with
+the Hyper-V host. The memory is in an inconsistent state until all steps are
+complete. A reference while the state is inconsistent could result in an
+exception that can't be cleanly fixed up.
+
+However, the kernel load_unaligned_zeropad() mechanism may make stray
+references that can't be prevented by the caller of set_memory_encrypted() or
+set_memory_decrypted(), so there's specific code in the #VC or #VE exception
+handler to fix up this case. But a CoCo VM running on Hyper-V may be
+configured to run with a paravisor, with the #VC or #VE exception routed to
+the paravisor. There's no architectural way to forward the exceptions back to
+the guest kernel, and in such a case, the load_unaligned_zeropad() fixup code
+in the #VC/#VE handlers doesn't run.
+
+To avoid this problem, the Hyper-V specific functions for notifying the
+hypervisor of the transition mark pages as "not present" while a transition
+is in progress. If load_unaligned_zeropad() causes a stray reference, a
+normal page fault is generated instead of #VC or #VE, and the page-fault-
+based handlers for load_unaligned_zeropad() fix up the reference. When the
+encrypted/decrypted transition is complete, the pages are marked as "present"
+again. See hv_vtom_clear_present() and hv_vtom_set_host_visibility().
diff --git a/Documentation/virt/hyperv/index.rst b/Documentation/virt/hyperv/index.rst
index de447e11b4a5..79bc4080329e 100644
--- a/Documentation/virt/hyperv/index.rst
+++ b/Documentation/virt/hyperv/index.rst
@@ -11,3 +11,4 @@ Hyper-V Enlightenments
vmbus
clocks
vpci
+ coco
diff --git a/MAINTAINERS b/MAINTAINERS
index 87f9ff0d1649..0b1c49bf558d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7473,8 +7473,8 @@ S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/bridge/
F: drivers/gpu/drm/bridge/
+F: drivers/gpu/drm/display/drm_bridge_connector.c
F: drivers/gpu/drm/drm_bridge.c
-F: drivers/gpu/drm/drm_bridge_connector.c
F: include/drm/drm_bridge.h
F: include/drm/drm_bridge_connector.h
@@ -14987,6 +14987,13 @@ L: [email protected]
S: Maintained
F: drivers/net/ethernet/microchip/lan743x_*
+MICROCHIP LAN8650/1 10BASE-T1S MACPHY ETHERNET DRIVER
+M: Parthiban Veerasooran <[email protected]>
+S: Maintained
+F: Documentation/devicetree/bindings/net/microchip,lan8650.yaml
+F: drivers/net/ethernet/microchip/lan865x/lan865x.c
+
MICROCHIP LAN87xx/LAN937x T1 PHY DRIVER
M: Arun Ramadoss <[email protected]>
@@ -17120,6 +17127,14 @@ L: [email protected]
S: Supported
F: drivers/infiniband/ulp/opa_vnic
+OPEN ALLIANCE 10BASE-T1S MACPHY SERIAL INTERFACE FRAMEWORK
+M: Parthiban Veerasooran <[email protected]>
+S: Maintained
+F: Documentation/networking/oa-tc6-framework.rst
+F: drivers/include/linux/oa_tc6.h
+F: drivers/net/ethernet/oa_tc6.c
+
OPEN FIRMWARE AND FLATTENED DEVICE TREE
M: Rob Herring <[email protected]>
M: Saravana Kannan <[email protected]>
@@ -18426,6 +18441,7 @@ L: [email protected]
S: Maintained
F: Documentation/devicetree/bindings/net/pse-pd/
F: drivers/net/pse-pd/
+F: net/ethtool/pse-pd.c
PSTORE FILESYSTEM
M: Kees Cook <[email protected]>
diff --git a/Makefile b/Makefile
index b1f3bf584d4a..2966d5fbbf17 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 11
SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
NAME = Baby Opossum Posse
# *DOCUMENTATION*
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index e94f621903fe..251b73c5481e 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -140,6 +140,12 @@
#define SO_PASSPIDFD 76
#define SO_PEERPIDFD 77
+#define SO_DEVMEM_LINEAR 78
+#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR
+#define SO_DEVMEM_DMABUF 79
+#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF
+#define SO_DEVMEM_DONTNEED 80
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
index a608a219543e..3e08e2fd0a78 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
@@ -387,7 +387,7 @@
pmic {
pmic_int_l: pmic-int-l {
- rockchip,pins = <2 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index ccbe3a7a1d2c..d24444cdf54a 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -154,6 +154,22 @@
};
};
+&gpio3 {
+ /*
+ * The Qseven BIOS_DISABLE signal on the RK3399-Q7 keeps the on-module
+ * eMMC and SPI flash powered-down initially (in fact it keeps the
+ * reset signal asserted). BIOS_DISABLE_OVERRIDE pin allows to override
+ * that signal so that eMMC and SPI can be used regardless of the state
+ * of the signal.
+ */
+ bios-disable-override-hog {
+ gpios = <RK_PD5 GPIO_ACTIVE_LOW>;
+ gpio-hog;
+ line-name = "bios_disable_override";
+ output-high;
+ };
+};
+
&gmac {
assigned-clocks = <&cru SCLK_RMII_SRC>;
assigned-clock-parents = <&clkin_gmac>;
@@ -409,6 +425,7 @@
&i2s0 {
pinctrl-0 = <&i2s0_2ch_bus>;
+ pinctrl-1 = <&i2s0_2ch_bus_bclk_off>;
rockchip,playback-channels = <2>;
rockchip,capture-channels = <2>;
status = "okay";
@@ -417,8 +434,8 @@
/*
* As Q7 does not specify neither a global nor a RX clock for I2S these
* signals are not used. Furthermore I2S0_LRCK_RX is used as GPIO.
- * Therefore we have to redefine the i2s0_2ch_bus definition to prevent
- * conflicts.
+ * Therefore we have to redefine the i2s0_2ch_bus and i2s0_2ch_bus_bclk_off
+ * definitions to prevent conflicts.
*/
&i2s0_2ch_bus {
rockchip,pins =
@@ -428,6 +445,14 @@
<3 RK_PD7 1 &pcfg_pull_none>;
};
+&i2s0_2ch_bus_bclk_off {
+ rockchip,pins =
+ <3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>,
+ <3 RK_PD2 1 &pcfg_pull_none>,
+ <3 RK_PD3 1 &pcfg_pull_none>,
+ <3 RK_PD7 1 &pcfg_pull_none>;
+};
+
&io_domains {
status = "okay";
bt656-supply = <&vcc_1v8>;
@@ -449,9 +474,14 @@
&pinctrl {
pinctrl-names = "default";
- pinctrl-0 = <&q7_thermal_pin>;
+ pinctrl-0 = <&q7_thermal_pin &bios_disable_override_hog_pin>;
gpios {
+ bios_disable_override_hog_pin: bios-disable-override-hog-pin {
+ rockchip,pins =
+ <3 RK_PD5 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
q7_thermal_pin: q7-thermal-pin {
rockchip,pins =
<0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
index 4690be841a1c..c72b3a608edd 100644
--- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
@@ -1592,10 +1592,9 @@
<&cru SRST_TSADCPHY>;
rockchip,grf = <&grf>;
rockchip,hw-tshut-temp = <95000>;
- pinctrl-names = "init", "default", "sleep";
- pinctrl-0 = <&tsadc_pin>;
- pinctrl-1 = <&tsadc_shutorg>;
- pinctrl-2 = <&tsadc_pin>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&tsadc_shutorg>;
+ pinctrl-1 = <&tsadc_pin>;
#thermal-sensor-cells = <1>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
index b6e4df180f0b..ee99166ebd46 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
@@ -582,14 +582,14 @@
};
vo0_grf: syscon@fd5a6000 {
- compatible = "rockchip,rk3588-vo-grf", "syscon";
+ compatible = "rockchip,rk3588-vo0-grf", "syscon";
reg = <0x0 0xfd5a6000 0x0 0x2000>;
clocks = <&cru PCLK_VO0GRF>;
};
vo1_grf: syscon@fd5a8000 {
- compatible = "rockchip,rk3588-vo-grf", "syscon";
- reg = <0x0 0xfd5a8000 0x0 0x100>;
+ compatible = "rockchip,rk3588-vo1-grf", "syscon";
+ reg = <0x0 0xfd5a8000 0x0 0x4000>;
clocks = <&cru PCLK_VO1GRF>;
};
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 6b3258860377..2729faaee4b4 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -25,6 +25,7 @@
*
* @common: Common unwind state.
* @task: The task being unwound.
+ * @graph_idx: Used by ftrace_graph_ret_addr() for optimized stack unwinding.
* @kr_cur: When KRETPROBES is selected, holds the kretprobe instance
* associated with the most recently encountered replacement lr
* value.
@@ -32,6 +33,7 @@
struct kunwind_state {
struct unwind_state common;
struct task_struct *task;
+ int graph_idx;
#ifdef CONFIG_KRETPROBES
struct llist_node *kr_cur;
#endif
@@ -106,7 +108,7 @@ kunwind_recover_return_address(struct kunwind_state *state)
if (state->task->ret_stack &&
(state->common.pc == (unsigned long)return_to_handler)) {
unsigned long orig_pc;
- orig_pc = ftrace_graph_ret_addr(state->task, NULL,
+ orig_pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
state->common.pc,
(void *)state->common.fp);
if (WARN_ON_ONCE(state->common.pc == orig_pc))
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 60ebaed28a4c..8ab7582291ab 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -151,6 +151,12 @@
#define SO_PASSPIDFD 76
#define SO_PEERPIDFD 77
+#define SO_DEVMEM_LINEAR 78
+#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR
+#define SO_DEVMEM_DMABUF 79
+#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF
+#define SO_DEVMEM_DONTNEED 80
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index be264c2b1a11..38fc0b188e08 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -132,6 +132,12 @@
#define SO_PASSPIDFD 0x404A
#define SO_PEERPIDFD 0x404B
+#define SO_DEVMEM_LINEAR 78
+#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR
+#define SO_DEVMEM_DMABUF 79
+#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF
+#define SO_DEVMEM_DONTNEED 80
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 9508399dd036..b481738c4bb5 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -52,7 +52,7 @@
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
#define pgd_ERROR(e) \
- pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+ pr_err("%s:%d: bad pgd %08llx.\n", __FILE__, __LINE__, (unsigned long long)pgd_val(e))
/*
* This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
@@ -170,7 +170,7 @@ static inline void pmd_clear(pmd_t *pmdp)
#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd) \
- ((const void *)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
+ ((const void *)((unsigned long)pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_pfn(pmd) (__pa(pmd_val(pmd)) >> PAGE_SHIFT)
#endif
diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
index 7b3d4c592a10..f3086e39e7d2 100644
--- a/arch/powerpc/include/asm/pgtable-types.h
+++ b/arch/powerpc/include/asm/pgtable-types.h
@@ -49,16 +49,22 @@ static inline unsigned long pud_val(pud_t x)
#endif /* CONFIG_PPC64 */
/* PGD level */
-#if defined(CONFIG_PPC_E500) && defined(CONFIG_PTE_64BIT)
+#if defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT)
typedef struct { unsigned long long pgd; } pgd_t;
+
+static inline unsigned long long pgd_val(pgd_t x)
+{
+ return x.pgd;
+}
#else
typedef struct { unsigned long pgd; } pgd_t;
-#endif
-#define __pgd(x) ((pgd_t) { (x) })
+
static inline unsigned long pgd_val(pgd_t x)
{
return x.pgd;
}
+#endif
+#define __pgd(x) ((pgd_t) { (x) })
/* Page protection bits */
typedef struct { unsigned long pgprot; } pgprot_t;
diff --git a/arch/powerpc/kernel/vdso/vdso32.lds.S b/arch/powerpc/kernel/vdso/vdso32.lds.S
index 426e1ccc6971..8f57107000a2 100644
--- a/arch/powerpc/kernel/vdso/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso/vdso32.lds.S
@@ -74,6 +74,8 @@ SECTIONS
.got : { *(.got) } :text
.plt : { *(.plt) }
+ .rela.dyn : { *(.rela .rela*) }
+
_end = .;
__end = .;
PROVIDE(end = .);
@@ -87,7 +89,7 @@ SECTIONS
*(.branch_lt)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
- *(.got1 .glink .iplt .rela*)
+ *(.got1 .glink .iplt)
}
}
diff --git a/arch/powerpc/kernel/vdso/vdso64.lds.S b/arch/powerpc/kernel/vdso/vdso64.lds.S
index bda6c8cdd459..400819258c06 100644
--- a/arch/powerpc/kernel/vdso/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso/vdso64.lds.S
@@ -69,7 +69,7 @@ SECTIONS
.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
.eh_frame : { KEEP (*(.eh_frame)) } :text
.gcc_except_table : { *(.gcc_except_table) }
- .rela.dyn ALIGN(8) : { *(.rela.dyn) }
+ .rela.dyn ALIGN(8) : { *(.rela .rela*) }
.got ALIGN(8) : { *(.got .toc) }
@@ -86,7 +86,7 @@ SECTIONS
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
*(.opd)
- *(.glink .iplt .plt .rela*)
+ *(.glink .iplt .plt)
}
}
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 5de4dd549f6e..bcc7e4dff8c3 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -697,7 +697,15 @@ again:
}
release:
- qnodesp->count--; /* release the node */
+ /*
+ * Clear the lock before releasing the node, as another CPU might see stale
+ * values if an interrupt occurs after we increment qnodesp->count
+ * but before node->lock is initialized. The barrier ensures that
+ * there are no further stores to the node after it has been released.
+ */
+ node->lock = NULL;
+ barrier();
+ qnodesp->count--;
}
void queued_spin_lock_slowpath(struct qspinlock *lock)
diff --git a/arch/powerpc/mm/nohash/tlb_64e.c b/arch/powerpc/mm/nohash/tlb_64e.c
index 113edf76d3ce..d26656b07b72 100644
--- a/arch/powerpc/mm/nohash/tlb_64e.c
+++ b/arch/powerpc/mm/nohash/tlb_64e.c
@@ -33,7 +33,7 @@
* though this will probably be made common with other nohash
* implementations at some point
*/
-int mmu_pte_psize; /* Page size used for PTE pages */
+static int mmu_pte_psize; /* Page size used for PTE pages */
int mmu_vmemmap_psize; /* Page size used for the virtual mem map */
int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */
unsigned long linear_map_top; /* Top of linear mapping */
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 0f3cd7c3a436..939ea7f6a228 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -552,8 +552,8 @@ config RISCV_ISA_SVPBMT
config TOOLCHAIN_HAS_V
bool
default y
- depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64iv)
- depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32iv)
+ depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64imv)
+ depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32imv)
depends on LLD_VERSION >= 140000 || LD_VERSION >= 23800
depends on AS_HAS_OPTION_ARCH
diff --git a/arch/riscv/boot/dts/starfive/jh7110-common.dtsi b/arch/riscv/boot/dts/starfive/jh7110-common.dtsi
index ca2d44d59d48..c7771b3b6475 100644
--- a/arch/riscv/boot/dts/starfive/jh7110-common.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7110-common.dtsi
@@ -365,6 +365,12 @@
};
};
+&syscrg {
+ assigned-clocks = <&syscrg JH7110_SYSCLK_CPU_CORE>,
+ <&pllclk JH7110_PLLCLK_PLL0_OUT>;
+ assigned-clock-rates = <500000000>, <1500000000>;
+};
+
&sysgpio {
i2c0_pins: i2c0-0 {
i2c-pins {
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 8702b8721a27..efa1b3519b23 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -14,36 +14,14 @@
#include <asm/ptrace.h>
-/*
- * addr is a hint to the maximum userspace address that mmap should provide, so
- * this macro needs to return the largest address space available so that
- * mmap_end < addr, being mmap_end the top of that address space.
- * See Documentation/arch/riscv/vm-layout.rst for more details.
- */
#define arch_get_mmap_end(addr, len, flags) \
({ \
- unsigned long mmap_end; \
- typeof(addr) _addr = (addr); \
- if ((_addr) == 0 || is_compat_task() || \
- ((_addr + len) > BIT(VA_BITS - 1))) \
- mmap_end = STACK_TOP_MAX; \
- else \
- mmap_end = (_addr + len); \
- mmap_end; \
+ STACK_TOP_MAX; \
})
#define arch_get_mmap_base(addr, base) \
({ \
- unsigned long mmap_base; \
- typeof(addr) _addr = (addr); \
- typeof(base) _base = (base); \
- unsigned long rnd_gap = DEFAULT_MAP_WINDOW - (_base); \
- if ((_addr) == 0 || is_compat_task() || \
- ((_addr + len) > BIT(VA_BITS - 1))) \
- mmap_base = (_base); \
- else \
- mmap_base = (_addr + len) - rnd_gap; \
- mmap_base; \
+ base; \
})
#ifdef CONFIG_64BIT
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 7cffd4ffecd0..7bd3746028c9 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <linux/cpumask.h>
+#include <linux/jump_label.h>
#ifdef CONFIG_RISCV_SBI
enum sbi_ext_id {
@@ -304,6 +305,7 @@ struct sbiret {
};
void sbi_init(void);
+long __sbi_base_ecall(int fid);
struct sbiret __sbi_ecall(unsigned long arg0, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5,
@@ -373,7 +375,23 @@ static inline unsigned long sbi_mk_version(unsigned long major,
| (minor & SBI_SPEC_VERSION_MINOR_MASK);
}
-int sbi_err_map_linux_errno(int err);
+static inline int sbi_err_map_linux_errno(int err)
+{
+ switch (err) {
+ case SBI_SUCCESS:
+ return 0;
+ case SBI_ERR_DENIED:
+ return -EPERM;
+ case SBI_ERR_INVALID_PARAM:
+ return -EINVAL;
+ case SBI_ERR_INVALID_ADDRESS:
+ return -EFAULT;
+ case SBI_ERR_NOT_SUPPORTED:
+ case SBI_ERR_FAILURE:
+ default:
+ return -ENOTSUPP;
+ };
+}
extern bool sbi_debug_console_available;
int sbi_debug_console_write(const char *bytes, unsigned int num_bytes);
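
With sbi_err_map_linux_errno() now a static inline in the header, any SBI caller can translate a firmware error code without a call into sbi.c. A hedged sketch of the usual pattern, mirroring the __sbi_base_ecall() helper declared above (the wrapper name here is hypothetical):

static long sbi_get_value_or_errno(int fid)
{
	struct sbiret ret = sbi_ecall(SBI_EXT_BASE, fid, 0, 0, 0, 0, 0, 0);

	/* Either the SBI return value or a negative Linux errno. */
	return ret.error ? sbi_err_map_linux_errno(ret.error) : ret.value;
}
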
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 06d407f1b30b..7f88cc4931f5 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -20,17 +20,21 @@ endif
ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
CFLAGS_alternative.o := -mcmodel=medany
CFLAGS_cpufeature.o := -mcmodel=medany
+CFLAGS_sbi_ecall.o := -mcmodel=medany
ifdef CONFIG_FTRACE
CFLAGS_REMOVE_alternative.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_cpufeature.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_sbi_ecall.o = $(CC_FLAGS_FTRACE)
endif
ifdef CONFIG_RELOCATABLE
CFLAGS_alternative.o += -fno-pie
CFLAGS_cpufeature.o += -fno-pie
+CFLAGS_sbi_ecall.o += -fno-pie
endif
ifdef CONFIG_KASAN
KASAN_SANITIZE_alternative.o := n
KASAN_SANITIZE_cpufeature.o := n
+KASAN_SANITIZE_sbi_ecall.o := n
endif
endif
@@ -88,7 +92,7 @@ obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o
obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
-obj-$(CONFIG_RISCV_SBI) += sbi.o
+obj-$(CONFIG_RISCV_SBI) += sbi.o sbi_ecall.o
ifeq ($(CONFIG_RISCV_SBI), y)
obj-$(CONFIG_SMP) += sbi-ipi.o
obj-$(CONFIG_SMP) += cpu_ops_sbi.o
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index 837bdab2601b..1989b8cade1b 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -14,9 +14,6 @@
#include <asm/smp.h>
#include <asm/tlbflush.h>
-#define CREATE_TRACE_POINTS
-#include <asm/trace.h>
-
/* default SBI version is 0.1 */
unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT;
EXPORT_SYMBOL(sbi_spec_version);
@@ -27,55 +24,6 @@ static int (*__sbi_rfence)(int fid, const struct cpumask *cpu_mask,
unsigned long start, unsigned long size,
unsigned long arg4, unsigned long arg5) __ro_after_init;
-struct sbiret __sbi_ecall(unsigned long arg0, unsigned long arg1,
- unsigned long arg2, unsigned long arg3,
- unsigned long arg4, unsigned long arg5,
- int fid, int ext)
-{
- struct sbiret ret;
-
- trace_sbi_call(ext, fid);
-
- register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);
- register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1);
- register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2);
- register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3);
- register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4);
- register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5);
- register uintptr_t a6 asm ("a6") = (uintptr_t)(fid);
- register uintptr_t a7 asm ("a7") = (uintptr_t)(ext);
- asm volatile ("ecall"
- : "+r" (a0), "+r" (a1)
- : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7)
- : "memory");
- ret.error = a0;
- ret.value = a1;
-
- trace_sbi_return(ext, ret.error, ret.value);
-
- return ret;
-}
-EXPORT_SYMBOL(__sbi_ecall);
-
-int sbi_err_map_linux_errno(int err)
-{
- switch (err) {
- case SBI_SUCCESS:
- return 0;
- case SBI_ERR_DENIED:
- return -EPERM;
- case SBI_ERR_INVALID_PARAM:
- return -EINVAL;
- case SBI_ERR_INVALID_ADDRESS:
- return -EFAULT;
- case SBI_ERR_NOT_SUPPORTED:
- case SBI_ERR_FAILURE:
- default:
- return -ENOTSUPP;
- };
-}
-EXPORT_SYMBOL(sbi_err_map_linux_errno);
-
#ifdef CONFIG_RISCV_SBI_V01
static unsigned long __sbi_v01_cpumask_to_hartmask(const struct cpumask *cpu_mask)
{
@@ -535,17 +483,6 @@ long sbi_probe_extension(int extid)
}
EXPORT_SYMBOL(sbi_probe_extension);
-static long __sbi_base_ecall(int fid)
-{
- struct sbiret ret;
-
- ret = sbi_ecall(SBI_EXT_BASE, fid, 0, 0, 0, 0, 0, 0);
- if (!ret.error)
- return ret.value;
- else
- return sbi_err_map_linux_errno(ret.error);
-}
-
static inline long sbi_get_spec_version(void)
{
return __sbi_base_ecall(SBI_EXT_BASE_GET_SPEC_VERSION);
diff --git a/arch/riscv/kernel/sbi_ecall.c b/arch/riscv/kernel/sbi_ecall.c
new file mode 100644
index 000000000000..24aabb4fbde3
--- /dev/null
+++ b/arch/riscv/kernel/sbi_ecall.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Rivos Inc. */
+
+#include <asm/sbi.h>
+#define CREATE_TRACE_POINTS
+#include <asm/trace.h>
+
+long __sbi_base_ecall(int fid)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_BASE, fid, 0, 0, 0, 0, 0, 0);
+ if (!ret.error)
+ return ret.value;
+ else
+ return sbi_err_map_linux_errno(ret.error);
+}
+EXPORT_SYMBOL(__sbi_base_ecall);
+
+struct sbiret __sbi_ecall(unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5,
+ int fid, int ext)
+{
+ struct sbiret ret;
+
+ trace_sbi_call(ext, fid);
+
+ register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);
+ register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1);
+ register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2);
+ register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3);
+ register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4);
+ register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5);
+ register uintptr_t a6 asm ("a6") = (uintptr_t)(fid);
+ register uintptr_t a7 asm ("a7") = (uintptr_t)(ext);
+ asm volatile ("ecall"
+ : "+r" (a0), "+r" (a1)
+ : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7)
+ : "memory");
+ ret.error = a0;
+ ret.value = a1;
+
+ trace_sbi_return(ext, ret.error, ret.value);
+
+ return ret;
+}
+EXPORT_SYMBOL(__sbi_ecall);
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 192cd5603e95..d4fd8af7aaf5 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -417,7 +417,7 @@ int handle_misaligned_load(struct pt_regs *regs)
val.data_u64 = 0;
if (user_mode(regs)) {
- if (raw_copy_from_user(&val, (u8 __user *)addr, len))
+ if (copy_from_user(&val, (u8 __user *)addr, len))
return -1;
} else {
memcpy(&val, (u8 *)addr, len);
@@ -515,7 +515,7 @@ int handle_misaligned_store(struct pt_regs *regs)
return -EOPNOTSUPP;
if (user_mode(regs)) {
- if (raw_copy_to_user((u8 __user *)addr, &val, len))
+ if (copy_to_user((u8 __user *)addr, &val, len))
return -1;
} else {
memcpy((u8 *)addr, &val, len);
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index eb0649a61b4c..1785782c2e55 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -252,7 +252,7 @@ static void __init setup_bootmem(void)
* The size of the linear page mapping may restrict the amount of
* usable RAM.
*/
- if (IS_ENABLED(CONFIG_64BIT)) {
+ if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU)) {
max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;
memblock_cap_memory_range(phys_ram_base,
max_mapped_addr - phys_ram_base);
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 682da3714686..57084ed2f3c4 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -133,6 +133,12 @@
#define SO_PASSPIDFD 0x0055
#define SO_PEERPIDFD 0x0056
+#define SO_DEVMEM_LINEAR 0x0057
+#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR
+#define SO_DEVMEM_DMABUF 0x0058
+#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF
+#define SO_DEVMEM_DONTNEED 0x0059
+
#if !defined(__KERNEL__)
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 17a71e92a343..95eada2994e1 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -35,7 +35,6 @@
#include <clocksource/hyperv_timer.h>
#include <linux/highmem.h>
-int hyperv_init_cpuhp;
u64 hv_current_partition_id = ~0ull;
EXPORT_SYMBOL_GPL(hv_current_partition_id);
@@ -607,8 +606,6 @@ skip_hypercall_pg_init:
register_syscore_ops(&hv_syscore_ops);
- hyperv_init_cpuhp = cpuhp;
-
if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_ACCESS_PARTITION_ID)
hv_get_partition_id();
@@ -637,7 +634,7 @@ skip_hypercall_pg_init:
clean_guest_os_id:
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
- cpuhp_remove_state(cpuhp);
+ cpuhp_remove_state(CPUHP_AP_HYPERV_ONLINE);
free_ghcb_page:
free_percpu(hv_ghcb_pg);
free_vp_assist_page:
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 390c4d13956d..5f0bc6a6d025 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -40,7 +40,6 @@ static inline unsigned char hv_get_nmi_reason(void)
}
#if IS_ENABLED(CONFIG_HYPERV)
-extern int hyperv_init_cpuhp;
extern bool hyperv_paravisor_present;
extern void *hv_hypercall_pg;
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index e0fd57a8ba84..ead967479fa6 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -199,8 +199,8 @@ static void hv_machine_shutdown(void)
* Call hv_cpu_die() on all the CPUs, otherwise later the hypervisor
* corrupts the old VP Assist Pages and can crash the kexec kernel.
*/
- if (kexec_in_progress && hyperv_init_cpuhp > 0)
- cpuhp_remove_state(hyperv_init_cpuhp);
+ if (kexec_in_progress)
+ cpuhp_remove_state(CPUHP_AP_HYPERV_ONLINE);
/* The function calls stop_other_cpus(). */
native_machine_shutdown();
@@ -424,6 +424,7 @@ static void __init ms_hyperv_init_platform(void)
ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
x86_platform.calibrate_tsc = hv_get_tsc_khz;
x86_platform.calibrate_cpu = hv_get_tsc_khz;
+ setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
}
if (ms_hyperv.priv_high & HV_ISOLATION) {
@@ -449,9 +450,23 @@ static void __init ms_hyperv_init_platform(void)
ms_hyperv.hints &= ~HV_X64_APIC_ACCESS_RECOMMENDED;
if (!ms_hyperv.paravisor_present) {
- /* To be supported: more work is required. */
+ /*
+ * Mark the Hyper-V TSC page feature as disabled
+ * in a TDX VM without paravisor so that the
+ * Invariant TSC, which is a better clocksource
+ * anyway, is used instead.
+ */
ms_hyperv.features &= ~HV_MSR_REFERENCE_TSC_AVAILABLE;
+ /*
+ * The Invariant TSC is expected to be available
+ * in a TDX VM without paravisor, but if not,
+ * print a warning message. The slower Hyper-V MSR-based
+ * Ref Counter should end up being the clocksource.
+ */
+ if (!(ms_hyperv.features & HV_ACCESS_TSC_INVARIANT))
+ pr_warn("Hyper-V: Invariant TSC is unavailable\n");
+
/* HV_MSR_CRASH_CTL is unsupported. */
ms_hyperv.misc_features &= ~HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 472a1537b7a9..730c2f34d347 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -19,7 +19,6 @@ if VIRTUALIZATION
config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
- depends on HIGH_RES_TIMERS
depends on X86_LOCAL_APIC
select KVM_COMMON
select KVM_GENERIC_MMU_NOTIFIER
@@ -144,8 +143,10 @@ config KVM_AMD_SEV
select HAVE_KVM_ARCH_GMEM_PREPARE
select HAVE_KVM_ARCH_GMEM_INVALIDATE
help
- Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
- with Encrypted State (SEV-ES) on AMD processors.
+ Provides support for launching encrypted VMs which use Secure
+ Encrypted Virtualization (SEV), Secure Encrypted Virtualization with
+ Encrypted State (SEV-ES), and Secure Encrypted Virtualization with
+ Secure Nested Paging (SEV-SNP) technologies on AMD processors.
config KVM_SMM
bool "System Management Mode emulation"
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 928cf84778b0..de05a26b0b7d 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4750,7 +4750,9 @@ long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
* reload is efficient when called repeatedly, so we can do it on
* every iteration.
*/
- kvm_mmu_reload(vcpu);
+ r = kvm_mmu_reload(vcpu);
+ if (r)
+ return r;
if (kvm_arch_has_private_mem(vcpu->kvm) &&
kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(range->gpa)))
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index d4527965e48c..8f7eb3ad88fc 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -391,9 +391,9 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
mmio_value = 0;
/*
- * The masked MMIO value must obviously match itself and a removed SPTE
- * must not get a false positive. Removed SPTEs and MMIO SPTEs should
- * never collide as MMIO must set some RWX bits, and removed SPTEs must
+ * The masked MMIO value must obviously match itself and a frozen SPTE
+ * must not get a false positive. Frozen SPTEs and MMIO SPTEs should
+ * never collide as MMIO must set some RWX bits, and frozen SPTEs must
* not set any RWX bits.
*/
if (WARN_ON((mmio_value & mmio_mask) != mmio_value) ||
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index ef793c459b05..2cb816ea2430 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -214,7 +214,7 @@ extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
*/
#define FROZEN_SPTE (SHADOW_NONPRESENT_VALUE | 0x5a0ULL)
-/* Removed SPTEs must not be misconstrued as shadow present PTEs. */
+/* Frozen SPTEs must not be misconstrued as shadow present PTEs. */
static_assert(!(FROZEN_SPTE & SPTE_MMU_PRESENT_MASK));
static inline bool is_frozen_spte(u64 spte)
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index c7dc49ee7388..3c55955bcaf8 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -359,10 +359,10 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
/*
* Set the SPTE to a nonpresent value that other
* threads will not overwrite. If the SPTE was
- * already marked as removed then another thread
+ * already marked as frozen then another thread
* handling a page fault could overwrite it, so
* set the SPTE until it is set from some other
- * value to the removed SPTE value.
+ * value to the frozen SPTE value.
*/
for (;;) {
old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, FROZEN_SPTE);
@@ -536,8 +536,8 @@ static inline int __must_check __tdp_mmu_set_spte_atomic(struct tdp_iter *iter,
u64 *sptep = rcu_dereference(iter->sptep);
/*
- * The caller is responsible for ensuring the old SPTE is not a REMOVED
- * SPTE. KVM should never attempt to zap or manipulate a REMOVED SPTE,
+ * The caller is responsible for ensuring the old SPTE is not a FROZEN
+ * SPTE. KVM should never attempt to zap or manipulate a FROZEN SPTE,
* and pre-checking before inserting a new SPTE is advantageous as it
* avoids unnecessary work.
*/
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index d6f252555ab3..5ab2c92c7331 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2876,6 +2876,12 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_CSTAR:
msr_info->data = svm->vmcb01.ptr->save.cstar;
break;
+ case MSR_GS_BASE:
+ msr_info->data = svm->vmcb01.ptr->save.gs.base;
+ break;
+ case MSR_FS_BASE:
+ msr_info->data = svm->vmcb01.ptr->save.fs.base;
+ break;
case MSR_KERNEL_GS_BASE:
msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base;
break;
@@ -3101,6 +3107,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
case MSR_CSTAR:
svm->vmcb01.ptr->save.cstar = data;
break;
+ case MSR_GS_BASE:
+ svm->vmcb01.ptr->save.gs.base = data;
+ break;
+ case MSR_FS_BASE:
+ svm->vmcb01.ptr->save.fs.base = data;
+ break;
case MSR_KERNEL_GS_BASE:
svm->vmcb01.ptr->save.kernel_gs_base = data;
break;
@@ -5224,6 +5236,9 @@ static __init void svm_set_cpu_caps(void)
/* CPUID 0x8000001F (SME/SEV features) */
sev_set_cpu_caps();
+
+ /* Don't advertise Bus Lock Detect to guest if SVM support is absent */
+ kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT);
}
static __init int svm_hardware_setup(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 70219e406987..c983c8e434b8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4656,7 +4656,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_ASYNC_PF_INT:
case KVM_CAP_GET_TSC_KHZ:
case KVM_CAP_KVMCLOCK_CTRL:
- case KVM_CAP_READONLY_MEM:
case KVM_CAP_IOAPIC_POLARITY_IGNORED:
case KVM_CAP_TSC_DEADLINE_TIMER:
case KVM_CAP_DISABLE_QUIRKS:
@@ -4815,6 +4814,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_VM_TYPES:
r = kvm_caps.supported_vm_types;
break;
+ case KVM_CAP_READONLY_MEM:
+ r = kvm ? kvm_arch_has_readonly_mem(kvm) : 1;
+ break;
default:
break;
}
@@ -6040,7 +6042,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
break;
+ kvm_vcpu_srcu_read_lock(vcpu);
r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
+ kvm_vcpu_srcu_read_unlock(vcpu);
break;
}
case KVM_GET_DEBUGREGS: {
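
With KVM_CAP_READONLY_MEM moved out of the always-on case list, the reported value can now differ per VM (it comes from kvm_arch_has_readonly_mem() when a VM fd is supplied), so userspace that relies on read-only memslots should probe the capability on the VM file descriptor rather than on /dev/kvm. A hedged userspace sketch, error handling omitted:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns nonzero if this particular VM supports read-only memslots. */
static int vm_has_readonly_mem(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);

	return ioctl(vm, KVM_CHECK_EXTENSION, KVM_CAP_READONLY_MEM) > 0;
}
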
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 8d1fb38f745f..96a2653905ae 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -167,10 +167,6 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
struct bio_integrity_payload *bip = bio_integrity(bio);
- if (((bip->bip_iter.bi_size + len) >> SECTOR_SHIFT) >
- queue_max_hw_sectors(q))
- return 0;
-
if (bip->bip_vcnt > 0) {
struct bio_vec *bv = &bip->bip_vec[bip->bip_vcnt - 1];
bool same_page = false;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 905290c98c3c..e8643c69d426 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3422,6 +3422,7 @@ static void binder_transaction(struct binder_proc *proc,
*/
copy_size = object_offset - user_offset;
if (copy_size && (user_offset > object_offset ||
+ object_offset > tr->data_size ||
binder_alloc_copy_user_to_buffer(
&target_proc->alloc,
t->buffer, user_offset,
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 890c08792ba8..1d53a3f48a0e 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -2663,6 +2663,8 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
mutex_lock(&ub->mutex);
if (!ublk_can_use_recovery(ub))
goto out_unlock;
+ if (!ub->nr_queues_ready)
+ goto out_unlock;
/*
* START_RECOVERY is only allowd after:
*
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index d87314042528..ad9a84d521fc 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -40,7 +40,8 @@
#define PLL_USER_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL])
# define PLL_POST_DIV_SHIFT 8
-# define PLL_POST_DIV_MASK(p) GENMASK((p)->width, 0)
+# define PLL_POST_DIV_MASK(p) GENMASK((p)->width - 1, 0)
+# define PLL_ALPHA_MSB BIT(15)
# define PLL_ALPHA_EN BIT(24)
# define PLL_ALPHA_MODE BIT(25)
# define PLL_VCO_SHIFT 20
@@ -1552,8 +1553,8 @@ clk_trion_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
}
return regmap_update_bits(regmap, PLL_USER_CTL(pll),
- PLL_POST_DIV_MASK(pll) << PLL_POST_DIV_SHIFT,
- val << PLL_POST_DIV_SHIFT);
+ PLL_POST_DIV_MASK(pll) << pll->post_div_shift,
+ val << pll->post_div_shift);
}
const struct clk_ops clk_alpha_pll_postdiv_trion_ops = {
@@ -2117,6 +2118,18 @@ static void clk_zonda_pll_disable(struct clk_hw *hw)
regmap_write(regmap, PLL_OPMODE(pll), 0x0);
}
+static void zonda_pll_adjust_l_val(unsigned long rate, unsigned long prate, u32 *l)
+{
+ u64 remainder, quotient;
+
+ quotient = rate;
+ remainder = do_div(quotient, prate);
+ *l = quotient;
+
+ if ((remainder * 2) / prate)
+ *l = *l + 1;
+}
+
static int clk_zonda_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate)
{
@@ -2133,9 +2146,15 @@ static int clk_zonda_pll_set_rate(struct clk_hw *hw, unsigned long rate,
if (ret < 0)
return ret;
+ if (a & PLL_ALPHA_MSB)
+ zonda_pll_adjust_l_val(rate, prate, &l);
+
regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+ if (!clk_hw_is_enabled(hw))
+ return 0;
+
/* Wait before polling for the frequency latch */
udelay(5);
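
When the programmed alpha value has its MSB set, the fractional part is at least half a reference cycle, so zonda_pll_adjust_l_val() recomputes L with round-to-nearest: for example, with prate = 19,200,000 Hz and rate = 1,661,000,000 Hz the quotient is 86 with a remainder of 9,800,000, and since 2 * 9,800,000 >= 19,200,000 the L value is bumped to 87. A hedged standalone sketch of that arithmetic (the helper name and test values are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Same rounding as zonda_pll_adjust_l_val(), written for user space. */
static uint32_t adjust_l(uint64_t rate, uint64_t prate)
{
	uint64_t l = rate / prate;

	if ((rate % prate) * 2 >= prate)	/* fraction >= 0.5: round up */
		l++;

	return (uint32_t)l;
}

int main(void)
{
	printf("%u\n", adjust_l(1661000000ULL, 19200000ULL));	/* prints 87 */
	return 0;
}
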
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index d7414361e432..8e0f3372dc7a 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -198,6 +198,7 @@ extern const struct clk_ops clk_byte2_ops;
extern const struct clk_ops clk_pixel_ops;
extern const struct clk_ops clk_gfx3d_ops;
extern const struct clk_ops clk_rcg2_shared_ops;
+extern const struct clk_ops clk_rcg2_shared_no_init_park_ops;
extern const struct clk_ops clk_dp_ops;
struct clk_rcg_dfs_data {
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 30b19bd39d08..bf26c5448f00 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -1348,6 +1348,36 @@ const struct clk_ops clk_rcg2_shared_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
+static int clk_rcg2_shared_no_init_park(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ /*
+ * Read the config register so that the parent is properly mapped at
+ * registration time.
+ */
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg);
+
+ return 0;
+}
+
+/*
+ * Like clk_rcg2_shared_ops but skip the init so that the clk frequency is left
+ * unchanged at registration time.
+ */
+const struct clk_ops clk_rcg2_shared_no_init_park_ops = {
+ .init = clk_rcg2_shared_no_init_park,
+ .enable = clk_rcg2_shared_enable,
+ .disable = clk_rcg2_shared_disable,
+ .get_parent = clk_rcg2_shared_get_parent,
+ .set_parent = clk_rcg2_shared_set_parent,
+ .recalc_rate = clk_rcg2_shared_recalc_rate,
+ .determine_rate = clk_rcg2_determine_rate,
+ .set_rate = clk_rcg2_shared_set_rate,
+ .set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_shared_no_init_park_ops);
+
/* Common APIs to be used for DFS based RCGR */
static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
struct freq_tbl *f)
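
The new ops differ from clk_rcg2_shared_ops only in .init: instead of parking the RCG on the safe source, clk_rcg2_shared_no_init_park() just caches the current CFG so the parent is reported correctly while the bootloader-programmed rate is preserved until the clock is first enabled or reprogrammed, as gcc_usb30_prim_master_clk_src does in the gcc-sm8550 hunk further down. A hedged sketch of how a provider opts in; the register offset, frequency table, and parent tables are placeholders, not taken from a real SoC:

static struct clk_rcg2 example_master_clk_src = {
	.cmd_rcgr = 0x1000,				/* placeholder offset */
	.mnd_width = 8,
	.hid_width = 5,
	.parent_map = gcc_parent_map_0,			/* assumed to exist */
	.freq_tbl = ftbl_example_master_clk_src,	/* assumed to exist */
	.clkr.hw.init = &(struct clk_init_data){
		.name = "example_master_clk_src",
		.parent_data = gcc_parent_data_0,
		.num_parents = ARRAY_SIZE(gcc_parent_data_0),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_rcg2_shared_no_init_park_ops,
	},
};
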
diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c
index 80fc94d705a0..645109f75b46 100644
--- a/drivers/clk/qcom/gcc-ipq9574.c
+++ b/drivers/clk/qcom/gcc-ipq9574.c
@@ -68,7 +68,7 @@ static const struct clk_parent_data gcc_sleep_clk_data[] = {
static struct clk_alpha_pll gpll0_main = {
.offset = 0x20000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x0b000,
.enable_mask = BIT(0),
@@ -96,7 +96,7 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
static struct clk_alpha_pll_postdiv gpll0 = {
.offset = 0x20000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.width = 4,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "gpll0",
@@ -110,7 +110,7 @@ static struct clk_alpha_pll_postdiv gpll0 = {
static struct clk_alpha_pll gpll4_main = {
.offset = 0x22000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x0b000,
.enable_mask = BIT(2),
@@ -125,7 +125,7 @@ static struct clk_alpha_pll gpll4_main = {
static struct clk_alpha_pll_postdiv gpll4 = {
.offset = 0x22000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.width = 4,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "gpll4",
@@ -139,7 +139,7 @@ static struct clk_alpha_pll_postdiv gpll4 = {
static struct clk_alpha_pll gpll2_main = {
.offset = 0x21000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x0b000,
.enable_mask = BIT(1),
@@ -154,7 +154,7 @@ static struct clk_alpha_pll gpll2_main = {
static struct clk_alpha_pll_postdiv gpll2 = {
.offset = 0x21000,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.width = 4,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "gpll2",
diff --git a/drivers/clk/qcom/gcc-sc8280xp.c b/drivers/clk/qcom/gcc-sc8280xp.c
index 5f11760cf73f..f27d0003f427 100644
--- a/drivers/clk/qcom/gcc-sc8280xp.c
+++ b/drivers/clk/qcom/gcc-sc8280xp.c
@@ -1500,7 +1500,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
@@ -1517,7 +1517,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
@@ -1534,7 +1534,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
@@ -1551,7 +1551,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
@@ -1568,7 +1568,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
@@ -1585,7 +1585,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
@@ -1617,7 +1617,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
@@ -1634,7 +1634,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
@@ -1651,7 +1651,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
@@ -1668,7 +1668,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
@@ -1685,7 +1685,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
@@ -1702,7 +1702,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
@@ -1719,7 +1719,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
@@ -1736,7 +1736,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
@@ -1753,7 +1753,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
@@ -1770,7 +1770,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
@@ -1787,7 +1787,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
@@ -1804,7 +1804,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
@@ -1821,7 +1821,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
@@ -1838,7 +1838,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
@@ -1855,7 +1855,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
@@ -1872,7 +1872,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
@@ -1889,7 +1889,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
@@ -1906,7 +1906,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
diff --git a/drivers/clk/qcom/gcc-sm8550.c b/drivers/clk/qcom/gcc-sm8550.c
index 7944ddb4b47d..5abaeddd6afc 100644
--- a/drivers/clk/qcom/gcc-sm8550.c
+++ b/drivers/clk/qcom/gcc-sm8550.c
@@ -536,7 +536,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s0_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -551,7 +551,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s1_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -566,7 +566,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s2_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -581,7 +581,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s3_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -596,7 +596,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s4_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -611,7 +611,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s5_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -626,7 +626,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s6_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -641,7 +641,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s7_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -656,7 +656,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s8_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -671,7 +671,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s9_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -700,7 +700,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
@@ -717,7 +717,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
@@ -750,7 +750,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
@@ -767,7 +767,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
@@ -784,7 +784,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
@@ -801,7 +801,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
@@ -818,7 +818,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
@@ -835,7 +835,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
@@ -852,7 +852,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
@@ -869,7 +869,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
@@ -886,7 +886,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
@@ -903,7 +903,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
@@ -920,7 +920,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
@@ -937,7 +937,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
@@ -975,7 +975,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = {
.parent_data = gcc_parent_data_8,
.num_parents = ARRAY_SIZE(gcc_parent_data_8),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
@@ -992,7 +992,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
@@ -1159,7 +1159,7 @@ static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-sm8650.c b/drivers/clk/qcom/gcc-sm8650.c
index 9bc19bea0c97..fd9d6544bdd5 100644
--- a/drivers/clk/qcom/gcc-sm8650.c
+++ b/drivers/clk/qcom/gcc-sm8650.c
@@ -713,7 +713,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s0_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -728,7 +728,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s1_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -743,7 +743,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s2_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -758,7 +758,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s3_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -773,7 +773,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s4_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -788,7 +788,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s5_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -803,7 +803,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s6_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -818,7 +818,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s7_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -833,7 +833,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s8_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -848,7 +848,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s9_clk_src = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -863,7 +863,7 @@ static struct clk_init_data gcc_qupv3_wrap1_qspi_ref_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_qspi_ref_clk_src = {
@@ -899,7 +899,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
@@ -916,7 +916,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
@@ -948,7 +948,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
@@ -980,7 +980,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
@@ -997,7 +997,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
@@ -1014,7 +1014,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
@@ -1031,7 +1031,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
@@ -1059,7 +1059,7 @@ static struct clk_rcg2 gcc_qupv3_wrap2_ibi_ctrl_0_clk_src = {
.parent_data = gcc_parent_data_2,
.num_parents = ARRAY_SIZE(gcc_parent_data_2),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
},
};
@@ -1068,7 +1068,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
@@ -1085,7 +1085,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
@@ -1102,7 +1102,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
@@ -1119,7 +1119,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
@@ -1136,7 +1136,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
@@ -1153,7 +1153,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
@@ -1186,7 +1186,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = {
.parent_data = gcc_parent_data_10,
.num_parents = ARRAY_SIZE(gcc_parent_data_10),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
@@ -1203,7 +1203,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
@@ -1226,7 +1226,7 @@ static struct clk_init_data gcc_qupv3_wrap3_qspi_ref_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap3_qspi_ref_clk_src = {
diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
index 6ffb3ddcae08..0f578771071f 100644
--- a/drivers/clk/qcom/gcc-x1e80100.c
+++ b/drivers/clk/qcom/gcc-x1e80100.c
@@ -670,7 +670,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
@@ -687,7 +687,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
@@ -719,7 +719,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
@@ -736,7 +736,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
@@ -768,7 +768,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
@@ -785,7 +785,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
@@ -802,7 +802,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
@@ -819,7 +819,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
@@ -836,7 +836,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
@@ -853,7 +853,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
@@ -870,7 +870,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
@@ -887,7 +887,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
@@ -904,7 +904,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
@@ -921,7 +921,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
@@ -938,7 +938,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
@@ -955,7 +955,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
@@ -972,7 +972,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
@@ -989,7 +989,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
@@ -1006,7 +1006,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
@@ -1023,7 +1023,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
@@ -1040,7 +1040,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
@@ -1057,7 +1057,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
@@ -1074,7 +1074,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = {
.parent_data = gcc_parent_data_8,
.num_parents = ARRAY_SIZE(gcc_parent_data_8),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
@@ -1091,7 +1091,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_shared_ops,
+ .ops = &clk_rcg2_ops,
};
static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
@@ -6203,7 +6203,7 @@ static struct gdsc gcc_usb_0_phy_gdsc = {
.pd = {
.name = "gcc_usb_0_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -6215,7 +6215,7 @@ static struct gdsc gcc_usb_1_phy_gdsc = {
.pd = {
.name = "gcc_usb_1_phy_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
diff --git a/drivers/clk/starfive/clk-starfive-jh7110-sys.c b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
index 8f5e5abfa178..17325f17696f 100644
--- a/drivers/clk/starfive/clk-starfive-jh7110-sys.c
+++ b/drivers/clk/starfive/clk-starfive-jh7110-sys.c
@@ -385,6 +385,32 @@ int jh7110_reset_controller_register(struct jh71x0_clk_priv *priv,
}
EXPORT_SYMBOL_GPL(jh7110_reset_controller_register);
+/*
+ * This clock notifier is called when the rate of the PLL0 clock is about to change.
+ * The cpu_root clock should save the current parent clock and switch its parent
+ * clock to osc before the PLL0 rate is changed, then switch its parent clock
+ * back after the PLL0 rate change has completed.
+ */
+static int jh7110_pll0_clk_notifier_cb(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct jh71x0_clk_priv *priv = container_of(nb, struct jh71x0_clk_priv, pll_clk_nb);
+ struct clk *cpu_root = priv->reg[JH7110_SYSCLK_CPU_ROOT].hw.clk;
+ int ret = 0;
+
+ if (action == PRE_RATE_CHANGE) {
+ struct clk *osc = clk_get(priv->dev, "osc");
+
+ priv->original_clk = clk_get_parent(cpu_root);
+ ret = clk_set_parent(cpu_root, osc);
+ clk_put(osc);
+ } else if (action == POST_RATE_CHANGE) {
+ ret = clk_set_parent(cpu_root, priv->original_clk);
+ }
+
+ return notifier_from_errno(ret);
+}
+
static int __init jh7110_syscrg_probe(struct platform_device *pdev)
{
struct jh71x0_clk_priv *priv;
@@ -413,7 +439,10 @@ static int __init jh7110_syscrg_probe(struct platform_device *pdev)
if (IS_ERR(priv->pll[0]))
return PTR_ERR(priv->pll[0]);
} else {
- clk_put(pllclk);
+ priv->pll_clk_nb.notifier_call = jh7110_pll0_clk_notifier_cb;
+ ret = clk_notifier_register(pllclk, &priv->pll_clk_nb);
+ if (ret)
+ return ret;
priv->pll[0] = NULL;
}
diff --git a/drivers/clk/starfive/clk-starfive-jh71x0.h b/drivers/clk/starfive/clk-starfive-jh71x0.h
index 23e052fc1549..e3f441393e48 100644
--- a/drivers/clk/starfive/clk-starfive-jh71x0.h
+++ b/drivers/clk/starfive/clk-starfive-jh71x0.h
@@ -114,6 +114,8 @@ struct jh71x0_clk_priv {
spinlock_t rmw_lock;
struct device *dev;
void __iomem *base;
+ struct clk *original_clk;
+ struct notifier_block pll_clk_nb;
struct clk_hw *pll[3];
struct jh71x0_clk reg[];
};
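
Aside on the JH7110 hunk above: it hooks into the generic clk rate-change notifier chain. A rough sketch of that pattern follows; demo_priv, safe_parent and the registration site are hypothetical, only the clk/notifier API calls are taken from the kernel headers.

#include <linux/clk.h>
#include <linux/container_of.h>
#include <linux/notifier.h>

struct demo_priv {
        struct clk *cpu_root;
        struct clk *safe_parent;        /* e.g. the external oscillator */
        struct clk *saved_parent;
        struct notifier_block nb;
};

static int demo_pll_notifier(struct notifier_block *nb,
                             unsigned long action, void *data)
{
        struct demo_priv *p = container_of(nb, struct demo_priv, nb);
        int ret = 0;

        if (action == PRE_RATE_CHANGE) {
                /* park the consumer on a stable parent before the PLL moves */
                p->saved_parent = clk_get_parent(p->cpu_root);
                ret = clk_set_parent(p->cpu_root, p->safe_parent);
        } else if (action == POST_RATE_CHANGE) {
                /* restore the original parent once the new rate is in place */
                ret = clk_set_parent(p->cpu_root, p->saved_parent);
        }

        return notifier_from_errno(ret);
}

/* at probe time (pll_clk obtained elsewhere):
 *      p->nb.notifier_call = demo_pll_notifier;
 *      ret = clk_notifier_register(pll_clk, &p->nb);
 */

A production notifier would usually also handle ABORT_RATE_CHANGE, which the framework sends when the rate change fails after PRE_RATE_CHANGE, so the original parent gets restored on that path as well.
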
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index b2a080647e41..99177835cade 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -137,7 +137,21 @@ static int hv_stimer_init(unsigned int cpu)
ce->name = "Hyper-V clockevent";
ce->features = CLOCK_EVT_FEAT_ONESHOT;
ce->cpumask = cpumask_of(cpu);
- ce->rating = 1000;
+
+ /*
+ * Lower the rating of the Hyper-V timer in a TDX VM without paravisor,
+ * so the local APIC timer (lapic_clockevent) is the default timer in
+ * such a VM. The Hyper-V timer is not preferred in such a VM because
+ * it depends on the slow VM Reference Counter MSR (the Hyper-V TSC
+ * page is not enabled in such a VM because the VM uses Invariant TSC
+ * as a better clocksource and it's challenging to mark the Hyper-V
+ * TSC page shared in very early boot).
+ */
+ if (!ms_hyperv.paravisor_present && hv_isolation_type_tdx())
+ ce->rating = 90;
+ else
+ ce->rating = 1000;
+
ce->set_state_shutdown = hv_ce_shutdown;
ce->set_state_oneshot = hv_ce_set_oneshot;
ce->set_next_event = hv_ce_set_next_event;
diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
index bd64a8a8427f..92c025b70eb6 100644
--- a/drivers/clocksource/timer-imx-tpm.c
+++ b/drivers/clocksource/timer-imx-tpm.c
@@ -83,20 +83,28 @@ static u64 notrace tpm_read_sched_clock(void)
static int tpm_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
- unsigned long next, now;
+ unsigned long next, prev, now;
- next = tpm_read_counter();
- next += delta;
+ prev = tpm_read_counter();
+ next = prev + delta;
writel(next, timer_base + TPM_C0V);
now = tpm_read_counter();
/*
+ * Need to wait for CNT to increase by at least 1 cycle to make sure
+ * the C0V has been updated in the HW.
+ */
+ if ((next & 0xffffffff) != readl(timer_base + TPM_C0V))
+ while (now == tpm_read_counter())
+ ;
+
+ /*
* NOTE: We observed in a very small probability, the bus fabric
* contention between GPU and A7 may results a few cycles delay
* of writing CNT registers which may cause the min_delta event got
* missed, so we need add a ETIME check here in case it happened.
*/
- return (int)(next - now) <= 0 ? -ETIME : 0;
+ return (now - prev) >= delta ? -ETIME : 0;
}
static int tpm_set_state_oneshot(struct clock_event_device *evt)
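
For the new return statement in tpm_set_next_event(), here is a standalone userspace sketch (hypothetical values, plain C) of how the unsigned (now - prev) >= delta comparison behaves when the 32-bit counter wraps between programming the compare value and reading the counter back:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* prev is near the top of the 32-bit range, so next wraps around */
        uint32_t prev  = 0xfffffff0u;
        uint32_t delta = 0x40u;
        uint32_t next  = prev + delta;          /* 0x00000030 after wrap */
        uint32_t now;

        /* deadline not reached yet: elapsed (0x20) < delta (0x40) */
        now = 0x00000010u;
        printf("next=%#x elapsed=%#x expired=%d\n",
               next, (uint32_t)(now - prev), (uint32_t)(now - prev) >= delta);

        /* deadline already missed: elapsed (0x45) >= delta (0x40) */
        now = 0x00000035u;
        printf("next=%#x elapsed=%#x expired=%d\n",
               next, (uint32_t)(now - prev), (uint32_t)(now - prev) >= delta);

        return 0;
}

Because the subtraction is done in modular 32-bit arithmetic, the elapsed count is measured correctly even after next has wrapped past zero.
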
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index c3f54d9912be..420202bf76e4 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -25,10 +25,7 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
struct clock_event_device *clkevt = &to->clkevt;
- if (of_irq->percpu)
- free_percpu_irq(of_irq->irq, clkevt);
- else
- free_irq(of_irq->irq, clkevt);
+ free_irq(of_irq->irq, clkevt);
}
/**
@@ -42,9 +39,6 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
* - Get interrupt number by name
* - Get interrupt number by index
*
- * When the interrupt is per CPU, 'request_percpu_irq()' is called,
- * otherwise 'request_irq()' is used.
- *
* Returns 0 on success, < 0 otherwise
*/
static __init int timer_of_irq_init(struct device_node *np,
@@ -69,12 +63,9 @@ static __init int timer_of_irq_init(struct device_node *np,
return -EINVAL;
}
- ret = of_irq->percpu ?
- request_percpu_irq(of_irq->irq, of_irq->handler,
- np->full_name, clkevt) :
- request_irq(of_irq->irq, of_irq->handler,
- of_irq->flags ? of_irq->flags : IRQF_TIMER,
- np->full_name, clkevt);
+ ret = request_irq(of_irq->irq, of_irq->handler,
+ of_irq->flags ? of_irq->flags : IRQF_TIMER,
+ np->full_name, clkevt);
if (ret) {
pr_err("Failed to request irq %d for %pOF\n", of_irq->irq, np);
return ret;
diff --git a/drivers/clocksource/timer-of.h b/drivers/clocksource/timer-of.h
index a5478f3e8589..01a2c6b7db06 100644
--- a/drivers/clocksource/timer-of.h
+++ b/drivers/clocksource/timer-of.h
@@ -11,7 +11,6 @@
struct of_timer_irq {
int irq;
int index;
- int percpu;
const char *name;
unsigned long flags;
irq_handler_t handler;
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 89bda7a2bb8d..259a917da75f 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1834,20 +1834,34 @@ static bool amd_cppc_supported(void)
}
/*
- * If the CPPC feature is disabled in the BIOS for processors that support MSR-based CPPC,
- * the AMD Pstate driver may not function correctly.
- * Check the CPPC flag and display a warning message if the platform supports CPPC.
- * Note: below checking code will not abort the driver registeration process because of
- * the code is added for debugging purposes.
+ * If the CPPC feature is disabled in the BIOS for processors
+ * that support MSR-based CPPC, the AMD Pstate driver may not
+ * function correctly.
+ *
+ * For such processors, check the CPPC flag and display a
+ * warning message if the platform supports CPPC.
+ *
+ * Note: The check below will not abort the driver
+ * registration process because it is only added for
+ * debugging purposes. Besides, it may still be possible for
+ * the driver to work using the shared-memory mechanism.
*/
if (!cpu_feature_enabled(X86_FEATURE_CPPC)) {
- if (cpu_feature_enabled(X86_FEATURE_ZEN1) || cpu_feature_enabled(X86_FEATURE_ZEN2)) {
- if (c->x86_model > 0x60 && c->x86_model < 0xaf)
+ if (cpu_feature_enabled(X86_FEATURE_ZEN2)) {
+ switch (c->x86_model) {
+ case 0x60 ... 0x6F:
+ case 0x80 ... 0xAF:
warn = true;
- } else if (cpu_feature_enabled(X86_FEATURE_ZEN3) || cpu_feature_enabled(X86_FEATURE_ZEN4)) {
- if ((c->x86_model > 0x10 && c->x86_model < 0x1F) ||
- (c->x86_model > 0x40 && c->x86_model < 0xaf))
+ break;
+ }
+ } else if (cpu_feature_enabled(X86_FEATURE_ZEN3) ||
+ cpu_feature_enabled(X86_FEATURE_ZEN4)) {
+ switch (c->x86_model) {
+ case 0x10 ... 0x1F:
+ case 0x40 ... 0xAF:
warn = true;
+ break;
+ }
} else if (cpu_feature_enabled(X86_FEATURE_ZEN5)) {
warn = true;
}
diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
index 6fefa4fe80e8..447246bd04be 100644
--- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
+++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
@@ -715,6 +715,10 @@ static int qcuefi_set_reference(struct qcuefi_client *qcuefi)
static struct qcuefi_client *qcuefi_acquire(void)
{
mutex_lock(&__qcuefi_lock);
+ if (!__qcuefi) {
+ mutex_unlock(&__qcuefi_lock);
+ return NULL;
+ }
return __qcuefi;
}
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index 0bd339813110..365ab947983c 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -713,6 +713,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
return -ENODEV;
pctldev = of_pinctrl_get(pctlnp);
+ of_node_put(pctlnp);
if (!pctldev)
return -EPROBE_DEFER;
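
The one-line gpio-rockchip fix above follows the usual OF reference-counting rule: whoever obtains a device_node must drop it with of_node_put() on every path once it is no longer needed. A hypothetical probe fragment showing the shape of that pattern ("vendor,pinctrl" and the helper name are invented; the OF/pinctrl calls are the standard ones):

#include <linux/device.h>
#include <linux/of.h>
#include <linux/pinctrl/pinctrl.h>

static int demo_resolve_pinctrl(struct device *dev, struct pinctrl_dev **out)
{
        struct device_node *pctlnp;
        struct pinctrl_dev *pctldev;

        pctlnp = of_parse_phandle(dev->of_node, "vendor,pinctrl", 0);
        if (!pctlnp)
                return -ENODEV;

        pctldev = of_pinctrl_get(pctlnp);
        of_node_put(pctlnp);    /* drop the reference taken by the lookup */
        if (!pctldev)
                return -EPROBE_DEFER;

        *out = pctldev;
        return 0;
}
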
diff --git a/drivers/gpio/gpio-zynqmp-modepin.c b/drivers/gpio/gpio-zynqmp-modepin.c
index a0d69387c153..2f3c9ebfa78d 100644
--- a/drivers/gpio/gpio-zynqmp-modepin.c
+++ b/drivers/gpio/gpio-zynqmp-modepin.c
@@ -146,6 +146,7 @@ static const struct of_device_id modepin_platform_id[] = {
{ .compatible = "xlnx,zynqmp-gpio-modepin", },
{ }
};
+MODULE_DEVICE_TABLE(of, modepin_platform_id);
static struct platform_driver modepin_platform_driver = {
.driver = {
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 68cc9258ffc4..fa432a1ac9e2 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -128,7 +128,6 @@ obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
drm_kms_helper-y := \
drm_atomic_helper.o \
drm_atomic_state_helper.o \
- drm_bridge_connector.o \
drm_crtc_helper.o \
drm_damage_helper.o \
drm_encoder_slave.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index aad2027e5c7c..0e617dff8765 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -348,6 +348,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
+ /* always clear VRAM */
+ flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
+
/* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index c770cb201e64..1849510a308a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -657,7 +657,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
uint64_t queue_mask = 0;
int r, i, j;
- if (adev->enable_mes)
+ if (adev->mes.enable_legacy_queue_map)
return amdgpu_gfx_mes_enable_kcq(adev, xcc_id);
if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
@@ -719,7 +719,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
amdgpu_device_flush_hdp(adev, NULL);
- if (adev->enable_mes) {
+ if (adev->mes.enable_legacy_queue_map) {
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
j = i + xcc_id * adev->gfx.num_gfx_rings;
r = amdgpu_mes_map_legacy_queue(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 0bc837dab578..bcce1add4ef6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -75,6 +75,7 @@ struct amdgpu_mes {
uint32_t sched_version;
uint32_t kiq_version;
+ bool enable_legacy_queue_map;
uint32_t total_max_queue;
uint32_t max_doorbell_slices;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 2ea8223eb969..8aded0a67037 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -693,6 +693,28 @@ static void mes_v11_0_free_ucode_buffers(struct amdgpu_device *adev,
(void **)&adev->mes.ucode_fw_ptr[pipe]);
}
+static void mes_v11_0_get_fw_version(struct amdgpu_device *adev)
+{
+ int pipe;
+
+ /* get MES scheduler/KIQ versions */
+ mutex_lock(&adev->srbm_mutex);
+
+ for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
+ soc21_grbm_select(adev, 3, pipe, 0, 0);
+
+ if (pipe == AMDGPU_MES_SCHED_PIPE)
+ adev->mes.sched_version =
+ RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+ else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
+ adev->mes.kiq_version =
+ RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+ }
+
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+}
+
static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable)
{
uint64_t ucode_addr;
@@ -1062,18 +1084,6 @@ static int mes_v11_0_queue_init(struct amdgpu_device *adev,
mes_v11_0_queue_init_register(ring);
}
- /* get MES scheduler/KIQ versions */
- mutex_lock(&adev->srbm_mutex);
- soc21_grbm_select(adev, 3, pipe, 0, 0);
-
- if (pipe == AMDGPU_MES_SCHED_PIPE)
- adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
- else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
- adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
-
- soc21_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-
return 0;
}
@@ -1320,15 +1330,24 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
mes_v11_0_enable(adev, true);
+ mes_v11_0_get_fw_version(adev);
+
mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring);
r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
if (r)
goto failure;
- r = mes_v11_0_hw_init(adev);
- if (r)
- goto failure;
+ if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x47)
+ adev->mes.enable_legacy_queue_map = true;
+ else
+ adev->mes.enable_legacy_queue_map = false;
+
+ if (adev->mes.enable_legacy_queue_map) {
+ r = mes_v11_0_hw_init(adev);
+ if (r)
+ goto failure;
+ }
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index e39a58d262c9..a79a8adc3aa5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -1266,6 +1266,7 @@ static int mes_v12_0_sw_init(void *handle)
adev->mes.funcs = &mes_v12_0_funcs;
adev->mes.kiq_hw_init = &mes_v12_0_kiq_hw_init;
adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini;
+ adev->mes.enable_legacy_queue_map = true;
adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
@@ -1422,9 +1423,11 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_KIQ_PIPE);
}
- r = mes_v12_0_hw_init(adev);
- if (r)
- goto failure;
+ if (adev->mes.enable_legacy_queue_map) {
+ r = mes_v12_0_hw_init(adev);
+ if (r)
+ goto failure;
+ }
return r;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 983a977632ff..5942fc4e1c86 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1752,6 +1752,30 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
return bb;
}
+static enum dmub_ips_disable_type dm_get_default_ips_mode(
+ struct amdgpu_device *adev)
+{
+ /*
+ * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to
+ * cause a hard hang. A fix exists for newer PMFW.
+ *
+ * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest
+ * IPS state in all cases, except for s0ix and all displays off (DPMS),
+ * where IPS2 is allowed.
+ *
+ * When checking pmfw version, use the major and minor only.
+ */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(3, 5, 0) &&
+ (adev->pm.fw_version & 0x00FFFF00) < 0x005D6300)
+ return DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0))
+ return DMUB_IPS_ENABLE;
+
+ /* ASICs older than DCN35 do not have IPSs */
+ return DMUB_IPS_DISABLE_ALL;
+}
+
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
@@ -1863,7 +1887,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
else
- init_data.flags.disable_ips = DMUB_IPS_ENABLE;
+ init_data.flags.disable_ips = dm_get_default_ips_mode(adev);
init_data.flags.disable_ips_in_vpb = 0;
@@ -4492,7 +4516,7 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
struct amdgpu_dm_backlight_caps caps;
struct dc_link *link;
u32 brightness;
- bool rc;
+ bool rc, reallow_idle = false;
amdgpu_dm_update_backlight_caps(dm, bl_idx);
caps = dm->backlight_caps[bl_idx];
@@ -4505,6 +4529,12 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
link = (struct dc_link *)dm->backlight_link[bl_idx];
/* Change brightness based on AUX property */
+ mutex_lock(&dm->dc_lock);
+ if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) {
+ dc_allow_idle_optimizations(dm->dc, false);
+ reallow_idle = true;
+ }
+
if (caps.aux_support) {
rc = dc_link_set_backlight_level_nits(link, true, brightness,
AUX_BL_DEFAULT_TRANSITION_TIME_MS);
@@ -4516,6 +4546,11 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
}
+ if (dm->dc->caps.ips_support && reallow_idle)
+ dc_allow_idle_optimizations(dm->dc, true);
+
+ mutex_unlock(&dm->dc_lock);
+
if (rc)
dm->actual_brightness[bl_idx] = user_brightness;
}
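
Regarding the version test in dm_get_default_ips_mode() above: the mask 0x00FFFF00 keeps only the two middle bytes of the packed PMFW version so the comparison ignores the other fields. A small standalone sketch, assuming for illustration that those two bytes are the major and minor numbers (the exact field layout is an assumption here, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

/* Assumed packing for illustration only: byte2 = major, byte1 = minor. */
static int pmfw_older_than(uint32_t fw_version, uint32_t major, uint32_t minor)
{
        /* keep only the major and minor bytes, drop the other fields */
        uint32_t have = fw_version & 0x00FFFF00u;
        uint32_t want = (major << 16) | (minor << 8);

        return have < want;
}

int main(void)
{
        printf("%d\n", pmfw_older_than(0x045D4200u, 0x5D, 0x63)); /* 1: needs workaround */
        printf("%d\n", pmfw_older_than(0x045D6400u, 0x5D, 0x63)); /* 0: fixed PMFW */
        return 0;
}
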
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index 603036df68ba..6547cc2c2a77 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -811,7 +811,8 @@ static void build_synchronized_timing_groups(
for (j = i + 1; j < display_config->display_config.num_streams; j++) {
if (memcmp(master_timing,
&display_config->display_config.stream_descriptors[j].timing,
- sizeof(struct dml2_timing_cfg)) == 0) {
+ sizeof(struct dml2_timing_cfg)) == 0 &&
+ display_config->display_config.stream_descriptors[i].output.output_encoder == display_config->display_config.stream_descriptors[j].output.output_encoder) {
set_bit_in_bitfield(&pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], j);
set_bit_in_bitfield(&stream_mapped_mask, j);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 74e35f8ddefc..2cf951184561 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -2266,7 +2266,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
smu_dpm_ctx->dpm_level = level;
}
- if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
index = fls(smu->workload_mask);
index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
workload[0] = smu->workload_setting[index];
@@ -2345,7 +2346,8 @@ static int smu_switch_power_profile(void *handle,
workload[0] = smu->workload_setting[index];
}
- if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
smu_bump_power_profile_mode(smu, workload, 0);
return 0;
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index fe46b0ebefea..e5eb5d672bcd 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -160,6 +160,7 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
struct drm_plane *plane;
struct list_head zorder_list;
int order = 0, err;
+ u32 slave_zpos = 0;
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
crtc->base.id, crtc->name);
@@ -199,10 +200,13 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
plane_st->zpos, plane_st->normalized_zpos);
/* calculate max slave zorder */
- if (has_bit(drm_plane_index(plane), kcrtc->slave_planes))
+ if (has_bit(drm_plane_index(plane), kcrtc->slave_planes)) {
+ slave_zpos = plane_st->normalized_zpos;
+ if (to_kplane_st(plane_st)->layer_split)
+ slave_zpos++;
kcrtc_st->max_slave_zorder =
- max(plane_st->normalized_zpos,
- kcrtc_st->max_slave_zorder);
+ max(slave_zpos, kcrtc_st->max_slave_zorder);
+ }
}
crtc_st->zpos_changed = true;
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index c621be1a99a8..3eb955333c80 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -390,6 +390,7 @@ config DRM_TI_SN65DSI86
depends on OF
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_KMS_HELPER
select REGMAP_I2C
select DRM_PANEL
diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig
index 479e62690d75..3b824e01c9b5 100644
--- a/drivers/gpu/drm/display/Kconfig
+++ b/drivers/gpu/drm/display/Kconfig
@@ -1,19 +1,26 @@
# SPDX-License-Identifier: MIT
+config DRM_DISPLAY_DP_AUX_BUS
+ tristate
+ depends on DRM
+ depends on OF || COMPILE_TEST
+
config DRM_DISPLAY_HELPER
tristate
depends on DRM
help
DRM helpers for display adapters.
-config DRM_DISPLAY_DP_AUX_BUS
- tristate
- depends on DRM
- depends on OF || COMPILE_TEST
+if DRM_DISPLAY_HELPER
+
+config DRM_BRIDGE_CONNECTOR
+ bool
+ select DRM_DISPLAY_HDMI_STATE_HELPER
+ help
+ DRM connector implementation terminating DRM bridge chains.
config DRM_DISPLAY_DP_AUX_CEC
bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
- depends on DRM && DRM_DISPLAY_HELPER
select DRM_DISPLAY_DP_HELPER
select CEC_CORE
help
@@ -25,7 +32,6 @@ config DRM_DISPLAY_DP_AUX_CEC
config DRM_DISPLAY_DP_AUX_CHARDEV
bool "DRM DP AUX Interface"
- depends on DRM && DRM_DISPLAY_HELPER
select DRM_DISPLAY_DP_HELPER
help
Choose this option to enable a /dev/drm_dp_auxN node that allows to
@@ -34,7 +40,6 @@ config DRM_DISPLAY_DP_AUX_CHARDEV
config DRM_DISPLAY_DP_HELPER
bool
- depends on DRM_DISPLAY_HELPER
help
DRM display helpers for DisplayPort.
@@ -61,19 +66,18 @@ config DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
config DRM_DISPLAY_HDCP_HELPER
bool
- depends on DRM_DISPLAY_HELPER
help
DRM display helpers for HDCP.
config DRM_DISPLAY_HDMI_HELPER
bool
- depends on DRM_DISPLAY_HELPER
help
DRM display helpers for HDMI.
config DRM_DISPLAY_HDMI_STATE_HELPER
bool
- depends on DRM_DISPLAY_HELPER
select DRM_DISPLAY_HDMI_HELPER
help
DRM KMS state helpers for HDMI.
+
+endif # DRM_DISPLAY_HELPER
diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile
index 629df2f4d322..fbb9d2b8acd4 100644
--- a/drivers/gpu/drm/display/Makefile
+++ b/drivers/gpu/drm/display/Makefile
@@ -3,6 +3,8 @@
obj-$(CONFIG_DRM_DISPLAY_DP_AUX_BUS) += drm_dp_aux_bus.o
drm_display_helper-y := drm_display_helper_mod.o
+drm_display_helper-$(CONFIG_DRM_BRIDGE_CONNECTOR) += \
+ drm_bridge_connector.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \
drm_dp_dual_mode_helper.o \
drm_dp_helper.o \
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index a4fbf1eb7ac5..3da5b8bf8259 100644
--- a/drivers/gpu/drm/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -216,8 +216,19 @@ static void drm_bridge_connector_debugfs_init(struct drm_connector *connector,
}
}
+static void drm_bridge_connector_reset(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+
+ drm_atomic_helper_connector_reset(connector);
+ if (bridge_connector->bridge_hdmi)
+ __drm_atomic_helper_connector_hdmi_reset(connector,
+ connector->state);
+}
+
static const struct drm_connector_funcs drm_bridge_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
+ .reset = drm_bridge_connector_reset,
.detect = drm_bridge_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
index 7ef5a48c8029..b0602c4f3628 100644
--- a/drivers/gpu/drm/drm_fbdev_dma.c
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -36,20 +36,11 @@ static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
return 0;
}
-FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
- drm_fb_helper_damage_range,
- drm_fb_helper_damage_area);
-
static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_framebuffer *fb = fb_helper->fb;
- struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
- if (!dma->map_noncoherent)
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- return fb_deferred_io_mmap(info, vma);
+ return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
}
static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
@@ -73,10 +64,37 @@ static const struct fb_ops drm_fbdev_dma_fb_ops = {
.owner = THIS_MODULE,
.fb_open = drm_fbdev_dma_fb_open,
.fb_release = drm_fbdev_dma_fb_release,
+ __FB_DEFAULT_DMAMEM_OPS_RDWR,
+ DRM_FB_HELPER_DEFAULT_OPS,
+ __FB_DEFAULT_DMAMEM_OPS_DRAW,
+ .fb_mmap = drm_fbdev_dma_fb_mmap,
+ .fb_destroy = drm_fbdev_dma_fb_destroy,
+};
+
+FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
+ drm_fb_helper_damage_range,
+ drm_fb_helper_damage_area);
+
+static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
+
+ if (!dma->map_noncoherent)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ return fb_deferred_io_mmap(info, vma);
+}
+
+static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_open = drm_fbdev_dma_fb_open,
+ .fb_release = drm_fbdev_dma_fb_release,
__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
DRM_FB_HELPER_DEFAULT_OPS,
__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
- .fb_mmap = drm_fbdev_dma_fb_mmap,
+ .fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
.fb_destroy = drm_fbdev_dma_fb_destroy,
};
@@ -89,6 +107,7 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
+ bool use_deferred_io = false;
struct drm_client_buffer *buffer;
struct drm_gem_dma_object *dma_obj;
struct drm_framebuffer *fb;
@@ -111,6 +130,15 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
fb = buffer->fb;
+ /*
+ * Deferred I/O requires struct page for framebuffer memory,
+ * which is not guaranteed for all DMA ranges. We thus only
+ * install deferred I/O if we have a framebuffer that requires
+ * it.
+ */
+ if (fb->funcs->dirty)
+ use_deferred_io = true;
+
ret = drm_client_buffer_vmap(buffer, &map);
if (ret) {
goto err_drm_client_buffer_delete;
@@ -130,7 +158,10 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
drm_fb_helper_fill_info(info, fb_helper, sizes);
- info->fbops = &drm_fbdev_dma_fb_ops;
+ if (use_deferred_io)
+ info->fbops = &drm_fbdev_dma_deferred_fb_ops;
+ else
+ info->fbops = &drm_fbdev_dma_fb_ops;
/* screen */
info->flags |= FBINFO_VIRTFB; /* system memory */
@@ -144,14 +175,28 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
}
info->fix.smem_len = info->screen_size;
- /* deferred I/O */
- fb_helper->fbdefio.delay = HZ / 20;
- fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+ /*
+ * Only set up deferred I/O if the screen buffer supports
+ * it. If this disagrees with the previous test for ->dirty,
+ * mmap on the /dev/fb file might not work correctly.
+ */
+ if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) {
+ unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
- info->fbdefio = &fb_helper->fbdefio;
- ret = fb_deferred_io_init(info);
- if (ret)
- goto err_drm_fb_helper_release_info;
+ if (drm_WARN_ON(dev, !pfn_to_page(pfn)))
+ use_deferred_io = false;
+ }
+
+ /* deferred I/O */
+ if (use_deferred_io) {
+ fb_helper->fbdefio.delay = HZ / 20;
+ fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+
+ info->fbdefio = &fb_helper->fbdefio;
+ ret = fb_deferred_io_init(info);
+ if (ret)
+ goto err_drm_fb_helper_release_info;
+ }
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c
index 866b3b409c4d..10689480338e 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.c
+++ b/drivers/gpu/drm/i915/display/intel_alpm.c
@@ -228,7 +228,7 @@ bool intel_alpm_compute_params(struct intel_dp *intel_dp,
int tfw_exit_latency = 20; /* eDP spec */
int phy_wake = 4; /* eDP spec */
int preamble = 8; /* eDP spec */
- int precharge = intel_dp_aux_fw_sync_len() - preamble;
+ int precharge = intel_dp_aux_fw_sync_len(intel_dp) - preamble;
u8 max_wake_lines;
io_wake_time = max(precharge, io_buffer_wake_time(crtc_state)) +
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 8713835e2307..f9d3cc3c342b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -1885,6 +1885,10 @@ struct intel_dp {
} alpm_parameters;
u8 alpm_dpcd;
+
+ struct {
+ unsigned long mask;
+ } quirks;
};
enum lspcon_vendor {
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index dc75a929d3ed..ebe7fe5417ae 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -82,6 +82,7 @@
#include "intel_pch_display.h"
#include "intel_pps.h"
#include "intel_psr.h"
+#include "intel_quirks.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
@@ -3952,6 +3953,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector
drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
drm_dp_is_branch(intel_dp->dpcd));
+ intel_init_dpcd_quirks(intel_dp, &intel_dp->desc.ident);
/*
* Read the eDP display control registers.
@@ -4064,6 +4066,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
drm_dp_is_branch(intel_dp->dpcd));
+ intel_init_dpcd_quirks(intel_dp, &intel_dp->desc.ident);
+
intel_dp_update_sink_caps(intel_dp);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index b8a53bb174da..be58185a77c0 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -13,6 +13,7 @@
#include "intel_dp_aux.h"
#include "intel_dp_aux_regs.h"
#include "intel_pps.h"
+#include "intel_quirks.h"
#include "intel_tc.h"
#define AUX_CH_NAME_BUFSIZE 6
@@ -142,16 +143,21 @@ static int intel_dp_aux_sync_len(void)
return precharge + preamble;
}
-int intel_dp_aux_fw_sync_len(void)
+int intel_dp_aux_fw_sync_len(struct intel_dp *intel_dp)
{
+ int precharge = 10; /* 10-16 */
+ int preamble = 8;
+
/*
* We faced some glitches on Dell Precision 5490 MTL laptop with panel:
* "Manufacturer: AUO, Model: 63898" when using HW default 18. Using 20
* is fixing these problems with the panel. It is still within range
- * mentioned in eDP specification.
+ * mentioned in eDP specification. Increasing the Fast Wake sync length
+ * causes problems with other panels, so increase the length as a quirk
+ * only for this specific laptop.
*/
- int precharge = 12; /* 10-16 */
- int preamble = 8;
+ if (intel_has_dpcd_quirk(intel_dp, QUIRK_FW_SYNC_LEN))
+ precharge += 2;
return precharge + preamble;
}
@@ -211,7 +217,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_TIME_OUT_MAX |
DP_AUX_CH_CTL_RECEIVE_ERROR |
DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) |
- DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len()) |
+ DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len(intel_dp)) |
DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len());
if (intel_tc_port_in_tbt_alt_mode(dig_port))
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h
index 76d1f2ed7c2f..593f58fafab7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h
@@ -20,6 +20,6 @@ enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder);
void intel_dp_aux_irq_handler(struct drm_i915_private *i915);
u32 intel_dp_aux_pack(const u8 *src, int src_bytes);
-int intel_dp_aux_fw_sync_len(void);
+int intel_dp_aux_fw_sync_len(struct intel_dp *intel_dp);
#endif /* __INTEL_DP_AUX_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 7602cb30ebf1..e1213f3d93cc 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -326,6 +326,8 @@ static void intel_modeset_update_connector_atomic_state(struct drm_i915_private
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
if (intel_crtc_is_joiner_secondary(crtc_state))
return;
@@ -337,11 +339,30 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
- /* assume 1:1 mapping */
- drm_property_replace_blob(&crtc_state->hw.degamma_lut,
- crtc_state->pre_csc_lut);
- drm_property_replace_blob(&crtc_state->hw.gamma_lut,
- crtc_state->post_csc_lut);
+ if (DISPLAY_INFO(i915)->color.degamma_lut_size) {
+ /* assume 1:1 mapping */
+ drm_property_replace_blob(&crtc_state->hw.degamma_lut,
+ crtc_state->pre_csc_lut);
+ drm_property_replace_blob(&crtc_state->hw.gamma_lut,
+ crtc_state->post_csc_lut);
+ } else {
+ /*
+ * ilk/snb hw may be configured for either pre_csc_lut
+ * or post_csc_lut, but we don't advertise degamma_lut as
+ * being available in the uapi since there is only one
+ * hardware LUT. Always assign the result of the readout
+ * to gamma_lut as that is the only valid source of LUTs
+ * in the uapi.
+ */
+ drm_WARN_ON(&i915->drm, crtc_state->post_csc_lut &&
+ crtc_state->pre_csc_lut);
+
+ drm_property_replace_blob(&crtc_state->hw.degamma_lut,
+ NULL);
+ drm_property_replace_blob(&crtc_state->hw.gamma_lut,
+ crtc_state->post_csc_lut ?:
+ crtc_state->pre_csc_lut);
+ }
drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
crtc_state->hw.degamma_lut);
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 14d5fefc9c5b..dfd8b4960e6d 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -14,6 +14,11 @@ static void intel_set_quirk(struct intel_display *display, enum intel_quirk_id q
display->quirks.mask |= BIT(quirk);
}
+static void intel_set_dpcd_quirk(struct intel_dp *intel_dp, enum intel_quirk_id quirk)
+{
+ intel_dp->quirks.mask |= BIT(quirk);
+}
+
/*
* Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
*/
@@ -65,6 +70,14 @@ static void quirk_no_pps_backlight_power_hook(struct intel_display *display)
drm_info(display->drm, "Applying no pps backlight power quirk\n");
}
+static void quirk_fw_sync_len(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ intel_set_dpcd_quirk(intel_dp, QUIRK_FW_SYNC_LEN);
+ drm_info(display->drm, "Applying Fast Wake sync pulse count quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -72,6 +85,21 @@ struct intel_quirk {
void (*hook)(struct intel_display *display);
};
+struct intel_dpcd_quirk {
+ int device;
+ int subsystem_vendor;
+ int subsystem_device;
+ u8 sink_oui[3];
+ u8 sink_device_id[6];
+ void (*hook)(struct intel_dp *intel_dp);
+};
+
+#define SINK_OUI(first, second, third) { (first), (second), (third) }
+#define SINK_DEVICE_ID(first, second, third, fourth, fifth, sixth) \
+ { (first), (second), (third), (fourth), (fifth), (sixth) }
+
+#define SINK_DEVICE_ID_ANY SINK_DEVICE_ID(0, 0, 0, 0, 0, 0)
+
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
void (*hook)(struct intel_display *display);
@@ -203,6 +231,18 @@ static struct intel_quirk intel_quirks[] = {
{ 0x0f31, 0x103c, 0x220f, quirk_invert_brightness },
};
+static struct intel_dpcd_quirk intel_dpcd_quirks[] = {
+ /* Dell Precision 5490 */
+ {
+ .device = 0x7d55,
+ .subsystem_vendor = 0x1028,
+ .subsystem_device = 0x0cc7,
+ .sink_oui = SINK_OUI(0x38, 0xec, 0x11),
+ .hook = quirk_fw_sync_len,
+ },
+
+};
+
void intel_init_quirks(struct intel_display *display)
{
struct pci_dev *d = to_pci_dev(display->drm->dev);
@@ -224,7 +264,35 @@ void intel_init_quirks(struct intel_display *display)
}
}
+void intel_init_dpcd_quirks(struct intel_dp *intel_dp,
+ const struct drm_dp_dpcd_ident *ident)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct pci_dev *d = to_pci_dev(display->drm->dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_dpcd_quirks); i++) {
+ struct intel_dpcd_quirk *q = &intel_dpcd_quirks[i];
+
+ if (d->device == q->device &&
+ (d->subsystem_vendor == q->subsystem_vendor ||
+ q->subsystem_vendor == PCI_ANY_ID) &&
+ (d->subsystem_device == q->subsystem_device ||
+ q->subsystem_device == PCI_ANY_ID) &&
+ !memcmp(q->sink_oui, ident->oui, sizeof(ident->oui)) &&
+ (!memcmp(q->sink_device_id, ident->device_id,
+ sizeof(ident->device_id)) ||
+ !memchr_inv(q->sink_device_id, 0, sizeof(q->sink_device_id))))
+ q->hook(intel_dp);
+ }
+}
+
bool intel_has_quirk(struct intel_display *display, enum intel_quirk_id quirk)
{
return display->quirks.mask & BIT(quirk);
}
+
+bool intel_has_dpcd_quirk(struct intel_dp *intel_dp, enum intel_quirk_id quirk)
+{
+ return intel_dp->quirks.mask & BIT(quirk);
+}
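
In intel_init_dpcd_quirks() above, !memchr_inv(q->sink_device_id, 0, ...) is how an all-zero SINK_DEVICE_ID_ANY entry matches every sink device ID. A tiny userspace sketch of that idiom; demo_memchr_inv() is a stand-in with the same semantics as the kernel's memchr_inv():

#include <stddef.h>
#include <stdio.h>

/* Returns a pointer to the first byte that differs from 'c', or NULL if
 * every byte equals 'c' -- mirroring memchr_inv(). */
static const void *demo_memchr_inv(const void *p, int c, size_t len)
{
        const unsigned char *s = p;
        size_t i;

        for (i = 0; i < len; i++)
                if (s[i] != (unsigned char)c)
                        return s + i;
        return NULL;
}

int main(void)
{
        unsigned char any_id[6] = { 0 };                 /* SINK_DEVICE_ID_ANY */
        unsigned char some_id[6] = { 1, 2, 3, 4, 5, 6 };

        /* "!memchr_inv(id, 0, len)" reads as "id is all zeroes, match anything" */
        printf("%d\n", !demo_memchr_inv(any_id, 0, sizeof(any_id)));   /* 1 */
        printf("%d\n", !demo_memchr_inv(some_id, 0, sizeof(some_id))); /* 0 */
        return 0;
}
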
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h
index 151c8f4ae576..cafdebda7535 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.h
+++ b/drivers/gpu/drm/i915/display/intel_quirks.h
@@ -9,6 +9,8 @@
#include <linux/types.h>
struct intel_display;
+struct intel_dp;
+struct drm_dp_dpcd_ident;
enum intel_quirk_id {
QUIRK_BACKLIGHT_PRESENT,
@@ -17,9 +19,13 @@ enum intel_quirk_id {
QUIRK_INVERT_BRIGHTNESS,
QUIRK_LVDS_SSC_DISABLE,
QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK,
+ QUIRK_FW_SYNC_LEN,
};
void intel_init_quirks(struct intel_display *display);
+void intel_init_dpcd_quirks(struct intel_dp *intel_dp,
+ const struct drm_dp_dpcd_ident *ident);
bool intel_has_quirk(struct intel_display *display, enum intel_quirk_id quirk);
+bool intel_has_dpcd_quirk(struct intel_dp *intel_dp, enum intel_quirk_id quirk);
#endif /* __INTEL_QUIRKS_H__ */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
index 453d855dd1de..3d3191deb0ab 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
@@ -302,7 +302,7 @@ void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
- if (!intel_uc_fw_is_loadable(&gsc->fw))
+ if (!intel_uc_fw_is_loadable(&gsc->fw) || intel_uc_fw_is_in_error(&gsc->fw))
return;
if (intel_gsc_uc_fw_init_done(gsc))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 9a431726c8d5..ac7b3aad2222 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -258,6 +258,11 @@ static inline bool intel_uc_fw_is_running(struct intel_uc_fw *uc_fw)
return __intel_uc_fw_status(uc_fw) == INTEL_UC_FIRMWARE_RUNNING;
}
+static inline bool intel_uc_fw_is_in_error(struct intel_uc_fw *uc_fw)
+{
+ return intel_uc_fw_status_to_error(__intel_uc_fw_status(uc_fw)) != 0;
+}
+
static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw)
{
return uc_fw->user_overridden;
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 8a9aad523eec..1d4cc91c0e40 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -51,7 +51,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
debug_object_init(fence, &i915_sw_fence_debug_descr);
}
-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
{
debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr);
}
@@ -77,7 +77,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
debug_object_destroy(fence, &i915_sw_fence_debug_descr);
}
-static inline void debug_fence_free(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
{
debug_object_free(fence, &i915_sw_fence_debug_descr);
smp_wmb(); /* flush the change in state before reallocation */
@@ -94,7 +94,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
{
}
-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
{
}
@@ -115,7 +115,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
{
}
-static inline void debug_fence_free(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
{
}
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
index e59517ba039e..97c0f772ed65 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.c
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -114,6 +114,8 @@ struct pvr_vm_gpuva {
struct drm_gpuva base;
};
+#define to_pvr_vm_gpuva(va) container_of_const(va, struct pvr_vm_gpuva, base)
+
enum pvr_vm_bind_type {
PVR_VM_BIND_TYPE_MAP,
PVR_VM_BIND_TYPE_UNMAP,
@@ -386,6 +388,7 @@ pvr_vm_gpuva_unmap(struct drm_gpuva_op *op, void *op_ctx)
drm_gpuva_unmap(&op->unmap);
drm_gpuva_unlink(op->unmap.va);
+ kfree(to_pvr_vm_gpuva(op->unmap.va));
return 0;
}
@@ -433,6 +436,7 @@ pvr_vm_gpuva_remap(struct drm_gpuva_op *op, void *op_ctx)
}
drm_gpuva_unlink(op->remap.unmap->va);
+ kfree(to_pvr_vm_gpuva(op->remap.unmap->va));
return 0;
}
diff --git a/drivers/gpu/drm/imx/dcss/Kconfig b/drivers/gpu/drm/imx/dcss/Kconfig
index 3ffc061d392b..59e3b6a1dff0 100644
--- a/drivers/gpu/drm/imx/dcss/Kconfig
+++ b/drivers/gpu/drm/imx/dcss/Kconfig
@@ -2,6 +2,8 @@ config DRM_IMX_DCSS
tristate "i.MX8MQ DCSS"
select IMX_IRQSTEER
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
depends on DRM && ARCH_MXC && ARM64
diff --git a/drivers/gpu/drm/imx/lcdc/Kconfig b/drivers/gpu/drm/imx/lcdc/Kconfig
index 7e57922bbd9d..9c28bb0f4662 100644
--- a/drivers/gpu/drm/imx/lcdc/Kconfig
+++ b/drivers/gpu/drm/imx/lcdc/Kconfig
@@ -3,5 +3,7 @@ config DRM_IMX_LCDC
depends on DRM && (ARCH_MXC || COMPILE_TEST)
select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
help
Found on i.MX1, i.MX21, i.MX25 and i.MX27.
diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig
index 3db117c5edd9..8cd7b750dffe 100644
--- a/drivers/gpu/drm/ingenic/Kconfig
+++ b/drivers/gpu/drm/ingenic/Kconfig
@@ -8,6 +8,8 @@ config DRM_INGENIC
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_GEM_DMA_HELPER
select REGMAP
select REGMAP_MMIO
diff --git a/drivers/gpu/drm/kmb/Kconfig b/drivers/gpu/drm/kmb/Kconfig
index fd011367db1d..e5ae3ec52392 100644
--- a/drivers/gpu/drm/kmb/Kconfig
+++ b/drivers/gpu/drm/kmb/Kconfig
@@ -3,6 +3,8 @@ config DRM_KMB_DISPLAY
depends on DRM
depends on ARCH_KEEMBAY || COMPILE_TEST
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_GEM_DMA_HELPER
select DRM_MIPI_DSI
help
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index d6449ebae838..417ac8c9af41 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -9,6 +9,8 @@ config DRM_MEDIATEK
depends on MTK_MMSYS
select DRM_GEM_DMA_HELPER if DRM_FBDEV_EMULATION
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_MIPI_DSI
select DRM_PANEL
select MEMORY
diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig
index 615fdd0ce41b..2544756538cc 100644
--- a/drivers/gpu/drm/meson/Kconfig
+++ b/drivers/gpu/drm/meson/Kconfig
@@ -4,6 +4,8 @@ config DRM_MESON
depends on DRM && OF && (ARM || ARM64)
depends on ARCH_MESON || COMPILE_TEST
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_GEM_DMA_HELPER
select DRM_DISPLAY_CONNECTOR
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 26a4c71da63a..90c68106b63b 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -17,6 +17,7 @@ config DRM_MSM
select DRM_DISPLAY_DP_AUX_BUS
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_EXEC
select DRM_KMS_HELPER
select DRM_PANEL
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
index 330d72b1a4af..52412965fac1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
@@ -324,7 +324,7 @@ nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp)
return ret;
/* Verify. */
- err = nvkm_rd32(device, 0x001400 + (0xf * 4)) & 0x0000ffff;
+ err = nvkm_rd32(device, 0x001400 + (0x15 * 4)) & 0x0000ffff;
if (err) {
nvkm_error(subdev, "fwsec-sb: 0x%04x\n", err);
return -EIO;
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 64e440a2649b..fbd9af758581 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -5,6 +5,8 @@ config DRM_OMAP
depends on DRM && OF
depends on ARCH_OMAP2PLUS || (COMPILE_TEST && PAGE_SIZE_LESS_THAN_64KB)
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
select VIDEOMODE_HELPERS
select HDMI
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
index c5d3ead38555..d3baccfe6286 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
@@ -925,7 +925,7 @@ MODULE_DEVICE_TABLE(spi, nv3052c_ids);
static const struct of_device_id nv3052c_of_match[] = {
{ .compatible = "leadtek,ltk035c5444t", .data = &ltk035c5444t_panel_info },
{ .compatible = "fascontek,fs035vg158", .data = &fs035vg158_panel_info },
- { .compatible = "wl-355608-a8", .data = &wl_355608_a8_panel_info },
+ { .compatible = "anbernic,rg35xx-plus-panel", .data = &wl_355608_a8_panel_info },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, nv3052c_of_match);
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index b5e7b919f241..34182f67136c 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -10,6 +10,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_auth.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
@@ -996,6 +997,24 @@ static int panthor_ioctl_group_destroy(struct drm_device *ddev, void *data,
return panthor_group_destroy(pfile, args->group_handle);
}
+static int group_priority_permit(struct drm_file *file,
+ u8 priority)
+{
+ /* Ensure that priority is valid */
+ if (priority > PANTHOR_GROUP_PRIORITY_HIGH)
+ return -EINVAL;
+
+ /* Medium priority and below are always allowed */
+ if (priority <= PANTHOR_GROUP_PRIORITY_MEDIUM)
+ return 0;
+
+ /* Higher priorities require CAP_SYS_NICE or DRM_MASTER */
+ if (capable(CAP_SYS_NICE) || drm_is_current_master(file))
+ return 0;
+
+ return -EACCES;
+}
+
static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
struct drm_file *file)
{
@@ -1011,6 +1030,10 @@ static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
if (ret)
return ret;
+ ret = group_priority_permit(file, args->priority);
+ if (ret)
+ return ret;
+
ret = panthor_group_create(pfile, args, queue_args);
if (ret >= 0) {
args->group_handle = ret;
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index 857f3f11258a..ef232c0c2049 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -1089,6 +1089,12 @@ int panthor_fw_post_reset(struct panthor_device *ptdev)
panthor_fw_stop(ptdev);
ptdev->fw->fast_reset = false;
drm_err(&ptdev->base, "FW fast reset failed, trying a slow reset");
+
+ ret = panthor_vm_flush_all(ptdev->fw->vm);
+ if (ret) {
+ drm_err(&ptdev->base, "FW slow reset failed (couldn't flush FW's AS l2cache)");
+ return ret;
+ }
}
/* Reload all sections, including RO ones. We're not supposed
@@ -1099,7 +1105,7 @@ int panthor_fw_post_reset(struct panthor_device *ptdev)
ret = panthor_fw_start(ptdev);
if (ret) {
- drm_err(&ptdev->base, "FW slow reset failed");
+ drm_err(&ptdev->base, "FW slow reset failed (couldn't start the FW)");
return ret;
}
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index fa0a002b1016..cc6e13a97783 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -576,6 +576,12 @@ static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr,
if (as_nr < 0)
return 0;
+ /*
+ * If the AS number is greater than zero, then we can be sure
+ * the device is up and running, so we don't need to explicitly
+ * power it up
+ */
+
if (op != AS_COMMAND_UNLOCK)
lock_region(ptdev, as_nr, iova, size);
@@ -874,14 +880,23 @@ static int panthor_vm_flush_range(struct panthor_vm *vm, u64 iova, u64 size)
if (!drm_dev_enter(&ptdev->base, &cookie))
return 0;
- /* Flush the PTs only if we're already awake */
- if (pm_runtime_active(ptdev->base.dev))
- ret = mmu_hw_do_operation(vm, iova, size, AS_COMMAND_FLUSH_PT);
+ ret = mmu_hw_do_operation(vm, iova, size, AS_COMMAND_FLUSH_PT);
drm_dev_exit(cookie);
return ret;
}
+/**
+ * panthor_vm_flush_all() - Flush L2 caches for the entirety of a VM's AS
+ * @vm: VM whose cache to flush
+ *
+ * Return: 0 on success, a negative error code if flush failed.
+ */
+int panthor_vm_flush_all(struct panthor_vm *vm)
+{
+ return panthor_vm_flush_range(vm, vm->base.mm_start, vm->base.mm_range);
+}
+
static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size)
{
struct panthor_device *ptdev = vm->ptdev;
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h
index f3c1ed19f973..6788771071e3 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.h
+++ b/drivers/gpu/drm/panthor/panthor_mmu.h
@@ -31,6 +31,7 @@ panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset);
int panthor_vm_active(struct panthor_vm *vm);
void panthor_vm_idle(struct panthor_vm *vm);
int panthor_vm_as(struct panthor_vm *vm);
+int panthor_vm_flush_all(struct panthor_vm *vm);
struct panthor_heap_pool *
panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create);
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 463bcd3cf00f..12b272a912f8 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -3092,7 +3092,7 @@ int panthor_group_create(struct panthor_file *pfile,
if (group_args->pad)
return -EINVAL;
- if (group_args->priority > PANTHOR_CSG_PRIORITY_HIGH)
+ if (group_args->priority >= PANTHOR_CSG_PRIORITY_COUNT)
return -EINVAL;
if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) ||
diff --git a/drivers/gpu/drm/renesas/rcar-du/Kconfig b/drivers/gpu/drm/renesas/rcar-du/Kconfig
index c17e7c50492c..025677fe88d3 100644
--- a/drivers/gpu/drm/renesas/rcar-du/Kconfig
+++ b/drivers/gpu/drm/renesas/rcar-du/Kconfig
@@ -5,6 +5,8 @@ config DRM_RCAR_DU
depends on ARM || ARM64 || COMPILE_TEST
depends on ARCH_RENESAS || COMPILE_TEST
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
help
diff --git a/drivers/gpu/drm/renesas/rz-du/Kconfig b/drivers/gpu/drm/renesas/rz-du/Kconfig
index 5f0db2c5fee6..e1a6dd322caf 100644
--- a/drivers/gpu/drm/renesas/rz-du/Kconfig
+++ b/drivers/gpu/drm/renesas/rz-du/Kconfig
@@ -6,6 +6,8 @@ config DRM_RZG2L_DU
depends on VIDEO_RENESAS_VSP1
select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select VIDEOMODE_HELPERS
help
Choose this option if you have an RZ/G2L alike chipset.
diff --git a/drivers/gpu/drm/renesas/shmobile/Kconfig b/drivers/gpu/drm/renesas/shmobile/Kconfig
index 027220b8fe1c..c329ab8a7a8b 100644
--- a/drivers/gpu/drm/renesas/shmobile/Kconfig
+++ b/drivers/gpu/drm/renesas/shmobile/Kconfig
@@ -5,6 +5,8 @@ config DRM_SHMOBILE
depends on ARCH_RENESAS || ARCH_SHMOBILE || COMPILE_TEST
select BACKLIGHT_CLASS_DEVICE
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
help
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 7df875e38517..23c49e91f1cc 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -86,6 +86,8 @@ config ROCKCHIP_LVDS
bool "Rockchip LVDS support"
depends on DRM_ROCKCHIP
depends on PINCTRL && OF
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
help
Choose this option to enable support for Rockchip LVDS controllers.
Rockchip rk3288 SoC has LVDS TX Controller can be used, and it
@@ -96,6 +98,8 @@ config ROCKCHIP_RGB
bool "Rockchip RGB support"
depends on DRM_ROCKCHIP
depends on PINCTRL
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
help
Choose this option to enable support for Rockchip RGB output.
Some Rockchip CRTCs, like rv1108, can directly output parallel
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 782f51d3044a..e688d8104652 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -8,6 +8,7 @@ config DRM_TEGRA
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_DISPLAY_DP_AUX_BUS
select DRM_KMS_HELPER
select DRM_MIPI_DSI
diff --git a/drivers/gpu/drm/tidss/Kconfig b/drivers/gpu/drm/tidss/Kconfig
index 378600806167..2385c56493b9 100644
--- a/drivers/gpu/drm/tidss/Kconfig
+++ b/drivers/gpu/drm/tidss/Kconfig
@@ -3,6 +3,8 @@ config DRM_TIDSS
depends on DRM && OF
depends on ARM || ARM64 || COMPILE_TEST
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_GEM_DMA_HELPER
help
The TI Keystone family SoCs introduced a new generation of
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
index 0c47661bdc6a..a473aa6697d0 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
@@ -13,7 +13,7 @@ static inline int
snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
int fast_timeout_us, int slow_timeout_ms)
{
- return xe_pcode_write_timeout(__compat_uncore_to_gt(uncore), mbox, val,
+ return xe_pcode_write_timeout(__compat_uncore_to_tile(uncore), mbox, val,
slow_timeout_ms ?: 1);
}
@@ -21,13 +21,13 @@ static inline int
snb_pcode_write(struct intel_uncore *uncore, u32 mbox, u32 val)
{
- return xe_pcode_write(__compat_uncore_to_gt(uncore), mbox, val);
+ return xe_pcode_write(__compat_uncore_to_tile(uncore), mbox, val);
}
static inline int
snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
{
- return xe_pcode_read(__compat_uncore_to_gt(uncore), mbox, val, val1);
+ return xe_pcode_read(__compat_uncore_to_tile(uncore), mbox, val, val1);
}
static inline int
@@ -35,7 +35,7 @@ skl_pcode_request(struct intel_uncore *uncore, u32 mbox,
u32 request, u32 reply_mask, u32 reply,
int timeout_base_ms)
{
- return xe_pcode_request(__compat_uncore_to_gt(uncore), mbox, request, reply_mask, reply,
+ return xe_pcode_request(__compat_uncore_to_tile(uncore), mbox, request, reply_mask, reply,
timeout_base_ms);
}
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
index 083c4da2ea41..eb5b5f0e4bd9 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
@@ -17,6 +17,13 @@ static inline struct xe_gt *__compat_uncore_to_gt(struct intel_uncore *uncore)
return xe_root_mmio_gt(xe);
}
+static inline struct xe_tile *__compat_uncore_to_tile(struct intel_uncore *uncore)
+{
+ struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
+
+ return xe_device_get_root_tile(xe);
+}
+
static inline u32 intel_uncore_read(struct intel_uncore *uncore,
i915_reg_t i915_reg)
{
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index 49de4e4f8a75..c860fda410c8 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -315,8 +315,12 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
* properly.
*/
intel_power_domains_disable(xe);
- if (has_display(xe))
+ intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
+ if (has_display(xe)) {
drm_kms_helper_poll_disable(&xe->drm);
+ if (!runtime)
+ intel_display_driver_disable_user_access(xe);
+ }
if (!runtime)
intel_display_driver_suspend(xe);
@@ -327,12 +331,13 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
intel_hpd_cancel_work(xe);
- intel_encoder_suspend_all(&xe->display);
+ if (!runtime && has_display(xe)) {
+ intel_display_driver_suspend_access(xe);
+ intel_encoder_suspend_all(&xe->display);
+ }
intel_opregion_suspend(xe, s2idle ? PCI_D1 : PCI_D3cold);
- intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
-
intel_dmc_suspend(xe);
}
@@ -370,14 +375,20 @@ void xe_display_pm_resume(struct xe_device *xe, bool runtime)
intel_display_driver_init_hw(xe);
intel_hpd_init(xe);
+ if (!runtime && has_display(xe))
+ intel_display_driver_resume_access(xe);
+
/* MST sideband requires HPD interrupts enabled */
intel_dp_mst_resume(xe);
if (!runtime)
intel_display_driver_resume(xe);
- intel_hpd_poll_disable(xe);
- if (has_display(xe))
+ if (has_display(xe)) {
drm_kms_helper_poll_enable(&xe->drm);
+ if (!runtime)
+ intel_display_driver_enable_user_access(xe);
+ }
+ intel_hpd_poll_disable(xe);
intel_opregion_resume(xe);
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index cbc582bcc90a..9e5fdf96750b 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -203,6 +203,12 @@ struct xe_tile {
} vf;
} sriov;
+ /** @pcode: tile's PCODE */
+ struct {
+ /** @pcode.lock: protecting tile's PCODE mailbox data */
+ struct mutex lock;
+ } pcode;
+
/** @migrate: Migration helper for vram blits and clearing */
struct xe_migrate *migrate;
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index 2a612652bb13..29f96f409391 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -519,10 +519,22 @@ out_bo:
void xe_gsc_load_start(struct xe_gsc *gsc)
{
struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_device *xe = gt_to_xe(gt);
if (!xe_uc_fw_is_loadable(&gsc->fw) || !gsc->q)
return;
+ /*
+ * The GSC HW is only reset by driver FLR or D3cold entry. We don't
+ * support the former at runtime, while the latter is only supported on
+ * DGFX, for which we don't support GSC. Therefore, if GSC failed to
+ * load previously there is no need to try again because the HW is
+ * stuck in the error state.
+ */
+ xe_assert(xe, !IS_DGFX(xe));
+ if (xe_uc_fw_is_in_error_state(&gsc->fw))
+ return;
+
/* GSC FW survives GT reset and D3Hot */
if (gsc_fw_is_loaded(gt)) {
xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index b9bcbbe27705..b8c73f69fbaf 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -47,7 +47,6 @@
#include "xe_migrate.h"
#include "xe_mmio.h"
#include "xe_pat.h"
-#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_mocs.h"
#include "xe_reg_sr.h"
@@ -387,7 +386,6 @@ int xe_gt_init_early(struct xe_gt *gt)
xe_tuning_process_gt(gt);
xe_force_wake_init_gt(gt, gt_to_fw(gt));
- xe_pcode_init(gt);
spin_lock_init(&gt->global_invl_lock);
return 0;
@@ -755,12 +753,13 @@ static int gt_reset(struct xe_gt *gt)
xe_gt_info(gt, "reset started\n");
+ xe_pm_runtime_get(gt_to_xe(gt));
+
if (xe_fault_inject_gt_reset()) {
err = -ECANCELED;
goto err_fail;
}
- xe_pm_runtime_get(gt_to_xe(gt));
xe_gt_sanitize(gt);
err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
@@ -795,11 +794,11 @@ err_out:
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
XE_WARN_ON(xe_uc_start(&gt->uc));
- xe_pm_runtime_put(gt_to_xe(gt));
err_fail:
xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
xe_device_declare_wedged(gt_to_xe(gt));
+ xe_pm_runtime_put(gt_to_xe(gt));
return err;
}
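
The gt_reset() hunk above rebalances the runtime-PM reference: the get now happens before the first early exit and the put only after the failure handling, so every return path drops exactly what it took. A small user-space sketch of the same get/early-exit/put pattern, with made-up names standing in for the runtime-PM calls:

#include <stdio.h>

static int usage;                       /* stand-in for a runtime-PM refcount */
static void ref_get(void) { usage++; }
static void ref_put(void) { usage--; }

static int do_reset(int inject_fault)
{
	int err = 0;

	ref_get();                      /* taken before any early exit */

	if (inject_fault) {
		err = -1;
		goto err_fail;
	}

	/* ... actual reset work would run here ... */

	ref_put();
	return 0;

err_fail:
	fprintf(stderr, "reset failed (%d)\n", err);
	ref_put();                      /* still balanced on the failure path */
	return err;
}

int main(void)
{
	do_reset(1);
	printf("refcount after failed reset: %d\n", usage);   /* prints 0 */
	return 0;
}
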
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 38a0d0e178c8..c582541970df 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -310,12 +310,6 @@ struct xe_gt {
/** @eclass: per hardware engine class interface on the GT */
struct xe_hw_engine_class_intf eclass[XE_ENGINE_CLASS_MAX];
- /** @pcode: GT's PCODE */
- struct {
- /** @pcode.lock: protecting GT's PCODE mailbox data */
- struct mutex lock;
- } pcode;
-
/** @sysfs: sysfs' kobj used by xe_gt_sysfs */
struct kobject *sysfs;
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 32e93a8127d4..ccd574e948aa 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -915,7 +915,7 @@ static void pc_init_pcode_freq(struct xe_guc_pc *pc)
u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);
- XE_WARN_ON(xe_pcode_init_min_freq_table(pc_to_gt(pc), min, max));
+ XE_WARN_ON(xe_pcode_init_min_freq_table(gt_to_tile(pc_to_gt(pc)), min, max));
}
static int pc_init_freqs(struct xe_guc_pc *pc)
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 1faeca70900e..98e3ec08279e 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -441,14 +441,14 @@ static int xe_hwmon_pcode_read_i1(struct xe_gt *gt, u32 *uval)
if (gt_to_xe(gt)->info.platform == XE_DG2)
return -ENXIO;
- return xe_pcode_read(gt, PCODE_MBOX(PCODE_POWER_SETUP,
+ return xe_pcode_read(gt_to_tile(gt), PCODE_MBOX(PCODE_POWER_SETUP,
POWER_SETUP_SUBCOMMAND_READ_I1, 0),
uval, NULL);
}
static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
{
- return xe_pcode_write(gt, PCODE_MBOX(PCODE_POWER_SETUP,
+ return xe_pcode_write(gt_to_tile(gt), PCODE_MBOX(PCODE_POWER_SETUP,
POWER_SETUP_SUBCOMMAND_WRITE_I1, 0),
(uval & POWER_SETUP_I1_DATA_MASK));
}
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index 9c4eefdf6642..7397d556996a 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -12,7 +12,6 @@
#include "xe_assert.h"
#include "xe_device.h"
-#include "xe_gt.h"
#include "xe_mmio.h"
#include "xe_pcode_api.h"
@@ -30,7 +29,7 @@
* - PCODE for display operations
*/
-static int pcode_mailbox_status(struct xe_gt *gt)
+static int pcode_mailbox_status(struct xe_tile *tile)
{
u32 err;
static const struct pcode_err_decode err_decode[] = {
@@ -45,9 +44,9 @@ static int pcode_mailbox_status(struct xe_gt *gt)
[PCODE_ERROR_MASK] = {-EPROTO, "Unknown"},
};
- err = xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_ERROR_MASK;
+ err = xe_mmio_read32(tile->primary_gt, PCODE_MAILBOX) & PCODE_ERROR_MASK;
if (err) {
- drm_err(&gt_to_xe(gt)->drm, "PCODE Mailbox failed: %d %s", err,
+ drm_err(&tile_to_xe(tile)->drm, "PCODE Mailbox failed: %d %s", err,
err_decode[err].str ?: "Unknown");
return err_decode[err].errno ?: -EPROTO;
}
@@ -55,84 +54,85 @@ static int pcode_mailbox_status(struct xe_gt *gt)
return 0;
}
-static int __pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
+static int __pcode_mailbox_rw(struct xe_tile *tile, u32 mbox, u32 *data0, u32 *data1,
unsigned int timeout_ms, bool return_data,
bool atomic)
{
+ struct xe_gt *mmio = tile->primary_gt;
int err;
- if (gt_to_xe(gt)->info.skip_pcode)
+ if (tile_to_xe(tile)->info.skip_pcode)
return 0;
- if ((xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_READY) != 0)
+ if ((xe_mmio_read32(mmio, PCODE_MAILBOX) & PCODE_READY) != 0)
return -EAGAIN;
- xe_mmio_write32(gt, PCODE_DATA0, *data0);
- xe_mmio_write32(gt, PCODE_DATA1, data1 ? *data1 : 0);
- xe_mmio_write32(gt, PCODE_MAILBOX, PCODE_READY | mbox);
+ xe_mmio_write32(mmio, PCODE_DATA0, *data0);
+ xe_mmio_write32(mmio, PCODE_DATA1, data1 ? *data1 : 0);
+ xe_mmio_write32(mmio, PCODE_MAILBOX, PCODE_READY | mbox);
- err = xe_mmio_wait32(gt, PCODE_MAILBOX, PCODE_READY, 0,
+ err = xe_mmio_wait32(mmio, PCODE_MAILBOX, PCODE_READY, 0,
timeout_ms * USEC_PER_MSEC, NULL, atomic);
if (err)
return err;
if (return_data) {
- *data0 = xe_mmio_read32(gt, PCODE_DATA0);
+ *data0 = xe_mmio_read32(mmio, PCODE_DATA0);
if (data1)
- *data1 = xe_mmio_read32(gt, PCODE_DATA1);
+ *data1 = xe_mmio_read32(mmio, PCODE_DATA1);
}
- return pcode_mailbox_status(gt);
+ return pcode_mailbox_status(tile);
}
-static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
+static int pcode_mailbox_rw(struct xe_tile *tile, u32 mbox, u32 *data0, u32 *data1,
unsigned int timeout_ms, bool return_data,
bool atomic)
{
- if (gt_to_xe(gt)->info.skip_pcode)
+ if (tile_to_xe(tile)->info.skip_pcode)
return 0;
- lockdep_assert_held(&gt->pcode.lock);
+ lockdep_assert_held(&tile->pcode.lock);
- return __pcode_mailbox_rw(gt, mbox, data0, data1, timeout_ms, return_data, atomic);
+ return __pcode_mailbox_rw(tile, mbox, data0, data1, timeout_ms, return_data, atomic);
}
-int xe_pcode_write_timeout(struct xe_gt *gt, u32 mbox, u32 data, int timeout)
+int xe_pcode_write_timeout(struct xe_tile *tile, u32 mbox, u32 data, int timeout)
{
int err;
- mutex_lock(&gt->pcode.lock);
- err = pcode_mailbox_rw(gt, mbox, &data, NULL, timeout, false, false);
- mutex_unlock(&gt->pcode.lock);
+ mutex_lock(&tile->pcode.lock);
+ err = pcode_mailbox_rw(tile, mbox, &data, NULL, timeout, false, false);
+ mutex_unlock(&tile->pcode.lock);
return err;
}
-int xe_pcode_read(struct xe_gt *gt, u32 mbox, u32 *val, u32 *val1)
+int xe_pcode_read(struct xe_tile *tile, u32 mbox, u32 *val, u32 *val1)
{
int err;
- mutex_lock(&gt->pcode.lock);
- err = pcode_mailbox_rw(gt, mbox, val, val1, 1, true, false);
- mutex_unlock(&gt->pcode.lock);
+ mutex_lock(&tile->pcode.lock);
+ err = pcode_mailbox_rw(tile, mbox, val, val1, 1, true, false);
+ mutex_unlock(&tile->pcode.lock);
return err;
}
-static int pcode_try_request(struct xe_gt *gt, u32 mbox,
+static int pcode_try_request(struct xe_tile *tile, u32 mbox,
u32 request, u32 reply_mask, u32 reply,
u32 *status, bool atomic, int timeout_us, bool locked)
{
int slept, wait = 10;
- xe_gt_assert(gt, timeout_us > 0);
+ xe_tile_assert(tile, timeout_us > 0);
for (slept = 0; slept < timeout_us; slept += wait) {
if (locked)
- *status = pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
+ *status = pcode_mailbox_rw(tile, mbox, &request, NULL, 1, true,
atomic);
else
- *status = __pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
+ *status = __pcode_mailbox_rw(tile, mbox, &request, NULL, 1, true,
atomic);
if ((*status == 0) && ((request & reply_mask) == reply))
return 0;
@@ -149,7 +149,7 @@ static int pcode_try_request(struct xe_gt *gt, u32 mbox,
/**
* xe_pcode_request - send PCODE request until acknowledgment
- * @gt: gt
+ * @tile: tile
* @mbox: PCODE mailbox ID the request is targeted for
* @request: request ID
* @reply_mask: mask used to check for request acknowledgment
@@ -166,17 +166,17 @@ static int pcode_try_request(struct xe_gt *gt, u32 mbox,
* Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
* other error as reported by PCODE.
*/
-int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
- u32 reply_mask, u32 reply, int timeout_base_ms)
+int xe_pcode_request(struct xe_tile *tile, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
{
u32 status;
int ret;
- xe_gt_assert(gt, timeout_base_ms <= 3);
+ xe_tile_assert(tile, timeout_base_ms <= 3);
- mutex_lock(&gt->pcode.lock);
+ mutex_lock(&tile->pcode.lock);
- ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
+ ret = pcode_try_request(tile, mbox, request, reply_mask, reply, &status,
false, timeout_base_ms * 1000, true);
if (!ret)
goto out;
@@ -191,20 +191,20 @@ int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
* requests, and for any quirks of the PCODE firmware that delays
* the request completion.
*/
- drm_err(&gt_to_xe(gt)->drm,
+ drm_err(&tile_to_xe(tile)->drm,
"PCODE timeout, retrying with preemption disabled\n");
preempt_disable();
- ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
+ ret = pcode_try_request(tile, mbox, request, reply_mask, reply, &status,
true, 50 * 1000, true);
preempt_enable();
out:
- mutex_unlock(&gt->pcode.lock);
+ mutex_unlock(&tile->pcode.lock);
return status ? status : ret;
}
/**
* xe_pcode_init_min_freq_table - Initialize PCODE's QOS frequency table
- * @gt: gt instance
+ * @tile: tile instance
* @min_gt_freq: Minimal (RPn) GT frequency in units of 50MHz.
* @max_gt_freq: Maximal (RP0) GT frequency in units of 50MHz.
*
@@ -227,30 +227,30 @@ out:
* - -EACCES, "PCODE Rejected"
* - -EPROTO, "Unknown"
*/
-int xe_pcode_init_min_freq_table(struct xe_gt *gt, u32 min_gt_freq,
+int xe_pcode_init_min_freq_table(struct xe_tile *tile, u32 min_gt_freq,
u32 max_gt_freq)
{
int ret;
u32 freq;
- if (!gt_to_xe(gt)->info.has_llc)
+ if (!tile_to_xe(tile)->info.has_llc)
return 0;
if (max_gt_freq <= min_gt_freq)
return -EINVAL;
- mutex_lock(&gt->pcode.lock);
+ mutex_lock(&tile->pcode.lock);
for (freq = min_gt_freq; freq <= max_gt_freq; freq++) {
u32 data = freq << PCODE_FREQ_RING_RATIO_SHIFT | freq;
- ret = pcode_mailbox_rw(gt, PCODE_WRITE_MIN_FREQ_TABLE,
+ ret = pcode_mailbox_rw(tile, PCODE_WRITE_MIN_FREQ_TABLE,
&data, NULL, 1, false, false);
if (ret)
goto unlock;
}
unlock:
- mutex_unlock(&gt->pcode.lock);
+ mutex_unlock(&tile->pcode.lock);
return ret;
}
@@ -270,7 +270,7 @@ unlock:
int xe_pcode_ready(struct xe_device *xe, bool locked)
{
u32 status, request = DGFX_GET_INIT_STATUS;
- struct xe_gt *gt = xe_root_mmio_gt(xe);
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
int timeout_us = 180000000; /* 3 min */
int ret;
@@ -281,15 +281,15 @@ int xe_pcode_ready(struct xe_device *xe, bool locked)
return 0;
if (locked)
- mutex_lock(&gt->pcode.lock);
+ mutex_lock(&tile->pcode.lock);
- ret = pcode_try_request(gt, DGFX_PCODE_STATUS, request,
+ ret = pcode_try_request(tile, DGFX_PCODE_STATUS, request,
DGFX_INIT_STATUS_COMPLETE,
DGFX_INIT_STATUS_COMPLETE,
&status, false, timeout_us, locked);
if (locked)
- mutex_unlock(&gt->pcode.lock);
+ mutex_unlock(&tile->pcode.lock);
if (ret)
drm_err(&xe->drm,
@@ -300,14 +300,14 @@ int xe_pcode_ready(struct xe_device *xe, bool locked)
/**
* xe_pcode_init: initialize components of PCODE
- * @gt: gt instance
+ * @tile: tile instance
*
* This function initializes the xe_pcode component.
* To be called once only during probe.
*/
-void xe_pcode_init(struct xe_gt *gt)
+void xe_pcode_init(struct xe_tile *tile)
{
- drmm_mutex_init(&gt_to_xe(gt)->drm, &gt->pcode.lock);
+ drmm_mutex_init(&tile_to_xe(tile)->drm, &tile->pcode.lock);
}
/**
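
pcode_try_request() above is a bounded polling loop: resend the mailbox request, check the masked reply, and give up with a timeout once the time budget is spent. A self-contained user-space sketch of that shape, assuming a fixed sleep between attempts (the hunk elides the tail of the loop body); names and timings are illustrative, not the driver's API:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int send_request(unsigned int *reply)
{
	static int attempts;

	*reply = (++attempts >= 3) ? 0x1 : 0x0;   /* "completes" on the 3rd try */
	return 0;
}

static int poll_until(unsigned int reply_mask, unsigned int reply,
		      int timeout_us)
{
	int slept, wait = 10;
	unsigned int status;

	for (slept = 0; slept < timeout_us; slept += wait) {
		int err = send_request(&status);

		if (!err && (status & reply_mask) == reply)
			return 0;
		usleep(wait);             /* back off before retrying */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	printf("poll result: %d\n", poll_until(0x1, 0x1, 1000));   /* prints 0 */
	return 0;
}
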
diff --git a/drivers/gpu/drm/xe/xe_pcode.h b/drivers/gpu/drm/xe/xe_pcode.h
index 3f54c6d2a57d..ba33991d72a7 100644
--- a/drivers/gpu/drm/xe/xe_pcode.h
+++ b/drivers/gpu/drm/xe/xe_pcode.h
@@ -7,21 +7,21 @@
#define _XE_PCODE_H_
#include <linux/types.h>
-struct xe_gt;
+struct xe_tile;
struct xe_device;
-void xe_pcode_init(struct xe_gt *gt);
+void xe_pcode_init(struct xe_tile *tile);
int xe_pcode_probe_early(struct xe_device *xe);
int xe_pcode_ready(struct xe_device *xe, bool locked);
-int xe_pcode_init_min_freq_table(struct xe_gt *gt, u32 min_gt_freq,
+int xe_pcode_init_min_freq_table(struct xe_tile *tile, u32 min_gt_freq,
u32 max_gt_freq);
-int xe_pcode_read(struct xe_gt *gt, u32 mbox, u32 *val, u32 *val1);
-int xe_pcode_write_timeout(struct xe_gt *gt, u32 mbox, u32 val,
+int xe_pcode_read(struct xe_tile *tile, u32 mbox, u32 *val, u32 *val1);
+int xe_pcode_write_timeout(struct xe_tile *tile, u32 mbox, u32 val,
int timeout_ms);
-#define xe_pcode_write(gt, mbox, val) \
- xe_pcode_write_timeout(gt, mbox, val, 1)
+#define xe_pcode_write(tile, mbox, val) \
+ xe_pcode_write_timeout(tile, mbox, val, 1)
-int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
+int xe_pcode_request(struct xe_tile *tile, u32 mbox, u32 request,
u32 reply_mask, u32 reply, int timeout_ms);
#define PCODE_MBOX(mbcmd, param1, param2)\
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index 15ea0a942f67..dda5268507d8 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -9,6 +9,7 @@
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_migrate.h"
+#include "xe_pcode.h"
#include "xe_sa.h"
#include "xe_tile.h"
#include "xe_tile_sysfs.h"
@@ -124,6 +125,8 @@ int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id)
if (IS_ERR(tile->primary_gt))
return PTR_ERR(tile->primary_gt);
+ xe_pcode_init(tile);
+
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.h b/drivers/gpu/drm/xe/xe_uc_fw.h
index c108e9d08e70..6195e353f269 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw.h
@@ -65,7 +65,7 @@ const char *xe_uc_fw_status_repr(enum xe_uc_fw_status status)
return "<invalid>";
}
-static inline int xe_uc_fw_status_to_error(enum xe_uc_fw_status status)
+static inline int xe_uc_fw_status_to_error(const enum xe_uc_fw_status status)
{
switch (status) {
case XE_UC_FIRMWARE_NOT_SUPPORTED:
@@ -108,7 +108,7 @@ static inline const char *xe_uc_fw_type_repr(enum xe_uc_fw_type type)
}
static inline enum xe_uc_fw_status
-__xe_uc_fw_status(struct xe_uc_fw *uc_fw)
+__xe_uc_fw_status(const struct xe_uc_fw *uc_fw)
{
/* shouldn't call this before checking hw/blob availability */
XE_WARN_ON(uc_fw->status == XE_UC_FIRMWARE_UNINITIALIZED);
@@ -156,6 +156,11 @@ static inline bool xe_uc_fw_is_overridden(const struct xe_uc_fw *uc_fw)
return uc_fw->user_overridden;
}
+static inline bool xe_uc_fw_is_in_error_state(const struct xe_uc_fw *uc_fw)
+{
+ return xe_uc_fw_status_to_error(__xe_uc_fw_status(uc_fw)) < 0;
+}
+
static inline void xe_uc_fw_sanitize(struct xe_uc_fw *uc_fw)
{
if (xe_uc_fw_is_loadable(uc_fw))
diff --git a/drivers/gpu/drm/xe/xe_vram_freq.c b/drivers/gpu/drm/xe/xe_vram_freq.c
index 99ff95e408e0..b26e26d73dae 100644
--- a/drivers/gpu/drm/xe/xe_vram_freq.c
+++ b/drivers/gpu/drm/xe/xe_vram_freq.c
@@ -34,7 +34,6 @@ static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct xe_tile *tile = dev_to_tile(dev);
- struct xe_gt *gt = tile->primary_gt;
u32 val, mbox;
int err;
@@ -42,7 +41,7 @@ static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
| REG_FIELD_PREP(PCODE_MB_PARAM1, PCODE_MBOX_FC_SC_READ_FUSED_P0)
| REG_FIELD_PREP(PCODE_MB_PARAM2, PCODE_MBOX_DOMAIN_HBM);
- err = xe_pcode_read(gt, mbox, &val, NULL);
+ err = xe_pcode_read(tile, mbox, &val, NULL);
if (err)
return err;
@@ -57,7 +56,6 @@ static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct xe_tile *tile = dev_to_tile(dev);
- struct xe_gt *gt = tile->primary_gt;
u32 val, mbox;
int err;
@@ -65,7 +63,7 @@ static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
| REG_FIELD_PREP(PCODE_MB_PARAM1, PCODE_MBOX_FC_SC_READ_FUSED_PN)
| REG_FIELD_PREP(PCODE_MB_PARAM2, PCODE_MBOX_DOMAIN_HBM);
- err = xe_pcode_read(gt, mbox, &val, NULL);
+ err = xe_pcode_read(tile, mbox, &val, NULL);
if (err)
return err;
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
index 68ee897de9d7..626e5ac4c33d 100644
--- a/drivers/gpu/drm/xlnx/Kconfig
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -8,6 +8,7 @@ config DRM_ZYNQMP_DPSUB
select DMA_ENGINE
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select GENERIC_PHY
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index e0d676c74f14..36d9ba097ff5 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -342,9 +342,6 @@ int hv_synic_init(unsigned int cpu)
return 0;
}
-/*
- * hv_synic_cleanup - Cleanup routine for hv_synic_init().
- */
void hv_synic_disable_regs(unsigned int cpu)
{
struct hv_per_cpu_context *hv_cpu =
@@ -436,6 +433,9 @@ retry:
return pending;
}
+/*
+ * hv_synic_cleanup - Cleanup routine for hv_synic_init().
+ */
int hv_synic_cleanup(unsigned int cpu)
{
struct vmbus_channel *channel, *sc;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 76ac5185a01a..d2856023d53c 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -380,12 +380,6 @@ void hv_vss_deinit(void);
int hv_vss_pre_suspend(void);
int hv_vss_pre_resume(void);
void hv_vss_onchannelcallback(void *context);
-
-int hv_fcopy_init(struct hv_util_service *srv);
-void hv_fcopy_deinit(void);
-int hv_fcopy_pre_suspend(void);
-int hv_fcopy_pre_resume(void);
-void hv_fcopy_onchannelcallback(void *context);
void vmbus_initiate_unload(bool crash);
static inline void hv_poll_channel(struct vmbus_channel *channel,
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index c857dc3975be..965d2a4efb7e 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1803,12 +1803,12 @@ static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
return attr->mode;
}
-static struct attribute_group vmbus_chan_group = {
+static const struct attribute_group vmbus_chan_group = {
.attrs = vmbus_chan_attrs,
.is_visible = vmbus_chan_attr_is_visible
};
-static struct kobj_type vmbus_chan_ktype = {
+static const struct kobj_type vmbus_chan_ktype = {
.sysfs_ops = &vmbus_chan_sysfs_ops,
.release = vmbus_chan_release,
};
@@ -1952,6 +1952,7 @@ void vmbus_device_unregister(struct hv_device *device_obj)
*/
device_unregister(&device_obj->device);
}
+EXPORT_SYMBOL_GPL(vmbus_device_unregister);
#ifdef CONFIG_ACPI
/*
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 3beed78496c5..108e9ccab1ef 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -147,15 +147,18 @@ struct ad7124_chip_info {
struct ad7124_channel_config {
bool live;
unsigned int cfg_slot;
- enum ad7124_ref_sel refsel;
- bool bipolar;
- bool buf_positive;
- bool buf_negative;
- unsigned int vref_mv;
- unsigned int pga_bits;
- unsigned int odr;
- unsigned int odr_sel_bits;
- unsigned int filter_type;
+ /* Following fields are used to compare equality. */
+ struct_group(config_props,
+ enum ad7124_ref_sel refsel;
+ bool bipolar;
+ bool buf_positive;
+ bool buf_negative;
+ unsigned int vref_mv;
+ unsigned int pga_bits;
+ unsigned int odr;
+ unsigned int odr_sel_bits;
+ unsigned int filter_type;
+ );
};
struct ad7124_channel {
@@ -334,11 +337,12 @@ static struct ad7124_channel_config *ad7124_find_similar_live_cfg(struct ad7124_
ptrdiff_t cmp_size;
int i;
- cmp_size = (u8 *)&cfg->live - (u8 *)cfg;
+ cmp_size = sizeof_field(struct ad7124_channel_config, config_props);
for (i = 0; i < st->num_channels; i++) {
cfg_aux = &st->channels[i].cfg;
- if (cfg_aux->live && !memcmp(cfg, cfg_aux, cmp_size))
+ if (cfg_aux->live &&
+ !memcmp(&cfg->config_props, &cfg_aux->config_props, cmp_size))
return cfg_aux;
}
@@ -764,6 +768,7 @@ static int ad7124_soft_reset(struct ad7124_state *st)
if (ret < 0)
return ret;
+ fsleep(200);
timeout = 100;
do {
ret = ad_sd_read_reg(&st->sd, AD7124_STATUS, 1, &readval);
@@ -839,8 +844,6 @@ static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
st->channels = channels;
device_for_each_child_node_scoped(dev, child) {
- cfg = &st->channels[channel].cfg;
-
ret = fwnode_property_read_u32(child, "reg", &channel);
if (ret)
return ret;
@@ -858,6 +861,7 @@ static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
st->channels[channel].ain = AD7124_CHANNEL_AINP(ain[0]) |
AD7124_CHANNEL_AINM(ain[1]);
+ cfg = &st->channels[channel].cfg;
cfg->bipolar = fwnode_property_read_bool(child, "bipolar");
ret = fwnode_property_read_u32(child, "adi,reference-select", &tmp);
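
The ad7124 hunk replaces the hand-computed comparison size, (u8 *)&cfg->live - (u8 *)cfg, which evaluates to zero here because live is the struct's first member, with struct_group()/sizeof_field(), so only the fields that actually define configuration equality are compared. A plain-C sketch of the idea using an ordinary nested struct instead of the kernel's struct_group() macro (all names below are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct channel_config {
	bool live;                /* bookkeeping, never compared */
	unsigned int cfg_slot;    /* bookkeeping, never compared */
	struct {                  /* only this region defines "sameness" */
		bool bipolar;
		unsigned int vref_mv;
		unsigned int odr;
	} props;
};

static bool same_config(const struct channel_config *a,
			const struct channel_config *b)
{
	return !memcmp(&a->props, &b->props, sizeof(a->props));
}

int main(void)
{
	struct channel_config a, b;

	memset(&a, 0, sizeof(a));     /* zero padding so memcmp is reliable */
	memset(&b, 0, sizeof(b));
	a.live = true;  a.props.bipolar = true; a.props.vref_mv = 2500;
	b.live = false; b.props.bipolar = true; b.props.vref_mv = 2500;

	printf("same config: %d\n", same_config(&a, &b));   /* prints 1 */
	return 0;
}
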
diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c
index 9544bf7142ad..0702ec71aa29 100644
--- a/drivers/iio/adc/ad7173.c
+++ b/drivers/iio/adc/ad7173.c
@@ -302,7 +302,6 @@ static const struct ad7173_device_info ad4114_device_info = {
.num_configs = 8,
.num_voltage_in = 16,
.num_gpios = 4,
- .higher_gpio_bits = true,
.has_vincom_input = true,
.has_temp = true,
.has_input_buf = true,
@@ -320,7 +319,6 @@ static const struct ad7173_device_info ad4115_device_info = {
.num_configs = 8,
.num_voltage_in = 16,
.num_gpios = 4,
- .higher_gpio_bits = true,
.has_vincom_input = true,
.has_temp = true,
.has_input_buf = true,
@@ -338,7 +336,6 @@ static const struct ad7173_device_info ad4116_device_info = {
.num_configs = 8,
.num_voltage_in = 16,
.num_gpios = 4,
- .higher_gpio_bits = true,
.has_vincom_input = true,
.has_temp = true,
.has_input_buf = true,
@@ -1435,11 +1432,11 @@ static int ad7173_probe(struct spi_device *spi)
}
static const struct of_device_id ad7173_of_match[] = {
- { .compatible = "ad4111", .data = &ad4111_device_info },
- { .compatible = "ad4112", .data = &ad4112_device_info },
- { .compatible = "ad4114", .data = &ad4114_device_info },
- { .compatible = "ad4115", .data = &ad4115_device_info },
- { .compatible = "ad4116", .data = &ad4116_device_info },
+ { .compatible = "adi,ad4111", .data = &ad4111_device_info },
+ { .compatible = "adi,ad4112", .data = &ad4112_device_info },
+ { .compatible = "adi,ad4114", .data = &ad4114_device_info },
+ { .compatible = "adi,ad4115", .data = &ad4115_device_info },
+ { .compatible = "adi,ad4116", .data = &ad4116_device_info },
{ .compatible = "adi,ad7172-2", .data = &ad7172_2_device_info },
{ .compatible = "adi,ad7172-4", .data = &ad7172_4_device_info },
{ .compatible = "adi,ad7173-8", .data = &ad7173_8_device_info },
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index 3a417595294f..c321c6ef48df 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -49,7 +49,7 @@ static const unsigned int ad7616_oversampling_avail[8] = {
1, 2, 4, 8, 16, 32, 64, 128,
};
-static int ad7606_reset(struct ad7606_state *st)
+int ad7606_reset(struct ad7606_state *st)
{
if (st->gpio_reset) {
gpiod_set_value(st->gpio_reset, 1);
@@ -60,6 +60,7 @@ static int ad7606_reset(struct ad7606_state *st)
return -ENODEV;
}
+EXPORT_SYMBOL_NS_GPL(ad7606_reset, IIO_AD7606);
static int ad7606_reg_access(struct iio_dev *indio_dev,
unsigned int reg,
@@ -88,31 +89,6 @@ static int ad7606_read_samples(struct ad7606_state *st)
{
unsigned int num = st->chip_info->num_channels - 1;
u16 *data = st->data;
- int ret;
-
- /*
- * The frstdata signal is set to high while and after reading the sample
- * of the first channel and low for all other channels. This can be used
- * to check that the incoming data is correctly aligned. During normal
- * operation the data should never become unaligned, but some glitch or
- * electrostatic discharge might cause an extra read or clock cycle.
- * Monitoring the frstdata signal allows to recover from such failure
- * situations.
- */
-
- if (st->gpio_frstdata) {
- ret = st->bops->read_block(st->dev, 1, data);
- if (ret)
- return ret;
-
- if (!gpiod_get_value(st->gpio_frstdata)) {
- ad7606_reset(st);
- return -EIO;
- }
-
- data++;
- num--;
- }
return st->bops->read_block(st->dev, num, data);
}
diff --git a/drivers/iio/adc/ad7606.h b/drivers/iio/adc/ad7606.h
index 0c6a88cc4695..6649e84d25de 100644
--- a/drivers/iio/adc/ad7606.h
+++ b/drivers/iio/adc/ad7606.h
@@ -151,6 +151,8 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
const char *name, unsigned int id,
const struct ad7606_bus_ops *bops);
+int ad7606_reset(struct ad7606_state *st);
+
enum ad7606_supported_device_ids {
ID_AD7605_4,
ID_AD7606_8,
diff --git a/drivers/iio/adc/ad7606_par.c b/drivers/iio/adc/ad7606_par.c
index d8408052262e..6bc587b20f05 100644
--- a/drivers/iio/adc/ad7606_par.c
+++ b/drivers/iio/adc/ad7606_par.c
@@ -7,6 +7,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/err.h>
@@ -21,8 +22,29 @@ static int ad7606_par16_read_block(struct device *dev,
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7606_state *st = iio_priv(indio_dev);
- insw((unsigned long)st->base_address, buf, count);
+ /*
+ * On the parallel interface, the frstdata signal is set to high while
+ * and after reading the sample of the first channel and low for all
+ * other channels. This can be used to check that the incoming data is
+ * correctly aligned. During normal operation the data should never
+ * become unaligned, but some glitch or electrostatic discharge might
+ * cause an extra read or clock cycle. Monitoring the frstdata signal
+ * allows recovery from such failure situations.
+ */
+ int num = count;
+ u16 *_buf = buf;
+
+ if (st->gpio_frstdata) {
+ insw((unsigned long)st->base_address, _buf, 1);
+ if (!gpiod_get_value(st->gpio_frstdata)) {
+ ad7606_reset(st);
+ return -EIO;
+ }
+ _buf++;
+ num--;
+ }
+ insw((unsigned long)st->base_address, _buf, num);
return 0;
}
@@ -35,8 +57,28 @@ static int ad7606_par8_read_block(struct device *dev,
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7606_state *st = iio_priv(indio_dev);
-
- insb((unsigned long)st->base_address, buf, count * 2);
+ /*
+ * On the parallel interface, the frstdata signal is set to high while
+ * and after reading the sample of the first channel and low for all
+ * other channels. This can be used to check that the incoming data is
+ * correctly aligned. During normal operation the data should never
+ * become unaligned, but some glitch or electrostatic discharge might
+ * cause an extra read or clock cycle. Monitoring the frstdata signal
+ * allows recovery from such failure situations.
+ */
+ int num = count;
+ u16 *_buf = buf;
+
+ if (st->gpio_frstdata) {
+ insb((unsigned long)st->base_address, _buf, 2);
+ if (!gpiod_get_value(st->gpio_frstdata)) {
+ ad7606_reset(st);
+ return -EIO;
+ }
+ _buf++;
+ num--;
+ }
+ insb((unsigned long)st->base_address, _buf, num * 2);
return 0;
}
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 8c062b0d26e3..dcd557e93586 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -569,7 +569,7 @@ EXPORT_SYMBOL_NS_GPL(ad_sd_validate_trigger, IIO_AD_SIGMA_DELTA);
static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_dev)
{
struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
- unsigned long irq_flags = irq_get_trigger_type(sigma_delta->spi->irq);
+ unsigned long irq_flags = irq_get_trigger_type(sigma_delta->irq_line);
int ret;
if (dev != &sigma_delta->spi->dev) {
diff --git a/drivers/iio/adc/ti-ads1119.c b/drivers/iio/adc/ti-ads1119.c
index 630f5d5f9a60..d649980479e4 100644
--- a/drivers/iio/adc/ti-ads1119.c
+++ b/drivers/iio/adc/ti-ads1119.c
@@ -735,7 +735,7 @@ static int ads1119_probe(struct i2c_client *client)
if (client->irq > 0) {
ret = devm_request_threaded_irq(dev, client->irq,
ads1119_irq_handler,
- NULL, IRQF_TRIGGER_FALLING,
+ NULL, IRQF_ONESHOT,
"ads1119", indio_dev);
if (ret)
return dev_err_probe(dev, ret,
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 12aa1412dfa0..426cc614587a 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -237,7 +237,7 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
ret = dma_get_slave_caps(chan, &caps);
if (ret < 0)
- goto err_free;
+ goto err_release;
/* Needs to be aligned to the maximum of the minimums */
if (caps.src_addr_widths)
@@ -263,6 +263,8 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
return &dmaengine_buffer->queue.buffer;
+err_release:
+ dma_release_channel(chan);
err_free:
kfree(dmaengine_buffer);
return ERR_PTR(ret);
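
The dmaengine-buffer fix adds an err_release label so a failed dma_get_slave_caps() releases the DMA channel before freeing the buffer, instead of leaking it. A generic sketch of staged cleanup labels in reverse acquisition order, with stand-in functions rather than the DMA engine API:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for "request a channel" and "query its caps". */
static void *request_channel(void) { return malloc(1); }
static void release_channel(void *ch) { free(ch); }
static int get_caps(void *ch, int fail) { (void)ch; return fail ? -1 : 0; }

static void *alloc_buffer(int fail_caps)
{
	void *buf, *chan;
	int ret;

	buf = malloc(64);
	if (!buf)
		return NULL;

	chan = request_channel();
	if (!chan) {
		ret = -1;
		goto err_free;
	}

	ret = get_caps(chan, fail_caps);
	if (ret < 0)
		goto err_release;        /* the channel must be released too */

	return buf;

err_release:
	release_channel(chan);           /* undo in reverse acquisition order */
err_free:
	free(buf);
	return NULL;
}

int main(void)
{
	printf("buffer: %p\n", alloc_buffer(1));   /* (nil): both resources undone */
	return 0;
}
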
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index 84273660ca2e..3bfeabab0ec4 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -248,12 +248,20 @@ static irqreturn_t inv_mpu6050_interrupt_handle(int irq, void *p)
int result;
switch (st->chip_type) {
+ case INV_MPU6000:
case INV_MPU6050:
+ case INV_MPU9150:
+ /*
+ * WoM is not supported and interrupt status read seems to be broken for
+ * some chips. Since data ready is the only interrupt, bypass interrupt
+ * status read and always assert data ready bit.
+ */
+ wom_bits = 0;
+ int_status = INV_MPU6050_BIT_RAW_DATA_RDY_INT;
+ goto data_ready_interrupt;
case INV_MPU6500:
case INV_MPU6515:
case INV_MPU6880:
- case INV_MPU6000:
- case INV_MPU9150:
case INV_MPU9250:
case INV_MPU9255:
wom_bits = INV_MPU6500_BIT_WOM_INT;
@@ -279,6 +287,7 @@ static irqreturn_t inv_mpu6050_interrupt_handle(int irq, void *p)
}
}
+data_ready_interrupt:
/* handle raw data interrupt */
if (int_status & INV_MPU6050_BIT_RAW_DATA_RDY_INT) {
indio_dev->pollfunc->timestamp = st->it_timestamp;
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 9f484c94bc6e..151099be2863 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -647,17 +647,17 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
break;
case IIO_VAL_INT_PLUS_MICRO:
if (scale_val2 < 0)
- *processed = -raw64 * scale_val;
+ *processed = -raw64 * scale_val * scale;
else
- *processed = raw64 * scale_val;
+ *processed = raw64 * scale_val * scale;
*processed += div_s64(raw64 * (s64)scale_val2 * scale,
1000000LL);
break;
case IIO_VAL_INT_PLUS_NANO:
if (scale_val2 < 0)
- *processed = -raw64 * scale_val;
+ *processed = -raw64 * scale_val * scale;
else
- *processed = raw64 * scale_val;
+ *processed = raw64 * scale_val * scale;
*processed += div_s64(raw64 * (s64)scale_val2 * scale,
1000000000LL);
break;
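
The inkern.c change applies the caller's unit scale to the integer part of an IIO_VAL_INT_PLUS_MICRO/NANO channel scale as well, matching what was already done for the fractional part. A worked example with illustrative numbers (raw = 100, channel scale = 1.5, requested unit scale = 1000), where the correct processed value is 100 * 1.5 * 1000 = 150000:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t raw = 100, scale_val = 1, scale_val2 = 500000;  /* 1.5 in INT_PLUS_MICRO */
	int64_t scale = 1000;                                   /* caller's unit scale */

	int64_t old = raw * scale_val
		    + raw * scale_val2 * scale / 1000000;       /*  50100 (wrong) */
	int64_t fixed = raw * scale_val * scale
		      + raw * scale_val2 * scale / 1000000;     /* 150000 (right) */

	printf("old=%lld fixed=%lld\n", (long long)old, (long long)fixed);
	return 0;
}
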
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 51e6964c1305..acff2f64f251 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2174,6 +2174,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
unsigned int journal_section, journal_entry;
unsigned int journal_read_pos;
+ sector_t recalc_sector;
struct completion read_comp;
bool discard_retried = false;
bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
@@ -2314,6 +2315,7 @@ offload_to_thread:
goto lock_retry;
}
}
+ recalc_sector = le64_to_cpu(ic->sb->recalc_sector);
spin_unlock_irq(&ic->endio_wait.lock);
if (unlikely(journal_read_pos != NOT_FOUND)) {
@@ -2368,7 +2370,7 @@ offload_to_thread:
if (need_sync_io) {
wait_for_completion_io(&read_comp);
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
- dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
+ dio->range.logical_sector + dio->range.n_sectors > recalc_sector)
goto skip_check;
if (ic->mode == 'B') {
if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 339d126414d4..da87abe93daf 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -1910,7 +1910,8 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
&args[0]);
if (err) {
dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
- goto err_invoke;
+ fastrpc_buf_free(buf);
+ return err;
}
/* update the buffer to be able to deallocate the memory on the DSP */
@@ -1948,8 +1949,6 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
err_assign:
fastrpc_req_munmap_impl(fl, buf);
-err_invoke:
- fastrpc_buf_free(buf);
return err;
}
diff --git a/drivers/misc/keba/cp500.c b/drivers/misc/keba/cp500.c
index 9ba46f0f9392..ae0922817881 100644
--- a/drivers/misc/keba/cp500.c
+++ b/drivers/misc/keba/cp500.c
@@ -212,12 +212,12 @@ static ssize_t keep_cfg_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RW(keep_cfg);
-static struct attribute *attrs[] = {
+static struct attribute *cp500_attrs[] = {
&dev_attr_version.attr,
&dev_attr_keep_cfg.attr,
NULL
};
-static const struct attribute_group attrs_group = { .attrs = attrs };
+ATTRIBUTE_GROUPS(cp500);
static void cp500_i2c_release(struct device *dev)
{
@@ -396,20 +396,15 @@ static int cp500_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
pci_set_drvdata(pci_dev, cp500);
- ret = sysfs_create_group(&pci_dev->dev.kobj, &attrs_group);
- if (ret != 0)
- goto out_free_irq;
ret = cp500_enable(cp500);
if (ret != 0)
- goto out_remove_group;
+ goto out_free_irq;
cp500_register_auxiliary_devs(cp500);
return 0;
-out_remove_group:
- sysfs_remove_group(&pci_dev->dev.kobj, &attrs_group);
out_free_irq:
pci_free_irq_vectors(pci_dev);
out_disable:
@@ -427,8 +422,6 @@ static void cp500_remove(struct pci_dev *pci_dev)
cp500_disable(cp500);
- sysfs_remove_group(&pci_dev->dev.kobj, &attrs_group);
-
pci_set_drvdata(pci_dev, 0);
pci_free_irq_vectors(pci_dev);
@@ -450,6 +443,7 @@ static struct pci_driver cp500_driver = {
.id_table = cp500_ids,
.probe = cp500_probe,
.remove = cp500_remove,
+ .dev_groups = cp500_groups,
};
module_pci_driver(cp500_driver);
diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c
index 692daa9eff34..19c9d2cdd277 100644
--- a/drivers/misc/vmw_vmci/vmci_resource.c
+++ b/drivers/misc/vmw_vmci/vmci_resource.c
@@ -144,7 +144,8 @@ void vmci_resource_remove(struct vmci_resource *resource)
spin_lock(&vmci_resource_table.lock);
hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) {
- if (vmci_handle_is_equal(r->handle, resource->handle)) {
+ if (vmci_handle_is_equal(r->handle, resource->handle) &&
+ resource->type == r->type) {
hlist_del_init_rcu(&r->node);
break;
}
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index cca71867bc4a..92905fc46436 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -15,6 +15,19 @@
#include "card.h"
+static const struct mmc_fixup __maybe_unused mmc_sd_fixups[] = {
+ /*
+ * Kingston Canvas Go! Plus microSD cards never finish SD cache flush.
+ * This has so far only been observed on cards from 11/2019, while new
+ * cards from 2023/05 do not exhibit this behavior.
+ */
+ _FIXUP_EXT("SD64G", CID_MANFID_KINGSTON_SD, 0x5449, 2019, 11,
+ 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
+ MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
+
+ END_FIXUP
+};
+
static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
#define INAND_CMD38_ARG_EXT_CSD 113
#define INAND_CMD38_ARG_ERASE 0x00
@@ -54,15 +67,6 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
MMC_QUIRK_BLK_NO_CMD23),
/*
- * Kingston Canvas Go! Plus microSD cards never finish SD cache flush.
- * This has so far only been observed on cards from 11/2019, while new
- * cards from 2023/05 do not exhibit this behavior.
- */
- _FIXUP_EXT("SD64G", CID_MANFID_KINGSTON_SD, 0x5449, 2019, 11,
- 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
- MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
-
- /*
* Some SD cards lockup while using CMD23 multiblock transfers.
*/
MMC_FIXUP("AF SD", CID_MANFID_ATP, CID_OEMID_ANY, add_quirk_sd,
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 1c8148cdda50..ee37ad14e79e 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -26,6 +26,7 @@
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
+#include "quirks.h"
#include "sd.h"
#include "sd_ops.h"
@@ -1475,6 +1476,9 @@ retry:
goto free_card;
}
+ /* Apply quirks prior to card setup */
+ mmc_fixup_device(card, mmc_sd_fixups);
+
err = mmc_sd_setup_card(host, card, oldcard != NULL);
if (err)
goto free_card;
diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
index c14d7251d0bb..a02da26a1efd 100644
--- a/drivers/mmc/host/cqhci-core.c
+++ b/drivers/mmc/host/cqhci-core.c
@@ -617,7 +617,7 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
cqhci_writel(cq_host, 0, CQHCI_CTL);
mmc->cqe_on = true;
pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
- if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT) {
+ if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
pr_err("%s: cqhci: CQE failed to exit halt state\n",
mmc_hostname(mmc));
}
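
The cqhci change is a one-character logic fix: cqhci_readl(...) && CQHCI_HALT is true whenever the register value is non-zero, while cqhci_readl(...) & CQHCI_HALT is true only when the halt bit itself is set. A two-line demonstration in plain C:

#include <stdio.h>

#define CQHCI_HALT (1u << 0)

int main(void)
{
	unsigned int ctl = 0x2;   /* register value with the HALT bit (bit 0) clear */

	/* Logical AND: any non-zero register value makes the test true. */
	printf("ctl && CQHCI_HALT -> %d\n", ctl && CQHCI_HALT);    /* 1 (wrong) */

	/* Bitwise AND: true only when the HALT bit itself is set. */
	printf("ctl & CQHCI_HALT  -> %d\n", !!(ctl & CQHCI_HALT)); /* 0 (right) */
	return 0;
}
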
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index e9f6e4e62290..41e451235f63 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2957,8 +2957,8 @@ static int dw_mci_init_slot(struct dw_mci *host)
if (host->use_dma == TRANS_MODE_IDMAC) {
mmc->max_segs = host->ring_size;
mmc->max_blk_size = 65535;
- mmc->max_seg_size = 0x1000;
- mmc->max_req_size = mmc->max_seg_size * host->ring_size;
+ mmc->max_req_size = DW_MCI_DESC_DATA_LENGTH * host->ring_size;
+ mmc->max_seg_size = mmc->max_req_size;
mmc->max_blk_count = mmc->max_req_size / 512;
} else if (host->use_dma == TRANS_MODE_EDMAC) {
mmc->max_segs = 64;
diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
index 430c1f90037b..37240895ffaa 100644
--- a/drivers/mmc/host/sdhci-of-aspeed.c
+++ b/drivers/mmc/host/sdhci-of-aspeed.c
@@ -510,6 +510,7 @@ static const struct of_device_id aspeed_sdhci_of_match[] = {
{ .compatible = "aspeed,ast2600-sdhci", .data = &ast2600_sdhci_pdata, },
{ }
};
+MODULE_DEVICE_TABLE(of, aspeed_sdhci_of_match);
static struct platform_driver aspeed_sdhci_driver = {
.driver = {
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 73d053501fb1..0102a82e88cc 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1474,10 +1474,13 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
/* Hardware errata - Admin config could not be overwritten if
* config is pending, need reset the TAS module
*/
- val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
- if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
- ret = -EBUSY;
- goto err_reset_tc;
+ val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port);
+ if (val & QSYS_TAG_CONFIG_ENABLE) {
+ val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
+ if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
+ ret = -EBUSY;
+ goto err_reset_tc;
+ }
}
ocelot_rmw_rix(ocelot,
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 0baac25db4f8..9a542e3c9b05 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -158,6 +158,17 @@ config ETHOC
help
Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
+config OA_TC6
+ tristate "OPEN Alliance TC6 10BASE-T1x MAC-PHY support"
+ depends on SPI
+ select PHYLIB
+ help
+ This library implements OPEN Alliance TC6 10BASE-T1x MAC-PHY
+ Serial Interface protocol for supporting 10BASE-T1x MAC-PHYs.
+
+ For implementation details, refer to the documentation in
+ <file:Documentation/networking/oa-tc6-framework.rst>.
+
source "drivers/net/ethernet/packetengines/Kconfig"
source "drivers/net/ethernet/pasemi/Kconfig"
source "drivers/net/ethernet/pensando/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index c03203439c0e..99fa180dedb8 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -105,3 +105,4 @@ obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/
+obj-$(CONFIG_OA_TC6) += oa_tc6.o
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 6de0d590be34..9d9fa6559354 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -7,6 +7,21 @@
#define ENA_ADMIN_RSS_KEY_PARTS 10
+#define ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK 0x3F
+#define ENA_ADMIN_CUSTOMER_METRICS_MIN_SUPPORT_MASK 0x1F
+
+ /* customer metrics - in correlation with
+ * ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK
+ */
+enum ena_admin_customer_metrics_id {
+ ENA_ADMIN_BW_IN_ALLOWANCE_EXCEEDED = 0,
+ ENA_ADMIN_BW_OUT_ALLOWANCE_EXCEEDED = 1,
+ ENA_ADMIN_PPS_ALLOWANCE_EXCEEDED = 2,
+ ENA_ADMIN_CONNTRACK_ALLOWANCE_EXCEEDED = 3,
+ ENA_ADMIN_LINKLOCAL_ALLOWANCE_EXCEEDED = 4,
+ ENA_ADMIN_CONNTRACK_ALLOWANCE_AVAILABLE = 5,
+};
+
enum ena_admin_aq_opcode {
ENA_ADMIN_CREATE_SQ = 1,
ENA_ADMIN_DESTROY_SQ = 2,
@@ -51,6 +66,9 @@ enum ena_admin_aq_feature_id {
/* device capabilities */
enum ena_admin_aq_caps_id {
ENA_ADMIN_ENI_STATS = 0,
+ /* ENA SRD customer metrics */
+ ENA_ADMIN_ENA_SRD_INFO = 1,
+ ENA_ADMIN_CUSTOMER_METRICS = 2,
};
enum ena_admin_placement_policy_type {
@@ -99,6 +117,9 @@ enum ena_admin_get_stats_type {
ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
/* extra HW stats for specific network interface */
ENA_ADMIN_GET_STATS_TYPE_ENI = 2,
+ /* extra HW stats for ENA SRD */
+ ENA_ADMIN_GET_STATS_TYPE_ENA_SRD = 3,
+ ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS = 4,
};
enum ena_admin_get_stats_scope {
@@ -106,6 +127,16 @@ enum ena_admin_get_stats_scope {
ENA_ADMIN_ETH_TRAFFIC = 1,
};
+/* ENA SRD configuration for ENI */
+enum ena_admin_ena_srd_flags {
+ /* Feature enabled */
+ ENA_ADMIN_ENA_SRD_ENABLED = BIT(0),
+ /* UDP support enabled */
+ ENA_ADMIN_ENA_SRD_UDP_ENABLED = BIT(1),
+ /* Bypass Rx UDP ordering */
+ ENA_ADMIN_ENA_SRD_UDP_ORDERING_BYPASS_ENABLED = BIT(2),
+};
+
struct ena_admin_aq_common_desc {
/* 11:0 : command_id
* 15:12 : reserved12
@@ -363,6 +394,9 @@ struct ena_admin_aq_get_stats_cmd {
* stats of other device
*/
u16 device_id;
+
+ /* a bitmap representing the requested metric values */
+ u64 requested_metrics;
};
/* Basic Statistics Command. */
@@ -419,6 +453,40 @@ struct ena_admin_eni_stats {
u64 linklocal_allowance_exceeded;
};
+struct ena_admin_ena_srd_stats {
+ /* Number of packets transmitted over ENA SRD */
+ u64 ena_srd_tx_pkts;
+
+ /* Number of packets transmitted or could have been
+ * transmitted over ENA SRD
+ */
+ u64 ena_srd_eligible_tx_pkts;
+
+ /* Number of packets received over ENA SRD */
+ u64 ena_srd_rx_pkts;
+
+ /* Percentage of the ENA SRD resources that is in use */
+ u64 ena_srd_resource_utilization;
+};
+
+/* ENA SRD Statistics Command */
+struct ena_admin_ena_srd_info {
+ /* ENA SRD configuration bitmap. See ena_admin_ena_srd_flags for
+ * details
+ */
+ u64 flags;
+
+ struct ena_admin_ena_srd_stats ena_srd_stats;
+};
+
+/* Customer Metrics Command. */
+struct ena_admin_customer_metrics {
+ /* A bitmap representing the reported customer metrics according to
+ * the order they are reported
+ */
+ u64 reported_metrics;
+};
+
struct ena_admin_acq_get_stats_resp {
struct ena_admin_acq_common_desc acq_common_desc;
@@ -428,6 +496,10 @@ struct ena_admin_acq_get_stats_resp {
struct ena_admin_basic_stats basic_stats;
struct ena_admin_eni_stats eni_stats;
+
+ struct ena_admin_ena_srd_info ena_srd_info;
+
+ struct ena_admin_customer_metrics customer_metrics;
} u;
};
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 713a595370bf..d958cda9e58b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -1881,6 +1881,56 @@ int ena_com_get_link_params(struct ena_com_dev *ena_dev,
return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
}
+static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
+ struct ena_com_stats_ctx *ctx,
+ enum ena_admin_get_stats_type type)
+{
+ struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
+ struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
+ struct ena_com_admin_queue *admin_queue;
+ int ret;
+
+ admin_queue = &ena_dev->admin_queue;
+
+ get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
+ get_cmd->aq_common_descriptor.flags = 0;
+ get_cmd->type = type;
+
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)get_cmd,
+ sizeof(*get_cmd),
+ (struct ena_admin_acq_entry *)get_resp,
+ sizeof(*get_resp));
+
+ if (unlikely(ret))
+ netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
+
+ return ret;
+}
+
+static void ena_com_set_supported_customer_metrics(struct ena_com_dev *ena_dev)
+{
+ struct ena_customer_metrics *customer_metrics;
+ struct ena_com_stats_ctx ctx;
+ int ret;
+
+ customer_metrics = &ena_dev->customer_metrics;
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
+ customer_metrics->supported_metrics = ENA_ADMIN_CUSTOMER_METRICS_MIN_SUPPORT_MASK;
+ return;
+ }
+
+ memset(&ctx, 0x0, sizeof(ctx));
+ ctx.get_cmd.requested_metrics = ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK;
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
+ if (likely(ret == 0))
+ customer_metrics->supported_metrics =
+ ctx.get_resp.u.customer_metrics.reported_metrics;
+ else
+ netdev_err(ena_dev->net_device,
+ "Failed to query customer metrics support. error: %d\n", ret);
+}
+
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
@@ -1960,6 +2010,8 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
else
return rc;
+ ena_com_set_supported_customer_metrics(ena_dev);
+
return 0;
}
@@ -2104,50 +2156,44 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
return 0;
}
-static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
- struct ena_com_stats_ctx *ctx,
- enum ena_admin_get_stats_type type)
+int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_eni_stats *stats)
{
- struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
- struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
- struct ena_com_admin_queue *admin_queue;
+ struct ena_com_stats_ctx ctx;
int ret;
- admin_queue = &ena_dev->admin_queue;
-
- get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
- get_cmd->aq_common_descriptor.flags = 0;
- get_cmd->type = type;
-
- ret = ena_com_execute_admin_command(admin_queue,
- (struct ena_admin_aq_entry *)get_cmd,
- sizeof(*get_cmd),
- (struct ena_admin_acq_entry *)get_resp,
- sizeof(*get_resp));
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
+ netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
+ ENA_ADMIN_ENI_STATS);
+ return -EOPNOTSUPP;
+ }
- if (unlikely(ret))
- netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
+ memset(&ctx, 0x0, sizeof(ctx));
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
+ if (likely(ret == 0))
+ memcpy(stats, &ctx.get_resp.u.eni_stats,
+ sizeof(ctx.get_resp.u.eni_stats));
return ret;
}
-int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
- struct ena_admin_eni_stats *stats)
+int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev,
+ struct ena_admin_ena_srd_info *info)
{
struct ena_com_stats_ctx ctx;
int ret;
- if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENA_SRD_INFO)) {
netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
- ENA_ADMIN_ENI_STATS);
+ ENA_ADMIN_ENA_SRD_INFO);
return -EOPNOTSUPP;
}
memset(&ctx, 0x0, sizeof(ctx));
- ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENA_SRD);
if (likely(ret == 0))
- memcpy(stats, &ctx.get_resp.u.eni_stats,
- sizeof(ctx.get_resp.u.eni_stats));
+ memcpy(info, &ctx.get_resp.u.ena_srd_info,
+ sizeof(ctx.get_resp.u.ena_srd_info));
return ret;
}
@@ -2167,6 +2213,50 @@ int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
return ret;
}
+int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len)
+{
+ struct ena_admin_aq_get_stats_cmd *get_cmd;
+ struct ena_com_stats_ctx ctx;
+ int ret;
+
+ if (unlikely(len > ena_dev->customer_metrics.buffer_len)) {
+ netdev_err(ena_dev->net_device,
+ "Invalid buffer size %u. The given buffer is too big.\n", len);
+ return -EINVAL;
+ }
+
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_CUSTOMER_METRICS)) {
+ netdev_err(ena_dev->net_device, "Capability %d not supported.\n",
+ ENA_ADMIN_CUSTOMER_METRICS);
+ return -EOPNOTSUPP;
+ }
+
+ if (!ena_dev->customer_metrics.supported_metrics) {
+ netdev_err(ena_dev->net_device, "No supported customer metrics.\n");
+ return -EOPNOTSUPP;
+ }
+
+ get_cmd = &ctx.get_cmd;
+ memset(&ctx, 0x0, sizeof(ctx));
+ ret = ena_com_mem_addr_set(ena_dev,
+ &get_cmd->u.control_buffer.address,
+ ena_dev->customer_metrics.buffer_dma_addr);
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device, "Memory address set failed.\n");
+ return ret;
+ }
+
+ get_cmd->u.control_buffer.length = ena_dev->customer_metrics.buffer_len;
+ get_cmd->requested_metrics = ena_dev->customer_metrics.supported_metrics;
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS);
+ if (likely(ret == 0))
+ memcpy(buffer, ena_dev->customer_metrics.buffer_virt_addr, len);
+ else
+ netdev_err(ena_dev->net_device, "Failed to get customer metrics. error: %d\n", ret);
+
+ return ret;
+}
+
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
{
struct ena_com_admin_queue *admin_queue;
@@ -2706,6 +2796,24 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
return 0;
}
+int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev)
+{
+ struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
+
+ customer_metrics->buffer_len = ENA_CUSTOMER_METRICS_BUFFER_SIZE;
+ customer_metrics->buffer_virt_addr = NULL;
+
+ customer_metrics->buffer_virt_addr =
+ dma_alloc_coherent(ena_dev->dmadev, customer_metrics->buffer_len,
+ &customer_metrics->buffer_dma_addr, GFP_KERNEL);
+ if (!customer_metrics->buffer_virt_addr) {
+ customer_metrics->buffer_len = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
@@ -2728,6 +2836,19 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
}
}
+void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev)
+{
+ struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics;
+
+ if (customer_metrics->buffer_virt_addr) {
+ dma_free_coherent(ena_dev->dmadev, customer_metrics->buffer_len,
+ customer_metrics->buffer_virt_addr,
+ customer_metrics->buffer_dma_addr);
+ customer_metrics->buffer_virt_addr = NULL;
+ customer_metrics->buffer_len = 0;
+ }
+}
+
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 924f03f5a6c7..a372c5e768a7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -42,6 +42,8 @@
#define ADMIN_CQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_acq_entry))
#define ADMIN_AENQ_SIZE(depth) ((depth) * sizeof(struct ena_admin_aenq_entry))
+#define ENA_CUSTOMER_METRICS_BUFFER_SIZE 512
+
/*****************************************************************************/
/*****************************************************************************/
/* ENA adaptive interrupt moderation settings */
@@ -278,6 +280,16 @@ struct ena_rss {
};
+struct ena_customer_metrics {
+ /* in correlation with ENA_ADMIN_CUSTOMER_METRICS_SUPPORT_MASK
+ * and ena_admin_customer_metrics_id
+ */
+ u64 supported_metrics;
+ dma_addr_t buffer_dma_addr;
+ void *buffer_virt_addr;
+ u32 buffer_len;
+};
+
struct ena_host_attribute {
/* Debug area */
u8 *debug_area_virt_addr;
@@ -327,6 +339,8 @@ struct ena_com_dev {
struct ena_intr_moder_entry *intr_moder_tbl;
struct ena_com_llq_info llq_info;
+
+ struct ena_customer_metrics customer_metrics;
};
struct ena_com_dev_get_features_ctx {
@@ -595,6 +609,24 @@ int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
struct ena_admin_eni_stats *stats);
+/* ena_com_get_ena_srd_info - Get ENA SRD network interface statistics
+ * @ena_dev: ENA communication layer struct
+ * @info: ena srd stats and flags
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_ena_srd_info(struct ena_com_dev *ena_dev,
+ struct ena_admin_ena_srd_info *info);
+
+/* ena_com_get_customer_metrics - Get customer metrics for network interface
+ * @ena_dev: ENA communication layer struct
+ * @buffer: buffer for returned customer metrics
+ * @len: size of the buffer
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_customer_metrics(struct ena_com_dev *ena_dev, char *buffer, u32 len);
+
/* ena_com_set_dev_mtu - Configure the device mtu.
* @ena_dev: ENA communication layer struct
* @mtu: mtu value
@@ -805,6 +837,13 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev);
int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
u32 debug_area_size);
+/* ena_com_allocate_customer_metrics_buffer - Allocate customer metrics resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev);
+
/* ena_com_delete_debug_area - Free the debug area resources.
* @ena_dev: ENA communication layer struct
*
@@ -819,6 +858,13 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);
*/
void ena_com_delete_host_info(struct ena_com_dev *ena_dev);
+/* ena_com_delete_customer_metrics_buffer - Free the customer metrics resources.
+ * @ena_dev: ENA communication layer struct
+ *
+ * Free the allocated customer metrics area.
+ */
+void ena_com_delete_customer_metrics_buffer(struct ena_com_dev *ena_dev);
+
/* ena_com_set_host_attributes - Update the device with the host
* attributes (debug area and host info) base address.
* @ena_dev: ENA communication layer struct
@@ -975,6 +1021,28 @@ static inline bool ena_com_get_cap(struct ena_com_dev *ena_dev,
return !!(ena_dev->capabilities & BIT(cap_id));
}
+/* ena_com_get_customer_metric_support - query whether device supports a given customer metric.
+ * @ena_dev: ENA communication layer struct
+ * @metric_id: enum value representing the customer metric
+ *
+ * @return - true if customer metric is supported or false otherwise
+ */
+static inline bool ena_com_get_customer_metric_support(struct ena_com_dev *ena_dev,
+ enum ena_admin_customer_metrics_id metric_id)
+{
+ return !!(ena_dev->customer_metrics.supported_metrics & BIT(metric_id));
+}
+
+/* ena_com_get_customer_metric_count - return the number of supported customer metrics.
+ * @ena_dev: ENA communication layer struct
+ *
+ * @return - the number of supported customer metrics
+ */
+static inline int ena_com_get_customer_metric_count(struct ena_com_dev *ena_dev)
+{
+ return hweight64(ena_dev->customer_metrics.supported_metrics);
+}
+
/* ena_com_update_intr_reg - Prepare interrupt register
* @intr_reg: interrupt register to update.
* @rx_delay_interval: Rx interval in usecs
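The two inline helpers above treat supported_metrics as a plain u64 bitmask in which bit i corresponds to metric id i of enum ena_admin_customer_metrics_id: BIT(metric_id) tests a single metric and hweight64() counts all supported ones. A purely illustrative sketch of how a caller might walk that mask:

/* Illustrative only; metric ids are simply bit positions in the u64 mask. */
static void sketch_dump_supported_metrics(struct ena_com_dev *ena_dev)
{
	int i;

	pr_info("device supports %d customer metrics\n",
		ena_com_get_customer_metric_count(ena_dev));

	for (i = 0; i < 64; i++)
		if (ena_com_get_customer_metric_support(ena_dev, i))
			pr_info("customer metric id %d is supported\n", i);
}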
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index b24cc3f05248..60fb35ec4b15 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -14,6 +14,10 @@ struct ena_stats {
int stat_offset;
};
+struct ena_hw_metrics {
+ char name[ETH_GSTRING_LEN];
+};
+
#define ENA_STAT_ENA_COM_ENTRY(stat) { \
.name = #stat, \
.stat_offset = offsetof(struct ena_com_stats_admin, stat) / sizeof(u64) \
@@ -41,6 +45,18 @@ struct ena_stats {
#define ENA_STAT_ENI_ENTRY(stat) \
ENA_STAT_HW_ENTRY(stat, eni_stats)
+#define ENA_STAT_ENA_SRD_ENTRY(stat) \
+ ENA_STAT_HW_ENTRY(stat, ena_srd_stats)
+
+#define ENA_STAT_ENA_SRD_MODE_ENTRY(stat) { \
+ .name = #stat, \
+ .stat_offset = offsetof(struct ena_admin_ena_srd_info, flags) / sizeof(u64) \
+}
+
+#define ENA_METRIC_ENI_ENTRY(stat) { \
+ .name = #stat \
+}
+
static const struct ena_stats ena_stats_global_strings[] = {
ENA_STAT_GLOBAL_ENTRY(tx_timeout),
ENA_STAT_GLOBAL_ENTRY(suspend),
@@ -52,6 +68,9 @@ static const struct ena_stats ena_stats_global_strings[] = {
ENA_STAT_GLOBAL_ENTRY(reset_fail),
};
+/* A partial list of hw stats. Used when the admin command with type
+ * ENA_ADMIN_GET_STATS_TYPE_CUSTOMER_METRICS is not supported.
+ */
static const struct ena_stats ena_stats_eni_strings[] = {
ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded),
ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded),
@@ -60,6 +79,23 @@ static const struct ena_stats ena_stats_eni_strings[] = {
ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded),
};
+static const struct ena_hw_metrics ena_hw_stats_strings[] = {
+ ENA_METRIC_ENI_ENTRY(bw_in_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(bw_out_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(pps_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(conntrack_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(linklocal_allowance_exceeded),
+ ENA_METRIC_ENI_ENTRY(conntrack_allowance_available),
+};
+
+static const struct ena_stats ena_srd_info_strings[] = {
+ ENA_STAT_ENA_SRD_MODE_ENTRY(ena_srd_mode),
+ ENA_STAT_ENA_SRD_ENTRY(ena_srd_tx_pkts),
+ ENA_STAT_ENA_SRD_ENTRY(ena_srd_eligible_tx_pkts),
+ ENA_STAT_ENA_SRD_ENTRY(ena_srd_rx_pkts),
+ ENA_STAT_ENA_SRD_ENTRY(ena_srd_resource_utilization)
+};
+
static const struct ena_stats ena_stats_tx_strings[] = {
ENA_STAT_TX_ENTRY(cnt),
ENA_STAT_TX_ENTRY(bytes),
@@ -112,7 +148,9 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)
-#define ENA_STATS_ARRAY_ENI(adapter) ARRAY_SIZE(ena_stats_eni_strings)
+#define ENA_STATS_ARRAY_ENI ARRAY_SIZE(ena_stats_eni_strings)
+#define ENA_STATS_ARRAY_ENA_SRD ARRAY_SIZE(ena_srd_info_strings)
+#define ENA_METRICS_ARRAY_ENI ARRAY_SIZE(ena_hw_stats_strings)
static void ena_safe_update_stat(u64 *src, u64 *dst,
struct u64_stats_sync *syncp)
@@ -125,6 +163,57 @@ static void ena_safe_update_stat(u64 *src, u64 *dst,
} while (u64_stats_fetch_retry(syncp, start));
}
+static void ena_metrics_stats(struct ena_adapter *adapter, u64 **data)
+{
+ struct ena_com_dev *dev = adapter->ena_dev;
+ const struct ena_stats *ena_stats;
+ u64 *ptr;
+ int i;
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_CUSTOMER_METRICS)) {
+ u32 supported_metrics_count;
+ int len;
+
+ supported_metrics_count = ena_com_get_customer_metric_count(dev);
+ len = supported_metrics_count * sizeof(u64);
+
+ /* Fill the data buffer, and advance its pointer */
+ ena_com_get_customer_metrics(dev, (char *)(*data), len);
+ (*data) += supported_metrics_count;
+
+ } else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)) {
+ ena_com_get_eni_stats(dev, &adapter->eni_stats);
+ /* Update regardless of rc - once we have told ethtool how many stats
+ * we have, it will print that many stats. We can't leave holes in the stats.
+ */
+ for (i = 0; i < ENA_STATS_ARRAY_ENI; i++) {
+ ena_stats = &ena_stats_eni_strings[i];
+
+ ptr = (u64 *)&adapter->eni_stats +
+ ena_stats->stat_offset;
+
+ ena_safe_update_stat(ptr, (*data)++, &adapter->syncp);
+ }
+ }
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO)) {
+ ena_com_get_ena_srd_info(dev, &adapter->ena_srd_info);
+ /* Get ENA SRD mode */
+ ptr = (u64 *)&adapter->ena_srd_info;
+ ena_safe_update_stat(ptr, (*data)++, &adapter->syncp);
+ for (i = 1; i < ENA_STATS_ARRAY_ENA_SRD; i++) {
+ ena_stats = &ena_srd_info_strings[i];
+ /* Wrapped within an outer struct - need to account for the extra
+ * offset of the ENA SRD mode field that was already processed above
+ */
+ ptr = (u64 *)&adapter->ena_srd_info +
+ ena_stats->stat_offset + 1;
+
+ ena_safe_update_stat(ptr, (*data)++, &adapter->syncp);
+ }
+ }
+}
+
static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
{
const struct ena_stats *ena_stats;
@@ -179,7 +268,7 @@ static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data)
static void ena_get_stats(struct ena_adapter *adapter,
u64 *data,
- bool eni_stats_needed)
+ bool hw_stats_needed)
{
const struct ena_stats *ena_stats;
u64 *ptr;
@@ -193,17 +282,8 @@ static void ena_get_stats(struct ena_adapter *adapter,
ena_safe_update_stat(ptr, data++, &adapter->syncp);
}
- if (eni_stats_needed) {
- ena_update_hw_stats(adapter);
- for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
- ena_stats = &ena_stats_eni_strings[i];
-
- ptr = (u64 *)&adapter->eni_stats +
- ena_stats->stat_offset;
-
- ena_safe_update_stat(ptr, data++, &adapter->syncp);
- }
- }
+ if (hw_stats_needed)
+ ena_metrics_stats(adapter, &data);
ena_queue_stats(adapter, &data);
ena_dev_admin_queue_stats(adapter, &data);
@@ -214,9 +294,8 @@ static void ena_get_ethtool_stats(struct net_device *netdev,
u64 *data)
{
struct ena_adapter *adapter = netdev_priv(netdev);
- struct ena_com_dev *dev = adapter->ena_dev;
- ena_get_stats(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS));
+ ena_get_stats(adapter, data, true);
}
static int ena_get_sw_stats_count(struct ena_adapter *adapter)
@@ -228,9 +307,17 @@ static int ena_get_sw_stats_count(struct ena_adapter *adapter)
static int ena_get_hw_stats_count(struct ena_adapter *adapter)
{
- bool supported = ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENI_STATS);
+ struct ena_com_dev *dev = adapter->ena_dev;
+ int count;
+
+ count = ENA_STATS_ARRAY_ENA_SRD * ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO);
- return ENA_STATS_ARRAY_ENI(adapter) * supported;
+ if (ena_com_get_cap(dev, ENA_ADMIN_CUSTOMER_METRICS))
+ count += ena_com_get_customer_metric_count(dev);
+ else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS))
+ count += ENA_STATS_ARRAY_ENI;
+
+ return count;
}
int ena_get_sset_count(struct net_device *netdev, int sset)
@@ -246,6 +333,35 @@ int ena_get_sset_count(struct net_device *netdev, int sset)
return -EOPNOTSUPP;
}
+static void ena_metrics_stats_strings(struct ena_adapter *adapter, u8 **data)
+{
+ struct ena_com_dev *dev = adapter->ena_dev;
+ const struct ena_hw_metrics *ena_metrics;
+ const struct ena_stats *ena_stats;
+ int i;
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_CUSTOMER_METRICS)) {
+ for (i = 0; i < ENA_METRICS_ARRAY_ENI; i++) {
+ if (ena_com_get_customer_metric_support(dev, i)) {
+ ena_metrics = &ena_hw_stats_strings[i];
+ ethtool_puts(data, ena_metrics->name);
+ }
+ }
+ } else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS)) {
+ for (i = 0; i < ENA_STATS_ARRAY_ENI; i++) {
+ ena_stats = &ena_stats_eni_strings[i];
+ ethtool_puts(data, ena_stats->name);
+ }
+ }
+
+ if (ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO)) {
+ for (i = 0; i < ENA_STATS_ARRAY_ENA_SRD; i++) {
+ ena_stats = &ena_srd_info_strings[i];
+ ethtool_puts(data, ena_stats->name);
+ }
+ }
+}
+
static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
{
const struct ena_stats *ena_stats;
@@ -291,7 +407,7 @@ static void ena_com_dev_strings(u8 **data)
static void ena_get_strings(struct ena_adapter *adapter,
u8 *data,
- bool eni_stats_needed)
+ bool hw_stats_needed)
{
const struct ena_stats *ena_stats;
int i;
@@ -301,12 +417,8 @@ static void ena_get_strings(struct ena_adapter *adapter,
ethtool_puts(&data, ena_stats->name);
}
- if (eni_stats_needed) {
- for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
- ena_stats = &ena_stats_eni_strings[i];
- ethtool_puts(&data, ena_stats->name);
- }
- }
+ if (hw_stats_needed)
+ ena_metrics_stats_strings(adapter, &data);
ena_queue_strings(adapter, &data);
ena_com_dev_strings(&data);
@@ -317,11 +429,10 @@ static void ena_get_ethtool_strings(struct net_device *netdev,
u8 *data)
{
struct ena_adapter *adapter = netdev_priv(netdev);
- struct ena_com_dev *dev = adapter->ena_dev;
switch (sset) {
case ETH_SS_STATS:
- ena_get_strings(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS));
+ ena_get_strings(adapter, data, true);
break;
}
}
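ethtool requires that the count returned for ETH_SS_STATS, the strings emitted by get_strings() and the values emitted by get_ethtool_stats() agree exactly, which is why ena_get_hw_stats_count(), ena_metrics_stats_strings() and ena_metrics_stats() all branch on the same capability bits in the same order: customer metrics are preferred, the legacy ENI subset is the fallback, and the SRD block is appended independently. The condensed counting rule, restated here only for clarity:

/* Restatement of the hw-stats sizing rule used by the three callbacks above. */
static int sketch_hw_stats_count(struct ena_com_dev *dev)
{
	int count = 0;

	if (ena_com_get_cap(dev, ENA_ADMIN_CUSTOMER_METRICS))
		count += ena_com_get_customer_metric_count(dev);
	else if (ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS))
		count += ENA_STATS_ARRAY_ENI;

	if (ena_com_get_cap(dev, ENA_ADMIN_ENA_SRD_INFO))
		count += ENA_STATS_ARRAY_ENA_SRD;

	return count;
}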
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 184b6e6cbed4..c5b50cfa935a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2798,19 +2798,6 @@ err:
ena_com_delete_debug_area(adapter->ena_dev);
}
-int ena_update_hw_stats(struct ena_adapter *adapter)
-{
- int rc;
-
- rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
- if (rc) {
- netdev_err(adapter->netdev, "Failed to get ENI stats\n");
- return rc;
- }
-
- return 0;
-}
-
static void ena_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -3944,10 +3931,16 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, adapter);
+ rc = ena_com_allocate_customer_metrics_buffer(ena_dev);
+ if (rc) {
+ netdev_err(netdev, "ena_com_allocate_customer_metrics_buffer failed\n");
+ goto err_netdev_destroy;
+ }
+
rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
if (rc) {
dev_err(&pdev->dev, "ENA LLQ bar mapping failed\n");
- goto err_netdev_destroy;
+ goto err_metrics_destroy;
}
rc = ena_device_init(adapter, pdev, &get_feat_ctx, &wd_state);
@@ -3955,7 +3948,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "ENA device init failed\n");
if (rc == -ETIME)
rc = -EPROBE_DEFER;
- goto err_netdev_destroy;
+ goto err_metrics_destroy;
}
/* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
@@ -4076,6 +4069,8 @@ err_worker_destroy:
err_device_destroy:
ena_com_delete_host_info(ena_dev);
ena_com_admin_destroy(ena_dev);
+err_metrics_destroy:
+ ena_com_delete_customer_metrics_buffer(ena_dev);
err_netdev_destroy:
free_netdev(netdev);
err_free_region:
@@ -4139,6 +4134,8 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
ena_com_delete_host_info(ena_dev);
+ ena_com_delete_customer_metrics_buffer(ena_dev);
+
ena_release_bars(ena_dev, pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index d59509747d1a..6e12ae3b12e5 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -373,6 +373,7 @@ struct ena_adapter {
struct u64_stats_sync syncp;
struct ena_stats_dev dev_stats;
struct ena_admin_eni_stats eni_stats;
+ struct ena_admin_ena_srd_info ena_srd_info;
/* last queue index that was checked for uncompleted tx packets */
u32 last_monitored_tx_qid;
@@ -390,7 +391,6 @@ void ena_dump_stats_to_dmesg(struct ena_adapter *adapter);
void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
-int ena_update_hw_stats(struct ena_adapter *adapter);
int ena_update_queue_params(struct ena_adapter *adapter,
u32 new_tx_size,
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 292b1f9cd9e7..785f4b4ff758 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1317,7 +1317,7 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
timer_setup(&bp->tx_timeout, bmac_tx_timeout, 0);
- ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
+ ret = request_irq(dev->irq, bmac_misc_intr, IRQF_NO_AUTOEN, "BMAC-misc", dev);
if (ret) {
printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
goto err_out_iounmap_rx;
@@ -1336,7 +1336,6 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
/* Mask chip interrupts and disable chip, will be
* re-enabled on open()
*/
- disable_irq(dev->irq);
pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
if (register_netdev(dev) != 0) {
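The bmac change above (and the identical enetc change further down) replaces the request_irq() + disable_irq() pair with IRQF_NO_AUTOEN, which registers the handler but leaves the line masked until the driver explicitly enables it, closing the window in which the handler could run before the device is ready. The generic pattern, sketched with placeholder names:

/* Generic sketch; "sketch-dev" and my_handler are placeholders. */
static int sketch_request_quiet_irq(unsigned int irq, irq_handler_t my_handler,
				    void *dev)
{
	int ret;

	/* IRQF_NO_AUTOEN keeps the line disabled until enable_irq() is called */
	ret = request_irq(irq, my_handler, IRQF_NO_AUTOEN, "sketch-dev", dev);
	if (ret)
		return ret;

	enable_irq(irq);	/* a real driver typically does this in open() */
	return 0;
}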
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c9248ed9330c..6e422e24750a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -13803,6 +13803,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
struct bnxt_hw_rings hwr = {0};
int rx_rings = rx;
+ int rc;
if (tcs)
tx_sets = tcs;
@@ -13835,7 +13836,23 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
}
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
hwr.cp_p5 = hwr.tx + rx;
- return bnxt_hwrm_check_rings(bp, &hwr);
+ rc = bnxt_hwrm_check_rings(bp, &hwr);
+ if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
+ if (!bnxt_ulp_registered(bp->edev)) {
+ hwr.cp += bnxt_get_ulp_msix_num(bp);
+ hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
+ }
+ if (hwr.cp > bp->total_irqs) {
+ int total_msix = bnxt_change_msix(bp, hwr.cp);
+
+ if (total_msix < hwr.cp) {
+ netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
+ hwr.cp, total_msix);
+ rc = -ENOSPC;
+ }
+ }
+ }
+ return rc;
}
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 3b805ed433ed..69231e85140b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1217,12 +1217,15 @@ struct bnxt_napi {
bool in_reset;
};
+/* "TxRx", 2 hypens, plus maximum integer */
+#define BNXT_IRQ_NAME_EXTRA 17
+
struct bnxt_irq {
irq_handler_t handler;
unsigned int vector;
u8 requested:1;
u8 have_cpumask:1;
- char name[IFNAMSIZ + 2];
+ char name[IFNAMSIZ + BNXT_IRQ_NAME_EXTRA];
cpumask_var_t cpu_mask;
};
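The 17 bytes of BNXT_IRQ_NAME_EXTRA budget for the literal "TxRx" (4), two '-' separators (2), up to 10 decimal digits for an int ring index (10) and the terminating NUL (1), on top of the interface name. A sizing sketch follows; the exact format string used by the driver is assumed here:

/* Sizing sketch only; the real bnxt format string may differ slightly. */
static void sketch_irq_name(char *buf, size_t len, const char *ifname, int ring)
{
	/* worst case: IFNAMSIZ-1 + "-TxRx-" + "2147483647" + '\0' */
	snprintf(buf, len, "%s-TxRx-%d", ifname, ring);
}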
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 094e0b5c8521..f71cc8188b4e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -955,11 +955,6 @@ static int bnxt_set_channels(struct net_device *dev,
}
tx_xdp = req_rx_rings;
}
- rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
- if (rc) {
- netdev_warn(dev, "Unable to allocate the requested rings\n");
- return rc;
- }
if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
@@ -968,6 +963,12 @@ static int bnxt_set_channels(struct net_device *dev,
return -EINVAL;
}
+ rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
+ if (rc) {
+ netdev_warn(dev, "Unable to allocate the requested rings\n");
+ return rc;
+ }
+
if (netif_running(dev)) {
if (BNXT_PF(bp)) {
/* TODO CHIMP_FW: Send message to all VF's
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index b9e7d3e7b15d..fdd6356f21ef 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -176,11 +176,17 @@ EXPORT_SYMBOL(bnxt_unregister_dev);
static int bnxt_set_dflt_ulp_msix(struct bnxt *bp)
{
- u32 roce_msix = BNXT_VF(bp) ?
- BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX;
+ int roce_msix = BNXT_MAX_ROCE_MSIX;
- return ((bp->flags & BNXT_FLAG_ROCE_CAP) ?
- min_t(u32, roce_msix, num_online_cpus()) : 0);
+ if (BNXT_VF(bp))
+ roce_msix = BNXT_MAX_ROCE_MSIX_VF;
+ else if (bp->port_partition_type)
+ roce_msix = BNXT_MAX_ROCE_MSIX_NPAR_PF;
+
+ /* NQ MSIX vectors should match the number of CPUs plus 1 more for
+ * the CREQ MSIX, up to the default.
+ */
+ return min_t(int, roce_msix, num_online_cpus() + 1);
}
int bnxt_send_msg(struct bnxt_en_dev *edev,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 4eafe6ec0abf..4f4914f5c84c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -15,8 +15,10 @@
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
-#define BNXT_MAX_ROCE_MSIX 9
-#define BNXT_MAX_VF_ROCE_MSIX 2
+
+#define BNXT_MAX_ROCE_MSIX_VF 2
+#define BNXT_MAX_ROCE_MSIX_NPAR_PF 5
+#define BNXT_MAX_ROCE_MSIX 64
struct hwrm_async_event_cmpl;
struct bnxt;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
index 63b3e02fab16..4968f6f0bdbc 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.h
+++ b/drivers/net/ethernet/faraday/ftgmac100.h
@@ -84,7 +84,7 @@
FTGMAC100_INT_RPKT_BUF)
/* All the interrupts we care about */
-#define FTGMAC100_INT_ALL (FTGMAC100_INT_RPKT_BUF | \
+#define FTGMAC100_INT_ALL (FTGMAC100_INT_RXTX | \
FTGMAC100_INT_BAD)
/*
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 1d0208b5db7d..e15dd3d858df 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2273,12 +2273,12 @@ static netdev_tx_t
dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
const int queue_mapping = skb_get_queue_mapping(skb);
- bool nonlinear = skb_is_nonlinear(skb);
struct rtnl_link_stats64 *percpu_stats;
struct dpaa_percpu_priv *percpu_priv;
struct netdev_queue *txq;
struct dpaa_priv *priv;
struct qm_fd fd;
+ bool nonlinear;
int offset = 0;
int err = 0;
@@ -2288,6 +2288,13 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
qm_fd_clear_fd(&fd);
+ /* Packet data is always read as 32-bit words, so zero out any padding
+ * bytes that might be transmitted if we have to pad the packet
+ */
+ if (__skb_put_padto(skb, ETH_ZLEN, false))
+ goto enomem;
+
+ nonlinear = skb_is_nonlinear(skb);
if (!nonlinear) {
/* We're going to store the skb backpointer at the beginning
* of the data buffer, so we need a privately owned skb
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 5c45f42232d3..f04f42ea60c0 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2305,12 +2305,11 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
priv->ndev->name, i);
- err = request_irq(irq, enetc_msix, 0, v->name, v);
+ err = request_irq(irq, enetc_msix, IRQF_NO_AUTOEN, v->name, v);
if (err) {
dev_err(priv->dev, "request_irq() failed!\n");
goto irq_err;
}
- disable_irq(irq);
v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 2baef59f741d..ecb1703ea150 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -754,6 +754,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
err = of_get_ethdev_address(np, dev);
+ if (err == -EPROBE_DEFER)
+ goto err_grp_init;
if (err) {
eth_hw_addr_random(dev);
dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index b4f6fa4ba13d..3307d551f431 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -33,6 +33,8 @@ ice-y := ice_main.o \
ice_idc.o \
devlink/devlink.o \
devlink/devlink_port.o \
+ ice_sf_eth.o \
+ ice_sf_vsi_vlan_ops.o \
ice_ddp.o \
ice_fw_update.o \
ice_lag.o \
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index 810a901d7afd..415445cefdb2 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -6,9 +6,11 @@
#include "ice.h"
#include "ice_lib.h"
#include "devlink.h"
+#include "devlink_port.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"
#include "ice_dcb_lib.h"
+#include "ice_sf_eth.h"
/* context for devlink info version reporting */
struct ice_info_ctx {
@@ -744,6 +746,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
struct ice_sched_node *tc_node, struct ice_pf *pf)
{
struct devlink_rate *rate_node = NULL;
+ struct ice_dynamic_port *sf;
struct ice_vf *vf;
int i;
@@ -755,6 +758,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
/* create root node */
rate_node = devl_rate_node_create(devlink, node, node->name, NULL);
} else if (node->vsi_handle &&
+ pf->vsi[node->vsi_handle]->type == ICE_VSI_VF &&
pf->vsi[node->vsi_handle]->vf) {
vf = pf->vsi[node->vsi_handle]->vf;
if (!vf->devlink_port.devlink_rate)
@@ -763,6 +767,16 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
*/
devl_rate_leaf_create(&vf->devlink_port, node,
node->parent->rate_node);
+ } else if (node->vsi_handle &&
+ pf->vsi[node->vsi_handle]->type == ICE_VSI_SF &&
+ pf->vsi[node->vsi_handle]->sf) {
+ sf = pf->vsi[node->vsi_handle]->sf;
+ if (!sf->devlink_port.devlink_rate)
+ /* leaf nodes don't have children,
+ * so we don't set rate_node
+ */
+ devl_rate_leaf_create(&sf->devlink_port, node,
+ node->parent->rate_node);
} else if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF &&
node->parent->rate_node) {
rate_node = devl_rate_node_create(devlink, node, node->name,
@@ -1277,8 +1291,12 @@ static const struct devlink_ops ice_devlink_ops = {
.rate_leaf_parent_set = ice_devlink_set_parent,
.rate_node_parent_set = ice_devlink_set_parent,
+
+ .port_new = ice_devlink_port_new,
};
+static const struct devlink_ops ice_sf_devlink_ops;
+
static int
ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
@@ -1562,6 +1580,34 @@ struct ice_pf *ice_allocate_pf(struct device *dev)
}
/**
+ * ice_allocate_sf - Allocate devlink and return SF structure pointer
+ * @dev: the device to allocate for
+ * @pf: pointer to the PF structure
+ *
+ * Allocate a devlink instance for SF.
+ *
+ * Return: ice_sf_priv pointer to allocated memory or ERR_PTR in case of error
+ */
+struct ice_sf_priv *ice_allocate_sf(struct device *dev, struct ice_pf *pf)
+{
+ struct devlink *devlink;
+ int err;
+
+ devlink = devlink_alloc(&ice_sf_devlink_ops, sizeof(struct ice_sf_priv),
+ dev);
+ if (!devlink)
+ return ERR_PTR(-ENOMEM);
+
+ err = devl_nested_devlink_set(priv_to_devlink(pf), devlink);
+ if (err) {
+ devlink_free(devlink);
+ return ERR_PTR(err);
+ }
+
+ return devlink_priv(devlink);
+}
+
+/**
* ice_devlink_register - Register devlink interface for this PF
* @pf: the PF to register the devlink for.
*
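ice_allocate_sf() pairs devlink_alloc() with devl_nested_devlink_set() so the subfunction's devlink instance is reported as nested under the PF's. A hypothetical caller sketch follows; the real consumer is the subfunction auxiliary-device probe in ice_sf_eth.c, which is not part of this hunk, so the function name and flow below are assumptions:

/* Hypothetical caller sketch; not the actual ice_sf_eth.c probe path. */
static int sketch_sf_dev_probe(struct device *dev, struct ice_pf *pf)
{
	struct ice_sf_priv *priv = ice_allocate_sf(dev, pf);

	if (IS_ERR(priv))
		return PTR_ERR(priv);

	/* ... set up ports and netdev, then publish the devlink instance ... */
	devlink_register(priv_to_devlink(priv));
	return 0;
}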
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.h b/drivers/net/ethernet/intel/ice/devlink/devlink.h
index d291c0e2e17b..1af3b0763fbb 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.h
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.h
@@ -5,6 +5,7 @@
#define _ICE_DEVLINK_H_
struct ice_pf *ice_allocate_pf(struct device *dev);
+struct ice_sf_priv *ice_allocate_sf(struct device *dev, struct ice_pf *pf);
void ice_devlink_register(struct ice_pf *pf);
void ice_devlink_unregister(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
index 62ef8e2fb5f1..928c8bdb6649 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
@@ -5,6 +5,9 @@
#include "ice.h"
#include "devlink.h"
+#include "devlink_port.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
static int ice_active_port_option = -1;
@@ -485,3 +488,506 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf)
devl_rate_leaf_destroy(&vf->devlink_port);
devl_port_unregister(&vf->devlink_port);
}
+
+/**
+ * ice_devlink_create_sf_dev_port - Register virtual port for a subfunction
+ * @sf_dev: the subfunction device to create a devlink port for
+ *
+ * Register virtual flavour devlink port for the subfunction auxiliary device
+ * created after activating a dynamically added devlink port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_sf_dev_port(struct ice_sf_dev *sf_dev)
+{
+ struct devlink_port_attrs attrs = {};
+ struct ice_dynamic_port *dyn_port;
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+
+ dyn_port = sf_dev->dyn_port;
+ vsi = dyn_port->vsi;
+
+ devlink_port = &sf_dev->priv->devlink_port;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL;
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+ devlink = priv_to_devlink(sf_dev->priv);
+
+ return devl_port_register(devlink, devlink_port, vsi->idx);
+}
+
+/**
+ * ice_devlink_destroy_sf_dev_port - Destroy virtual port for a subfunction
+ * @sf_dev: the subfunction device to destroy the devlink port for
+ *
+ * Unregisters the virtual port associated with this subfunction.
+ */
+void ice_devlink_destroy_sf_dev_port(struct ice_sf_dev *sf_dev)
+{
+ devl_port_unregister(&sf_dev->priv->devlink_port);
+}
+
+/**
+ * ice_activate_dynamic_port - Activate a dynamic port
+ * @dyn_port: dynamic port instance to activate
+ * @extack: extack for reporting error messages
+ *
+ * Activate the dynamic port based on its flavour.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_activate_dynamic_port(struct ice_dynamic_port *dyn_port,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ if (dyn_port->active)
+ return 0;
+
+ err = ice_sf_eth_activate(dyn_port, extack);
+ if (err)
+ return err;
+
+ dyn_port->active = true;
+
+ return 0;
+}
+
+/**
+ * ice_deactivate_dynamic_port - Deactivate a dynamic port
+ * @dyn_port: dynamic port instance to deactivate
+ *
+ * Undo activation of a dynamic port.
+ */
+static void ice_deactivate_dynamic_port(struct ice_dynamic_port *dyn_port)
+{
+ if (!dyn_port->active)
+ return;
+
+ ice_sf_eth_deactivate(dyn_port);
+ dyn_port->active = false;
+}
+
+/**
+ * ice_dealloc_dynamic_port - Deallocate and remove a dynamic port
+ * @dyn_port: dynamic port instance to deallocate
+ *
+ * Free resources associated with a dynamically added devlink port. Will
+ * deactivate the port if it is currently active.
+ */
+static void ice_dealloc_dynamic_port(struct ice_dynamic_port *dyn_port)
+{
+ struct devlink_port *devlink_port = &dyn_port->devlink_port;
+ struct ice_pf *pf = dyn_port->pf;
+
+ ice_deactivate_dynamic_port(dyn_port);
+
+ xa_erase(&pf->sf_nums, devlink_port->attrs.pci_sf.sf);
+ ice_eswitch_detach_sf(pf, dyn_port);
+ ice_vsi_free(dyn_port->vsi);
+ xa_erase(&pf->dyn_ports, dyn_port->vsi->idx);
+ kfree(dyn_port);
+}
+
+/**
+ * ice_dealloc_all_dynamic_ports - Deallocate all dynamic devlink ports
+ * @pf: pointer to the pf structure
+ */
+void ice_dealloc_all_dynamic_ports(struct ice_pf *pf)
+{
+ struct ice_dynamic_port *dyn_port;
+ unsigned long index;
+
+ xa_for_each(&pf->dyn_ports, index, dyn_port)
+ ice_dealloc_dynamic_port(dyn_port);
+}
+
+/**
+ * ice_devlink_port_new_check_attr - Check that new port attributes are valid
+ * @pf: pointer to the PF structure
+ * @new_attr: the attributes for the new port
+ * @extack: extack for reporting error messages
+ *
+ * Check that the attributes for the new port are valid before continuing to
+ * allocate the devlink port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_new_check_attr(struct ice_pf *pf,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack)
+{
+ if (new_attr->flavour != DEVLINK_PORT_FLAVOUR_PCI_SF) {
+ NL_SET_ERR_MSG_MOD(extack, "Flavour other than pcisf is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (new_attr->controller_valid) {
+ NL_SET_ERR_MSG_MOD(extack, "Setting controller is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (new_attr->port_index_valid) {
+ NL_SET_ERR_MSG_MOD(extack, "Driver does not support user defined port index assignment");
+ return -EOPNOTSUPP;
+ }
+
+ if (new_attr->pfnum != pf->hw.pf_id) {
+ NL_SET_ERR_MSG_MOD(extack, "Incorrect pfnum supplied");
+ return -EINVAL;
+ }
+
+ if (!pci_msix_can_alloc_dyn(pf->pdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Dynamic MSIX-X interrupt allocation is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_del - devlink handler for port delete
+ * @devlink: pointer to devlink
+ * @port: devlink port to be deleted
+ * @extack: pointer to extack
+ *
+ * Deletes devlink port and deallocates all resources associated with
+ * created subfunction.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_del(struct devlink *devlink, struct devlink_port *port,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+ ice_dealloc_dynamic_port(dyn_port);
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_fn_hw_addr_set - devlink handler for mac address set
+ * @port: pointer to devlink port
+ * @hw_addr: hw address to set
+ * @hw_addr_len: hw address length
+ * @extack: extack for reporting error messages
+ *
+ * Sets mac address for the port, verifies arguments and copies address
+ * to the subfunction structure.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_hw_addr_set(struct devlink_port *port, const u8 *hw_addr,
+ int hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+
+ if (dyn_port->attached) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ethernet address can be change only in detached state");
+ return -EBUSY;
+ }
+
+ if (hw_addr_len != ETH_ALEN || !is_valid_ether_addr(hw_addr)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid ethernet address");
+ return -EADDRNOTAVAIL;
+ }
+
+ ether_addr_copy(dyn_port->hw_addr, hw_addr);
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_fn_hw_addr_get - devlink handler for mac address get
+ * @port: pointer to devlink port
+ * @hw_addr: hw address to set
+ * @hw_addr_len: hw address length
+ * @extack: extack for reporting error messages
+ *
+ * Returns mac address for the port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_hw_addr_get(struct devlink_port *port, u8 *hw_addr,
+ int *hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+
+ ether_addr_copy(hw_addr, dyn_port->hw_addr);
+ *hw_addr_len = ETH_ALEN;
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_fn_state_set - devlink handler for port state set
+ * @port: pointer to devlink port
+ * @state: state to set
+ * @extack: extack for reporting error messages
+ *
+ * Activates or deactivates the port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_state_set(struct devlink_port *port,
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+
+ switch (state) {
+ case DEVLINK_PORT_FN_STATE_ACTIVE:
+ return ice_activate_dynamic_port(dyn_port, extack);
+
+ case DEVLINK_PORT_FN_STATE_INACTIVE:
+ ice_deactivate_dynamic_port(dyn_port);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_fn_state_get - devlink handler for port state get
+ * @port: pointer to devlink port
+ * @state: admin configured state of the port
+ * @opstate: current port operational state
+ * @extack: extack for reporting error messages
+ *
+ * Gets port state.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_fn_state_get(struct devlink_port *port,
+ enum devlink_port_fn_state *state,
+ enum devlink_port_fn_opstate *opstate,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dynamic_port *dyn_port;
+
+ dyn_port = ice_devlink_port_to_dyn(port);
+
+ if (dyn_port->active)
+ *state = DEVLINK_PORT_FN_STATE_ACTIVE;
+ else
+ *state = DEVLINK_PORT_FN_STATE_INACTIVE;
+
+ if (dyn_port->attached)
+ *opstate = DEVLINK_PORT_FN_OPSTATE_ATTACHED;
+ else
+ *opstate = DEVLINK_PORT_FN_OPSTATE_DETACHED;
+
+ return 0;
+}
+
+static const struct devlink_port_ops ice_devlink_port_sf_ops = {
+ .port_del = ice_devlink_port_del,
+ .port_fn_hw_addr_get = ice_devlink_port_fn_hw_addr_get,
+ .port_fn_hw_addr_set = ice_devlink_port_fn_hw_addr_set,
+ .port_fn_state_get = ice_devlink_port_fn_state_get,
+ .port_fn_state_set = ice_devlink_port_fn_state_set,
+};
+
+/**
+ * ice_reserve_sf_num - Reserve a subfunction number for this port
+ * @pf: pointer to the pf structure
+ * @new_attr: devlink port attributes requested
+ * @extack: extack for reporting error messages
+ * @sfnum: on success, the sf number reserved
+ *
+ * Reserve a subfunction number for this port. Only called for
+ * DEVLINK_PORT_FLAVOUR_PCI_SF ports.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_reserve_sf_num(struct ice_pf *pf,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack, u32 *sfnum)
+{
+ int err;
+
+ /* If user didn't request an explicit number, pick one */
+ if (!new_attr->sfnum_valid)
+ return xa_alloc(&pf->sf_nums, sfnum, NULL, xa_limit_32b,
+ GFP_KERNEL);
+
+ /* Otherwise, check and use the number provided */
+ err = xa_insert(&pf->sf_nums, new_attr->sfnum, NULL, GFP_KERNEL);
+ if (err) {
+ if (err == -EBUSY)
+ NL_SET_ERR_MSG_MOD(extack, "Subfunction with given sfnum already exists");
+ return err;
+ }
+
+ *sfnum = new_attr->sfnum;
+
+ return 0;
+}
+
+/**
+ * ice_devlink_create_sf_port - Register PCI subfunction devlink port
+ * @dyn_port: the dynamic port instance structure for this subfunction
+ *
+ * Register PCI subfunction flavour devlink port for a dynamically added
+ * subfunction port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_sf_port(struct ice_dynamic_port *dyn_port)
+{
+ struct devlink_port_attrs attrs = {};
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+ struct ice_pf *pf;
+
+ vsi = dyn_port->vsi;
+ pf = dyn_port->pf;
+
+ devlink_port = &dyn_port->devlink_port;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_SF;
+ attrs.pci_sf.pf = pf->hw.pf_id;
+ attrs.pci_sf.sf = dyn_port->sfnum;
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+ devlink = priv_to_devlink(pf);
+
+ return devl_port_register_with_ops(devlink, devlink_port, vsi->idx,
+ &ice_devlink_port_sf_ops);
+}
+
+/**
+ * ice_devlink_destroy_sf_port - Destroy the devlink_port for this SF
+ * @dyn_port: the dynamic port instance structure for this subfunction
+ *
+ * Unregisters the devlink_port structure associated with this SF.
+ */
+void ice_devlink_destroy_sf_port(struct ice_dynamic_port *dyn_port)
+{
+ devl_rate_leaf_destroy(&dyn_port->devlink_port);
+ devl_port_unregister(&dyn_port->devlink_port);
+}
+
+/**
+ * ice_alloc_dynamic_port - Allocate new dynamic port
+ * @pf: pointer to the pf structure
+ * @new_attr: devlink port attributes requested
+ * @extack: extack for reporting error messages
+ * @devlink_port: index of newly created devlink port
+ *
+ * Allocate a new dynamic port instance and prepare it for configuration
+ * with devlink.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_alloc_dynamic_port(struct ice_pf *pf,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack,
+ struct devlink_port **devlink_port)
+{
+ struct ice_dynamic_port *dyn_port;
+ struct ice_vsi *vsi;
+ u32 sfnum;
+ int err;
+
+ err = ice_reserve_sf_num(pf, new_attr, extack, &sfnum);
+ if (err)
+ return err;
+
+ dyn_port = kzalloc(sizeof(*dyn_port), GFP_KERNEL);
+ if (!dyn_port) {
+ err = -ENOMEM;
+ goto unroll_reserve_sf_num;
+ }
+
+ vsi = ice_vsi_alloc(pf);
+ if (!vsi) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VSI");
+ err = -ENOMEM;
+ goto unroll_dyn_port_alloc;
+ }
+
+ dyn_port->vsi = vsi;
+ dyn_port->pf = pf;
+ dyn_port->sfnum = sfnum;
+ eth_random_addr(dyn_port->hw_addr);
+
+ err = xa_insert(&pf->dyn_ports, vsi->idx, dyn_port, GFP_KERNEL);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Port index reservation failed");
+ goto unroll_vsi_alloc;
+ }
+
+ err = ice_eswitch_attach_sf(pf, dyn_port);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to attach SF to eswitch");
+ goto unroll_xa_insert;
+ }
+
+ *devlink_port = &dyn_port->devlink_port;
+
+ return 0;
+
+unroll_xa_insert:
+ xa_erase(&pf->dyn_ports, vsi->idx);
+unroll_vsi_alloc:
+ ice_vsi_free(vsi);
+unroll_dyn_port_alloc:
+ kfree(dyn_port);
+unroll_reserve_sf_num:
+ xa_erase(&pf->sf_nums, sfnum);
+
+ return err;
+}
+
+/**
+ * ice_devlink_port_new - devlink handler for the new port
+ * @devlink: pointer to devlink
+ * @new_attr: pointer to the port new attributes
+ * @extack: extack for reporting error messages
+ * @devlink_port: pointer to a new port
+ *
+ * Creates a new devlink port: checks the new port attributes, rejects
+ * any unsupported parameters and allocates a new subfunction for that port.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int
+ice_devlink_port_new(struct devlink *devlink,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack,
+ struct devlink_port **devlink_port)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ int err;
+
+ err = ice_devlink_port_new_check_attr(pf, new_attr, extack);
+ if (err)
+ return err;
+
+ return ice_alloc_dynamic_port(pf, new_attr, extack, devlink_port);
+}
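ice_reserve_sf_num() above leans on two different xarray insertion modes: xa_alloc() hands out the next free 32-bit index when the user did not ask for a specific sfnum, while xa_insert() claims exactly the requested index and fails with -EBUSY if it is already taken. The distinction, isolated in a small sketch:

/* Sketch of the two sfnum reservation modes used above. */
static int sketch_reserve_id(struct xarray *xa, bool id_requested, u32 id, u32 *out)
{
	int err;

	if (!id_requested)
		/* let the xarray pick any free 32-bit index */
		return xa_alloc(xa, out, NULL, xa_limit_32b, GFP_KERNEL);

	/* claim exactly 'id'; returns -EBUSY if it is already in use */
	err = xa_insert(xa, id, NULL, GFP_KERNEL);
	if (!err)
		*out = id;

	return err;
}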
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
index 9223bcdb6444..d60efc340945 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
@@ -4,9 +4,55 @@
#ifndef _DEVLINK_PORT_H_
#define _DEVLINK_PORT_H_
+#include "../ice.h"
+#include "../ice_sf_eth.h"
+
+/**
+ * struct ice_dynamic_port - Track dynamically added devlink port instance
+ * @hw_addr: the HW address for this port
+ * @active: true if the port has been activated
+ * @attached: true it the prot is attached
+ * @devlink_port: the associated devlink port structure
+ * @pf: pointer to the PF private structure
+ * @vsi: the VSI associated with this port
+ * @repr_id: the representor ID
+ * @sfnum: the subfunction ID
+ * @sf_dev: pointer to the subfunction device
+ *
+ * An instance of a dynamically added devlink port. Each port flavour keeps
+ * its implementation-specific data in the union at the end of the structure.
+ */
+struct ice_dynamic_port {
+ u8 hw_addr[ETH_ALEN];
+ u8 active: 1;
+ u8 attached: 1;
+ struct devlink_port devlink_port;
+ struct ice_pf *pf;
+ struct ice_vsi *vsi;
+ unsigned long repr_id;
+ u32 sfnum;
+ /* Flavour-specific implementation data */
+ union {
+ struct ice_sf_dev *sf_dev;
+ };
+};
+
+void ice_dealloc_all_dynamic_ports(struct ice_pf *pf);
+
int ice_devlink_create_pf_port(struct ice_pf *pf);
void ice_devlink_destroy_pf_port(struct ice_pf *pf);
int ice_devlink_create_vf_port(struct ice_vf *vf);
void ice_devlink_destroy_vf_port(struct ice_vf *vf);
+int ice_devlink_create_sf_port(struct ice_dynamic_port *dyn_port);
+void ice_devlink_destroy_sf_port(struct ice_dynamic_port *dyn_port);
+int ice_devlink_create_sf_dev_port(struct ice_sf_dev *sf_dev);
+void ice_devlink_destroy_sf_dev_port(struct ice_sf_dev *sf_dev);
+
+#define ice_devlink_port_to_dyn(port) \
+ container_of(port, struct ice_dynamic_port, devlink_port)
+int
+ice_devlink_port_new(struct devlink *devlink,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack,
+ struct devlink_port **devlink_port);
#endif /* _DEVLINK_PORT_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index ce8b5505b16d..d6f80da30dec 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -451,7 +451,12 @@ struct ice_vsi {
struct_group_tagged(ice_vsi_cfg_params, params,
struct ice_port_info *port_info; /* back pointer to port_info */
struct ice_channel *ch; /* VSI's channel structure, may be NULL */
- struct ice_vf *vf; /* VF associated with this VSI, may be NULL */
+ union {
+ /* VF associated with this VSI, may be NULL */
+ struct ice_vf *vf;
+ /* SF associated with this VSI, may be NULL */
+ struct ice_dynamic_port *sf;
+ };
u32 flags; /* VSI flags used for rebuild and configuration */
enum ice_vsi_type type; /* the type of the VSI */
);
@@ -652,6 +657,9 @@ struct ice_pf {
struct ice_eswitch eswitch;
struct ice_esw_br_port *br_port;
+ struct xarray dyn_ports;
+ struct xarray sf_nums;
+
#define ICE_INVALID_AGG_NODE_ID 0
#define ICE_PF_AGG_NODE_ID_START 1
#define ICE_MAX_PF_AGG_NODES 32
@@ -918,6 +926,7 @@ int ice_vsi_open(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
void ice_set_ethtool_repr_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
+void ice_set_ethtool_sf_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
@@ -1003,6 +1012,14 @@ void ice_unload(struct ice_pf *pf);
void ice_adv_lnk_speed_maps_init(void);
int ice_init_dev(struct ice_pf *pf);
void ice_deinit_dev(struct ice_pf *pf);
+int ice_change_mtu(struct net_device *netdev, int new_mtu);
+void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue);
+int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+void ice_set_netdev_features(struct net_device *netdev);
+int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
+int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
+void ice_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
/**
* ice_set_rdma_cap - enable RDMA support
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index c158749a80e0..4a9a6899fc45 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -325,6 +325,9 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
+ case ICE_VSI_SF:
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ break;
default:
return;
}
@@ -540,7 +543,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
ring->rx_buf_len = ring->vsi->rx_buf_len;
- if (ring->vsi->type == ICE_VSI_PF) {
+ if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index a94e7072b570..a7c510832824 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -187,6 +187,7 @@ void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi)
vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
break;
case ICE_VSI_CHNL:
+ case ICE_VSI_SF:
vsi->tc_cfg.ena_tc = BIT(ice_get_first_droptc(vsi));
vsi->tc_cfg.numtc = 1;
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 3cfa071e3718..c0b3e70a7ea3 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -452,11 +452,9 @@ static void ice_eswitch_start_reprs(struct ice_pf *pf)
ice_eswitch_start_all_tx_queues(pf);
}
-int
-ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
+static int
+ice_eswitch_attach(struct ice_pf *pf, struct ice_repr *repr, unsigned long *id)
{
- struct devlink *devlink = priv_to_devlink(pf);
- struct ice_repr *repr;
int err;
if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
@@ -470,13 +468,9 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
ice_eswitch_stop_reprs(pf);
- devl_lock(devlink);
- repr = ice_repr_add_vf(vf);
- devl_unlock(devlink);
- if (IS_ERR(repr)) {
- err = PTR_ERR(repr);
+ err = repr->ops.add(repr);
+ if (err)
goto err_create_repr;
- }
err = ice_eswitch_setup_repr(pf, repr);
if (err)
@@ -486,7 +480,7 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
if (err)
goto err_xa_alloc;
- vf->repr_id = repr->id;
+ *id = repr->id;
ice_eswitch_start_reprs(pf);
@@ -495,9 +489,7 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
err_xa_alloc:
ice_eswitch_release_repr(pf, repr);
err_setup_repr:
- devl_lock(devlink);
- ice_repr_rem_vf(repr);
- devl_unlock(devlink);
+ repr->ops.rem(repr);
err_create_repr:
if (xa_empty(&pf->eswitch.reprs))
ice_eswitch_disable_switchdev(pf);
@@ -506,14 +498,59 @@ err_create_repr:
return err;
}
-void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
+/**
+ * ice_eswitch_attach_vf - attach VF to the eswitch
+ * @pf: pointer to PF structure
+ * @vf: pointer to VF structure to be attached
+ *
+ * A port representor for the VF is created during attach.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf)
{
- struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id);
+ struct ice_repr *repr = ice_repr_create_vf(vf);
struct devlink *devlink = priv_to_devlink(pf);
+ int err;
- if (!repr)
- return;
+ if (IS_ERR(repr))
+ return PTR_ERR(repr);
+
+ devl_lock(devlink);
+ err = ice_eswitch_attach(pf, repr, &vf->repr_id);
+ if (err)
+ ice_repr_destroy(repr);
+ devl_unlock(devlink);
+
+ return err;
+}
+
+/**
+ * ice_eswitch_attach_sf - attach SF to the eswitch
+ * @pf: pointer to PF structure
+ * @sf: pointer to SF structure to be attached
+ *
+ * A port representor for the SF is created during attach.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
+{
+ struct ice_repr *repr = ice_repr_create_sf(sf);
+ int err;
+ if (IS_ERR(repr))
+ return PTR_ERR(repr);
+
+ err = ice_eswitch_attach(pf, repr, &sf->repr_id);
+ if (err)
+ ice_repr_destroy(repr);
+
+ return err;
+}
+
+static void ice_eswitch_detach(struct ice_pf *pf, struct ice_repr *repr)
+{
ice_eswitch_stop_reprs(pf);
xa_erase(&pf->eswitch.reprs, repr->id);
@@ -521,10 +558,12 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
ice_eswitch_disable_switchdev(pf);
ice_eswitch_release_repr(pf, repr);
- devl_lock(devlink);
- ice_repr_rem_vf(repr);
+ repr->ops.rem(repr);
+ ice_repr_destroy(repr);
if (xa_empty(&pf->eswitch.reprs)) {
+ struct devlink *devlink = priv_to_devlink(pf);
+
/* since all port representors are destroyed, there is
* no point in keeping the nodes
*/
@@ -533,10 +572,42 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
} else {
ice_eswitch_start_reprs(pf);
}
+}
+
+/**
+ * ice_eswitch_detach_vf - detach VF from the eswitch
+ * @pf: pointer to PF structure
+ * @vf: pointer to VF structure to be detached
+ */
+void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf)
+{
+ struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id);
+ struct devlink *devlink = priv_to_devlink(pf);
+
+ if (!repr)
+ return;
+
+ devl_lock(devlink);
+ ice_eswitch_detach(pf, repr);
devl_unlock(devlink);
}
/**
+ * ice_eswitch_detach_sf - detach SF from the eswitch
+ * @pf: pointer to PF structure
+ * @sf: pointer to SF structure to be detached
+ */
+void ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
+{
+ struct ice_repr *repr = xa_load(&pf->eswitch.reprs, sf->repr_id);
+
+ if (!repr)
+ return;
+
+ ice_eswitch_detach(pf, repr);
+}
+
+/**
* ice_eswitch_get_target - get netdev based on src_vsi from descriptor
* @rx_ring: ring used to receive the packet
* @rx_desc: descriptor used to get src_vsi value
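The eswitch attach/detach paths are now flavour-agnostic: the VF- and SF-specific work is reached through repr->ops callbacks, of which this patch exercises add, rem and ready (the latter also from the ethtool representor ops below). The representor ops structure itself is not part of this hunk, so the shape sketched here is inferred from those call sites and may omit members:

/* Inferred shape only; the real definition lives in ice_repr.h. */
struct sketch_ice_repr_ops {
	int (*add)(struct ice_repr *repr);	/* create the port representor */
	void (*rem)(struct ice_repr *repr);	/* tear the representor down */
	int (*ready)(struct ice_repr *repr);	/* 0 when the function is configurable */
};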
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index 78fd39a6935d..20ce32dda69c 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -5,11 +5,13 @@
#define _ICE_ESWITCH_H_
#include <net/devlink.h>
+#include "devlink/devlink_port.h"
#ifdef CONFIG_ICE_SWITCHDEV
-void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf);
-int
-ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf);
+void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf);
+void ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf);
+int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf);
+int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf);
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int
@@ -31,10 +33,20 @@ struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac);
void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac);
#else /* CONFIG_ICE_SWITCHDEV */
-static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }
+static inline void
+ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf) { }
+
+static inline void
+ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf) { }
+
+static inline int
+ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf)
+{
+ return -EOPNOTSUPP;
+}
static inline int
-ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
+ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 793a64d44aa3..d5cc934d1359 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -4412,7 +4412,7 @@ ice_repr_get_drvinfo(struct net_device *netdev,
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
- if (ice_check_vf_ready_for_cfg(repr->vf))
+ if (repr->ops.ready(repr))
return;
__ice_get_drvinfo(netdev, drvinfo, repr->src_vsi);
@@ -4424,8 +4424,7 @@ ice_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
struct ice_repr *repr = ice_netdev_to_repr(netdev);
/* for port representors only ETH_SS_STATS is supported */
- if (ice_check_vf_ready_for_cfg(repr->vf) ||
- stringset != ETH_SS_STATS)
+ if (repr->ops.ready(repr) || stringset != ETH_SS_STATS)
return;
__ice_get_strings(netdev, stringset, data, repr->src_vsi);
@@ -4438,7 +4437,7 @@ ice_repr_get_ethtool_stats(struct net_device *netdev,
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
- if (ice_check_vf_ready_for_cfg(repr->vf))
+ if (repr->ops.ready(repr))
return;
__ice_get_ethtool_stats(netdev, stats, data, repr->src_vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 737c00b02dd0..06e712cdc3d9 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -7,6 +7,7 @@
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
+#include "ice_type.h"
#include "ice_vsi_vlan_ops.h"
/**
@@ -20,6 +21,8 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
return "ICE_VSI_PF";
case ICE_VSI_VF:
return "ICE_VSI_VF";
+ case ICE_VSI_SF:
+ return "ICE_VSI_SF";
case ICE_VSI_CTRL:
return "ICE_VSI_CTRL";
case ICE_VSI_CHNL:
@@ -135,6 +138,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
+ case ICE_VSI_SF:
case ICE_VSI_CTRL:
case ICE_VSI_LB:
/* a user could change the values of num_[tr]x_desc using
@@ -201,6 +205,12 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
max_t(int, vsi->alloc_rxq,
vsi->alloc_txq));
break;
+ case ICE_VSI_SF:
+ vsi->alloc_txq = 1;
+ vsi->alloc_rxq = 1;
+ vsi->num_q_vectors = 1;
+ vsi->irq_dyn_alloc = true;
+ break;
case ICE_VSI_VF:
if (vf->num_req_qs)
vf->num_vf_qs = vf->num_req_qs;
@@ -423,7 +433,7 @@ err_out:
* This deallocates the VSI's queue resources, removes it from the PF's
* VSI array if necessary, and deallocates the VSI
*/
-static void ice_vsi_free(struct ice_vsi *vsi)
+void ice_vsi_free(struct ice_vsi *vsi)
{
struct ice_pf *pf = NULL;
struct device *dev;
@@ -559,6 +569,7 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
switch (vsi->type) {
case ICE_VSI_PF:
+ case ICE_VSI_SF:
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
break;
@@ -595,7 +606,7 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
*
* returns a pointer to a VSI on success, NULL on failure.
*/
-static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
+struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi = NULL;
@@ -889,6 +900,11 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
max_rss_size);
vsi->rss_lut_type = ICE_LUT_PF;
break;
+ case ICE_VSI_SF:
+ vsi->rss_table_size = ICE_LUT_VSI_SIZE;
+ vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
+ vsi->rss_lut_type = ICE_LUT_VSI;
+ break;
case ICE_VSI_VF:
/* VF VSI will get a small RSS table.
* For VSI_LUT, LUT size should be set to 64 bytes.
@@ -1136,6 +1152,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
break;
case ICE_VSI_VF:
+ case ICE_VSI_SF:
/* VF VSI will gets a small RSS table which is a VSI LUT type */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
break;
@@ -1214,6 +1231,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
break;
+ case ICE_VSI_SF:
case ICE_VSI_CHNL:
ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
break;
@@ -2095,6 +2113,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
case ICE_VSI_CHNL:
case ICE_VSI_LB:
case ICE_VSI_PF:
+ case ICE_VSI_SF:
max_agg_nodes = ICE_MAX_PF_AGG_NODES;
agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
agg_node_iter = &pf->pf_agg_node[0];
@@ -2264,6 +2283,7 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
switch (vsi->type) {
case ICE_VSI_CTRL:
+ case ICE_VSI_SF:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -2413,13 +2433,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back;
int err;
- /* The Rx rule will only exist to remove if the LLDP FW
- * engine is currently stopped
- */
- if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
- !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
- ice_cfg_sw_lldp(vsi, false, false);
-
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
if (err)
@@ -2648,7 +2661,8 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
- if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ if (vsi->netdev && (vsi->type == ICE_VSI_PF ||
+ vsi->type == ICE_VSI_SF)) {
if (netif_running(vsi->netdev)) {
if (!locked)
rtnl_lock();
@@ -2676,7 +2690,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
- if (vsi->type == ICE_VSI_PF && vsi->netdev) {
+ if (vsi->netdev && (vsi->type == ICE_VSI_PF ||
+ vsi->type == ICE_VSI_SF)) {
if (netif_running(vsi->netdev)) {
if (!locked)
rtnl_lock();
@@ -2747,6 +2762,26 @@ void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
}
/**
+ * ice_napi_add - register NAPI handler for the VSI
+ * @vsi: VSI for which NAPI handler is to be registered
+ *
+ * This function is only called in the driver's load path. Registering the NAPI
+ * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
+ * reset/rebuild, etc.)
+ */
+void ice_napi_add(struct ice_vsi *vsi)
+{
+ int v_idx;
+
+ if (!vsi->netdev)
+ return;
+
+ ice_for_each_q_vector(vsi, v_idx)
+ netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
+ ice_napi_poll);
+}
+
+/**
* ice_vsi_release - Delete a VSI and free its resources
* @vsi: the VSI being removed
*
@@ -2764,6 +2799,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
ice_rss_clean(vsi);
ice_vsi_close(vsi);
+
+ /* The Rx rule will only exist to remove if the LLDP FW
+ * engine is currently stopped
+ */
+ if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
+ !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
+ ice_cfg_sw_lldp(vsi, false, false);
+
ice_vsi_decfg(vsi);
/* retain SW VSI data structure since it is needed to unregister and
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 36d86535695d..1a6cfc8693ce 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -45,6 +45,7 @@ struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
void ice_vsi_set_napi_queues(struct ice_vsi *vsi);
+void ice_napi_add(struct ice_vsi *vsi);
void ice_vsi_clear_napi_queues(struct ice_vsi *vsi);
@@ -59,6 +60,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked);
int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags);
int ice_vsi_cfg(struct ice_vsi *vsi);
+struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf);
+void ice_vsi_free(struct ice_vsi *vsi);
bool ice_is_reset_in_progress(unsigned long *state);
int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index c7db88b517da..eeb48cc48e08 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -15,6 +15,7 @@
#include "ice_dcb_nl.h"
#include "devlink/devlink.h"
#include "devlink/devlink_port.h"
+#include "ice_sf_eth.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
* ice tracepoint functions. This must be done exactly once across the
@@ -2974,6 +2975,9 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
if (avail < cpus / 2)
return -ENOMEM;
+ if (vsi->type == ICE_VSI_SF)
+ avail = vsi->alloc_txq;
+
vsi->num_xdp_txq = min_t(u16, avail, cpus);
if (vsi->num_xdp_txq < cpus)
@@ -3089,14 +3093,14 @@ static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
* @dev: netdevice
* @xdp: XDP command
*/
-static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
struct ice_netdev_priv *np = netdev_priv(dev);
struct ice_vsi *vsi = np->vsi;
int ret;
- if (vsi->type != ICE_VSI_PF) {
- NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
+ if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) {
+ NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF or SF VSI");
return -EINVAL;
}
@@ -3556,26 +3560,6 @@ skip_req_irq:
}
/**
- * ice_napi_add - register NAPI handler for the VSI
- * @vsi: VSI for which NAPI handler is to be registered
- *
- * This function is only called in the driver's load path. Registering the NAPI
- * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
- * reset/rebuild, etc.)
- */
-static void ice_napi_add(struct ice_vsi *vsi)
-{
- int v_idx;
-
- if (!vsi->netdev)
- return;
-
- ice_for_each_q_vector(vsi, v_idx)
- netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
- ice_napi_poll);
-}
-
-/**
* ice_set_ops - set netdev and ethtools ops for the given netdev
* @vsi: the VSI associated with the new netdev
*/
@@ -3608,7 +3592,7 @@ static void ice_set_ops(struct ice_vsi *vsi)
* ice_set_netdev_features - set features for the given netdev
* @netdev: netdev instance
*/
-static void ice_set_netdev_features(struct net_device *netdev)
+void ice_set_netdev_features(struct net_device *netdev)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
@@ -3790,8 +3774,7 @@ ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
*
* net_device_ops implementation for adding VLAN IDs
*/
-static int
-ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
+int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi_vlan_ops *vlan_ops;
@@ -3853,8 +3836,7 @@ finish:
*
* net_device_ops implementation for removing VLAN IDs
*/
-static int
-ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
+int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi_vlan_ops *vlan_ops;
@@ -4023,6 +4005,9 @@ static void ice_deinit_pf(struct ice_pf *pf)
if (pf->ptp.clock)
ptp_clock_unregister(pf->ptp.clock);
+
+ xa_destroy(&pf->dyn_ports);
+ xa_destroy(&pf->sf_nums);
}
/**
@@ -4116,6 +4101,9 @@ static int ice_init_pf(struct ice_pf *pf)
hash_init(pf->vfs.table);
ice_mbx_init_snapshot(&pf->hw);
+ xa_init(&pf->dyn_ports);
+ xa_init(&pf->sf_nums);
+
return 0;
}
@@ -5363,7 +5351,6 @@ err_load:
ice_deinit(pf);
err_init:
ice_adapter_put(pdev);
- pci_disable_device(pdev);
return err;
}
@@ -5458,6 +5445,7 @@ static void ice_remove(struct pci_dev *pdev)
ice_remove_arfs(pf);
devl_lock(priv_to_devlink(pf));
+ ice_dealloc_all_dynamic_ports(pf);
ice_deinit_devlink(pf);
ice_unload(pf);
@@ -5470,7 +5458,6 @@ static void ice_remove(struct pci_dev *pdev)
ice_set_wake(pf);
ice_adapter_put(pdev);
- pci_disable_device(pdev);
}
/**
@@ -5946,8 +5933,16 @@ static int __init ice_module_init(void)
goto err_dest_lag_wq;
}
+ status = ice_sf_driver_register();
+ if (status) {
+ pr_err("Failed to register SF driver, err %d\n", status);
+ goto err_sf_driver;
+ }
+
return 0;
+err_sf_driver:
+ pci_unregister_driver(&ice_driver);
err_dest_lag_wq:
destroy_workqueue(ice_lag_wq);
ice_debugfs_exit();
@@ -5965,6 +5960,7 @@ module_init(ice_module_init);
*/
static void __exit ice_module_exit(void)
{
+ ice_sf_driver_unregister();
pci_unregister_driver(&ice_driver);
ice_debugfs_exit();
destroy_workqueue(ice_wq);
@@ -6766,7 +6762,8 @@ static int ice_up_complete(struct ice_vsi *vsi)
if (vsi->port_info &&
(vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
- vsi->netdev && vsi->type == ICE_VSI_PF) {
+ ((vsi->netdev && (vsi->type == ICE_VSI_PF ||
+ vsi->type == ICE_VSI_SF)))) {
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
@@ -7124,7 +7121,6 @@ void ice_update_pf_stats(struct ice_pf *pf)
* @netdev: network interface device structure
* @stats: main device statistics structure
*/
-static
void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
@@ -7465,7 +7461,7 @@ int ice_vsi_open(struct ice_vsi *vsi)
ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
- if (vsi->type == ICE_VSI_PF) {
+ if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) {
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
if (err)
@@ -7801,7 +7797,7 @@ clear_recovery:
*
* Returns 0 on success, negative on failure
*/
-static int ice_change_mtu(struct net_device *netdev, int new_mtu)
+int ice_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -8225,7 +8221,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
* @netdev: network interface device structure
* @txqueue: Tx queue
*/
-static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_tx_ring *tx_ring = NULL;
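
The ice_module_init() change above registers the new SF auxiliary driver after the PCI driver and tears the PCI registration back down through the err_sf_driver label if that fails. Below is a minimal, stand-alone C sketch of the same register/unwind-in-reverse pattern; register_pci() and register_sf() are hypothetical stand-ins, not ice APIs.

/* Stand-alone sketch of the register/unwind pattern: each step that
 * succeeded gets a matching teardown, in reverse order, when a later
 * step fails.  register_pci()/register_sf() are illustrative stand-ins
 * for pci_register_driver()/ice_sf_driver_register().
 */
#include <stdio.h>

static int register_pci(void)    { return 0; }   /* pretend success */
static int register_sf(void)     { return -1; }  /* pretend failure */
static void unregister_pci(void) { puts("unregister pci driver"); }

static int module_init_sketch(void)
{
	int err;

	err = register_pci();
	if (err)
		return err;

	err = register_sf();
	if (err)
		goto err_sf_driver;     /* unwind everything registered so far */

	return 0;

err_sf_driver:
	unregister_pci();               /* reverse order of registration */
	return err;
}

int main(void)
{
	return module_init_sketch() ? 1 : 0;
}
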
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index bdda3401e343..00d4a9125dfa 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -59,12 +59,13 @@ static void
ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_repr *repr = np->repr;
struct ice_eth_stats *eth_stats;
struct ice_vsi *vsi;
- if (ice_is_vf_disabled(np->repr->vf))
+ if (repr->ops.ready(repr))
return;
- vsi = np->repr->src_vsi;
+ vsi = repr->src_vsi;
ice_update_vsi_stats(vsi);
eth_stats = &vsi->eth_stats;
@@ -93,7 +94,7 @@ struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
}
/**
- * ice_repr_open - Enable port representor's network interface
+ * ice_repr_vf_open - Enable port representor's network interface
* @netdev: network interface device structure
*
* The open entry point is called when a port representor's network
@@ -102,7 +103,7 @@ struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
*
* Returns 0 on success
*/
-static int ice_repr_open(struct net_device *netdev)
+static int ice_repr_vf_open(struct net_device *netdev)
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
struct ice_vf *vf;
@@ -118,8 +119,16 @@ static int ice_repr_open(struct net_device *netdev)
return 0;
}
+static int ice_repr_sf_open(struct net_device *netdev)
+{
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+
+ return 0;
+}
+
/**
- * ice_repr_stop - Disable port representor's network interface
+ * ice_repr_vf_stop - Disable port representor's network interface
* @netdev: network interface device structure
*
* The stop entry point is called when a port representor's network
@@ -128,7 +137,7 @@ static int ice_repr_open(struct net_device *netdev)
*
* Returns 0 on success
*/
-static int ice_repr_stop(struct net_device *netdev)
+static int ice_repr_vf_stop(struct net_device *netdev)
{
struct ice_repr *repr = ice_netdev_to_repr(netdev);
struct ice_vf *vf;
@@ -144,6 +153,14 @@ static int ice_repr_stop(struct net_device *netdev)
return 0;
}
+static int ice_repr_sf_stop(struct net_device *netdev)
+{
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ return 0;
+}
+
/**
* ice_repr_sp_stats64 - get slow path stats for port representor
* @dev: network interface device structure
@@ -245,10 +262,20 @@ ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
}
}
-static const struct net_device_ops ice_repr_netdev_ops = {
+static const struct net_device_ops ice_repr_vf_netdev_ops = {
+ .ndo_get_stats64 = ice_repr_get_stats64,
+ .ndo_open = ice_repr_vf_open,
+ .ndo_stop = ice_repr_vf_stop,
+ .ndo_start_xmit = ice_eswitch_port_start_xmit,
+ .ndo_setup_tc = ice_repr_setup_tc,
+ .ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
+ .ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
+};
+
+static const struct net_device_ops ice_repr_sf_netdev_ops = {
.ndo_get_stats64 = ice_repr_get_stats64,
- .ndo_open = ice_repr_open,
- .ndo_stop = ice_repr_stop,
+ .ndo_open = ice_repr_sf_open,
+ .ndo_stop = ice_repr_sf_stop,
.ndo_start_xmit = ice_eswitch_port_start_xmit,
.ndo_setup_tc = ice_repr_setup_tc,
.ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
@@ -261,18 +288,20 @@ static const struct net_device_ops ice_repr_netdev_ops = {
*/
bool ice_is_port_repr_netdev(const struct net_device *netdev)
{
- return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
+ return netdev && (netdev->netdev_ops == &ice_repr_vf_netdev_ops ||
+ netdev->netdev_ops == &ice_repr_sf_netdev_ops);
}
/**
* ice_repr_reg_netdev - register port representor netdev
* @netdev: pointer to port representor netdev
+ * @ops: new ops for netdev
*/
static int
-ice_repr_reg_netdev(struct net_device *netdev)
+ice_repr_reg_netdev(struct net_device *netdev, const struct net_device_ops *ops)
{
eth_hw_addr_random(netdev);
- netdev->netdev_ops = &ice_repr_netdev_ops;
+ netdev->netdev_ops = ops;
ice_set_ethtool_repr_ops(netdev);
netdev->hw_features |= NETIF_F_HW_TC;
@@ -283,57 +312,56 @@ ice_repr_reg_netdev(struct net_device *netdev)
return register_netdev(netdev);
}
-static void ice_repr_remove_node(struct devlink_port *devlink_port)
+static int ice_repr_ready_vf(struct ice_repr *repr)
+{
+ return !ice_check_vf_ready_for_cfg(repr->vf);
+}
+
+static int ice_repr_ready_sf(struct ice_repr *repr)
{
- devl_rate_leaf_destroy(devlink_port);
+ return !repr->sf->active;
}
/**
- * ice_repr_rem - remove representor from VF
+ * ice_repr_destroy - free port representor memory
* @repr: pointer to representor structure
*/
-static void ice_repr_rem(struct ice_repr *repr)
+void ice_repr_destroy(struct ice_repr *repr)
{
free_percpu(repr->stats);
free_netdev(repr->netdev);
kfree(repr);
}
-/**
- * ice_repr_rem_vf - remove representor from VF
- * @repr: pointer to representor structure
- */
-void ice_repr_rem_vf(struct ice_repr *repr)
+static void ice_repr_rem_vf(struct ice_repr *repr)
{
- ice_repr_remove_node(&repr->vf->devlink_port);
ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
unregister_netdev(repr->netdev);
ice_devlink_destroy_vf_port(repr->vf);
ice_virtchnl_set_dflt_ops(repr->vf);
- ice_repr_rem(repr);
}
-static void ice_repr_set_tx_topology(struct ice_pf *pf)
+static void ice_repr_rem_sf(struct ice_repr *repr)
{
- struct devlink *devlink;
+ unregister_netdev(repr->netdev);
+ ice_devlink_destroy_sf_port(repr->sf);
+}
+static void ice_repr_set_tx_topology(struct ice_pf *pf, struct devlink *devlink)
+{
	/* only export if ADQ and DCB disabled and eswitch enabled */
if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) ||
!ice_is_switchdev_running(pf))
return;
- devlink = priv_to_devlink(pf);
ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
}
/**
- * ice_repr_add - add representor for generic VSI
- * @pf: pointer to PF structure
+ * ice_repr_create - add representor for generic VSI
* @src_vsi: pointer to VSI structure of device to represent
- * @parent_mac: device MAC address
*/
-static struct ice_repr *
-ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
+static struct ice_repr *ice_repr_create(struct ice_vsi *src_vsi)
{
struct ice_netdev_priv *np;
struct ice_repr *repr;
@@ -360,7 +388,10 @@ ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
np = netdev_priv(repr->netdev);
np->repr = repr;
- ether_addr_copy(repr->parent_mac, parent_mac);
+ repr->netdev->min_mtu = ETH_MIN_MTU;
+ repr->netdev->max_mtu = ICE_MAX_MTU;
+
+ SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(src_vsi->back));
return repr;
@@ -371,34 +402,18 @@ err_alloc:
return ERR_PTR(err);
}
-struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
+static int ice_repr_add_vf(struct ice_repr *repr)
{
- struct ice_repr *repr;
- struct ice_vsi *vsi;
+ struct ice_vf *vf = repr->vf;
+ struct devlink *devlink;
int err;
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- return ERR_PTR(-ENOENT);
-
err = ice_devlink_create_vf_port(vf);
if (err)
- return ERR_PTR(err);
-
- repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr);
- if (IS_ERR(repr)) {
- err = PTR_ERR(repr);
- goto err_repr_add;
- }
-
- repr->vf = vf;
-
- repr->netdev->min_mtu = ETH_MIN_MTU;
- repr->netdev->max_mtu = ICE_MAX_MTU;
+ return err;
- SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf));
SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
- err = ice_repr_reg_netdev(repr->netdev);
+ err = ice_repr_reg_netdev(repr->netdev, &ice_repr_vf_netdev_ops);
if (err)
goto err_netdev;
@@ -407,17 +422,97 @@ struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
goto err_cfg_vsi;
ice_virtchnl_set_repr_ops(vf);
- ice_repr_set_tx_topology(vf->pf);
- return repr;
+ devlink = priv_to_devlink(vf->pf);
+ ice_repr_set_tx_topology(vf->pf, devlink);
+
+ return 0;
err_cfg_vsi:
unregister_netdev(repr->netdev);
err_netdev:
- ice_repr_rem(repr);
-err_repr_add:
ice_devlink_destroy_vf_port(vf);
- return ERR_PTR(err);
+ return err;
+}
+
+/**
+ * ice_repr_create_vf - add representor for VF VSI
+ * @vf: VF to create port representor on
+ *
+ * Set the correct representor type for the VF and assign its function pointers.
+ *
+ * Return: created port representor on success, error otherwise
+ */
+struct ice_repr *ice_repr_create_vf(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_repr *repr;
+
+ if (!vsi)
+ return ERR_PTR(-EINVAL);
+
+ repr = ice_repr_create(vsi);
+	if (IS_ERR(repr))
+		return ERR_CAST(repr);
+
+ repr->type = ICE_REPR_TYPE_VF;
+ repr->vf = vf;
+ repr->ops.add = ice_repr_add_vf;
+ repr->ops.rem = ice_repr_rem_vf;
+ repr->ops.ready = ice_repr_ready_vf;
+
+ ether_addr_copy(repr->parent_mac, vf->hw_lan_addr);
+
+ return repr;
+}
+
+static int ice_repr_add_sf(struct ice_repr *repr)
+{
+ struct ice_dynamic_port *sf = repr->sf;
+ int err;
+
+ err = ice_devlink_create_sf_port(sf);
+ if (err)
+ return err;
+
+ SET_NETDEV_DEVLINK_PORT(repr->netdev, &sf->devlink_port);
+ err = ice_repr_reg_netdev(repr->netdev, &ice_repr_sf_netdev_ops);
+ if (err)
+ goto err_netdev;
+
+ ice_repr_set_tx_topology(sf->vsi->back, priv_to_devlink(sf->vsi->back));
+
+ return 0;
+
+err_netdev:
+ ice_devlink_destroy_sf_port(sf);
+ return err;
+}
+
+/**
+ * ice_repr_create_sf - add representor for SF VSI
+ * @sf: SF to create port representor on
+ *
+ * Set the correct representor type for the SF and assign its function pointers.
+ *
+ * Return: created port representor on success, error otherwise
+ */
+struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf)
+{
+ struct ice_repr *repr = ice_repr_create(sf->vsi);
+
+	if (IS_ERR(repr))
+		return ERR_CAST(repr);
+
+ repr->type = ICE_REPR_TYPE_SF;
+ repr->sf = sf;
+ repr->ops.add = ice_repr_add_sf;
+ repr->ops.rem = ice_repr_rem_sf;
+ repr->ops.ready = ice_repr_ready_sf;
+
+ ether_addr_copy(repr->parent_mac, sf->hw_addr);
+
+ return repr;
}
struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id)
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index 488661b2900b..35bd93165e1e 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -15,19 +15,35 @@ struct ice_repr_pcpu_stats {
u64 tx_drops;
};
+enum ice_repr_type {
+ ICE_REPR_TYPE_VF,
+ ICE_REPR_TYPE_SF,
+};
+
struct ice_repr {
struct ice_vsi *src_vsi;
- struct ice_vf *vf;
struct net_device *netdev;
struct metadata_dst *dst;
struct ice_esw_br_port *br_port;
struct ice_repr_pcpu_stats __percpu *stats;
u32 id;
u8 parent_mac[ETH_ALEN];
+ enum ice_repr_type type;
+ union {
+ struct ice_vf *vf;
+ struct ice_dynamic_port *sf;
+ };
+ struct {
+ int (*add)(struct ice_repr *repr);
+ void (*rem)(struct ice_repr *repr);
+ int (*ready)(struct ice_repr *repr);
+ } ops;
};
-struct ice_repr *ice_repr_add_vf(struct ice_vf *vf);
-void ice_repr_rem_vf(struct ice_repr *repr);
+struct ice_repr *ice_repr_create_vf(struct ice_vf *vf);
+struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf);
+
+void ice_repr_destroy(struct ice_repr *repr);
void ice_repr_start_tx_queues(struct ice_repr *repr);
void ice_repr_stop_tx_queues(struct ice_repr *repr);
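
struct ice_repr now carries a type tag, a union of the VF/SF backing objects, and a small ops table so VF and SF representors share one code path. Below is a stand-alone sketch of that tagged-union-plus-ops pattern; struct port and its members are purely illustrative names, not part of the driver.

/* Stand-alone sketch of the tagged-union + ops dispatch used by
 * struct ice_repr above.  Callers go through the ops table instead of
 * branching on ->type.
 */
#include <stdio.h>

enum port_type { PORT_TYPE_VF, PORT_TYPE_SF };

struct port {
	enum port_type type;
	union {
		int vf_id;      /* valid when type == PORT_TYPE_VF */
		int sf_id;      /* valid when type == PORT_TYPE_SF */
	};
	struct {
		int (*add)(struct port *p);
		void (*rem)(struct port *p);
	} ops;
};

static int vf_add(struct port *p)  { printf("add VF %d\n", p->vf_id); return 0; }
static void vf_rem(struct port *p) { printf("rem VF %d\n", p->vf_id); }
static int sf_add(struct port *p)  { printf("add SF %d\n", p->sf_id); return 0; }
static void sf_rem(struct port *p) { printf("rem SF %d\n", p->sf_id); }

int main(void)
{
	struct port vf = { .type = PORT_TYPE_VF, .vf_id = 3,
			   .ops = { .add = vf_add, .rem = vf_rem } };
	struct port sf = { .type = PORT_TYPE_SF, .sf_id = 7,
			   .ops = { .add = sf_add, .rem = sf_rem } };

	/* Shared code dispatches through the ops table for either type. */
	vf.ops.add(&vf);
	sf.ops.add(&sf);
	sf.ops.rem(&sf);
	vf.ops.rem(&vf);
	return 0;
}
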
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.c b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
new file mode 100644
index 000000000000..d00389c405c4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Intel Corporation. */
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_txrx.h"
+#include "ice_fltr.h"
+#include "ice_sf_eth.h"
+#include "devlink/devlink_port.h"
+#include "devlink/devlink.h"
+
+static const struct net_device_ops ice_sf_netdev_ops = {
+ .ndo_open = ice_open,
+ .ndo_stop = ice_stop,
+ .ndo_start_xmit = ice_start_xmit,
+ .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
+ .ndo_change_mtu = ice_change_mtu,
+ .ndo_get_stats64 = ice_get_stats64,
+ .ndo_tx_timeout = ice_tx_timeout,
+ .ndo_bpf = ice_xdp,
+ .ndo_xdp_xmit = ice_xdp_xmit,
+ .ndo_xsk_wakeup = ice_xsk_wakeup,
+};
+
+/**
+ * ice_sf_cfg_netdev - Allocate, configure and register a netdev
+ * @dyn_port: subfunction associated with configured netdev
+ * @devlink_port: subfunction devlink port to be linked with netdev
+ *
+ * Return: 0 on success, negative value on failure
+ */
+static int ice_sf_cfg_netdev(struct ice_dynamic_port *dyn_port,
+ struct devlink_port *devlink_port)
+{
+ struct ice_vsi *vsi = dyn_port->vsi;
+ struct ice_netdev_priv *np;
+ struct net_device *netdev;
+ int err;
+
+ netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
+ vsi->alloc_rxq);
+ if (!netdev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev);
+ set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+ vsi->netdev = netdev;
+ np = netdev_priv(netdev);
+ np->vsi = vsi;
+
+ ice_set_netdev_features(netdev);
+
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY |
+ NETDEV_XDP_ACT_RX_SG;
+ netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
+
+ eth_hw_addr_set(netdev, dyn_port->hw_addr);
+ ether_addr_copy(netdev->perm_addr, dyn_port->hw_addr);
+ netdev->netdev_ops = &ice_sf_netdev_ops;
+ SET_NETDEV_DEVLINK_PORT(netdev, devlink_port);
+
+ err = register_netdev(netdev);
+ if (err) {
+ free_netdev(netdev);
+ vsi->netdev = NULL;
+ return -ENOMEM;
+ }
+ set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ return 0;
+}
+
+static void ice_sf_decfg_netdev(struct ice_vsi *vsi)
+{
+ unregister_netdev(vsi->netdev);
+ clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+}
+
+/**
+ * ice_sf_dev_probe - subfunction driver probe function
+ * @adev: pointer to the auxiliary device
+ * @id: pointer to the auxiliary_device id
+ *
+ * Configure VSI and netdev resources for the subfunction device.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int ice_sf_dev_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev);
+ struct ice_dynamic_port *dyn_port = sf_dev->dyn_port;
+ struct ice_vsi *vsi = dyn_port->vsi;
+ struct ice_pf *pf = dyn_port->pf;
+ struct device *dev = &adev->dev;
+ struct ice_sf_priv *priv;
+ struct devlink *devlink;
+ int err;
+
+ vsi->type = ICE_VSI_SF;
+ vsi->port_info = pf->hw.port_info;
+ vsi->flags = ICE_VSI_FLAG_INIT;
+
+ priv = ice_allocate_sf(&adev->dev, pf);
+ if (!priv) {
+ dev_err(dev, "Subfunction devlink alloc failed");
+ return -ENOMEM;
+ }
+
+ priv->dev = sf_dev;
+ sf_dev->priv = priv;
+ devlink = priv_to_devlink(priv);
+
+ devl_lock(devlink);
+
+ err = ice_vsi_cfg(vsi);
+ if (err) {
+ dev_err(dev, "Subfunction vsi config failed");
+ goto err_free_devlink;
+ }
+ vsi->sf = dyn_port;
+
+ ice_eswitch_update_repr(&dyn_port->repr_id, vsi);
+
+ err = ice_devlink_create_sf_dev_port(sf_dev);
+ if (err) {
+ dev_err(dev, "Cannot add ice virtual devlink port for subfunction");
+ goto err_vsi_decfg;
+ }
+
+ err = ice_sf_cfg_netdev(dyn_port, &sf_dev->priv->devlink_port);
+ if (err) {
+ dev_err(dev, "Subfunction netdev config failed");
+ goto err_devlink_destroy;
+ }
+
+ err = devl_port_fn_devlink_set(&dyn_port->devlink_port, devlink);
+ if (err) {
+ dev_err(dev, "Can't link devlink instance to SF devlink port");
+ goto err_netdev_decfg;
+ }
+
+ ice_napi_add(vsi);
+
+ devl_register(devlink);
+ devl_unlock(devlink);
+
+ dyn_port->attached = true;
+
+ return 0;
+
+err_netdev_decfg:
+ ice_sf_decfg_netdev(vsi);
+err_devlink_destroy:
+ ice_devlink_destroy_sf_dev_port(sf_dev);
+err_vsi_decfg:
+ ice_vsi_decfg(vsi);
+err_free_devlink:
+ devl_unlock(devlink);
+ devlink_free(devlink);
+ return err;
+}
+
+/**
+ * ice_sf_dev_remove - subfunction driver remove function
+ * @adev: pointer to the auxiliary device
+ *
+ * Deinitialize VSI and netdev resources for the subfunction device.
+ */
+static void ice_sf_dev_remove(struct auxiliary_device *adev)
+{
+ struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev);
+ struct ice_dynamic_port *dyn_port = sf_dev->dyn_port;
+ struct ice_vsi *vsi = dyn_port->vsi;
+ struct devlink *devlink;
+
+ devlink = priv_to_devlink(sf_dev->priv);
+ devl_lock(devlink);
+
+ ice_vsi_close(vsi);
+
+ ice_sf_decfg_netdev(vsi);
+ ice_devlink_destroy_sf_dev_port(sf_dev);
+ devl_unregister(devlink);
+ devl_unlock(devlink);
+ devlink_free(devlink);
+ ice_vsi_decfg(vsi);
+
+ dyn_port->attached = false;
+}
+
+static const struct auxiliary_device_id ice_sf_dev_id_table[] = {
+ { .name = "ice.sf", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(auxiliary, ice_sf_dev_id_table);
+
+static struct auxiliary_driver ice_sf_driver = {
+ .name = "sf",
+ .probe = ice_sf_dev_probe,
+ .remove = ice_sf_dev_remove,
+ .id_table = ice_sf_dev_id_table
+};
+
+static DEFINE_XARRAY_ALLOC1(ice_sf_aux_id);
+
+/**
+ * ice_sf_driver_register - Register the auxiliary subfunction driver
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_sf_driver_register(void)
+{
+ return auxiliary_driver_register(&ice_sf_driver);
+}
+
+/**
+ * ice_sf_driver_unregister - Unregister the auxiliary subfunction driver
+ */
+void ice_sf_driver_unregister(void)
+{
+ auxiliary_driver_unregister(&ice_sf_driver);
+}
+
+/**
+ * ice_sf_dev_release - Release device associated with auxiliary device
+ * @device: pointer to the device
+ *
+ * Since most of the subfunction deactivation code is handled in the remove
+ * handler, this callback only frees the tracking resources.
+ */
+static void ice_sf_dev_release(struct device *device)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(device);
+ struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev);
+
+ xa_erase(&ice_sf_aux_id, adev->id);
+ kfree(sf_dev);
+}
+
+/**
+ * ice_sf_eth_activate - Activate Ethernet subfunction port
+ * @dyn_port: the dynamic port instance for this subfunction
+ * @extack: extack for reporting error messages
+ *
+ * Activate the dynamic port as an Ethernet subfunction. Set up the
+ * associated netdev resources and initialize the auxiliary device.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int
+ice_sf_eth_activate(struct ice_dynamic_port *dyn_port,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = dyn_port->pf;
+ struct ice_sf_dev *sf_dev;
+ struct pci_dev *pdev;
+ int err;
+ u32 id;
+
+ err = xa_alloc(&ice_sf_aux_id, &id, NULL, xa_limit_32b,
+ GFP_KERNEL);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Could not allocate SF ID");
+ return err;
+ }
+
+ sf_dev = kzalloc(sizeof(*sf_dev), GFP_KERNEL);
+ if (!sf_dev) {
+ err = -ENOMEM;
+ NL_SET_ERR_MSG_MOD(extack, "Could not allocate SF memory");
+ goto xa_erase;
+ }
+ pdev = pf->pdev;
+
+ sf_dev->dyn_port = dyn_port;
+ sf_dev->adev.id = id;
+ sf_dev->adev.name = "sf";
+ sf_dev->adev.dev.release = ice_sf_dev_release;
+ sf_dev->adev.dev.parent = &pdev->dev;
+
+ err = auxiliary_device_init(&sf_dev->adev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to initialize SF device");
+ goto sf_dev_free;
+ }
+
+ err = auxiliary_device_add(&sf_dev->adev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to add SF device");
+ goto aux_dev_uninit;
+ }
+
+ dyn_port->sf_dev = sf_dev;
+
+ return 0;
+
+aux_dev_uninit:
+ auxiliary_device_uninit(&sf_dev->adev);
+sf_dev_free:
+ kfree(sf_dev);
+xa_erase:
+ xa_erase(&ice_sf_aux_id, id);
+
+ return err;
+}
+
+/**
+ * ice_sf_eth_deactivate - Deactivate Ethernet subfunction port
+ * @dyn_port: the dynamic port instance for this subfunction
+ *
+ * Deactivate the Ethernet subfunction, removing its auxiliary device and the
+ * associated resources.
+ */
+void ice_sf_eth_deactivate(struct ice_dynamic_port *dyn_port)
+{
+ struct ice_sf_dev *sf_dev = dyn_port->sf_dev;
+
+ auxiliary_device_delete(&sf_dev->adev);
+ auxiliary_device_uninit(&sf_dev->adev);
+}
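
ice_sf_eth_activate() above allocates the lowest free auxiliary-device ID from a private xarray and ice_sf_dev_release() returns it when the device is finally released. The stand-alone sketch below mimics only that allocate-lowest-free/return-on-release contract with a fixed-size table; it is not how xarray works internally, and the names are illustrative.

/* Illustrative ID allocator: hand out the lowest free slot, mark it used,
 * and make it reusable again once the owner releases it.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_IDS 8
static bool id_used[MAX_IDS];

static int id_alloc(unsigned int *id)
{
	for (unsigned int i = 0; i < MAX_IDS; i++) {
		if (!id_used[i]) {
			id_used[i] = true;
			*id = i;
			return 0;
		}
	}
	return -1;                      /* no free slot left */
}

static void id_free(unsigned int id)
{
	id_used[id] = false;            /* release callback returns the ID */
}

int main(void)
{
	unsigned int a, b;

	if (id_alloc(&a) || id_alloc(&b))
		return 1;
	printf("allocated SF aux ids %u and %u\n", a, b);
	id_free(a);
	if (id_alloc(&a) == 0)
		printf("reused id %u\n", a);
	return 0;
}
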
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.h b/drivers/net/ethernet/intel/ice/ice_sf_eth.h
new file mode 100644
index 000000000000..c558cad0a183
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024, Intel Corporation. */
+
+#ifndef _ICE_SF_ETH_H_
+#define _ICE_SF_ETH_H_
+
+#include <linux/auxiliary_bus.h>
+#include "ice.h"
+
+struct ice_sf_dev {
+ struct auxiliary_device adev;
+ struct ice_dynamic_port *dyn_port;
+ struct ice_sf_priv *priv;
+};
+
+struct ice_sf_priv {
+ struct ice_sf_dev *dev;
+ struct devlink_port devlink_port;
+};
+
+static inline struct
+ice_sf_dev *ice_adev_to_sf_dev(struct auxiliary_device *adev)
+{
+ return container_of(adev, struct ice_sf_dev, adev);
+}
+
+int ice_sf_driver_register(void);
+void ice_sf_driver_unregister(void);
+
+int ice_sf_eth_activate(struct ice_dynamic_port *dyn_port,
+ struct netlink_ext_ack *extack);
+void ice_sf_eth_deactivate(struct ice_dynamic_port *dyn_port);
+#endif /* _ICE_SF_ETH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c
new file mode 100644
index 000000000000..3d7e96721cf9
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023, Intel Corporation. */
+
+#include "ice_vsi_vlan_ops.h"
+#include "ice_vsi_vlan_lib.h"
+#include "ice_vlan_mode.h"
+#include "ice.h"
+#include "ice_sf_vsi_vlan_ops.h"
+
+void ice_sf_vsi_init_vlan_ops(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+
+ if (ice_is_dvm_ena(&vsi->back->hw))
+ vlan_ops = &vsi->outer_vlan_ops;
+ else
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h
new file mode 100644
index 000000000000..8c44eafceea0
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023, Intel Corporation. */
+
+#ifndef _ICE_SF_VSI_VLAN_OPS_H_
+#define _ICE_SF_VSI_VLAN_OPS_H_
+
+#include "ice_vsi_vlan_ops.h"
+
+struct ice_vsi;
+
+void ice_sf_vsi_init_vlan_ops(struct ice_vsi *vsi);
+
+#endif /* _ICE_SF_VSI_VLAN_OPS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 55ef33208456..e34fe2516ccc 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -175,7 +175,7 @@ void ice_free_vfs(struct ice_pf *pf)
ice_for_each_vf(pf, bkt, vf) {
mutex_lock(&vf->cfg_lock);
- ice_eswitch_detach(pf, vf);
+ ice_eswitch_detach_vf(pf, vf);
ice_dis_vf_qs(vf);
if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
@@ -598,7 +598,7 @@ static int ice_start_vfs(struct ice_pf *pf)
goto teardown;
}
- retval = ice_eswitch_attach(pf, vf);
+ retval = ice_eswitch_attach_vf(pf, vf);
if (retval) {
dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d",
vf->vf_id, retval);
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index fe8847184cb1..79d91e95358c 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -3194,7 +3194,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
- return 0;
+ return -EEXIST;
/* Update the previously created VSI list set with
* the new VSI ID passed in
@@ -3264,7 +3264,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
list_head = &sw->recp_list[recp_id].filt_rules;
list_for_each_entry(list_itr, list_head, list_entry) {
- if (list_itr->vsi_list_info) {
+ if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
map_info = list_itr->vsi_list_info;
if (test_bit(vsi_handle, map_info->vsi_map)) {
*vsi_list_id = map_info->vsi_list_id;
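
The ice_switch.c hunks above make adding a VSI that is already covered by a rule fail with -EEXIST instead of silently succeeding, and only reuse an existing VSI list when it currently tracks a single VSI. A tiny stand-alone sketch of the duplicate check follows; it uses a plain bool array where the driver uses a DECLARE_BITMAP() map, and the names are illustrative.

/* Sketch of the duplicate check: adding a VSI whose bit is already set in
 * the list now reports -EEXIST to the caller.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_VSI 16
static bool vsi_map[MAX_VSI];

static int vsi_list_add(unsigned int vsi_handle)
{
	if (vsi_handle >= MAX_VSI)
		return -EINVAL;
	if (vsi_map[vsi_handle])
		return -EEXIST;          /* rule already covers this VSI */
	vsi_map[vsi_handle] = true;
	return 0;
}

int main(void)
{
	printf("%d\n", vsi_list_add(3));  /* 0 */
	printf("%d\n", vsi_list_add(3));  /* -EEXIST */
	return 0;
}
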
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index c9bc3f1add5d..8208055d6e7f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -2368,7 +2368,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
ICE_TXD_CTX_QW1_CMD_S);
ice_tstamp(tx_ring, skb, first, &offload);
- if (ice_is_switchdev_running(vsi->back))
+ if (ice_is_switchdev_running(vsi->back) && vsi->type != ICE_VSI_SF)
ice_eswitch_set_target_vsi(skb, &offload);
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index b9e443232335..45768796691f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -159,6 +159,7 @@ enum ice_vsi_type {
ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */
ICE_VSI_CHNL = 4,
ICE_VSI_LB = 6,
+ ICE_VSI_SF = 9,
};
struct ice_link_status {
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 5635e9da2212..a69e91f88d81 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -766,7 +766,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
ice_for_each_vf(pf, bkt, vf) {
mutex_lock(&vf->cfg_lock);
- ice_eswitch_detach(pf, vf);
+ ice_eswitch_detach_vf(pf, vf);
vf->driver_caps = 0;
ice_vc_set_default_allowlist(vf);
@@ -782,7 +782,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
ice_vf_rebuild_vsi(vf);
ice_vf_post_vsi_rebuild(vf);
- ice_eswitch_attach(pf, vf);
+ ice_eswitch_attach_vf(pf, vf);
mutex_unlock(&vf->cfg_lock);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
index 7aae7fdcfcdb..8c7a9b41fb63 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
@@ -3,6 +3,7 @@
#include "ice_pf_vsi_vlan_ops.h"
#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_sf_vsi_vlan_ops.h"
#include "ice_lib.h"
#include "ice.h"
@@ -77,6 +78,9 @@ void ice_vsi_init_vlan_ops(struct ice_vsi *vsi)
case ICE_VSI_VF:
ice_vf_vsi_init_vlan_ops(vsi);
break;
+ case ICE_VSI_SF:
+ ice_sf_vsi_init_vlan_ops(vsi);
+ break;
default:
dev_dbg(ice_pf_to_dev(vsi->back), "%s does not support VLAN operations\n",
ice_vsi_type_str(vsi->type));
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 5dee829bfc47..334ae945d640 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -289,7 +289,7 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
int err;
- if (vsi->type != ICE_VSI_PF)
+ if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF)
return -EINVAL;
if (qid >= vsi->netdev->real_num_rx_queues ||
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index 3df9935685e9..6c913a703df6 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -97,8 +97,10 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
intr->dyn_ctl = idpf_get_reg_addr(adapter,
reg_vals[vec_id].dyn_ctl_reg);
intr->dyn_ctl_intena_m = PF_GLINT_DYN_CTL_INTENA_M;
+ intr->dyn_ctl_intena_msk_m = PF_GLINT_DYN_CTL_INTENA_MSK_M;
intr->dyn_ctl_itridx_s = PF_GLINT_DYN_CTL_ITR_INDX_S;
intr->dyn_ctl_intrvl_s = PF_GLINT_DYN_CTL_INTERVAL_S;
+ intr->dyn_ctl_wb_on_itr_m = PF_GLINT_DYN_CTL_WB_ON_ITR_M;
spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
IDPF_PF_ITR_IDX_SPACING);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index fe64febf7436..dfd7cf1d9aa0 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2023 Intel Corporation */
#include <net/libeth/rx.h>
+#include <net/libeth/tx.h>
#include "idpf.h"
@@ -224,6 +225,7 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, size);
dma_unmap_addr_set(tx_buf, dma, dma);
+ tx_buf->type = LIBETH_SQE_FRAG;
/* align size to end of page */
max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
@@ -237,14 +239,17 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
offsets,
max_data,
td_tag);
- tx_desc++;
- i++;
-
- if (i == tx_q->desc_count) {
+ if (unlikely(++i == tx_q->desc_count)) {
+ tx_buf = &tx_q->tx_buf[0];
tx_desc = &tx_q->base_tx[0];
i = 0;
+ } else {
+ tx_buf++;
+ tx_desc++;
}
+ tx_buf->type = LIBETH_SQE_EMPTY;
+
dma += max_data;
size -= max_data;
@@ -257,12 +262,14 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets,
size, td_tag);
- tx_desc++;
- i++;
- if (i == tx_q->desc_count) {
+ if (unlikely(++i == tx_q->desc_count)) {
+ tx_buf = &tx_q->tx_buf[0];
tx_desc = &tx_q->base_tx[0];
i = 0;
+ } else {
+ tx_buf++;
+ tx_desc++;
}
size = skb_frag_size(frag);
@@ -270,8 +277,6 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
DMA_TO_DEVICE);
-
- tx_buf = &tx_q->tx_buf[i];
}
skb_tx_timestamp(first->skb);
@@ -282,13 +287,13 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets,
size, td_tag);
- IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
+ first->type = LIBETH_SQE_SKB;
+ first->rs_idx = i;
- /* set next_to_watch value indicating a packet is present */
- first->next_to_watch = tx_desc;
+ IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- netdev_tx_sent_queue(nq, first->bytecount);
+ netdev_tx_sent_queue(nq, first->bytes);
idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
}
@@ -306,8 +311,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_tx_queue *txq)
struct idpf_base_tx_ctx_desc *ctx_desc;
int ntu = txq->next_to_use;
- memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf));
- txq->tx_buf[ntu].ctx_entry = true;
+ txq->tx_buf[ntu].type = LIBETH_SQE_CTX;
ctx_desc = &txq->base_ctx[ntu];
@@ -371,6 +375,10 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
IDPF_TX_DESCS_FOR_CTX)) {
idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_inc(&tx_q->q_stats.q_busy);
+ u64_stats_update_end(&tx_q->stats_sync);
+
return NETDEV_TX_BUSY;
}
@@ -396,11 +404,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
first->skb = skb;
if (tso) {
- first->gso_segs = offload.tso_segs;
- first->bytecount = skb->len + ((first->gso_segs - 1) * offload.tso_hdr_len);
+ first->packets = offload.tso_segs;
+ first->bytes = skb->len + ((first->packets - 1) * offload.tso_hdr_len);
} else {
- first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
- first->gso_segs = 1;
+ first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+ first->packets = 1;
}
idpf_tx_singleq_map(tx_q, first, &offload);
@@ -420,10 +428,15 @@ out_drop:
static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
int *cleaned)
{
- unsigned int total_bytes = 0, total_pkts = 0;
+ struct libeth_sq_napi_stats ss = { };
struct idpf_base_tx_desc *tx_desc;
u32 budget = tx_q->clean_budget;
s16 ntc = tx_q->next_to_clean;
+ struct libeth_cq_pp cp = {
+ .dev = tx_q->dev,
+ .ss = &ss,
+ .napi = napi_budget,
+ };
struct idpf_netdev_priv *np;
struct idpf_tx_buf *tx_buf;
struct netdev_queue *nq;
@@ -441,47 +454,26 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
* such. We can skip this descriptor since there is no buffer
* to clean.
*/
- if (tx_buf->ctx_entry) {
- /* Clear this flag here to avoid stale flag values when
- * this buffer is used for actual data in the future.
- * There are cases where the tx_buf struct / the flags
- * field will not be cleared before being reused.
- */
- tx_buf->ctx_entry = false;
+ if (unlikely(tx_buf->type <= LIBETH_SQE_CTX)) {
+ tx_buf->type = LIBETH_SQE_EMPTY;
goto fetch_next_txq_desc;
}
- /* if next_to_watch is not set then no work pending */
- eop_desc = (struct idpf_base_tx_desc *)tx_buf->next_to_watch;
- if (!eop_desc)
+ if (unlikely(tx_buf->type != LIBETH_SQE_SKB))
break;
- /* prevent any other reads prior to eop_desc */
+ /* prevent any other reads prior to type */
smp_rmb();
+ eop_desc = &tx_q->base_tx[tx_buf->rs_idx];
+
/* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->qw1 &
cpu_to_le64(IDPF_TX_DESC_DTYPE_DESC_DONE)))
break;
- /* clear next_to_watch to prevent false hangs */
- tx_buf->next_to_watch = NULL;
-
/* update the statistics for this packet */
- total_bytes += tx_buf->bytecount;
- total_pkts += tx_buf->gso_segs;
-
- napi_consume_skb(tx_buf->skb, napi_budget);
-
- /* unmap skb header data */
- dma_unmap_single(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
-
- /* clear tx_buf data */
- tx_buf->skb = NULL;
- dma_unmap_len_set(tx_buf, len, 0);
+ libeth_tx_complete(tx_buf, &cp);
/* unmap remaining buffers */
while (tx_desc != eop_desc) {
@@ -495,13 +487,7 @@ static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
}
/* unmap any remaining paged data */
- if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- }
+ libeth_tx_complete(tx_buf, &cp);
}
/* update budget only if we did something */
@@ -521,11 +507,11 @@ fetch_next_txq_desc:
ntc += tx_q->desc_count;
tx_q->next_to_clean = ntc;
- *cleaned += total_pkts;
+ *cleaned += ss.packets;
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_add(&tx_q->q_stats.packets, total_pkts);
- u64_stats_add(&tx_q->q_stats.bytes, total_bytes);
+ u64_stats_add(&tx_q->q_stats.packets, ss.packets);
+ u64_stats_add(&tx_q->q_stats.bytes, ss.bytes);
u64_stats_update_end(&tx_q->stats_sync);
np = netdev_priv(tx_q->netdev);
@@ -533,7 +519,7 @@ fetch_next_txq_desc:
dont_wake = np->state != __IDPF_VPORT_UP ||
!netif_carrier_ok(tx_q->netdev);
- __netif_txq_completed_wake(nq, total_pkts, total_bytes,
+ __netif_txq_completed_wake(nq, ss.packets, ss.bytes,
IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
dont_wake);
@@ -1134,8 +1120,10 @@ int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget)
&work_done);
/* If work not completed, return budget and polling will return */
- if (!clean_complete)
+ if (!clean_complete) {
+ idpf_vport_intr_set_wb_on_itr(q_vector);
return budget;
+ }
work_done = min_t(int, work_done, budget - 1);
@@ -1144,6 +1132,8 @@ int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget)
*/
if (likely(napi_complete_done(napi, work_done)))
idpf_vport_intr_update_itr_ena_irq(q_vector);
+ else
+ idpf_vport_intr_set_wb_on_itr(q_vector);
return work_done;
}
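
The idpf single-queue Tx path above now advances the buffer and descriptor cursors together, wrapping both back to slot 0 when the end of the ring is reached, rather than re-deriving the buffer pointer from the index each iteration. Below is a stand-alone sketch of that bump-with-wraparound pattern; the ring size and field names are illustrative.

/* Two parallel ring cursors advance in lockstep and wrap together. */
#include <stdio.h>

#define RING_SIZE 4

struct buf  { int id; };
struct desc { int id; };

int main(void)
{
	struct buf  bufs[RING_SIZE];
	struct desc descs[RING_SIZE];
	struct buf  *buf  = &bufs[0];
	struct desc *desc = &descs[0];
	unsigned int i = 0;

	for (int n = 0; n < 10; n++) {
		buf->id = desc->id = n;
		if (++i == RING_SIZE) {          /* wrap both cursors together */
			i = 0;
			buf  = &bufs[0];
			desc = &descs[0];
		} else {
			buf++;
			desc++;
		}
	}
	printf("next slot after 10 writes: %u\n", i);  /* 10 %% 4 == 2 */
	return 0;
}
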
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 585c3dadd9bf..d4e6f0e10487 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2,10 +2,19 @@
/* Copyright (C) 2023 Intel Corporation */
#include <net/libeth/rx.h>
+#include <net/libeth/tx.h>
#include "idpf.h"
#include "idpf_virtchnl.h"
+struct idpf_tx_stash {
+ struct hlist_node hlist;
+ struct libeth_sqe buf;
+};
+
+#define idpf_tx_buf_compl_tag(buf) (*(u32 *)&(buf)->priv)
+LIBETH_SQE_CHECK_PRIV(u32);
+
static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
unsigned int count);
@@ -61,41 +70,20 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
/**
- * idpf_tx_buf_rel - Release a Tx buffer
- * @tx_q: the queue that owns the buffer
- * @tx_buf: the buffer to free
- */
-static void idpf_tx_buf_rel(struct idpf_tx_queue *tx_q,
- struct idpf_tx_buf *tx_buf)
-{
- if (tx_buf->skb) {
- if (dma_unmap_len(tx_buf, len))
- dma_unmap_single(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dev_kfree_skb_any(tx_buf->skb);
- } else if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- }
-
- tx_buf->next_to_watch = NULL;
- tx_buf->skb = NULL;
- tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
- dma_unmap_len_set(tx_buf, len, 0);
-}
-
-/**
* idpf_tx_buf_rel_all - Free any empty Tx buffers
* @txq: queue to be cleaned
*/
static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
{
+ struct libeth_sq_napi_stats ss = { };
struct idpf_buf_lifo *buf_stack;
- u16 i;
+ struct idpf_tx_stash *stash;
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = &ss,
+ };
+ struct hlist_node *tmp;
+ u32 i, tag;
/* Buffers already cleared, nothing to do */
if (!txq->tx_buf)
@@ -103,7 +91,7 @@ static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
/* Free all the Tx buffer sk_buffs */
for (i = 0; i < txq->desc_count; i++)
- idpf_tx_buf_rel(txq, &txq->tx_buf[i]);
+ libeth_tx_complete(&txq->tx_buf[i], &cp);
kfree(txq->tx_buf);
txq->tx_buf = NULL;
@@ -115,6 +103,20 @@ static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
if (!buf_stack->bufs)
return;
+ /*
+	 * If a Tx timeout occurred, there may still be buffers stashed in the
+	 * hash table; free them here.
+ */
+ hash_for_each_safe(txq->stash->sched_buf_hash, tag, tmp, stash,
+ hlist) {
+ if (!stash)
+ continue;
+
+ libeth_tx_complete(&stash->buf, &cp);
+ hash_del(&stash->hlist);
+ idpf_buf_lifo_push(buf_stack, stash);
+ }
+
for (i = 0; i < buf_stack->size; i++)
kfree(buf_stack->bufs[i]);
@@ -131,6 +133,7 @@ static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
{
idpf_tx_buf_rel_all(txq);
+ netdev_tx_reset_subqueue(txq->netdev, txq->idx);
if (!txq->desc_ring)
return;
@@ -203,10 +206,6 @@ static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
if (!tx_q->tx_buf)
return -ENOMEM;
- /* Initialize tx_bufs with invalid completion tags */
- for (i = 0; i < tx_q->desc_count; i++)
- tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
-
if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
return 0;
@@ -1656,37 +1655,6 @@ static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q)
}
/**
- * idpf_tx_splitq_clean_hdr - Clean TX buffer resources for header portion of
- * packet
- * @tx_q: tx queue to clean buffer from
- * @tx_buf: buffer to be cleaned
- * @cleaned: pointer to stats struct to track cleaned packets/bytes
- * @napi_budget: Used to determine if we are in netpoll
- */
-static void idpf_tx_splitq_clean_hdr(struct idpf_tx_queue *tx_q,
- struct idpf_tx_buf *tx_buf,
- struct idpf_cleaned_stats *cleaned,
- int napi_budget)
-{
- napi_consume_skb(tx_buf->skb, napi_budget);
-
- if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_single(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
-
- dma_unmap_len_set(tx_buf, len, 0);
- }
-
- /* clear tx_buf data */
- tx_buf->skb = NULL;
-
- cleaned->bytes += tx_buf->bytecount;
- cleaned->packets += tx_buf->gso_segs;
-}
-
-/**
* idpf_tx_clean_stashed_bufs - clean bufs that were stored for
* out of order completions
* @txq: queue to clean
@@ -1696,33 +1664,28 @@ static void idpf_tx_splitq_clean_hdr(struct idpf_tx_queue *tx_q,
*/
static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq,
u16 compl_tag,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
int budget)
{
struct idpf_tx_stash *stash;
struct hlist_node *tmp_buf;
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = cleaned,
+ .napi = budget,
+ };
/* Buffer completion */
hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf,
hlist, compl_tag) {
- if (unlikely(stash->buf.compl_tag != (int)compl_tag))
+ if (unlikely(idpf_tx_buf_compl_tag(&stash->buf) != compl_tag))
continue;
- if (stash->buf.skb) {
- idpf_tx_splitq_clean_hdr(txq, &stash->buf, cleaned,
- budget);
- } else if (dma_unmap_len(&stash->buf, len)) {
- dma_unmap_page(txq->dev,
- dma_unmap_addr(&stash->buf, dma),
- dma_unmap_len(&stash->buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(&stash->buf, len, 0);
- }
+ hash_del(&stash->hlist);
+ libeth_tx_complete(&stash->buf, &cp);
/* Push shadow buf back onto stack */
idpf_buf_lifo_push(&txq->stash->buf_stack, stash);
-
- hash_del(&stash->hlist);
}
}
@@ -1737,8 +1700,7 @@ static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq,
{
struct idpf_tx_stash *stash;
- if (unlikely(!dma_unmap_addr(tx_buf, dma) &&
- !dma_unmap_len(tx_buf, len)))
+ if (unlikely(tx_buf->type <= LIBETH_SQE_CTX))
return 0;
stash = idpf_buf_lifo_pop(&txq->stash->buf_stack);
@@ -1751,29 +1713,27 @@ static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq,
/* Store buffer params in shadow buffer */
stash->buf.skb = tx_buf->skb;
- stash->buf.bytecount = tx_buf->bytecount;
- stash->buf.gso_segs = tx_buf->gso_segs;
+ stash->buf.bytes = tx_buf->bytes;
+ stash->buf.packets = tx_buf->packets;
+ stash->buf.type = tx_buf->type;
+ stash->buf.nr_frags = tx_buf->nr_frags;
dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma));
dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len));
- stash->buf.compl_tag = tx_buf->compl_tag;
+ idpf_tx_buf_compl_tag(&stash->buf) = idpf_tx_buf_compl_tag(tx_buf);
/* Add buffer to buf_hash table to be freed later */
hash_add(txq->stash->sched_buf_hash, &stash->hlist,
- stash->buf.compl_tag);
-
- memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
+ idpf_tx_buf_compl_tag(&stash->buf));
- /* Reinitialize buf_id portion of tag */
- tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+ tx_buf->type = LIBETH_SQE_EMPTY;
return 0;
}
#define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf) \
do { \
- (ntc)++; \
- if (unlikely(!(ntc))) { \
- ntc -= (txq)->desc_count; \
+ if (unlikely(++(ntc) == (txq)->desc_count)) { \
+ ntc = 0; \
buf = (txq)->tx_buf; \
desc = &(txq)->flex_tx[0]; \
} else { \
@@ -1797,69 +1757,71 @@ do { \
* Separate packet completion events will be reported on the completion queue,
* and the buffers will be cleaned separately. The stats are not updated from
* this function when using flow-based scheduling.
+ *
+ * Furthermore, in flow scheduling mode, check to make sure there are enough
+ * reserve buffers to stash the packet. If there are not, return early, which
+ * will leave next_to_clean pointing to the packet that failed to be stashed.
+ *
+ * Return: false in the scenario above, true otherwise.
*/
-static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
+static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
int napi_budget,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
bool descs_only)
{
union idpf_tx_flex_desc *next_pending_desc = NULL;
union idpf_tx_flex_desc *tx_desc;
- s16 ntc = tx_q->next_to_clean;
+ u32 ntc = tx_q->next_to_clean;
+ struct libeth_cq_pp cp = {
+ .dev = tx_q->dev,
+ .ss = cleaned,
+ .napi = napi_budget,
+ };
struct idpf_tx_buf *tx_buf;
+ bool clean_complete = true;
tx_desc = &tx_q->flex_tx[ntc];
next_pending_desc = &tx_q->flex_tx[end];
tx_buf = &tx_q->tx_buf[ntc];
- ntc -= tx_q->desc_count;
while (tx_desc != next_pending_desc) {
- union idpf_tx_flex_desc *eop_desc;
+ u32 eop_idx;
/* If this entry in the ring was used as a context descriptor,
- * it's corresponding entry in the buffer ring will have an
- * invalid completion tag since no buffer was used. We can
- * skip this descriptor since there is no buffer to clean.
+		 * its corresponding entry in the buffer ring is reserved. We
+ * can skip this descriptor since there is no buffer to clean.
*/
- if (unlikely(tx_buf->compl_tag == IDPF_SPLITQ_TX_INVAL_COMPL_TAG))
+ if (tx_buf->type <= LIBETH_SQE_CTX)
goto fetch_next_txq_desc;
- eop_desc = (union idpf_tx_flex_desc *)tx_buf->next_to_watch;
+ if (unlikely(tx_buf->type != LIBETH_SQE_SKB))
+ break;
- /* clear next_to_watch to prevent false hangs */
- tx_buf->next_to_watch = NULL;
+ eop_idx = tx_buf->rs_idx;
if (descs_only) {
- if (idpf_stash_flow_sch_buffers(tx_q, tx_buf))
+ if (IDPF_TX_BUF_RSV_UNUSED(tx_q) < tx_buf->nr_frags) {
+ clean_complete = false;
goto tx_splitq_clean_out;
+ }
+
+ idpf_stash_flow_sch_buffers(tx_q, tx_buf);
- while (tx_desc != eop_desc) {
+ while (ntc != eop_idx) {
idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
tx_desc, tx_buf);
-
- if (dma_unmap_len(tx_buf, len)) {
- if (idpf_stash_flow_sch_buffers(tx_q,
- tx_buf))
- goto tx_splitq_clean_out;
- }
+ idpf_stash_flow_sch_buffers(tx_q, tx_buf);
}
} else {
- idpf_tx_splitq_clean_hdr(tx_q, tx_buf, cleaned,
- napi_budget);
+ libeth_tx_complete(tx_buf, &cp);
/* unmap remaining buffers */
- while (tx_desc != eop_desc) {
+ while (ntc != eop_idx) {
idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
tx_desc, tx_buf);
/* unmap any remaining paged data */
- if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- }
+ libeth_tx_complete(tx_buf, &cp);
}
}
@@ -1868,8 +1830,9 @@ fetch_next_txq_desc:
}
tx_splitq_clean_out:
- ntc += tx_q->desc_count;
tx_q->next_to_clean = ntc;
+
+ return clean_complete;
}
#define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf) \
@@ -1895,57 +1858,68 @@ do { \
* this completion tag.
*/
static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
int budget)
{
u16 idx = compl_tag & txq->compl_tag_bufid_m;
struct idpf_tx_buf *tx_buf = NULL;
- u16 ntc = txq->next_to_clean;
- u16 num_descs_cleaned = 0;
- u16 orig_idx = idx;
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = cleaned,
+ .napi = budget,
+ };
+ u16 ntc, orig_idx = idx;
tx_buf = &txq->tx_buf[idx];
- while (tx_buf->compl_tag == (int)compl_tag) {
- if (tx_buf->skb) {
- idpf_tx_splitq_clean_hdr(txq, tx_buf, cleaned, budget);
- } else if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(txq->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- }
+ if (unlikely(tx_buf->type <= LIBETH_SQE_CTX ||
+ idpf_tx_buf_compl_tag(tx_buf) != compl_tag))
+ return false;
+
+ if (tx_buf->type == LIBETH_SQE_SKB)
+ libeth_tx_complete(tx_buf, &cp);
- memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
- tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+ idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
- num_descs_cleaned++;
+ while (idpf_tx_buf_compl_tag(tx_buf) == compl_tag) {
+ libeth_tx_complete(tx_buf, &cp);
idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
}
- /* If we didn't clean anything on the ring for this completion, there's
- * nothing more to do.
- */
- if (unlikely(!num_descs_cleaned))
- return false;
-
- /* Otherwise, if we did clean a packet on the ring directly, it's safe
- * to assume that the descriptors starting from the original
- * next_to_clean up until the previously cleaned packet can be reused.
- * Therefore, we will go back in the ring and stash any buffers still
- * in the ring into the hash table to be cleaned later.
+ /*
+ * It's possible the packet we just cleaned was an out of order
+ * completion, which means we can stash the buffers starting from
+ * the original next_to_clean and reuse the descriptors. We need
+ * to compare the descriptor ring next_to_clean packet's "first" buffer
+ * to the "first" buffer of the packet we just cleaned to determine if
+	 * this is the case. However, next_to_clean can point to either a
+ * reserved buffer that corresponds to a context descriptor used for the
+ * next_to_clean packet (TSO packet) or the "first" buffer (single
+ * packet). The orig_idx from the packet we just cleaned will always
+ * point to the "first" buffer. If next_to_clean points to a reserved
+ * buffer, let's bump ntc once and start the comparison from there.
*/
+ ntc = txq->next_to_clean;
tx_buf = &txq->tx_buf[ntc];
- while (tx_buf != &txq->tx_buf[orig_idx]) {
- idpf_stash_flow_sch_buffers(txq, tx_buf);
+
+ if (tx_buf->type == LIBETH_SQE_CTX)
idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf);
- }
- /* Finally, update next_to_clean to reflect the work that was just done
- * on the ring, if any. If the packet was only cleaned from the hash
- * table, the ring will not be impacted, therefore we should not touch
- * next_to_clean. The updated idx is used here
+ /*
+ * If ntc still points to a different "first" buffer, clean the
+ * descriptor ring and stash all of the buffers for later cleaning. If
+ * we cannot stash all of the buffers, next_to_clean will point to the
+ * "first" buffer of the packet that could not be stashed and cleaning
+ * will start there next time.
+ */
+ if (unlikely(tx_buf != &txq->tx_buf[orig_idx] &&
+ !idpf_tx_splitq_clean(txq, orig_idx, budget, cleaned,
+ true)))
+ return true;
+
+ /*
+ * Otherwise, update next_to_clean to reflect the cleaning that was
+ * done above.
*/
txq->next_to_clean = idx;
@@ -1965,7 +1939,7 @@ static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
*/
static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
struct idpf_splitq_tx_compl_desc *desc,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
int budget)
{
u16 compl_tag;
@@ -1973,7 +1947,8 @@ static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
if (!idpf_queue_has(FLOW_SCH_EN, txq)) {
u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);
- return idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
+ idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
+ return;
}
compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);
@@ -2008,7 +1983,7 @@ static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
ntc -= complq->desc_count;
do {
- struct idpf_cleaned_stats cleaned_stats = { };
+ struct libeth_sq_napi_stats cleaned_stats = { };
struct idpf_tx_queue *tx_q;
int rel_tx_qid;
u16 hw_head;
@@ -2158,29 +2133,6 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
}
/**
- * idpf_tx_maybe_stop_common - 1st level check for common Tx stop conditions
- * @tx_q: the queue to be checked
- * @size: number of descriptors we want to assure is available
- *
- * Returns 0 if stop is not needed
- */
-int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size)
-{
- struct netdev_queue *nq;
-
- if (likely(IDPF_DESC_UNUSED(tx_q) >= size))
- return 0;
-
- u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.q_busy);
- u64_stats_update_end(&tx_q->stats_sync);
-
- nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
-
- return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size);
-}
-
-/**
* idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
* @tx_q: the queue to be checked
* @descs_needed: number of descriptors required for this packet
@@ -2191,7 +2143,7 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
unsigned int descs_needed)
{
if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
- goto splitq_stop;
+ goto out;
/* If there are too many outstanding completions expected on the
* completion queue, stop the TX queue to give the device some time to
@@ -2210,10 +2162,12 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
return 0;
splitq_stop:
+ netif_stop_subqueue(tx_q->netdev, tx_q->idx);
+
+out:
u64_stats_update_begin(&tx_q->stats_sync);
u64_stats_inc(&tx_q->q_stats.q_busy);
u64_stats_update_end(&tx_q->stats_sync);
- netif_stop_subqueue(tx_q->netdev, tx_q->idx);
return -EBUSY;
}
@@ -2236,7 +2190,11 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
tx_q->next_to_use = val;
- idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED);
+ if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) {
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_inc(&tx_q->q_stats.q_busy);
+ u64_stats_update_end(&tx_q->stats_sync);
+ }
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
@@ -2307,6 +2265,12 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
struct idpf_tx_buf *first, u16 idx)
{
+ struct libeth_sq_napi_stats ss = { };
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = &ss,
+ };
+
u64_stats_update_begin(&txq->stats_sync);
u64_stats_inc(&txq->q_stats.dma_map_errs);
u64_stats_update_end(&txq->stats_sync);
@@ -2316,7 +2280,7 @@ void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
struct idpf_tx_buf *tx_buf;
tx_buf = &txq->tx_buf[idx];
- idpf_tx_buf_rel(txq, tx_buf);
+ libeth_tx_complete(tx_buf, &cp);
if (tx_buf == first)
break;
if (idx == 0)
@@ -2395,6 +2359,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
tx_buf = first;
+ first->nr_frags = 0;
params->compl_tag =
(tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i;
@@ -2405,7 +2370,9 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
if (dma_mapping_error(tx_q->dev, dma))
return idpf_tx_dma_map_error(tx_q, skb, first, i);
- tx_buf->compl_tag = params->compl_tag;
+ first->nr_frags++;
+ idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
+ tx_buf->type = LIBETH_SQE_FRAG;
/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, size);
@@ -2459,14 +2426,15 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd,
max_data);
- tx_desc++;
- i++;
-
- if (i == tx_q->desc_count) {
+ if (unlikely(++i == tx_q->desc_count)) {
+ tx_buf = tx_q->tx_buf;
tx_desc = &tx_q->flex_tx[0];
i = 0;
tx_q->compl_tag_cur_gen =
IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
+ } else {
+ tx_buf++;
+ tx_desc++;
}
/* Since this packet has a buffer that is going to span
@@ -2479,8 +2447,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
* simply pass over these holes and finish cleaning the
* rest of the packet.
*/
- memset(&tx_q->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
- tx_q->tx_buf[i].compl_tag = params->compl_tag;
+ tx_buf->type = LIBETH_SQE_EMPTY;
/* Adjust the DMA offset and the remaining size of the
* fragment. On the first iteration of this loop,
@@ -2504,13 +2471,15 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
break;
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
- tx_desc++;
- i++;
- if (i == tx_q->desc_count) {
+ if (unlikely(++i == tx_q->desc_count)) {
+ tx_buf = tx_q->tx_buf;
tx_desc = &tx_q->flex_tx[0];
i = 0;
tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
+ } else {
+ tx_buf++;
+ tx_desc++;
}
size = skb_frag_size(frag);
@@ -2518,26 +2487,24 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
DMA_TO_DEVICE);
-
- tx_buf = &tx_q->tx_buf[i];
}
/* record SW timestamp if HW timestamp is not available */
skb_tx_timestamp(skb);
+ first->type = LIBETH_SQE_SKB;
+
/* write last descriptor with RS and EOP bits */
+ first->rs_idx = i;
td_cmd |= params->eop_cmd;
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
i = idpf_tx_splitq_bump_ntu(tx_q, i);
- /* set next_to_watch value indicating a packet is present */
- first->next_to_watch = tx_desc;
-
tx_q->txq_grp->num_completions_pending++;
/* record bytecount for BQL */
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- netdev_tx_sent_queue(nq, first->bytecount);
+ netdev_tx_sent_queue(nq, first->bytes);
idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
}
@@ -2737,8 +2704,7 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
struct idpf_flex_tx_ctx_desc *desc;
int i = txq->next_to_use;
- memset(&txq->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
- txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+ txq->tx_buf[i].type = LIBETH_SQE_CTX;
/* grab the next descriptor */
desc = &txq->flex_ctx[i];
@@ -2822,12 +2788,12 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
first->skb = skb;
if (tso) {
- first->gso_segs = tx_params.offload.tso_segs;
- first->bytecount = skb->len +
- ((first->gso_segs - 1) * tx_params.offload.tso_hdr_len);
+ first->packets = tx_params.offload.tso_segs;
+ first->bytes = skb->len +
+ ((first->packets - 1) * tx_params.offload.tso_hdr_len);
} else {
- first->gso_segs = 1;
- first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
+ first->packets = 1;
+ first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
}
if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
@@ -3749,6 +3715,7 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
/* net_dim() updates ITR out-of-band using a work item */
idpf_net_dim(q_vector);
+ q_vector->wb_on_itr = false;
intval = idpf_vport_intr_buildreg_itr(q_vector,
IDPF_NO_ITR_UPDATE_IDX, 0);
@@ -4051,8 +4018,10 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
/* If work not completed, return budget and polling will return */
- if (!clean_complete)
+ if (!clean_complete) {
+ idpf_vport_intr_set_wb_on_itr(q_vector);
return budget;
+ }
work_done = min_t(int, work_done, budget - 1);
@@ -4061,6 +4030,8 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
*/
if (likely(napi_complete_done(napi, work_done)))
idpf_vport_intr_update_itr_ena_irq(q_vector);
+ else
+ idpf_vport_intr_set_wb_on_itr(q_vector);
/* Switch to poll mode in the tear-down path after sending disable
* queues virtchnl message, as the interrupts will be disabled after
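
For reference, the libeth completion path that the idpf conversion above relies on takes a per-buffer entry plus a small completion-parameters struct; the field meanings below are taken from the hunks above. A minimal sketch, assuming simplified queue/buffer types (struct my_txq and my_clean_one_packet() are illustrative stand-ins, not idpf symbols, and the libeth Tx header is assumed to be included):

	/* Sketch only: per-buffer completion via libeth_tx_complete(),
	 * mirroring the .dev/.ss/.napi initialization shown in the diff.
	 */
	static void my_clean_one_packet(struct my_txq *q, u32 first, u32 last,
					struct libeth_sq_napi_stats *ss, int budget)
	{
		struct libeth_cq_pp cp = {
			.dev	= q->dev,	/* device used for DMA unmapping */
			.ss	= ss,		/* packets/bytes accumulated here */
			.napi	= budget,	/* nonzero: free skbs in NAPI context */
		};
		u32 i;

		for (i = first; i <= last; i++)
			libeth_tx_complete(&q->tx_buf[i], &cp);
	}
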
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 6215dbee5546..f0537826f840 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -127,11 +127,10 @@ do { \
*/
#define IDPF_TX_COMPLQ_PENDING(txq) \
(((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
- 0 : U64_MAX) + \
+ 0 : U32_MAX) + \
(txq)->num_completions_pending - (txq)->complq->num_completions)
#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH 16
-#define IDPF_SPLITQ_TX_INVAL_COMPL_TAG -1
/* Adjust the generation for the completion tag and wrap if necessary */
#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \
((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
@@ -149,47 +148,7 @@ union idpf_tx_flex_desc {
struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
};
-/**
- * struct idpf_tx_buf
- * @next_to_watch: Next descriptor to clean
- * @skb: Pointer to the skb
- * @dma: DMA address
- * @len: DMA length
- * @bytecount: Number of bytes
- * @gso_segs: Number of GSO segments
- * @compl_tag: Splitq only, unique identifier for a buffer. Used to compare
- * with completion tag returned in buffer completion event.
- * Because the completion tag is expected to be the same in all
- * data descriptors for a given packet, and a single packet can
- * span multiple buffers, we need this field to track all
- * buffers associated with this completion tag independently of
- * the buf_id. The tag consists of a N bit buf_id and M upper
- * order "generation bits". See compl_tag_bufid_m and
- * compl_tag_gen_s in struct idpf_queue. We'll use a value of -1
- * to indicate the tag is not valid.
- * @ctx_entry: Singleq only. Used to indicate the corresponding entry
- * in the descriptor ring was used for a context descriptor and
- * this buffer entry should be skipped.
- */
-struct idpf_tx_buf {
- void *next_to_watch;
- struct sk_buff *skb;
- DEFINE_DMA_UNMAP_ADDR(dma);
- DEFINE_DMA_UNMAP_LEN(len);
- unsigned int bytecount;
- unsigned short gso_segs;
-
- union {
- int compl_tag;
-
- bool ctx_entry;
- };
-};
-
-struct idpf_tx_stash {
- struct hlist_node hlist;
- struct idpf_tx_buf buf;
-};
+#define idpf_tx_buf libeth_sqe
/**
* struct idpf_buf_lifo - LIFO for managing OOO completions
@@ -390,9 +349,11 @@ struct idpf_vec_regs {
* struct idpf_intr_reg
* @dyn_ctl: Dynamic control interrupt register
* @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable
+ * @dyn_ctl_intena_msk_m: Mask for dyn_ctl interrupt enable mask
* @dyn_ctl_itridx_s: Register bit offset for ITR index
* @dyn_ctl_itridx_m: Mask for ITR index
* @dyn_ctl_intrvl_s: Register bit offset for ITR interval
+ * @dyn_ctl_wb_on_itr_m: Mask for WB on ITR feature
* @rx_itr: RX ITR register
* @tx_itr: TX ITR register
* @icr_ena: Interrupt cause register offset
@@ -401,9 +362,11 @@ struct idpf_vec_regs {
struct idpf_intr_reg {
void __iomem *dyn_ctl;
u32 dyn_ctl_intena_m;
+ u32 dyn_ctl_intena_msk_m;
u32 dyn_ctl_itridx_s;
u32 dyn_ctl_itridx_m;
u32 dyn_ctl_intrvl_s;
+ u32 dyn_ctl_wb_on_itr_m;
void __iomem *rx_itr;
void __iomem *tx_itr;
void __iomem *icr_ena;
@@ -424,6 +387,7 @@ struct idpf_intr_reg {
* @intr_reg: See struct idpf_intr_reg
* @napi: napi handler
* @total_events: Number of interrupts processed
+ * @wb_on_itr: whether WB on ITR is enabled
* @tx_dim: Data for TX net_dim algorithm
* @tx_itr_value: TX interrupt throttling rate
* @tx_intr_mode: Dynamic ITR or not
@@ -454,6 +418,7 @@ struct idpf_q_vector {
__cacheline_group_begin_aligned(read_write);
struct napi_struct napi;
u16 total_events;
+ bool wb_on_itr;
struct dim tx_dim;
u16 tx_itr_value;
@@ -472,7 +437,7 @@ struct idpf_q_vector {
cpumask_var_t affinity_mask;
__cacheline_group_end_aligned(cold);
};
-libeth_cacheline_set_assert(struct idpf_q_vector, 104,
+libeth_cacheline_set_assert(struct idpf_q_vector, 112,
424 + 2 * sizeof(struct dim),
8 + sizeof(cpumask_var_t));
@@ -496,11 +461,6 @@ struct idpf_tx_queue_stats {
u64_stats_t dma_map_errs;
};
-struct idpf_cleaned_stats {
- u32 packets;
- u32 bytes;
-};
-
#define IDPF_ITR_DYNAMIC 1
#define IDPF_ITR_MAX 0x1FE0
#define IDPF_ITR_20K 0x0032
@@ -688,7 +648,7 @@ struct idpf_tx_queue {
void *desc_ring;
};
- struct idpf_tx_buf *tx_buf;
+ struct libeth_sqe *tx_buf;
struct idpf_txq_group *txq_grp;
struct device *dev;
void __iomem *tail;
@@ -831,7 +791,7 @@ struct idpf_compl_queue {
u32 next_to_use;
u32 next_to_clean;
- u32 num_completions;
+ aligned_u64 num_completions;
__cacheline_group_end_aligned(read_write);
__cacheline_group_begin_aligned(cold);
@@ -963,7 +923,7 @@ struct idpf_txq_group {
struct idpf_compl_queue *complq;
- u32 num_completions_pending;
+ aligned_u64 num_completions_pending;
};
static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
@@ -1033,6 +993,25 @@ static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
}
+/**
+ * idpf_vport_intr_set_wb_on_itr - enable descriptor writeback on disabled interrupts
+ * @q_vector: pointer to queue vector struct
+ */
+static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector)
+{
+ struct idpf_intr_reg *reg;
+
+ if (q_vector->wb_on_itr)
+ return;
+
+ q_vector->wb_on_itr = true;
+ reg = &q_vector->intr_reg;
+
+ writel(reg->dyn_ctl_wb_on_itr_m | reg->dyn_ctl_intena_msk_m |
+ (IDPF_NO_ITR_UPDATE_IDX << reg->dyn_ctl_itridx_s),
+ reg->dyn_ctl);
+}
+
int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_msg);
@@ -1064,7 +1043,6 @@ void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
struct idpf_tx_buf *first, u16 ring_idx);
unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
struct sk_buff *skb);
-int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size);
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
struct idpf_tx_queue *tx_q);
@@ -1073,4 +1051,12 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
u16 cleaned_count);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
+static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q,
+ u32 needed)
+{
+ return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+ IDPF_DESC_UNUSED(tx_q),
+ needed, needed);
+}
+
#endif /* !_IDPF_TXRX_H_ */
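
The wb_on_itr changes above follow a common NAPI pattern: whenever a poll cycle exits without re-enabling the interrupt (budget exhausted, or napi_complete_done() declined), the driver arms write-back on ITR so completions keep landing in memory while the interrupt stays masked; re-enabling the interrupt clears the flag again. A minimal sketch of that control flow, assuming a simplified vector struct and the hypothetical helpers my_arm_wb_on_itr()/my_enable_irq() in place of the idpf_vport_intr_* routines:

	/* Illustrative only: mirrors the poll-path pattern in the diff above. */
	static int my_napi_poll(struct napi_struct *napi, int budget)
	{
		struct my_vec *v = container_of(napi, struct my_vec, napi);
		int work_done = my_clean_queues(v, budget);

		if (work_done == budget) {
			/* Not done: keep polling, let HW write back on ITR expiry */
			my_arm_wb_on_itr(v);
			return budget;
		}

		work_done = min(work_done, budget - 1);
		if (napi_complete_done(napi, work_done))
			my_enable_irq(v);	/* also clears the wb_on_itr flag */
		else
			my_arm_wb_on_itr(v);	/* NAPI was rescheduled; stay in WB mode */

		return work_done;
	}
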
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
index 629cb5cb7c9f..99b8dbaf4225 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -97,7 +97,9 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
intr->dyn_ctl = idpf_get_reg_addr(adapter,
reg_vals[vec_id].dyn_ctl_reg);
intr->dyn_ctl_intena_m = VF_INT_DYN_CTLN_INTENA_M;
+ intr->dyn_ctl_intena_msk_m = VF_INT_DYN_CTLN_INTENA_MSK_M;
intr->dyn_ctl_itridx_s = VF_INT_DYN_CTLN_ITR_INDX_S;
+ intr->dyn_ctl_wb_on_itr_m = VF_INT_DYN_CTLN_WB_ON_ITR_M;
spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
IDPF_VF_ITR_IDX_SPACING);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 9dc7c60838ed..1ef4cb871452 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -33,6 +33,7 @@
#include <linux/bpf_trace.h>
#include <linux/pm_runtime.h>
#include <linux/etherdevice.h>
+#include <linux/lockdep.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
@@ -2914,8 +2915,11 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+/* This function assumes __netif_tx_lock is held by the caller. */
static void igb_xdp_ring_update_tail(struct igb_ring *ring)
{
+ lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
*/
@@ -3000,11 +3004,11 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
nxmit++;
}
- __netif_tx_unlock(nq);
-
if (unlikely(flags & XDP_XMIT_FLUSH))
igb_xdp_ring_update_tail(tx_ring);
+ __netif_tx_unlock(nq);
+
return nxmit;
}
@@ -8864,12 +8868,14 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
+ unsigned int total_bytes = 0, total_packets = 0;
struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *rx_ring = q_vector->rx.ring;
- struct sk_buff *skb = rx_ring->skb;
- unsigned int total_bytes = 0, total_packets = 0;
u16 cleaned_count = igb_desc_unused(rx_ring);
+ struct sk_buff *skb = rx_ring->skb;
+ int cpu = smp_processor_id();
unsigned int xdp_xmit = 0;
+ struct netdev_queue *nq;
struct xdp_buff xdp;
u32 frame_sz = 0;
int rx_buf_pgcnt;
@@ -8997,7 +9003,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (xdp_xmit & IGB_XDP_TX) {
struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
igb_xdp_ring_update_tail(tx_ring);
+ __netif_tx_unlock(nq);
}
u64_stats_update_begin(&rx_ring->rx_syncp);
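
The igb change above makes every caller bump the XDP Tx tail only while holding the netdev Tx queue lock, which the new lockdep assertion enforces. A hedged sketch of the caller-side pattern (struct my_ring and my_ring_update_tail() stand in for the igb ring type and igb_xdp_ring_update_tail(); they are not exact igb symbols):

	/* Illustrative only: lock-then-bump-tail ordering required above. */
	static void my_flush_xdp_tx(struct net_device *dev, struct my_ring *tx_ring)
	{
		struct netdev_queue *nq = netdev_get_tx_queue(dev, tx_ring->queue_index);

		__netif_tx_lock(nq, smp_processor_id());
		/* the tail write must not race with xmit on the same queue */
		my_ring_update_tail(tx_ring);	/* wmb() + tail register write inside */
		__netif_tx_unlock(nq);
	}
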
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 43b1d83686d1..5016ba82e142 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -319,6 +319,7 @@ struct nix_mark_format {
/* smq(flush) to tl1 cir/pir info */
struct nix_smq_tree_ctx {
+ u16 schq;
u64 cir_off;
u64 cir_val;
u64 pir_off;
@@ -328,8 +329,6 @@ struct nix_smq_tree_ctx {
/* smq flush context */
struct nix_smq_flush_ctx {
int smq;
- u16 tl1_schq;
- u16 tl2_schq;
struct nix_smq_tree_ctx smq_tree_ctx[NIX_TXSCH_LVL_CNT];
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 222f9e00b836..82832a24fbd8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2259,14 +2259,13 @@ static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
schq = smq;
for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
+ smq_tree_ctx->schq = schq;
if (lvl == NIX_TXSCH_LVL_TL1) {
- smq_flush_ctx->tl1_schq = schq;
smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
smq_tree_ctx->pir_off = 0;
smq_tree_ctx->pir_val = 0;
parent_off = 0;
} else if (lvl == NIX_TXSCH_LVL_TL2) {
- smq_flush_ctx->tl2_schq = schq;
smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
parent_off = NIX_AF_TL2X_PARENT(schq);
@@ -2301,8 +2300,8 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
{
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
+ int tl2, tl2_schq;
u64 regoff;
- int tl2;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
@@ -2310,16 +2309,17 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
/* loop through all TL2s with matching PF_FUNC */
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
+ tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq;
for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
/* skip the smq(flush) TL2 */
- if (tl2 == smq_flush_ctx->tl2_schq)
+ if (tl2 == tl2_schq)
continue;
/* skip unused TL2s */
if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
continue;
/* skip if PF_FUNC doesn't match */
if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
- (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] &
+ (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq] &
~RVU_PFVF_FUNC_MASK)))
continue;
/* enable/disable XOFF */
@@ -2361,10 +2361,12 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
int smq, u16 pcifunc, int nixlf)
{
struct nix_smq_flush_ctx *smq_flush_ctx;
+ int err, restore_tx_en = 0, i;
int pf = rvu_get_pf(pcifunc);
u8 cgx_id = 0, lmac_id = 0;
- int err, restore_tx_en = 0;
- u64 cfg;
+ u16 tl2_tl3_link_schq;
+ u8 link, link_level;
+ u64 cfg, bmap = 0;
if (!is_rvu_otx2(rvu)) {
/* Skip SMQ flush if pkt count is zero */
@@ -2388,16 +2390,38 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
- /* Do SMQ flush and set enqueue xoff */
- cfg |= BIT_ULL(50) | BIT_ULL(49);
- rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
-
/* Disable backpressure from physical link,
* otherwise SMQ flush may stall.
*/
rvu_cgx_enadis_rx_bp(rvu, pf, false);
+ link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
+ NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
+ tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq;
+ link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq;
+
+ /* SMQ set enqueue xoff */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+ cfg |= BIT_ULL(50);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
+
+ /* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */
+ for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
+ cfg = rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
+ if (!(cfg & BIT_ULL(12)))
+ continue;
+ bmap |= (1 << i);
+ cfg &= ~BIT_ULL(12);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
+ }
+
+ /* Do SMQ flush and set enqueue xoff */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+ cfg |= BIT_ULL(50) | BIT_ULL(49);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
+
/* Wait for flush to complete */
err = rvu_poll_reg(rvu, blkaddr,
NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
@@ -2406,6 +2430,17 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
"NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
nixlf, smq);
+ /* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
+ for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
+ if (!(bmap & (1 << i)))
+ continue;
+ cfg = rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
+ cfg |= BIT_ULL(12);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
+ }
+
/* clear XOFF on TL2s */
nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
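
The reworked SMQ flush above uses a save/disable/flush/restore idiom: clear the TL3/TL2-to-link enable bits so no further traffic reaches the queue being flushed, remember in a local bitmap which bits were actually set, and re-enable only those once the flush poll completes. A driver-agnostic sketch of the same idiom (MY_ENA_BIT, my_read_reg(), my_write_reg() and my_do_flush() are placeholders, not RVU symbols; it assumes nregs fits in an unsigned long bitmap):

	#define MY_ENA_BIT	BIT_ULL(12)	/* placeholder enable bit */

	static int my_quiesce_and_flush(void __iomem *base, const u64 *regs, int nregs)
	{
		unsigned long saved = 0;
		int i, err;

		for (i = 0; i < nregs; i++) {
			u64 cfg = my_read_reg(base, regs[i]);

			if (!(cfg & MY_ENA_BIT))
				continue;
			saved |= BIT(i);	/* remember what we disabled */
			my_write_reg(base, regs[i], cfg & ~MY_ENA_BIT);
		}

		err = my_do_flush(base);	/* may time out; restore regardless */

		for (i = 0; i < nregs; i++)
			if (saved & BIT(i))
				my_write_reg(base, regs[i],
					     my_read_reg(base, regs[i]) | MY_ENA_BIT);

		return err;
	}
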
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 685335832a93..ea6070180c96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -172,6 +172,16 @@ config MLX5_SW_STEERING
help
Build support for software-managed steering in the NIC.
+config MLX5_HW_STEERING
+ bool "Mellanox Technologies hardware-managed steering"
+ depends on MLX5_CORE_EN && MLX5_ESWITCH
+ default y
+ help
+ Build support for Hardware-Managed Flow Steering (HMFS) in the NIC.
+ HMFS is a new approach to managing steering rules where STEs are
+ written to ICM by HW (as opposed to SW in software-managed steering),
+ which allows a higher rate of rule insertion.
+
config MLX5_SF
bool "Mellanox Technologies subfunction device support using auxiliary device"
depends on MLX5_CORE && MLX5_CORE_EN
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 1289475e7be7..5912f7e614f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -119,6 +119,27 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_action.o steering/fs_dr.o \
steering/dr_definer.o steering/dr_ptrn.o \
steering/dr_arg.o steering/dr_dbg.o lib/smfs.o
+
+#
+# HW Steering
+#
+mlx5_core-$(CONFIG_MLX5_HW_STEERING) += steering/hws/mlx5hws_cmd.o \
+ steering/hws/mlx5hws_context.o \
+ steering/hws/mlx5hws_pat_arg.o \
+ steering/hws/mlx5hws_buddy.o \
+ steering/hws/mlx5hws_pool.o \
+ steering/hws/mlx5hws_table.o \
+ steering/hws/mlx5hws_action.o \
+ steering/hws/mlx5hws_rule.o \
+ steering/hws/mlx5hws_matcher.o \
+ steering/hws/mlx5hws_send.o \
+ steering/hws/mlx5hws_definer.o \
+ steering/hws/mlx5hws_bwc.o \
+ steering/hws/mlx5hws_debug.o \
+ steering/hws/mlx5hws_vport.o \
+ steering/hws/mlx5hws_bwc_complex.o
+
+
#
# SF device
#
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 20768ef2e9d2..9af8ddb4a78f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -754,6 +754,8 @@ static const char *cmd_status_str(u8 status)
return "bad resource";
case MLX5_CMD_STAT_RES_BUSY:
return "resource busy";
+ case MLX5_CMD_STAT_NOT_READY:
+ return "FW not ready";
case MLX5_CMD_STAT_LIM_ERR:
return "limits exceeded";
case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
@@ -787,6 +789,7 @@ static int cmd_status_to_err(u8 status)
case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
+ case MLX5_CMD_STAT_NOT_READY: return -EAGAIN;
case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
@@ -815,14 +818,16 @@ EXPORT_SYMBOL(mlx5_cmd_out_err);
static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
{
u16 opcode, op_mod;
+ u8 status;
u16 uid;
opcode = in_to_opcode(in);
op_mod = MLX5_GET(mbox_in, in, op_mod);
uid = MLX5_GET(mbox_in, in, uid);
+ status = MLX5_GET(mbox_out, out, status);
if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY &&
- opcode != MLX5_CMD_OP_CREATE_UCTX)
+ opcode != MLX5_CMD_OP_CREATE_UCTX && status != MLX5_CMD_STAT_NOT_READY)
mlx5_cmd_out_err(dev, opcode, op_mod, out);
}
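
With the mapping above, commands that complete with MLX5_CMD_STAT_NOT_READY now return -EAGAIN and skip the error print, so callers issuing commands while firmware is still booting can simply retry. A hedged sketch of such a caller-side retry loop (my_issue_cmd() is a placeholder for whichever mlx5_cmd_exec()-based wrapper the caller uses):

	/* Sketch only: retry while firmware reports "not ready". */
	static int my_issue_cmd_retry(struct mlx5_core_dev *dev, void *in, int inlen,
				      void *out, int outlen)
	{
		int retries = 10;
		int err;

		do {
			err = my_issue_cmd(dev, in, inlen, out, outlen);
			if (err != -EAGAIN)
				return err;
			msleep(20);	/* FW still coming up; back off briefly */
		} while (--retries);

		return err;
	}
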
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index ddf1b87f1bc0..9aed29fa4900 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -203,10 +203,10 @@ TRACE_EVENT(mlx5_fs_set_fte,
fs_get_obj(__entry->fg, fte->node.parent);
__entry->group_index = __entry->fg->id;
__entry->index = fte->index;
- __entry->action = fte->action.action;
+ __entry->action = fte->act_dests.action.action;
__entry->mask_enable = __entry->fg->mask.match_criteria_enable;
- __entry->flow_tag = fte->flow_context.flow_tag;
- __entry->flow_source = fte->flow_context.flow_source;
+ __entry->flow_tag = fte->act_dests.flow_context.flow_tag;
+ __entry->flow_source = fte->act_dests.flow_context.flow_source;
memcpy(__entry->mask_outer,
MLX5_ADDR_OF(fte_match_param,
&__entry->fg->mask.match_criteria,
@@ -284,7 +284,7 @@ TRACE_EVENT(mlx5_fs_add_rule,
TP_fast_assign(
__entry->rule = rule;
fs_get_obj(__entry->fte, rule->node.parent);
- __entry->index = __entry->fte->dests_size - 1;
+ __entry->index = __entry->fte->act_dests.dests_size - 1;
__entry->sw_action = rule->sw_action;
memcpy(__entry->destination,
&rule->dest_attr,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 4d123dae912c..1966736f98b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -137,6 +137,10 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, legacy,
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100BASE_TX, legacy,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_T, legacy,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, legacy,
ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, legacy,
@@ -202,6 +206,12 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_400GAUI_8_400GBASE_CR8, ext,
+ ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GAUI_1_100GBASE_CR_KR, ext,
ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 47e7a80d221b..a5659c0c4236 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1016,30 +1016,31 @@ err_rq_xdp_prog:
static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
- struct bpf_prog *old_prog;
-
- if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
- old_prog = rcu_dereference_protected(rq->xdp_prog,
- lockdep_is_held(&rq->priv->state_lock));
- if (old_prog)
- bpf_prog_put(old_prog);
- }
+ kvfree(rq->dim);
+ page_pool_destroy(rq->page_pool);
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ mlx5e_rq_free_shampo(rq);
kvfree(rq->mpwqe.info);
mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
mlx5e_free_mpwqe_rq_drop_page(rq);
- mlx5e_rq_free_shampo(rq);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
mlx5e_free_wqe_alloc_info(rq);
}
- kvfree(rq->dim);
- xdp_rxq_info_unreg(&rq->xdp_rxq);
- page_pool_destroy(rq->page_pool);
mlx5_wq_destroy(&rq->wq_ctrl);
+
+ if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
+ struct bpf_prog *old_prog;
+
+ old_prog = rcu_dereference_protected(rq->xdp_prog,
+ lockdep_is_held(&rq->priv->state_lock));
+ if (old_prog)
+ bpf_prog_put(old_prog);
+ }
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
}
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index de9d01036c28..8e24ba96c779 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -2346,6 +2346,9 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
stats->hds_nodata_packets++;
stats->hds_nodata_bytes += head_size;
}
+ } else {
+ stats->hds_nosplit_packets++;
+ stats->hds_nosplit_bytes += data_bcnt;
}
mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index e7a3290a708a..611ec4b6f370 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -144,6 +144,8 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
@@ -347,6 +349,8 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_gro_large_hds += rq_stats->gro_large_hds;
s->rx_hds_nodata_packets += rq_stats->hds_nodata_packets;
s->rx_hds_nodata_bytes += rq_stats->hds_nodata_bytes;
+ s->rx_hds_nosplit_packets += rq_stats->hds_nosplit_packets;
+ s->rx_hds_nosplit_bytes += rq_stats->hds_nosplit_bytes;
s->rx_ecn_mark += rq_stats->ecn_mark;
s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
s->rx_csum_none += rq_stats->csum_none;
@@ -2062,6 +2066,8 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 4c5858c1dd82..5961c569cfe0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -156,6 +156,8 @@ struct mlx5e_sw_stats {
u64 rx_gro_large_hds;
u64 rx_hds_nodata_packets;
u64 rx_hds_nodata_bytes;
+ u64 rx_hds_nosplit_packets;
+ u64 rx_hds_nosplit_bytes;
u64 rx_mcast_packets;
u64 rx_ecn_mark;
u64 rx_removed_vlan_packets;
@@ -356,6 +358,8 @@ struct mlx5e_rq_stats {
u64 gro_large_hds;
u64 hds_nodata_packets;
u64 hds_nodata_bytes;
+ u64 hds_nosplit_packets;
+ u64 hds_nosplit_bytes;
u64 mcast_packets;
u64 ecn_mark;
u64 removed_vlan_packets;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index f15ecaef1331..2505f90c0b39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -896,7 +896,7 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
if (!mlx5_irq_pool_is_sf_pool(pool))
return comp_irq_request_pci(dev, vecidx);
- af_desc.is_managed = 1;
+ af_desc.is_managed = false;
cpumask_copy(&af_desc.mask, cpu_online_mask);
cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index 255bc8b749f9..8587cd572da5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -319,7 +319,7 @@ int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
return -EPERM;
mutex_lock(&esw->state_lock);
- if (esw->mode != MLX5_ESWITCH_LEGACY) {
+ if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw)) {
err = -EOPNOTSUPP;
goto out;
}
@@ -339,7 +339,7 @@ int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
if (!mlx5_esw_allowed(esw))
return -EPERM;
- if (esw->mode != MLX5_ESWITCH_LEGACY)
+ if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw))
return -EOPNOTSUPP;
*setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 20146a2dc7f4..02a3563f51ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -312,6 +312,25 @@ static int esw_qos_set_group_max_rate(struct mlx5_eswitch *esw,
return err;
}
+static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
+{
+ switch (type) {
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_TSAR;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_VPORT;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_VPORT_TC;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
+ }
+ return false;
+}
+
static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
struct mlx5_vport *vport,
u32 max_rate, u32 bw_share)
@@ -323,6 +342,9 @@ static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
void *vport_elem;
int err;
+ if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT))
+ return -EOPNOTSUPP;
+
parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix;
MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
@@ -421,6 +443,7 @@ __esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *ex
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_esw_rate_group *group;
+ __be32 *attr;
u32 divider;
int err;
@@ -428,6 +451,12 @@ __esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *ex
if (!group)
return ERR_PTR(-ENOMEM);
+ MLX5_SET(scheduling_context, tsar_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+
+ attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
+ *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
+
MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
esw->qos.root_tsar_ix);
err = mlx5_create_scheduling_element_cmd(esw->dev,
@@ -526,25 +555,6 @@ static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
return err;
}
-static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
-{
- switch (type) {
- case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
- return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_TSAR;
- case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
- return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_VPORT;
- case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
- return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_VPORT_TC;
- case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
- return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
- }
- return false;
-}
-
static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
@@ -555,7 +565,8 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
return -EOPNOTSUPP;
- if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
+ if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR) ||
+ !(MLX5_CAP_QOS(dev, esw_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))
return -EOPNOTSUPP;
MLX5_SET(scheduling_context, tsar_ctx, element_type,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 9b8599c200e2..676005854dad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -463,7 +463,7 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
int num_encap = 0;
*extended_dest = false;
- if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+ if (!(fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
@@ -502,17 +502,17 @@ mlx5_cmd_set_fte_flow_meter(struct fs_fte *fte, void *in_flow_context)
execute_aso[0]);
MLX5_SET(execute_aso, execute_aso, valid, 1);
MLX5_SET(execute_aso, execute_aso, aso_object_id,
- fte->action.exe_aso.object_id);
+ fte->act_dests.action.exe_aso.object_id);
exe_aso_ctrl = MLX5_ADDR_OF(execute_aso, execute_aso, exe_aso_ctrl);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, return_reg_id,
- fte->action.exe_aso.return_reg_id);
+ fte->act_dests.action.exe_aso.return_reg_id);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, aso_type,
- fte->action.exe_aso.type);
+ fte->act_dests.action.exe_aso.type);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, init_color,
- fte->action.exe_aso.flow_meter.init_color);
+ fte->act_dests.action.exe_aso.flow_meter.init_color);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, meter_id,
- fte->action.exe_aso.flow_meter.meter_idx);
+ fte->act_dests.action.exe_aso.flow_meter.meter_idx);
}
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
@@ -541,7 +541,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
else
dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);
- inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
+ inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->act_dests.dests_size * dst_cnt_size;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -553,7 +553,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(set_fte_in, in, table_id, ft->id);
MLX5_SET(set_fte_in, in, flow_index, fte->index);
MLX5_SET(set_fte_in, in, ignore_flow_level,
- !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
+ !!(fte->act_dests.action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
MLX5_SET(set_fte_in, in, vport_number, ft->vport);
MLX5_SET(set_fte_in, in, other_vport,
@@ -563,23 +563,23 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_tag,
- fte->flow_context.flow_tag);
+ fte->act_dests.flow_context.flow_tag);
MLX5_SET(flow_context, in_flow_context, flow_source,
- fte->flow_context.flow_source);
+ fte->act_dests.flow_context.flow_source);
MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en,
- !!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
+ !!(fte->act_dests.flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
MLX5_SET(flow_context, in_flow_context, extended_destination,
extended_dest);
- action = fte->action.action;
+ action = fte->act_dests.action.action;
if (extended_dest)
action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
MLX5_SET(flow_context, in_flow_context, action, action);
- if (!extended_dest && fte->action.pkt_reformat) {
- struct mlx5_pkt_reformat *pkt_reformat = fte->action.pkt_reformat;
+ if (!extended_dest && fte->act_dests.action.pkt_reformat) {
+ struct mlx5_pkt_reformat *pkt_reformat = fte->act_dests.action.pkt_reformat;
if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
reformat_id = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat);
@@ -591,46 +591,46 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
goto err_out;
}
} else {
- reformat_id = fte->action.pkt_reformat->id;
+ reformat_id = fte->act_dests.action.pkt_reformat->id;
}
}
MLX5_SET(flow_context, in_flow_context, packet_reformat_id, (u32)reformat_id);
- if (fte->action.modify_hdr) {
- if (fte->action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
+ if (fte->act_dests.action.modify_hdr) {
+ if (fte->act_dests.action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
mlx5_core_err(dev, "Can't use SW-owned modify_hdr in FW-owned table\n");
err = -EOPNOTSUPP;
goto err_out;
}
MLX5_SET(flow_context, in_flow_context, modify_header_id,
- fte->action.modify_hdr->id);
+ fte->act_dests.action.modify_hdr->id);
}
MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type,
- fte->action.crypto.type);
+ fte->act_dests.action.crypto.type);
MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_obj_id,
- fte->action.crypto.obj_id);
+ fte->act_dests.action.crypto.obj_id);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
- MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
- MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
- MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);
+ MLX5_SET(vlan, vlan, ethtype, fte->act_dests.action.vlan[0].ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->act_dests.action.vlan[0].vid);
+ MLX5_SET(vlan, vlan, prio, fte->act_dests.action.vlan[0].prio);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);
- MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
- MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
- MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);
+ MLX5_SET(vlan, vlan, ethtype, fte->act_dests.action.vlan[1].ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->act_dests.action.vlan[1].vid);
+ MLX5_SET(vlan, vlan, prio, fte->act_dests.action.vlan[1].prio);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, &fte->val, sizeof(fte->val));
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
int list_size = 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
@@ -706,7 +706,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
list_size);
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
log_max_flow_counter,
ft->type));
@@ -731,8 +731,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
list_size);
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
- if (fte->action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ if (fte->act_dests.action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
mlx5_cmd_set_fte_flow_meter(fte, in_flow_context);
} else {
err = -EOPNOTSUPP;
@@ -1071,7 +1071,7 @@ static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns,
static u32 mlx5_cmd_get_capabilities(struct mlx5_flow_root_namespace *ns,
enum fs_flow_table_type ft_type)
{
- return 0;
+ return MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH;
}
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 53e0e5137d3f..7eb7b3ffe3d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -124,4 +124,12 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
int mlx5_fs_cmd_set_l2table_entry_silent(struct mlx5_core_dev *dev, u8 silent_mode);
int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, bool disconnect);
+
+static inline bool mlx5_fs_cmd_is_fw_term_table(struct mlx5_flow_table *ft)
+{
+ if (ft->flags & MLX5_FLOW_TABLE_TERMINATION)
+ return true;
+
+ return false;
+}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index a47d6419160d..8505d5e241e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -605,12 +605,37 @@ static void modify_fte(struct fs_fte *fte)
dev = get_dev(&fte->node);
root = find_root(&ft->node);
- err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
+ err = root->cmds->update_fte(root, ft, fg, fte->act_dests.modify_mask, fte);
if (err)
mlx5_core_warn(dev,
"%s can't del rule fg id=%d fte_index=%d\n",
__func__, fg->id, fte->index);
- fte->modify_mask = 0;
+ fte->act_dests.modify_mask = 0;
+}
+
+static void del_sw_hw_dup_rule(struct fs_node *node)
+{
+ struct mlx5_flow_rule *rule;
+ struct fs_fte *fte;
+
+ fs_get_obj(rule, node);
+ fs_get_obj(fte, rule->node.parent);
+ trace_mlx5_fs_del_rule(rule);
+
+ if (is_fwd_next_action(rule->sw_action)) {
+ mutex_lock(&rule->dest_attr.ft->lock);
+ list_del(&rule->next_ft);
+ mutex_unlock(&rule->dest_attr.ft->lock);
+ }
+
+ /* If a pending rule is being deleted, it must be a NO APPEND rule,
+ * so there are no partial deletions: all the rules of the
+ * mlx5_flow_handle are going to be deleted, and the rules aren't
+ * shared with any other mlx5_flow_handle instance, so there is no
+ * need for the bookkeeping done in del_sw_hw_rule().
+ */
+
+ kfree(rule);
}
static void del_sw_hw_rule(struct fs_node *node)
@@ -628,29 +653,29 @@ static void del_sw_hw_rule(struct fs_node *node)
}
if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) {
- --fte->dests_size;
- fte->modify_mask |=
+ --fte->act_dests.dests_size;
+ fte->act_dests.modify_mask |=
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
- fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
goto out;
}
if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
- --fte->dests_size;
- fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
- fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ --fte->act_dests.dests_size;
+ fte->act_dests.modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+ fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
goto out;
}
if (is_fwd_dest_type(rule->dest_attr.type)) {
- --fte->dests_size;
- --fte->fwd_dests;
+ --fte->act_dests.dests_size;
+ --fte->act_dests.fwd_dests;
- if (!fte->fwd_dests)
- fte->action.action &=
+ if (!fte->act_dests.fwd_dests)
+ fte->act_dests.action.action &=
~MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- fte->modify_mask |=
+ fte->act_dests.modify_mask |=
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
goto out;
}
@@ -658,12 +683,33 @@ out:
kfree(rule);
}
+static void switch_to_pending_act_dests(struct fs_fte *fte)
+{
+ struct fs_node *iter;
+
+ memcpy(&fte->act_dests, &fte->dup->act_dests, sizeof(fte->act_dests));
+
+ list_bulk_move_tail(&fte->node.children,
+ fte->dup->children.next,
+ fte->dup->children.prev);
+
+ list_for_each_entry(iter, &fte->node.children, list)
+ iter->del_sw_func = del_sw_hw_rule;
+
+ /* Make sure the fte isn't deleted
+ * as mlx5_del_flow_rules() decreases the refcount
+ * of the fte to trigger deletion.
+ */
+ tree_get_node(&fte->node);
+}
+
static void del_hw_fte(struct fs_node *node)
{
struct mlx5_flow_root_namespace *root;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
struct mlx5_core_dev *dev;
+ bool pending_used = false;
struct fs_fte *fte;
int err;
@@ -672,16 +718,35 @@ static void del_hw_fte(struct fs_node *node)
fs_get_obj(ft, fg->node.parent);
trace_mlx5_fs_del_fte(fte);
- WARN_ON(fte->dests_size);
+ WARN_ON(fte->act_dests.dests_size);
dev = get_dev(&ft->node);
root = find_root(&ft->node);
+
+ if (fte->dup && !list_empty(&fte->dup->children)) {
+ switch_to_pending_act_dests(fte);
+ pending_used = true;
+ } else {
+ /* Avoid double call to del_hw_fte */
+ node->del_hw_func = NULL;
+ }
+
if (node->active) {
- err = root->cmds->delete_fte(root, ft, fte);
- if (err)
- mlx5_core_warn(dev,
- "flow steering can't delete fte in index %d of flow group id %d\n",
- fte->index, fg->id);
- node->active = false;
+ if (pending_used) {
+ err = root->cmds->update_fte(root, ft, fg,
+ fte->act_dests.modify_mask, fte);
+ if (err)
+ mlx5_core_warn(dev,
+ "flow steering can't update to pending rule in index %d of flow group id %d\n",
+ fte->index, fg->id);
+ fte->act_dests.modify_mask = 0;
+ } else {
+ err = root->cmds->delete_fte(root, ft, fte);
+ if (err)
+ mlx5_core_warn(dev,
+ "flow steering can't delete fte in index %d of flow group id %d\n",
+ fte->index, fg->id);
+ node->active = false;
+ }
}
}
@@ -700,6 +765,7 @@ static void del_sw_fte(struct fs_node *node)
rhash_fte);
WARN_ON(err);
ida_free(&fg->fte_allocator, fte->index - fg->start_index);
+ kvfree(fte->dup);
kmem_cache_free(steering->ftes_cache, fte);
}
@@ -782,8 +848,8 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
memcpy(fte->val, &spec->match_value, sizeof(fte->val));
fte->node.type = FS_TYPE_FLOW_ENTRY;
- fte->action = *flow_act;
- fte->flow_context = spec->flow_context;
+ fte->act_dests.action = *flow_act;
+ fte->act_dests.flow_context = spec->flow_context;
tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
@@ -1103,18 +1169,45 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
return err;
}
+static bool rule_is_pending(struct fs_fte *fte, struct mlx5_flow_rule *rule)
+{
+ struct mlx5_flow_rule *tmp_rule;
+ struct fs_node *iter;
+
+ if (!fte->dup || list_empty(&fte->dup->children))
+ return false;
+
+ list_for_each_entry(iter, &fte->dup->children, list) {
+ tmp_rule = container_of(iter, struct mlx5_flow_rule, node);
+
+ if (tmp_rule == rule)
+ return true;
+ }
+
+ return false;
+}
+
static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
struct mlx5_flow_destination *dest)
{
struct mlx5_flow_root_namespace *root;
+ struct fs_fte_action *act_dests;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
+ bool pending = false;
struct fs_fte *fte;
int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
int err = 0;
fs_get_obj(fte, rule->node.parent);
- if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+
+ pending = rule_is_pending(fte, rule);
+ if (pending)
+ act_dests = &fte->dup->act_dests;
+ else
+ act_dests = &fte->act_dests;
+
+ if (!(act_dests->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return -EINVAL;
down_write_ref_node(&fte->node, false);
fs_get_obj(fg, fte->node.parent);
@@ -1122,8 +1215,9 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
memcpy(&rule->dest_attr, dest, sizeof(*dest));
root = find_root(&ft->node);
- err = root->cmds->update_fte(root, ft, fg,
- modify_mask, fte);
+ if (!pending)
+ err = root->cmds->update_fte(root, ft, fg,
+ modify_mask, fte);
up_write_ref_node(&fte->node, false);
return err;
@@ -1453,6 +1547,16 @@ static struct mlx5_flow_handle *alloc_handle(int num_rules)
return handle;
}
+static void destroy_flow_handle_dup(struct mlx5_flow_handle *handle,
+ int i)
+{
+ for (; --i >= 0;) {
+ list_del(&handle->rule[i]->node.list);
+ kfree(handle->rule[i]);
+ }
+ kfree(handle);
+}
+
static void destroy_flow_handle(struct fs_fte *fte,
struct mlx5_flow_handle *handle,
struct mlx5_flow_destination *dest,
@@ -1460,7 +1564,7 @@ static void destroy_flow_handle(struct fs_fte *fte,
{
for (; --i >= 0;) {
if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
- fte->dests_size--;
+ fte->act_dests.dests_size--;
list_del(&handle->rule[i]->node.list);
kfree(handle->rule[i]);
}
@@ -1469,6 +1573,61 @@ static void destroy_flow_handle(struct fs_fte *fte,
}
static struct mlx5_flow_handle *
+create_flow_handle_dup(struct list_head *children,
+ struct mlx5_flow_destination *dest,
+ int dest_num,
+ struct fs_fte_action *act_dests)
+{
+ static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+ static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
+ struct mlx5_flow_rule *rule = NULL;
+ struct mlx5_flow_handle *handle;
+ int i = 0;
+ int type;
+
+ handle = alloc_handle((dest_num) ? dest_num : 1);
+ if (!handle)
+ return NULL;
+
+ do {
+ rule = alloc_rule(dest + i);
+ if (!rule)
+ goto free_rules;
+
+ /* Add dest to the dests list - flow tables must be at the end of
+ * the list for forward-to-next-prio rules.
+ */
+ tree_init_node(&rule->node, NULL, del_sw_hw_dup_rule);
+ if (dest &&
+ dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ list_add(&rule->node.list, children);
+ else
+ list_add_tail(&rule->node.list, children);
+
+ if (dest) {
+ act_dests->dests_size++;
+
+ if (is_fwd_dest_type(dest[i].type))
+ act_dests->fwd_dests++;
+
+ type = dest[i].type ==
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ act_dests->modify_mask |= type ? count : dst;
+ }
+ handle->rule[i] = rule;
+ } while (++i < dest_num);
+
+ return handle;
+
+free_rules:
+ destroy_flow_handle_dup(handle, i);
+ act_dests->dests_size = 0;
+ act_dests->fwd_dests = 0;
+
+ return NULL;
+}
+
+static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
struct mlx5_flow_destination *dest,
int dest_num,
@@ -1510,10 +1669,10 @@ create_flow_handle(struct fs_fte *fte,
else
list_add_tail(&rule->node.list, &fte->node.children);
if (dest) {
- fte->dests_size++;
+ fte->act_dests.dests_size++;
if (is_fwd_dest_type(dest[i].type))
- fte->fwd_dests++;
+ fte->act_dests.fwd_dests++;
type = dest[i].type ==
MLX5_FLOW_DESTINATION_TYPE_COUNTER;
@@ -1774,17 +1933,17 @@ static int check_conflicting_ftes(struct fs_fte *fte,
const struct mlx5_flow_context *flow_context,
const struct mlx5_flow_act *flow_act)
{
- if (check_conflicting_actions(flow_act, &fte->action)) {
+ if (check_conflicting_actions(flow_act, &fte->act_dests.action)) {
mlx5_core_warn(get_dev(&fte->node),
"Found two FTEs with conflicting actions\n");
return -EEXIST;
}
if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
- fte->flow_context.flow_tag != flow_context->flow_tag) {
+ fte->act_dests.flow_context.flow_tag != flow_context->flow_tag) {
mlx5_core_warn(get_dev(&fte->node),
"FTE flow tag %u already exists with different flow tag %u\n",
- fte->flow_context.flow_tag,
+ fte->act_dests.flow_context.flow_tag,
flow_context->flow_tag);
return -EEXIST;
}
@@ -1808,12 +1967,12 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
if (ret)
return ERR_PTR(ret);
- old_action = fte->action.action;
- fte->action.action |= flow_act->action;
+ old_action = fte->act_dests.action.action;
+ fte->act_dests.action.action |= flow_act->action;
handle = add_rule_fte(fte, fg, dest, dest_num,
old_action != flow_act->action);
if (IS_ERR(handle)) {
- fte->action.action = old_action;
+ fte->act_dests.action.action = old_action;
return handle;
}
trace_mlx5_fs_set_fte(fte, false);
@@ -1961,6 +2120,62 @@ out:
return fte_tmp;
}
+/* Native capability lacks support for adding an additional match with the same value
+ * to the same flow group. To accommodate the NO APPEND flag in these scenarios,
+ * we include the new rule in the existing flow table entry (fte) without immediate
+ * hardware commitment. When a request is made to delete the corresponding hardware rule,
+ * we then commit the pending rule to hardware.
+ */
+static struct mlx5_flow_handle *
+add_rule_dup_match_fte(struct fs_fte *fte,
+ const struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int dest_num)
+{
+ struct mlx5_flow_handle *handle;
+ struct fs_fte_dup *dup;
+ int i = 0;
+
+ if (!fte->dup) {
+ dup = kvzalloc(sizeof(*dup), GFP_KERNEL);
+ if (!dup)
+ return ERR_PTR(-ENOMEM);
+ /* dup will be freed when the fte is freed;
+ * this way we don't allocate/free dup on every rule
+ * creation or deletion
+ */
+ INIT_LIST_HEAD(&dup->children);
+ fte->dup = dup;
+ }
+
+ if (!list_empty(&fte->dup->children)) {
+ mlx5_core_warn(get_dev(&fte->node),
+ "Can have only a single duplicate rule\n");
+
+ return ERR_PTR(-EEXIST);
+ }
+
+ fte->dup->act_dests.action = *flow_act;
+ fte->dup->act_dests.flow_context = spec->flow_context;
+ fte->dup->act_dests.dests_size = 0;
+ fte->dup->act_dests.fwd_dests = 0;
+ fte->dup->act_dests.modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+
+ handle = create_flow_handle_dup(&fte->dup->children,
+ dest, dest_num,
+ &fte->dup->act_dests);
+ if (!handle)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < handle->num_rules; i++) {
+ tree_add_node(&handle->rule[i]->node, &fte->node);
+ trace_mlx5_fs_add_rule(handle->rule[i]);
+ }
+
+ return handle;
+}
+
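For context, a minimal caller-side sketch of what the pending-rule machinery above enables. The helper below is hypothetical and purely illustrative; it assumes a steering backend that advertises MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH and goes through the generic mlx5_add_flow_rules()/mlx5_del_flow_rules() entry points:

/* Hypothetical illustration: two rules with identical match values and
 * FLOW_ACT_NO_APPEND on a duplicate-match capable backend.
 */
static int demo_duplicate_match(struct mlx5_flow_table *ft,
				const struct mlx5_flow_spec *spec,
				struct mlx5_flow_destination *dest1,
				struct mlx5_flow_destination *dest2)
{
	struct mlx5_flow_act act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_handle *active, *pending;

	active = mlx5_add_flow_rules(ft, spec, &act, dest1, 1);
	if (IS_ERR(active))
		return PTR_ERR(active);

	/* Same match value, NO_APPEND: the rule is parked in fte->dup as the
	 * single allowed duplicate and is not written to hardware yet.
	 */
	pending = mlx5_add_flow_rules(ft, spec, &act, dest2, 1);
	if (IS_ERR(pending)) {
		mlx5_del_flow_rules(active);
		return PTR_ERR(pending);
	}

	/* Deleting the active rule commits the pending one to hardware. */
	mlx5_del_flow_rules(active);
	mlx5_del_flow_rules(pending);
	return 0;
}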
static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
struct list_head *match_head,
@@ -1971,6 +2186,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
int ft_version)
{
struct mlx5_flow_steering *steering = get_steering(&ft->node);
+ struct mlx5_flow_root_namespace *root = find_root(&ft->node);
struct mlx5_flow_group *g;
struct mlx5_flow_handle *rule;
struct match_list *iter;
@@ -1984,7 +2200,9 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
return ERR_PTR(-ENOMEM);
search_again_locked:
- if (flow_act->flags & FLOW_ACT_NO_APPEND)
+ if (flow_act->flags & FLOW_ACT_NO_APPEND &&
+ (root->cmds->get_capabilities(root, root->table_type) &
+ MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH))
goto skip_search;
version = matched_fgs_get_version(match_head);
/* Try to find an fte with identical match value and attempt update its
@@ -1997,7 +2215,10 @@ search_again_locked:
fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
if (!fte_tmp)
continue;
- rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
+ if (flow_act->flags & FLOW_ACT_NO_APPEND)
+ rule = add_rule_dup_match_fte(fte_tmp, spec, flow_act, dest, dest_num);
+ else
+ rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
/* No error check needed here, because insert_fte() is not called */
up_write_ref_node(&fte_tmp->node, false);
tree_put_node(&fte_tmp->node, false);
@@ -2265,12 +2486,10 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
tree_remove_node(&handle->rule[i]->node, true);
if (list_empty(&fte->node.children)) {
fte->node.del_hw_func(&fte->node);
- /* Avoid double call to del_hw_fte */
- fte->node.del_hw_func = NULL;
up_write_ref_node(&fte->node, false);
tree_put_node(&fte->node, false);
- } else if (fte->dests_size) {
- if (fte->modify_mask)
+ } else if (fte->act_dests.dests_size) {
+ if (fte->act_dests.modify_mask)
modify_fte(fte);
up_write_ref_node(&fte->node, false);
} else {
@@ -3590,8 +3809,8 @@ out:
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
-static struct mlx5_flow_root_namespace
-*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
+struct mlx5_flow_root_namespace *
+mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
{
struct mlx5_flow_namespace *ns;
@@ -3614,7 +3833,7 @@ struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
struct mlx5_modify_hdr *modify_hdr;
int err;
- root = get_root_namespace(dev, ns_type);
+ root = mlx5_get_root_namespace(dev, ns_type);
if (!root)
return ERR_PTR(-EOPNOTSUPP);
@@ -3639,7 +3858,7 @@ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
{
struct mlx5_flow_root_namespace *root;
- root = get_root_namespace(dev, modify_hdr->ns_type);
+ root = mlx5_get_root_namespace(dev, modify_hdr->ns_type);
if (WARN_ON(!root))
return;
root->cmds->modify_header_dealloc(root, modify_hdr);
@@ -3655,7 +3874,7 @@ struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
struct mlx5_flow_root_namespace *root;
int err;
- root = get_root_namespace(dev, ns_type);
+ root = mlx5_get_root_namespace(dev, ns_type);
if (!root)
return ERR_PTR(-EOPNOTSUPP);
@@ -3681,7 +3900,7 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
{
struct mlx5_flow_root_namespace *root;
- root = get_root_namespace(dev, pkt_reformat->ns_type);
+ root = mlx5_get_root_namespace(dev, pkt_reformat->ns_type);
if (WARN_ON(!root))
return;
root->cmds->packet_reformat_dealloc(root, pkt_reformat);
@@ -3703,7 +3922,7 @@ mlx5_create_match_definer(struct mlx5_core_dev *dev,
struct mlx5_flow_definer *definer;
int id;
- root = get_root_namespace(dev, ns_type);
+ root = mlx5_get_root_namespace(dev, ns_type);
if (!root)
return ERR_PTR(-EOPNOTSUPP);
@@ -3727,7 +3946,7 @@ void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
{
struct mlx5_flow_root_namespace *root;
- root = get_root_namespace(dev, definer->ns_type);
+ root = mlx5_get_root_namespace(dev, definer->ns_type);
if (WARN_ON(!root))
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 78eb6b7097e1..964937f17cf5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -110,7 +110,9 @@ enum fs_flow_table_type {
FS_FT_RDMA_RX = 0X7,
FS_FT_RDMA_TX = 0X8,
FS_FT_PORT_SEL = 0X9,
- FS_FT_MAX_TYPE = FS_FT_PORT_SEL,
+ FS_FT_FDB_RX = 0xa,
+ FS_FT_FDB_TX = 0xb,
+ FS_FT_MAX_TYPE = FS_FT_FDB_TX,
};
enum fs_flow_table_op_mod {
@@ -131,6 +133,7 @@ enum mlx5_flow_steering_capabilty {
MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX = 1UL << 0,
MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX = 1UL << 1,
MLX5_FLOW_STEERING_CAP_MATCH_RANGES = 1UL << 2,
+ MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH = 1UL << 3,
};
struct mlx5_flow_steering {
@@ -228,20 +231,29 @@ struct mlx5_ft_underlay_qp {
MLX5_BYTE_OFF(fte_match_param, \
MLX5_FTE_MATCH_PARAM_RESERVED)))
+struct fs_fte_action {
+ int modify_mask;
+ u32 dests_size;
+ u32 fwd_dests;
+ struct mlx5_flow_context flow_context;
+ struct mlx5_flow_act action;
+};
+
+struct fs_fte_dup {
+ struct list_head children;
+ struct fs_fte_action act_dests;
+};
+
/* Type of children is mlx5_flow_rule */
struct fs_fte {
struct fs_node node;
struct mlx5_fs_dr_rule fs_dr_rule;
u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
- u32 dests_size;
- u32 fwd_dests;
+ struct fs_fte_action act_dests;
+ struct fs_fte_dup *dup;
u32 index;
- struct mlx5_flow_context flow_context;
- struct mlx5_flow_act action;
enum fs_fte_status status;
- struct mlx5_fc *counter;
struct rhash_head hash;
- int modify_mask;
};
/* Type of children is mlx5_flow_table/namespace */
@@ -368,7 +380,9 @@ struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
(type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \
(type == FS_FT_RDMA_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) : \
(type == FS_FT_PORT_SEL) ? MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \
- (BUILD_BUG_ON_ZERO(FS_FT_PORT_SEL != FS_FT_MAX_TYPE))\
+ (type == FS_FT_FDB_RX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
+ (type == FS_FT_FDB_TX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
+ (BUILD_BUG_ON_ZERO(FS_FT_FDB_TX != FS_FT_MAX_TYPE))\
)
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index b43ca0b762c3..4f55e55ecb55 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -26,6 +26,7 @@ struct mlx5_fw_reset {
struct work_struct reset_now_work;
struct work_struct reset_abort_work;
unsigned long reset_flags;
+ u8 reset_method;
struct timer_list timer;
struct completion done;
int ret;
@@ -95,7 +96,7 @@ static int mlx5_reg_mfrl_set(struct mlx5_core_dev *dev, u8 reset_level,
}
static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level,
- u8 *reset_type, u8 *reset_state)
+ u8 *reset_type, u8 *reset_state, u8 *reset_method)
{
u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
@@ -111,13 +112,26 @@ static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level,
*reset_type = MLX5_GET(mfrl_reg, out, reset_type);
if (reset_state)
*reset_state = MLX5_GET(mfrl_reg, out, reset_state);
+ if (reset_method)
+ *reset_method = MLX5_GET(mfrl_reg, out, pci_reset_req_method);
return 0;
}
int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type)
{
- return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL);
+ return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL, NULL);
+}
+
+static int mlx5_fw_reset_get_reset_method(struct mlx5_core_dev *dev,
+ u8 *reset_method)
+{
+ if (!MLX5_CAP_GEN(dev, pcie_reset_using_hotreset_method)) {
+ *reset_method = MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE;
+ return 0;
+ }
+
+ return mlx5_reg_mfrl_query(dev, NULL, NULL, NULL, reset_method);
}
static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev,
@@ -125,7 +139,7 @@ static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev,
{
u8 reset_state;
- if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state))
+ if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state, NULL))
goto out;
if (!reset_state)
@@ -398,7 +412,8 @@ static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
return 0;
}
-static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
+static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev,
+ u8 reset_method)
{
u16 dev_id;
int err;
@@ -409,9 +424,11 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
}
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
- err = mlx5_check_hotplug_interrupt(dev);
- if (err)
- return false;
+ if (reset_method != MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET) {
+ err = mlx5_check_hotplug_interrupt(dev);
+ if (err)
+ return false;
+ }
#endif
err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
@@ -427,8 +444,12 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
struct mlx5_core_dev *dev = fw_reset->dev;
int err;
- if (test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) ||
- !mlx5_is_reset_now_capable(dev)) {
+ err = mlx5_fw_reset_get_reset_method(dev, &fw_reset->reset_method);
+ if (err)
+ mlx5_core_warn(dev, "Failed reading MFRL, err %d\n", err);
+
+ if (err || test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) ||
+ !mlx5_is_reset_now_capable(dev, fw_reset->reset_method)) {
err = mlx5_fw_reset_set_reset_sync_nack(dev);
mlx5_core_warn(dev, "PCI Sync FW Update Reset Nack %s",
err ? "Failed" : "Sent");
@@ -444,21 +465,15 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack. Device reset is expected.\n");
}
-static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
+static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev, u16 dev_id)
{
struct pci_bus *bridge_bus = dev->pdev->bus;
struct pci_dev *bridge = bridge_bus->self;
unsigned long timeout;
struct pci_dev *sdev;
- u16 reg16, dev_id;
int cap, err;
+ u16 reg16;
- err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
- if (err)
- return pcibios_err_to_errno(err);
- err = mlx5_check_dev_ids(dev, dev_id);
- if (err)
- return err;
cap = pci_find_capability(bridge, PCI_CAP_ID_EXP);
if (!cap)
return -EOPNOTSUPP;
@@ -528,6 +543,44 @@ restore:
return err;
}
+static int mlx5_pci_reset_bus(struct mlx5_core_dev *dev)
+{
+ if (!MLX5_CAP_GEN(dev, pcie_reset_using_hotreset_method))
+ return -EOPNOTSUPP;
+
+ return pci_reset_bus(dev->pdev);
+}
+
+static int mlx5_sync_pci_reset(struct mlx5_core_dev *dev, u8 reset_method)
+{
+ u16 dev_id;
+ int err;
+
+ err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
+ if (err)
+ return pcibios_err_to_errno(err);
+ err = mlx5_check_dev_ids(dev, dev_id);
+ if (err)
+ return err;
+
+ switch (reset_method) {
+ case MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE:
+ err = mlx5_pci_link_toggle(dev, dev_id);
+ if (err)
+ mlx5_core_warn(dev, "mlx5_pci_link_toggle failed\n");
+ break;
+ case MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET:
+ err = mlx5_pci_reset_bus(dev);
+ if (err)
+ mlx5_core_warn(dev, "mlx5_pci_reset_bus failed\n");
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
static void mlx5_sync_reset_now_event(struct work_struct *work)
{
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
@@ -546,9 +599,9 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
goto done;
}
- err = mlx5_pci_link_toggle(dev);
+ err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
if (err) {
- mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, no reset done, err %d\n", err);
+ mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, no reset done, err %d\n", err);
set_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags);
}
@@ -610,9 +663,9 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work)
mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n", rst_state);
if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) {
- err = mlx5_pci_link_toggle(dev);
+ err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
if (err) {
- mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, err %d\n", err);
+ mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, err %d\n", err);
fw_reset->ret = err;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 4b88d969a66c..8b0abd61eca6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -619,6 +619,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_with_driver_unload))
MLX5_SET(cmd_hca_cap, set_hca_cap,
pci_sync_for_fw_update_with_driver_unload, 1);
+ if (MLX5_CAP_GEN_MAX(dev, pcie_reset_using_hotreset_method))
+ MLX5_SET(cmd_hca_cap, set_hca_cap,
+ pcie_reset_using_hotreset_method, 1);
if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
MLX5_SET(cmd_hca_cap,
@@ -2223,6 +2226,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
{ PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
{ PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */
+ { PCI_VDEVICE(MELLANOX, 0x1025) }, /* ConnectX-9 */
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
index 8bce730b5c5b..db2bd3ad63ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
@@ -28,6 +28,9 @@ int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
{
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+ if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP))
+ return -EOPNOTSUPP;
+
MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP);
@@ -44,6 +47,10 @@ int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
void *attr;
+ if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_TSAR) ||
+ !(MLX5_CAP_QOS(mdev, nic_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))
+ return -EOPNOTSUPP;
+
MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
MLX5_SET(scheduling_context, sched_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 8c2a34a0d6be..baefb9a3fa05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -251,9 +251,9 @@ int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
output->level = MLX5_GET(query_flow_table_out, out, flow_table_context.level);
output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out,
- flow_table_context.sw_owner_icm_root_1);
+ flow_table_context.sws.sw_owner_icm_root_1);
output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out,
- flow_table_context.sw_owner_icm_root_0);
+ flow_table_context.sws.sw_owner_icm_root_0);
return 0;
}
@@ -480,15 +480,15 @@ int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
*/
if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, attr->icm_addr_rx);
+ sws.sw_owner_icm_root_0, attr->icm_addr_rx);
} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, attr->icm_addr_tx);
+ sws.sw_owner_icm_root_0, attr->icm_addr_tx);
} else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_0, attr->icm_addr_rx);
+ sws.sw_owner_icm_root_0, attr->icm_addr_rx);
MLX5_SET64(flow_table_context, ft_mdev,
- sw_owner_icm_root_1, attr->icm_addr_tx);
+ sws.sw_owner_icm_root_1, attr->icm_addr_tx);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 50c2554c9ccf..833cb68c744f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -9,14 +9,6 @@
#include "fs_dr.h"
#include "dr_types.h"
-static bool dr_is_fw_term_table(struct mlx5_flow_table *ft)
-{
- if (ft->flags & MLX5_FLOW_TABLE_TERMINATION)
- return true;
-
- return false;
-}
-
static int mlx5_cmd_dr_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 underlay_qpn,
@@ -70,7 +62,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
u32 flags;
int err;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
ft_attr,
next_ft);
@@ -110,7 +102,7 @@ static int mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5dr_action *action = ft->fs_dr_table.miss_action;
int err;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
err = mlx5dr_table_destroy(ft->fs_dr_table.dr_table);
@@ -135,7 +127,7 @@ static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
return set_miss_action(ns, ft, next_ft);
@@ -154,7 +146,7 @@ static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
match_criteria_enable);
struct mlx5dr_match_parameters mask;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in,
fg);
@@ -179,7 +171,7 @@ static int mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg)
{
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
return mlx5dr_matcher_destroy(fg->fs_dr_matcher.dr_matcher);
@@ -279,7 +271,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
int err = 0;
int i;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
@@ -306,12 +298,12 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
match_sz = sizeof(fte->val);
/* Drop reformat action bit if destination vport set with reformat */
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
list_for_each_entry(dst, &fte->node.children, node.list) {
if (!contain_vport_reformat_action(dst))
continue;
- fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
break;
}
}
@@ -321,7 +313,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
* TX: modify header -> push vlan -> encap
* RX: decap -> pop vlan -> modify header
*/
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
enum mlx5dr_action_reformat_type decap_type =
DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;
@@ -337,26 +329,26 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
bool is_decap;
- if (fte->action.pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
+ if (fte->act_dests.action.pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
err = -EINVAL;
mlx5dr_err(domain, "FW-owned reformat can't be used in SW rule\n");
goto free_actions;
}
- is_decap = fte->action.pkt_reformat->reformat_type ==
+ is_decap = fte->act_dests.action.pkt_reformat->reformat_type ==
MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
if (is_decap)
actions[num_actions++] =
- fte->action.pkt_reformat->action.dr_action;
+ fte->act_dests.action.pkt_reformat->action.dr_action;
else
delay_encap_set = true;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
tmp_action =
mlx5dr_action_create_pop_vlan();
if (!tmp_action) {
@@ -367,7 +359,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
tmp_action =
mlx5dr_action_create_pop_vlan();
if (!tmp_action) {
@@ -378,12 +370,12 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
actions[num_actions++] =
- fte->action.modify_hdr->action.dr_action;
+ fte->act_dests.action.modify_hdr->action.dr_action;
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
- tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]);
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+ tmp_action = create_action_push_vlan(domain, &fte->act_dests.action.vlan[0]);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
@@ -392,8 +384,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
- tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]);
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+ tmp_action = create_action_push_vlan(domain, &fte->act_dests.action.vlan[1]);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
@@ -404,11 +396,11 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
if (delay_encap_set)
actions[num_actions++] =
- fte->action.pkt_reformat->action.dr_action;
+ fte->act_dests.action.pkt_reformat->action.dr_action;
/* The order of the actions below is not important */
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
tmp_action = mlx5dr_action_create_drop();
if (!tmp_action) {
err = -ENOMEM;
@@ -418,9 +410,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
term_actions[num_term_actions++].dest = tmp_action;
}
- if (fte->flow_context.flow_tag) {
+ if (fte->act_dests.flow_context.flow_tag) {
tmp_action =
- mlx5dr_action_create_tag(fte->flow_context.flow_tag);
+ mlx5dr_action_create_tag(fte->act_dests.flow_context.flow_tag);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
@@ -429,7 +421,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
list_for_each_entry(dst, &fte->node.children, node.list) {
enum mlx5_flow_destination_type type = dst->dest_attr.type;
u32 id;
@@ -510,7 +502,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
list_for_each_entry(dst, &fte->node.children, node.list) {
u32 id;
@@ -537,19 +529,21 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
}
- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
- if (fte->action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) {
+ if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ struct mlx5_flow_act *action = &fte->act_dests.action;
+
+ if (fte->act_dests.action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) {
err = -EOPNOTSUPP;
goto free_actions;
}
tmp_action =
mlx5dr_action_create_aso(domain,
- fte->action.exe_aso.object_id,
- fte->action.exe_aso.return_reg_id,
- fte->action.exe_aso.type,
- fte->action.exe_aso.flow_meter.init_color,
- fte->action.exe_aso.flow_meter.meter_idx);
+ action->exe_aso.object_id,
+ action->exe_aso.return_reg_id,
+ action->exe_aso.type,
+ action->exe_aso.flow_meter.init_color,
+ action->exe_aso.flow_meter.meter_idx);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
@@ -576,8 +570,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = term_actions->dest;
} else if (num_term_actions > 1) {
bool ignore_flow_level =
- !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
- u32 flow_source = fte->flow_context.flow_source;
+ !!(fte->act_dests.action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+ u32 flow_source = fte->act_dests.flow_context.flow_source;
if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
@@ -601,7 +595,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
&params,
num_actions,
actions,
- fte->flow_context.flow_source);
+ fte->act_dests.flow_context.flow_source);
if (!rule) {
err = -EINVAL;
goto free_actions;
@@ -740,7 +734,7 @@ static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
int err;
int i;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
err = mlx5dr_rule_destroy(rule->dr_rule);
@@ -765,7 +759,7 @@ static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
struct fs_fte fte_tmp = {};
int ret;
- if (dr_is_fw_term_table(ft))
+ if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group, modify_mask, fte);
/* Backup current dr rule details */
@@ -819,11 +813,11 @@ static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns)
static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
enum fs_flow_table_type ft_type)
{
- u32 steering_caps = 0;
+ u32 steering_caps = MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH;
if (ft_type != FS_FT_FDB ||
MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5)
- return 0;
+ return steering_caps;
steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX;
steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile
new file mode 100644
index 000000000000..c78512eed8d7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+subdir-ccflags-y += -I$(src)/..
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
new file mode 100644
index 000000000000..f39d636ff39a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
@@ -0,0 +1,926 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_H_
+#define MLX5HWS_H_
+
+struct mlx5hws_context;
+struct mlx5hws_table;
+struct mlx5hws_matcher;
+struct mlx5hws_rule;
+
+enum mlx5hws_table_type {
+ MLX5HWS_TABLE_TYPE_FDB,
+ MLX5HWS_TABLE_TYPE_MAX,
+};
+
+enum mlx5hws_matcher_resource_mode {
+ /* Allocate resources based on number of rules with minimal failure probability */
+ MLX5HWS_MATCHER_RESOURCE_MODE_RULE,
+ /* Allocate fixed size hash table based on given column and rows */
+ MLX5HWS_MATCHER_RESOURCE_MODE_HTABLE,
+};
+
+enum mlx5hws_action_type {
+ MLX5HWS_ACTION_TYP_LAST,
+ MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
+ MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2,
+ MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3,
+ MLX5HWS_ACTION_TYP_DROP,
+ MLX5HWS_ACTION_TYP_MISS,
+ MLX5HWS_ACTION_TYP_TBL,
+ MLX5HWS_ACTION_TYP_CTR,
+ MLX5HWS_ACTION_TYP_TAG,
+ MLX5HWS_ACTION_TYP_MODIFY_HDR,
+ MLX5HWS_ACTION_TYP_VPORT,
+ MLX5HWS_ACTION_TYP_POP_VLAN,
+ MLX5HWS_ACTION_TYP_PUSH_VLAN,
+ MLX5HWS_ACTION_TYP_ASO_METER,
+ MLX5HWS_ACTION_TYP_INSERT_HEADER,
+ MLX5HWS_ACTION_TYP_REMOVE_HEADER,
+ MLX5HWS_ACTION_TYP_RANGE,
+ MLX5HWS_ACTION_TYP_SAMPLER,
+ MLX5HWS_ACTION_TYP_DEST_ARRAY,
+ MLX5HWS_ACTION_TYP_MAX,
+};
+
+enum mlx5hws_action_flags {
+ MLX5HWS_ACTION_FLAG_HWS_FDB = 1 << 0,
+ /* Shared action can be used over a few threads, since the
+ * data is written only once at the creation of the action.
+ */
+ MLX5HWS_ACTION_FLAG_SHARED = 1 << 1,
+};
+
+enum mlx5hws_action_aso_meter_color {
+ MLX5HWS_ACTION_ASO_METER_COLOR_RED = 0x0,
+ MLX5HWS_ACTION_ASO_METER_COLOR_YELLOW = 0x1,
+ MLX5HWS_ACTION_ASO_METER_COLOR_GREEN = 0x2,
+ MLX5HWS_ACTION_ASO_METER_COLOR_UNDEFINED = 0x3,
+};
+
+enum mlx5hws_send_queue_actions {
+ /* Start executing all pending queued rules */
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_ASYNC = 1 << 0,
+ /* Start executing all pending queued rules wait till completion */
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC = 1 << 1,
+};
+
+struct mlx5hws_context_attr {
+ u16 queues;
+ u16 queue_size;
+ bool bwc; /* add support for backward compatible API */
+};
+
+struct mlx5hws_table_attr {
+ enum mlx5hws_table_type type;
+ u32 level;
+};
+
+enum mlx5hws_matcher_flow_src {
+ MLX5HWS_MATCHER_FLOW_SRC_ANY = 0x0,
+ MLX5HWS_MATCHER_FLOW_SRC_WIRE = 0x1,
+ MLX5HWS_MATCHER_FLOW_SRC_VPORT = 0x2,
+};
+
+enum mlx5hws_matcher_insert_mode {
+ MLX5HWS_MATCHER_INSERT_BY_HASH = 0x0,
+ MLX5HWS_MATCHER_INSERT_BY_INDEX = 0x1,
+};
+
+enum mlx5hws_matcher_distribute_mode {
+ MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH = 0x0,
+ MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR = 0x1,
+};
+
+struct mlx5hws_matcher_attr {
+ /* Processing priority inside table */
+ u32 priority;
+ /* Provide all rules with unique rule_idx in num_log range to reduce locking */
+ bool optimize_using_rule_idx;
+ /* Resource mode and corresponding size */
+ enum mlx5hws_matcher_resource_mode mode;
+ /* Optimize insertion in case packet origin is the same for all rules */
+ enum mlx5hws_matcher_flow_src optimize_flow_src;
+ /* Define the insertion and distribution modes for this matcher */
+ enum mlx5hws_matcher_insert_mode insert_mode;
+ enum mlx5hws_matcher_distribute_mode distribute_mode;
+ /* Define whether the created matcher supports resizing into a bigger matcher */
+ bool resizable;
+ union {
+ struct {
+ u8 sz_row_log;
+ u8 sz_col_log;
+ } table;
+
+ struct {
+ u8 num_log;
+ } rule;
+ };
+ /* Optional AT attach configuration - Max number of additional AT */
+ u8 max_num_of_at_attach;
+};
+
+struct mlx5hws_rule_attr {
+ void *user_data;
+ /* Valid if matcher optimize_using_rule_idx is set or
+ * if matcher is configured to insert rules by index.
+ */
+ u32 rule_idx;
+ u32 flow_source;
+ u16 queue_id;
+ u32 burst:1;
+};
+
+/* In actions that take offset, the offset is unique, pointing to a single
+ * resource and the user should not reuse the same index because data changing
+ * is not atomic.
+ */
+struct mlx5hws_rule_action {
+ struct mlx5hws_action *action;
+ union {
+ struct {
+ u32 value;
+ } tag;
+
+ struct {
+ u32 offset;
+ } counter;
+
+ struct {
+ u32 offset;
+ u8 *data;
+ } modify_header;
+
+ struct {
+ u32 offset;
+ u8 hdr_idx;
+ u8 *data;
+ } reformat;
+
+ struct {
+ __be32 vlan_hdr;
+ } push_vlan;
+
+ struct {
+ u32 offset;
+ enum mlx5hws_action_aso_meter_color init_color;
+ } aso_meter;
+ };
+};
+
+struct mlx5hws_action_reformat_header {
+ size_t sz;
+ void *data;
+};
+
+struct mlx5hws_action_insert_header {
+ struct mlx5hws_action_reformat_header hdr;
+ /* PRM start anchor to which header will be inserted */
+ u8 anchor;
+ /* Header insertion offset in bytes, from the start
+ * anchor to the location where new header will be inserted.
+ */
+ u8 offset;
+ /* Indicates this header insertion adds encapsulation header to the packet,
+ * requiring device to update offloaded fields (for example IPv4 total length).
+ */
+ bool encap;
+};
+
+struct mlx5hws_action_remove_header_attr {
+ /* PRM start anchor from which header will be removed */
+ u8 anchor;
+ /* Header remove offset in bytes, from the start
+ * anchor to the location where remove header starts.
+ */
+ u8 offset;
+ /* Indicates the removed header size in bytes */
+ size_t size;
+};
+
+struct mlx5hws_action_mh_pattern {
+ /* Byte size of modify actions provided by "data" */
+ size_t sz;
+ /* PRM format modify actions pattern */
+ __be64 *data;
+};
+
+struct mlx5hws_action_dest_attr {
+ /* Required destination action to forward the packet */
+ struct mlx5hws_action *dest;
+ /* Optional reformat action */
+ struct mlx5hws_action *reformat;
+};
+
+/**
+ * mlx5hws_is_supported - Check whether HWS is supported
+ *
+ * @mdev: The device to check.
+ *
+ * Return: true if supported, false otherwise.
+ */
+static inline bool mlx5hws_is_supported(struct mlx5_core_dev *mdev)
+{
+ u8 ignore_flow_level_rtc_valid;
+ u8 wqe_based_flow_table_update;
+
+ wqe_based_flow_table_update =
+ MLX5_CAP_GEN(mdev, wqe_based_flow_table_update_cap);
+ ignore_flow_level_rtc_valid =
+ MLX5_CAP_FLOWTABLE(mdev,
+ flow_table_properties_nic_receive.ignore_flow_level_rtc_valid);
+
+ return wqe_based_flow_table_update && ignore_flow_level_rtc_valid;
+}
+
+/**
+ * mlx5hws_context_open - Open a context used for direct rule insertion
+ * using hardware steering.
+ *
+ * @mdev: The device to be used for HWS.
+ * @attr: Attributes used for context open.
+ *
+ * Return: pointer to mlx5hws_context on success NULL otherwise.
+ */
+struct mlx5hws_context *
+mlx5hws_context_open(struct mlx5_core_dev *mdev,
+ struct mlx5hws_context_attr *attr);
+
+/**
+ * mlx5hws_context_close - Close a context used for direct hardware steering.
+ *
+ * @ctx: mlx5hws context to close.
+ *
+ * Return: zero on success non zero otherwise.
+ */
+int mlx5hws_context_close(struct mlx5hws_context *ctx);
+
+/**
+ * mlx5hws_context_set_peer - Set a peer context.
+ * Each context can have multiple contexts as peers.
+ *
+ * @ctx: The context to which peer_ctx will be peered.
+ * @peer_ctx: The peer context.
+ * @peer_vhca_id: The peer context vhca id.
+ */
+void mlx5hws_context_set_peer(struct mlx5hws_context *ctx,
+ struct mlx5hws_context *peer_ctx,
+ u16 peer_vhca_id);
+
+/**
+ * mlx5hws_table_create - Create a new direct rule table.
+ * Each table can contain multiple matchers.
+ *
+ * @ctx: The context in which the new table will be opened.
+ * @attr: Attributes used for table creation.
+ *
+ * Return: pointer to mlx5hws_table on success NULL otherwise.
+ */
+struct mlx5hws_table *
+mlx5hws_table_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_table_attr *attr);
+
+/**
+ * mlx5hws_table_destroy - Destroy direct rule table.
+ *
+ * @tbl: Table to destroy.
+ *
+ * Return: zero on success non zero otherwise.
+ */
+int mlx5hws_table_destroy(struct mlx5hws_table *tbl);
+
+/**
+ * mlx5hws_table_get_id() - Get ID of the flow table.
+ *
+ * @tbl: Table to get ID of.
+ *
+ * Return: ID of the table.
+ */
+u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl);
+
+/**
+ * mlx5hws_table_set_default_miss - Set default miss table for mlx5hws_table
+ * by using another mlx5hws_table.
+ * Traffic which all table matchers miss will be forwarded to miss table.
+ *
+ * @tbl: Source table
+ * @miss_tbl: Target (miss) table, or NULL to remove current miss table
+ *
+ * Return: zero on success non zero otherwise.
+ */
+int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
+ struct mlx5hws_table *miss_tbl);
+
+/**
+ * mlx5hws_match_template_create - Create a new match template based on items mask.
+ * The match template will be used for matcher creation.
+ *
+ * @ctx: The context in which the new template will be created.
+ * @match_param: Describe the mask based on PRM match parameters.
+ * @match_param_sz: Size of match param buffer.
+ * @match_criteria_enable: Bitmap for each sub-set in match_criteria buffer.
+ *
+ * Return: Pointer to mlx5hws_match_template on success, NULL otherwise.
+ */
+struct mlx5hws_match_template *
+mlx5hws_match_template_create(struct mlx5hws_context *ctx,
+ u32 *match_param,
+ u32 match_param_sz,
+ u8 match_criteria_enable);
+
+/**
+ * mlx5hws_match_template_destroy - Destroy a match template.
+ *
+ * @mt: Match template to destroy.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int mlx5hws_match_template_destroy(struct mlx5hws_match_template *mt);
+
+/**
+ * mlx5hws_action_template_create - Create a new action template based on an action_type array.
+ *
+ * @action_type: An array of actions based on the order of actions which will be provided
+ * with rule_actions to mlx5hws_rule_create. The last action is marked
+ * using MLX5HWS_ACTION_TYP_LAST.
+ *
+ * Return: Pointer to mlx5hws_action_template on success, NULL otherwise.
+ */
+struct mlx5hws_action_template *
+mlx5hws_action_template_create(enum mlx5hws_action_type action_type[]);
+
+/**
+ * mlx5hws_action_template_destroy - Destroy action template.
+ *
+ * @at: Action template to destroy.
+ *
+ * Return: zero on success non zero otherwise.
+ */
+int mlx5hws_action_template_destroy(struct mlx5hws_action_template *at);
+
+/**
+ * mlx5hws_matcher_create - Create a new direct rule matcher.
+ *
+ * Each matcher can contain multiple rules. Matchers on the table will be
+ * processed by priority. Matching fields and mask are described by the
+ * match template. In some cases, multiple match templates can be used on
+ * the same matcher.
+ *
+ * @table: The table in which the new matcher will be opened.
+ * @mt: Array of match templates to be used on matcher.
+ * @num_of_mt: Number of match templates in mt array.
+ * @at: Array of action templates to be used on matcher.
+ * @num_of_at: Number of action templates in at array.
+ * @attr: Attributes used for matcher creation.
+ *
+ * Return: Pointer to mlx5hws_matcher on success, NULL otherwise.
+ *
+ */
+struct mlx5hws_matcher *
+mlx5hws_matcher_create(struct mlx5hws_table *table,
+ struct mlx5hws_match_template *mt[],
+ u8 num_of_mt,
+ struct mlx5hws_action_template *at[],
+ u8 num_of_at,
+ struct mlx5hws_matcher_attr *attr);
+
+/**
+ * mlx5hws_matcher_destroy - Destroy a direct rule matcher.
+ *
+ * @matcher: Matcher to destroy.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int mlx5hws_matcher_destroy(struct mlx5hws_matcher *matcher);
+
+/**
+ * mlx5hws_matcher_attach_at - Attach a new action template to a direct rule matcher.
+ *
+ * @matcher: Matcher to attach the action template to.
+ * @at: Action template to be attached to the matcher.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_action_template *at);
+
+/**
+ * mlx5hws_matcher_resize_set_target - Link two matchers and enable moving rules.
+ *
+ * Both matchers must be in the same table type, must be created with the
+ * 'resizable' property, and should have the same characteristics (e.g., same
+ * match templates and action templates). It is the user's responsibility to
+ * ensure that the destination matcher is allocated with the appropriate size.
+ *
+ * Once the function is completed, the user is:
+ * - Allowed to move rules from the source into the destination matcher.
+ * - No longer allowed to insert rules into the source matcher.
+ *
+ * The user is always allowed to insert rules into the destination matcher and
+ * to delete rules from any matcher.
+ *
+ * @src_matcher: Source matcher for moving rules from.
+ * @dst_matcher: Destination matcher for moving rules to.
+ *
+ * Return: Zero on successful move, non-zero otherwise.
+ */
+int mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_matcher *dst_matcher);
+
+/**
+ * mlx5hws_matcher_resize_rule_move - Enqueue moving rule operation.
+ *
+ * This function enqueues the operation of moving a rule from the source
+ * matcher to the destination matcher.
+ *
+ * @src_matcher: Matcher that the rule belongs to.
+ * @rule: The rule to move.
+ * @attr: Rule attributes.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int mlx5hws_matcher_resize_rule_move(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr);
+
+/**
+ * mlx5hws_rule_create - Enqueue create rule operation.
+ *
+ * @matcher: The matcher in which the new rule will be created.
+ * @mt_idx: Match template index to create the match with.
+ * @match_param: The match parameter PRM buffer used for value matching.
+ * @at_idx: Action template index to apply the actions with.
+ * @rule_actions: Rule actions to be executed on match.
+ * @attr: Rule creation attributes.
+ * @rule_handle: A valid rule handle. The handle doesn't require any initialization.
+ *
+ * Return: Zero on successful enqueue, non-zero otherwise.
+ */
+int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
+ u8 mt_idx,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *attr,
+ struct mlx5hws_rule *rule_handle);
+
+/**
+ * mlx5hws_rule_destroy - Enqueue destroy rule operation.
+ *
+ * @rule: The rule to enqueue for destruction.
+ * @attr: Rule destruction attributes.
+ *
+ * Return: Zero on successful enqueue, non-zero otherwise.
+ */
+int mlx5hws_rule_destroy(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr);
+
+/**
+ * mlx5hws_rule_action_update - Enqueue update actions on an existing rule.
+ *
+ * @rule: A valid rule handle to update.
+ * @at_idx: Action template index to update the actions with.
+ * @rule_actions: Rule actions to be executed on match.
+ * @attr: Rule update attributes.
+ *
+ * Return: Zero on successful enqueue, non-zero otherwise.
+ */
+int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *attr);
+
+/**
+ * mlx5hws_action_get_type - Get action type.
+ *
+ * @action: The action to get the type of.
+ *
+ * Return: action type.
+ */
+enum mlx5hws_action_type
+mlx5hws_action_get_type(struct mlx5hws_action *action);
+
+/**
+ * mlx5hws_action_create_dest_drop - Create a direct rule drop action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: Pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_drop(struct mlx5hws_context *ctx,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_default_miss - Create a direct rule default miss action.
+ * Defaults are RX: Drop, TX: Wire.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: Pointer to mlx5hws_action on success, NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_default_miss(struct mlx5hws_context *ctx,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_dest_table - Create direct rule goto table action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @tbl: Destination table.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_table *tbl,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_dest_table_num - Create direct rule goto table number action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @tbl_num: Destination table number.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table_num(struct mlx5hws_context *ctx,
+ u32 tbl_num, u32 flags);
+
+/**
+ * mlx5hws_action_create_dest_match_range - Create direct rule range match action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @field: Field against which to compare the value.
+ * @hit_ft: Flow table to go to on hit.
+ * @miss_ft: Flow table to go to on miss.
+ * @min: Minimal value of the field to be considered as hit.
+ * @max: Maximal value of the field to be considered as hit.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
+ u32 field,
+ struct mlx5_flow_table *hit_ft,
+ struct mlx5_flow_table *miss_ft,
+ u32 min, u32 max, u32 flags);
+
+/**
+ * mlx5hws_action_create_flow_sampler - Create direct rule flow sampler action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @sampler_id: Flow sampler object ID.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx,
+ u32 sampler_id, u32 flags);
+
+/**
+ * mlx5hws_action_create_dest_vport - Create direct rule goto vport action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @vport_num: Destination vport number.
+ * @vhca_id_valid: Tells if the vhca_id parameter is valid.
+ * @vhca_id: VHCA ID of the destination vport.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_vport(struct mlx5hws_context *ctx,
+ u16 vport_num,
+ bool vhca_id_valid,
+ u16 vhca_id,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_tag - Create direct rule TAG action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_tag(struct mlx5hws_context *ctx, u32 flags);
+
+/**
+ * mlx5hws_action_create_counter - Create direct rule counter action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @obj_id: Direct rule counter object ID.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_counter(struct mlx5hws_context *ctx,
+ u32 obj_id,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_reformat - Create direct rule reformat action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @reformat_type: Type of reformat prefixed with MLX5HWS_ACTION_TYP_REFORMAT.
+ * @num_of_hdrs: Number of provided headers in "hdrs" array.
+ * @hdrs: Headers array containing header information.
+ * @log_bulk_size: Number of unique values used with this reformat.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_reformat(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type reformat_type,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_size,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_modify_header - Create direct rule modify header action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @num_of_patterns: Number of provided patterns in "patterns" array.
+ * @patterns: Patterns array containing pattern information.
+ * @log_bulk_size: Number of unique values used with this pattern.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_modify_header(struct mlx5hws_context *ctx,
+ u8 num_of_patterns,
+ struct mlx5hws_action_mh_pattern *patterns,
+ u32 log_bulk_size,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_aso_meter - Create direct rule ASO flow meter action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @obj_id: ASO object ID.
+ * @return_reg_c: Copy the ASO object value into this reg_c,
+ * after a packet hits a rule with this ASO object.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_aso_meter(struct mlx5hws_context *ctx,
+ u32 obj_id,
+ u8 return_reg_c,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_pop_vlan - Create direct rule pop vlan action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_pop_vlan(struct mlx5hws_context *ctx, u32 flags);
+
+/**
+ * mlx5hws_action_create_push_vlan - Create direct rule push vlan action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags);
+
+/**
+ * mlx5hws_action_create_dest_array - Create a dest array action, this action can
+ * duplicate packets and forward to multiple destinations in the destination list.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @num_dest: The number of entries in the dests array.
+ * @dests: The destination array. Each contains a destination action and can
+ * have additional actions.
+ * @ignore_flow_level: Whether to turn on 'ignore_flow_level' for this dest.
+ * @flow_source: Source port of the traffic for this action.
+ * @flags: Action creation flags (enum mlx5hws_action_flags).
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
+ size_t num_dest,
+ struct mlx5hws_action_dest_attr *dests,
+ bool ignore_flow_level,
+ u32 flow_source,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_insert_header - Create insert header action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @num_of_hdrs: Number of provided headers in "hdrs" array.
+ * @hdrs: Headers array containing header information.
+ * @log_bulk_size: Number of unique values used with this insert header.
+ * @flags: Action creation flags. (enum mlx5hws_action_flags)
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_insert_header(struct mlx5hws_context *ctx,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_insert_header *hdrs,
+ u32 log_bulk_size,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_remove_header - Create remove header action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @attr: attributes that specify the remove header type, PRM start anchor and
+ * the PRM end anchor or the PRM start anchor and remove size in bytes.
+ * @flags: Action creation flags. (enum mlx5hws_action_flags)
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_remove_header(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_remove_header_attr *attr,
+ u32 flags);
+
+/**
+ * mlx5hws_action_create_last - Create direct rule LAST action.
+ *
+ * @ctx: The context in which the new action will be created.
+ * @flags: Action creation flags. (enum mlx5hws_action_flags)
+ *
+ * Return: pointer to mlx5hws_action on success NULL otherwise.
+ */
+struct mlx5hws_action *
+mlx5hws_action_create_last(struct mlx5hws_context *ctx, u32 flags);
+
+/**
+ * mlx5hws_action_destroy - Destroy direct rule action.
+ *
+ * @action: The action to destroy.
+ *
+ * Return: zero on success non zero otherwise.
+ */
+int mlx5hws_action_destroy(struct mlx5hws_action *action);
+
+enum mlx5hws_flow_op_status {
+ MLX5HWS_FLOW_OP_SUCCESS,
+ MLX5HWS_FLOW_OP_ERROR,
+};
+
+struct mlx5hws_flow_op_result {
+ enum mlx5hws_flow_op_status status;
+ void *user_data;
+};
+
+/**
+ * mlx5hws_send_queue_poll - Poll a queue for rule creation and deletion completions.
+ *
+ * @ctx: The context to which the queue belongs.
+ * @queue_id: The id of the queue to poll.
+ * @res: Completion array.
+ * @res_nb: Maximum number of results to return.
+ *
+ * Return: negative number on failure, the number of completions otherwise.
+ */
+int mlx5hws_send_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ struct mlx5hws_flow_op_result res[],
+ u32 res_nb);
+
+/**
+ * mlx5hws_send_queue_action - Perform an action on the queue
+ *
+ * @ctx: The context to which the queue belongs.
+ * @queue_id: The id of the queue to perform the action on.
+ * @actions: Actions to perform on the queue (enum mlx5hws_send_queue_actions)
+ *
+ * Return: zero on success non zero otherwise.
+ */
+int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 actions);
+
+/**
+ * mlx5hws_debug_dump - Dump HWS info
+ *
+ * @ctx: The context from which to dump the info.
+ *
+ * Return: zero on success non zero otherwise.
+ */
+int mlx5hws_debug_dump(struct mlx5hws_context *ctx);
+
+struct mlx5hws_bwc_matcher;
+struct mlx5hws_bwc_rule;
+
+struct mlx5hws_match_parameters {
+ size_t match_sz;
+ u32 *match_buf; /* Device spec format */
+};
+
+/**
+ * mlx5hws_bwc_matcher_create - Create a new BWC direct rule matcher.
+ *
+ * This function does the following:
+ * - creates match template based on flow items
+ * - creates an empty action template
+ * - creates a regular mlx5hws_matcher with these mt and at, setting
+ * its size to the minimum
+ * Notes:
+ * - table->ctx must have BWC support
+ * - complex rules are not supported
+ *
+ * @table: The table in which the new matcher will be opened
+ * @priority: Priority for this BWC matcher
+ * @match_criteria_enable: Bitmask that defines matching criteria
+ * @mask: Match parameters
+ *
+ * Return: pointer to mlx5hws_bwc_matcher on success or NULL otherwise.
+ */
+struct mlx5hws_bwc_matcher *
+mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask);
+
+/**
+ * mlx5hws_bwc_matcher_destroy - Destroy BWC direct rule matcher.
+ *
+ * @bwc_matcher: Matcher to destroy
+ *
+ * Return: zero on success, non zero otherwise
+ */
+int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+/**
+ * mlx5hws_bwc_rule_create - Create a new BWC rule.
+ *
+ * Unlike the usual rule creation function, this one is blocking: when the
+ * function returns, the rule is written to its place (no need to poll).
+ * This function does the following:
+ * - finds matching action template based on the provided rule_actions, or
+ * creates new action template if matching action template doesn't exist
+ * - updates corresponding BWC matcher stats
+ * - if needed, the function performs rehash:
+ * - creates a new matcher based on mt, at, new_sz
+ * - moves all the existing matcher rules to the new matcher
+ * - removes the old matcher
+ * - inserts new rule
+ * - polls till completion is received
+ * Notes:
+ * - matcher->tbl->ctx must have BWC support
+ * - separate BWC ctx queues are used
+ *
+ * @bwc_matcher: The BWC matcher in which the new rule will be created.
+ * @params: Match parameters
+ * @flow_source: Flow source for this rule
+ * @rule_actions: Rule actions to be executed on match
+ *
+ * Return: valid BWC rule handle on success, NULL otherwise
+ */
+struct mlx5hws_bwc_rule *
+mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[]);
+
+/**
+ * mlx5hws_bwc_rule_destroy - Destroy BWC direct rule.
+ *
+ * @bwc_rule: Rule to destroy.
+ *
+ * Return: zero on success, non zero otherwise.
+ */
+int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule);
+
+/**
+ * mlx5hws_bwc_rule_action_update - Update actions on an existing BWC rule.
+ *
+ * @bwc_rule: Rule to update
+ * @rule_actions: Rule actions to update with
+ *
+ * Return: zero on successful update, non zero otherwise.
+ */
+int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_action rule_actions[]);
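+
+/*
+ * A minimal BWC lifecycle sketch, assuming "tbl", "mask", "params" and
+ * "rule_actions" were already prepared by the caller (hypothetical names):
+ *
+ *	struct mlx5hws_bwc_matcher *matcher;
+ *	struct mlx5hws_bwc_rule *rule;
+ *
+ *	matcher = mlx5hws_bwc_matcher_create(tbl, prio, match_criteria_enable,
+ *					     &mask);
+ *	if (!matcher)
+ *		return -EINVAL;
+ *
+ *	rule = mlx5hws_bwc_rule_create(matcher, &params, flow_source,
+ *				       rule_actions);
+ *	if (!rule) {
+ *		mlx5hws_bwc_matcher_destroy(matcher);
+ *		return -EINVAL;
+ *	}
+ *
+ *	...
+ *
+ *	mlx5hws_bwc_rule_destroy(rule);
+ *	mlx5hws_bwc_matcher_destroy(matcher);
+ */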
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c
new file mode 100644
index 000000000000..b27bb4106532
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.c
@@ -0,0 +1,2604 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+#define MLX5HWS_ACTION_METER_INIT_COLOR_OFFSET 1
+
+/* Header removal size limited to 128B (64 words) */
+#define MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE 128
+
+/* This is the longest supported action sequence for FDB table:
+ * DECAP, POP_VLAN, MODIFY, CTR, ASO, PUSH_VLAN, MODIFY, ENCAP, Term.
+ */
+static const u32 action_order_arr[MLX5HWS_TABLE_TYPE_MAX][MLX5HWS_ACTION_TYP_MAX] = {
+ [MLX5HWS_TABLE_TYPE_FDB] = {
+ BIT(MLX5HWS_ACTION_TYP_REMOVE_HEADER) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2),
+ BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_POP_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+ BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_PUSH_VLAN),
+ BIT(MLX5HWS_ACTION_TYP_INSERT_HEADER) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2) |
+ BIT(MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3),
+ BIT(MLX5HWS_ACTION_TYP_CTR),
+ BIT(MLX5HWS_ACTION_TYP_TAG),
+ BIT(MLX5HWS_ACTION_TYP_ASO_METER),
+ BIT(MLX5HWS_ACTION_TYP_MODIFY_HDR),
+ BIT(MLX5HWS_ACTION_TYP_TBL) |
+ BIT(MLX5HWS_ACTION_TYP_VPORT) |
+ BIT(MLX5HWS_ACTION_TYP_DROP) |
+ BIT(MLX5HWS_ACTION_TYP_SAMPLER) |
+ BIT(MLX5HWS_ACTION_TYP_RANGE) |
+ BIT(MLX5HWS_ACTION_TYP_DEST_ARRAY),
+ BIT(MLX5HWS_ACTION_TYP_LAST),
+ },
+};
+
+static const char * const mlx5hws_action_type_str[] = {
+ [MLX5HWS_ACTION_TYP_LAST] = "LAST",
+ [MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2] = "TNL_L2_TO_L2",
+ [MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2] = "L2_TO_TNL_L2",
+ [MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2] = "TNL_L3_TO_L2",
+ [MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3] = "L2_TO_TNL_L3",
+ [MLX5HWS_ACTION_TYP_DROP] = "DROP",
+ [MLX5HWS_ACTION_TYP_TBL] = "TBL",
+ [MLX5HWS_ACTION_TYP_CTR] = "CTR",
+ [MLX5HWS_ACTION_TYP_TAG] = "TAG",
+ [MLX5HWS_ACTION_TYP_MODIFY_HDR] = "MODIFY_HDR",
+ [MLX5HWS_ACTION_TYP_VPORT] = "VPORT",
+ [MLX5HWS_ACTION_TYP_MISS] = "DEFAULT_MISS",
+ [MLX5HWS_ACTION_TYP_POP_VLAN] = "POP_VLAN",
+ [MLX5HWS_ACTION_TYP_PUSH_VLAN] = "PUSH_VLAN",
+ [MLX5HWS_ACTION_TYP_ASO_METER] = "ASO_METER",
+ [MLX5HWS_ACTION_TYP_DEST_ARRAY] = "DEST_ARRAY",
+ [MLX5HWS_ACTION_TYP_INSERT_HEADER] = "INSERT_HEADER",
+ [MLX5HWS_ACTION_TYP_REMOVE_HEADER] = "REMOVE_HEADER",
+ [MLX5HWS_ACTION_TYP_SAMPLER] = "SAMPLER",
+ [MLX5HWS_ACTION_TYP_RANGE] = "RANGE",
+};
+
+static_assert(ARRAY_SIZE(mlx5hws_action_type_str) == MLX5HWS_ACTION_TYP_MAX,
+ "Missing mlx5hws_action_type_str");
+
+const char *mlx5hws_action_type_to_str(enum mlx5hws_action_type action_type)
+{
+ return mlx5hws_action_type_str[action_type];
+}
+
+enum mlx5hws_action_type mlx5hws_action_get_type(struct mlx5hws_action *action)
+{
+ return action->type;
+}
+
+static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
+ enum mlx5hws_context_shared_stc_type stc_type,
+ u8 tbl_type)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_action_shared_stc *shared_stc;
+ int ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+ if (ctx->common_res[tbl_type].shared_stc[stc_type]) {
+ ctx->common_res[tbl_type].shared_stc[stc_type]->refcount++;
+ mutex_unlock(&ctx->ctrl_lock);
+ return 0;
+ }
+
+ shared_stc = kzalloc(sizeof(*shared_stc), GFP_KERNEL);
+ if (!shared_stc) {
+ ret = -ENOMEM;
+ goto unlock_and_out;
+ }
+ switch (stc_type) {
+ case MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3:
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE;
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ stc_attr.remove_header.decap = 0;
+ stc_attr.remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+ stc_attr.remove_header.end_anchor = MLX5_HEADER_ANCHOR_IPV6_IPV4;
+ break;
+ case MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP:
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ stc_attr.remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START;
+ stc_attr.remove_words.num_of_words = MLX5HWS_ACTION_HDR_LEN_L2_VLAN;
+ break;
+ default:
+ mlx5hws_err(ctx, "No such stc_type: %d\n", stc_type);
+ pr_warn("HWS: Invalid stc_type: %d\n", stc_type);
+ ret = -EINVAL;
+ goto unlock_and_out;
+ }
+
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &shared_stc->stc_chunk);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate shared decap l2 STC\n");
+ goto free_shared_stc;
+ }
+
+ ctx->common_res[tbl_type].shared_stc[stc_type] = shared_stc;
+ ctx->common_res[tbl_type].shared_stc[stc_type]->refcount = 1;
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+free_shared_stc:
+ kfree(shared_stc);
+unlock_and_out:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static int hws_action_get_shared_stc(struct mlx5hws_action *action,
+ enum mlx5hws_context_shared_stc_type stc_type)
+{
+ struct mlx5hws_context *ctx = action->ctx;
+ int ret;
+
+ if (stc_type >= MLX5HWS_CONTEXT_SHARED_STC_MAX) {
+ pr_warn("HWS: Invalid shared stc_type: %d\n", stc_type);
+ return -EINVAL;
+ }
+
+ if (unlikely(!(action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB))) {
+ pr_warn("HWS: Invalid action->flags: %d\n", action->flags);
+ return -EINVAL;
+ }
+
+ ret = hws_action_get_shared_stc_nic(ctx, stc_type, MLX5HWS_TABLE_TYPE_FDB);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Failed to allocate memory for FDB shared STCs (type: %d)\n",
+ stc_type);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hws_action_put_shared_stc(struct mlx5hws_action *action,
+ enum mlx5hws_context_shared_stc_type stc_type)
+{
+ enum mlx5hws_table_type tbl_type = MLX5HWS_TABLE_TYPE_FDB;
+ struct mlx5hws_action_shared_stc *shared_stc;
+ struct mlx5hws_context *ctx = action->ctx;
+
+ if (stc_type >= MLX5HWS_CONTEXT_SHARED_STC_MAX) {
+ pr_warn("HWS: Invalid shared stc_type: %d\n", stc_type);
+ return;
+ }
+
+ mutex_lock(&ctx->ctrl_lock);
+ if (--ctx->common_res[tbl_type].shared_stc[stc_type]->refcount) {
+ mutex_unlock(&ctx->ctrl_lock);
+ return;
+ }
+
+ shared_stc = ctx->common_res[tbl_type].shared_stc[stc_type];
+
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &shared_stc->stc_chunk);
+ kfree(shared_stc);
+ ctx->common_res[tbl_type].shared_stc[stc_type] = NULL;
+ mutex_unlock(&ctx->ctrl_lock);
+}
+
+static void hws_action_print_combo(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type *user_actions)
+{
+ mlx5hws_err(ctx, "Invalid action_type sequence");
+ while (*user_actions != MLX5HWS_ACTION_TYP_LAST) {
+ mlx5hws_err(ctx, " %s", mlx5hws_action_type_to_str(*user_actions));
+ user_actions++;
+ }
+ mlx5hws_err(ctx, "\n");
+}
+
+bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type *user_actions,
+ enum mlx5hws_table_type table_type)
+{
+ const u32 *order_arr = action_order_arr[table_type];
+ u8 order_idx = 0;
+ u8 user_idx = 0;
+ bool valid_combo;
+
+ if (table_type >= MLX5HWS_TABLE_TYPE_MAX) {
+ mlx5hws_err(ctx, "Invalid table_type %d", table_type);
+ return false;
+ }
+
+ while (order_arr[order_idx] != BIT(MLX5HWS_ACTION_TYP_LAST)) {
+ /* User action order validated, move to the next user action */
+ if (BIT(user_actions[user_idx]) & order_arr[order_idx])
+ user_idx++;
+
+ /* Iterate to the next supported action in the order */
+ order_idx++;
+ }
+
+ /* Combination is valid if all user actions were processed */
+ valid_combo = user_actions[user_idx] == MLX5HWS_ACTION_TYP_LAST;
+ if (!valid_combo)
+ hws_action_print_combo(ctx, user_actions);
+
+ return valid_combo;
+}
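+
+/*
+ * For example, given the FDB order table above, a hypothetical user_actions
+ * sequence such as the following is accepted by mlx5hws_action_check_combo(),
+ * since every type appears at a position allowed by action_order_arr:
+ *
+ *	enum mlx5hws_action_type seq[] = {
+ *		MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2,
+ *		MLX5HWS_ACTION_TYP_MODIFY_HDR,
+ *		MLX5HWS_ACTION_TYP_CTR,
+ *		MLX5HWS_ACTION_TYP_TBL,
+ *		MLX5HWS_ACTION_TYP_LAST,
+ *	};
+ */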
+
+static bool
+hws_action_fixup_stc_attr(struct mlx5hws_context *ctx,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+ struct mlx5hws_cmd_stc_modify_attr *fixup_stc_attr,
+ enum mlx5hws_table_type table_type,
+ bool is_mirror)
+{
+ bool use_fixup = false;
+ u32 fw_tbl_type;
+ u32 base_id;
+
+ fw_tbl_type = mlx5hws_table_get_res_fw_ft_type(table_type, is_mirror);
+
+ switch (stc_attr->action_type) {
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
+ if (is_mirror && stc_attr->ste_table.ignore_tx) {
+ fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+ fixup_stc_attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+ use_fixup = true;
+ break;
+ }
+ if (!is_mirror)
+ base_id = mlx5hws_pool_chunk_get_base_id(stc_attr->ste_table.ste_pool,
+ &stc_attr->ste_table.ste);
+ else
+ base_id =
+ mlx5hws_pool_chunk_get_base_mirror_id(stc_attr->ste_table.ste_pool,
+ &stc_attr->ste_table.ste);
+
+ *fixup_stc_attr = *stc_attr;
+ fixup_stc_attr->ste_table.ste_obj_id = base_id;
+ use_fixup = true;
+ break;
+
+ case MLX5_IFC_STC_ACTION_TYPE_TAG:
+ if (fw_tbl_type == FS_FT_FDB_TX) {
+ fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_NOP;
+ fixup_stc_attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+ use_fixup = true;
+ }
+ break;
+
+ case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
+ if (fw_tbl_type == FS_FT_FDB_TX || fw_tbl_type == FS_FT_FDB_RX) {
+ fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT;
+ fixup_stc_attr->action_offset = stc_attr->action_offset;
+ fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+ fixup_stc_attr->vport.esw_owner_vhca_id = ctx->caps->vhca_id;
+ fixup_stc_attr->vport.vport_num = ctx->caps->eswitch_manager_vport_number;
+ fixup_stc_attr->vport.eswitch_owner_vhca_id_valid =
+ ctx->caps->merged_eswitch;
+ use_fixup = true;
+ }
+ break;
+
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
+ if (stc_attr->vport.vport_num != MLX5_VPORT_UPLINK)
+ break;
+
+ if (fw_tbl_type == FS_FT_FDB_TX || fw_tbl_type == FS_FT_FDB_RX) {
+ /* The FW doesn't allow going to the wire in TX/RX via JUMP_TO_VPORT */
+ fixup_stc_attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK;
+ fixup_stc_attr->action_offset = stc_attr->action_offset;
+ fixup_stc_attr->stc_offset = stc_attr->stc_offset;
+ fixup_stc_attr->vport.vport_num = 0;
+ fixup_stc_attr->vport.esw_owner_vhca_id = stc_attr->vport.esw_owner_vhca_id;
+ fixup_stc_attr->vport.eswitch_owner_vhca_id_valid =
+ stc_attr->vport.eswitch_owner_vhca_id_valid;
+ }
+ use_fixup = true;
+ break;
+
+ default:
+ break;
+ }
+
+ return use_fixup;
+}
+
+int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+ u32 table_type,
+ struct mlx5hws_pool_chunk *stc)
+__must_hold(&ctx->ctrl_lock)
+{
+ struct mlx5hws_cmd_stc_modify_attr cleanup_stc_attr = {0};
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
+ struct mlx5hws_cmd_stc_modify_attr fixup_stc_attr = {0};
+ bool use_fixup;
+ u32 obj_0_id;
+ int ret;
+
+ ret = mlx5hws_pool_chunk_alloc(stc_pool, stc);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate single action STC\n");
+ return ret;
+ }
+
+ stc_attr->stc_offset = stc->offset;
+
+ /* Dynamic reparse not supported, overwrite and use default */
+ if (!mlx5hws_context_cap_dynamic_reparse(ctx))
+ stc_attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+
+ obj_0_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc);
+
+ /* Adjust the stc_attr according to table/action limitations */
+ use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr, &fixup_stc_attr, table_type, false);
+ ret = mlx5hws_cmd_stc_modify(ctx->mdev, obj_0_id,
+ use_fixup ? &fixup_stc_attr : stc_attr);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to modify STC action_type %d tbl_type %d\n",
+ stc_attr->action_type, table_type);
+ goto free_chunk;
+ }
+
+ /* Modify the FDB peer */
+ if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
+ u32 obj_1_id;
+
+ obj_1_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc);
+
+ use_fixup = hws_action_fixup_stc_attr(ctx, stc_attr,
+ &fixup_stc_attr,
+ table_type, true);
+ ret = mlx5hws_cmd_stc_modify(ctx->mdev, obj_1_id,
+ use_fixup ? &fixup_stc_attr : stc_attr);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Failed to modify peer STC action_type %d tbl_type %d\n",
+ stc_attr->action_type, table_type);
+ goto clean_obj_0;
+ }
+ }
+
+ return 0;
+
+clean_obj_0:
+ cleanup_stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+ cleanup_stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ cleanup_stc_attr.stc_offset = stc->offset;
+ mlx5hws_cmd_stc_modify(ctx->mdev, obj_0_id, &cleanup_stc_attr);
+free_chunk:
+ mlx5hws_pool_chunk_free(stc_pool, stc);
+ return ret;
+}
+
+void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx,
+ u32 table_type,
+ struct mlx5hws_pool_chunk *stc)
+__must_hold(&ctx->ctrl_lock)
+{
+ struct mlx5hws_pool *stc_pool = ctx->stc_pool[table_type];
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ u32 obj_id;
+
+ /* Modify the STC not to point to an object */
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.stc_offset = stc->offset;
+ obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, stc);
+ mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
+
+ if (table_type == MLX5HWS_TABLE_TYPE_FDB) {
+ obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, stc);
+ mlx5hws_cmd_stc_modify(ctx->mdev, obj_id, &stc_attr);
+ }
+
+ mlx5hws_pool_chunk_free(stc_pool, stc);
+}
+
+static u32 hws_action_get_mh_stc_type(struct mlx5hws_context *ctx,
+ __be64 pattern)
+{
+ u8 action_type = MLX5_GET(set_action_in, &pattern, action_type);
+
+ switch (action_type) {
+ case MLX5_MODIFICATION_TYPE_SET:
+ return MLX5_IFC_STC_ACTION_TYPE_SET;
+ case MLX5_MODIFICATION_TYPE_ADD:
+ return MLX5_IFC_STC_ACTION_TYPE_ADD;
+ case MLX5_MODIFICATION_TYPE_COPY:
+ return MLX5_IFC_STC_ACTION_TYPE_COPY;
+ case MLX5_MODIFICATION_TYPE_ADD_FIELD:
+ return MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD;
+ default:
+ mlx5hws_err(ctx, "Unsupported action type: 0x%x\n", action_type);
+ return MLX5_IFC_STC_ACTION_TYPE_NOP;
+ }
+}
+
+static void hws_action_fill_stc_attr(struct mlx5hws_action *action,
+ u32 obj_id,
+ struct mlx5hws_cmd_stc_modify_attr *attr)
+{
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+
+ switch (action->type) {
+ case MLX5HWS_ACTION_TYP_TAG:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_TAG;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ break;
+ case MLX5HWS_ACTION_TYP_DROP:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_DROP;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ break;
+ case MLX5HWS_ACTION_TYP_MISS:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ break;
+ case MLX5HWS_ACTION_TYP_CTR:
+ attr->id = obj_id;
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_COUNTER;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW0;
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ case MLX5HWS_ACTION_TYP_MODIFY_HDR:
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ if (action->modify_header.require_reparse)
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+
+ if (action->modify_header.num_of_actions == 1) {
+ attr->modify_action.data = action->modify_header.single_action;
+ attr->action_type = hws_action_get_mh_stc_type(action->ctx,
+ attr->modify_action.data);
+
+ if (attr->action_type == MLX5_IFC_STC_ACTION_TYPE_ADD ||
+ attr->action_type == MLX5_IFC_STC_ACTION_TYPE_SET)
+ MLX5_SET(set_action_in, &attr->modify_action.data, data, 0);
+ } else {
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST;
+ attr->modify_header.arg_id = action->modify_header.arg_id;
+ attr->modify_header.pattern_id = action->modify_header.pat_id;
+ }
+ break;
+ case MLX5HWS_ACTION_TYP_TBL:
+ case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ attr->dest_table_id = obj_id;
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ attr->remove_header.decap = 1;
+ attr->remove_header.start_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+ attr->remove_header.end_anchor = MLX5_HEADER_ANCHOR_INNER_MAC;
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ if (!action->reformat.require_reparse)
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ attr->insert_header.encap = action->reformat.encap;
+ attr->insert_header.insert_anchor = action->reformat.anchor;
+ attr->insert_header.arg_id = action->reformat.arg_id;
+ attr->insert_header.header_size = action->reformat.header_size;
+ attr->insert_header.insert_offset = action->reformat.offset;
+ break;
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_ASO;
+ attr->aso.aso_type = ASO_OPC_MOD_POLICER;
+ attr->aso.devx_obj_id = obj_id;
+ attr->aso.return_reg_id = action->aso.return_reg_id;
+ break;
+ case MLX5HWS_ACTION_TYP_VPORT:
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT;
+ attr->vport.vport_num = action->vport.vport_num;
+ attr->vport.esw_owner_vhca_id = action->vport.esw_owner_vhca_id;
+ attr->vport.eswitch_owner_vhca_id_valid = action->vport.esw_owner_vhca_id_valid;
+ break;
+ case MLX5HWS_ACTION_TYP_POP_VLAN:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ attr->remove_words.start_anchor = MLX5_HEADER_ANCHOR_FIRST_VLAN_START;
+ attr->remove_words.num_of_words = MLX5HWS_ACTION_HDR_LEN_L2_VLAN / 2;
+ break;
+ case MLX5HWS_ACTION_TYP_PUSH_VLAN:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ attr->insert_header.encap = 0;
+ attr->insert_header.is_inline = 1;
+ attr->insert_header.insert_anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+ attr->insert_header.insert_offset = MLX5HWS_ACTION_HDR_LEN_L2_MACS;
+ attr->insert_header.header_size = MLX5HWS_ACTION_HDR_LEN_L2_VLAN;
+ break;
+ case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS;
+ attr->remove_header.decap = 0; /* the only supported decap mode is 0 */
+ attr->remove_words.start_anchor = action->remove_header.anchor;
+ /* the size is already given in words */
+ attr->remove_words.num_of_words = action->remove_header.size;
+ attr->action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ attr->reparse_mode = MLX5_IFC_STC_REPARSE_ALWAYS;
+ break;
+ default:
+ mlx5hws_err(action->ctx, "Invalid action type %d\n", action->type);
+ }
+}
+
+static int
+hws_action_create_stcs(struct mlx5hws_action *action, u32 obj_id)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_context *ctx = action->ctx;
+ int ret;
+
+ hws_action_fill_stc_attr(action, obj_id, &stc_attr);
+
+ /* Block unsupported parallel obj modify over the same base */
+ mutex_lock(&ctx->ctrl_lock);
+
+ /* Allocate STC for FDB */
+ if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB) {
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr,
+ MLX5HWS_TABLE_TYPE_FDB,
+ &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ if (ret)
+ goto out_err;
+ }
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+out_err:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static void
+hws_action_destroy_stcs(struct mlx5hws_action *action)
+{
+ struct mlx5hws_context *ctx = action->ctx;
+
+ /* Block unsupported parallel obj modify over the same base */
+ mutex_lock(&ctx->ctrl_lock);
+
+ if (action->flags & MLX5HWS_ACTION_FLAG_HWS_FDB)
+ mlx5hws_action_free_single_stc(ctx, MLX5HWS_TABLE_TYPE_FDB,
+ &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+
+ mutex_unlock(&ctx->ctrl_lock);
+}
+
+static bool hws_action_is_flag_hws_fdb(u32 flags)
+{
+ return flags & MLX5HWS_ACTION_FLAG_HWS_FDB;
+}
+
+static bool
+hws_action_validate_hws_action(struct mlx5hws_context *ctx, u32 flags)
+{
+ if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) {
+ mlx5hws_err(ctx, "Cannot create HWS action since HWS is not supported\n");
+ return false;
+ }
+
+ if ((flags & MLX5HWS_ACTION_FLAG_HWS_FDB) && !ctx->caps->eswitch_manager) {
+ mlx5hws_err(ctx, "Cannot create HWS action for FDB for non-eswitch-manager\n");
+ return false;
+ }
+
+ return true;
+}
+
+static struct mlx5hws_action *
+hws_action_create_generic_bulk(struct mlx5hws_context *ctx,
+ u32 flags,
+ enum mlx5hws_action_type action_type,
+ u8 bulk_sz)
+{
+ struct mlx5hws_action *action;
+ int i;
+
+ if (!hws_action_is_flag_hws_fdb(flags)) {
+ mlx5hws_err(ctx,
+ "Action (type: %d) flags must specify only HWS FDB\n", action_type);
+ return NULL;
+ }
+
+ if (!hws_action_validate_hws_action(ctx, flags))
+ return NULL;
+
+ action = kcalloc(bulk_sz, sizeof(*action), GFP_KERNEL);
+ if (!action)
+ return NULL;
+
+ for (i = 0; i < bulk_sz; i++) {
+ action[i].ctx = ctx;
+ action[i].flags = flags;
+ action[i].type = action_type;
+ }
+
+ return action;
+}
+
+static struct mlx5hws_action *
+hws_action_create_generic(struct mlx5hws_context *ctx,
+ u32 flags,
+ enum mlx5hws_action_type action_type)
+{
+ return hws_action_create_generic_bulk(ctx, flags, action_type, 1);
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table_num(struct mlx5hws_context *ctx,
+ u32 table_id,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_TBL);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, table_id);
+ if (ret)
+ goto free_action;
+
+ action->dest_obj.obj_id = table_id;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_table *tbl,
+ u32 flags)
+{
+ return mlx5hws_action_create_dest_table_num(ctx, tbl->ft_id, flags);
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_drop(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_DROP);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_default_miss(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_MISS);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_tag(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_TAG);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+static struct mlx5hws_action *
+hws_action_create_aso(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type action_type,
+ u32 obj_id,
+ u8 return_reg_id,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, action_type);
+ if (!action)
+ return NULL;
+
+ action->aso.obj_id = obj_id;
+ action->aso.return_reg_id = return_reg_id;
+
+ ret = hws_action_create_stcs(action, obj_id);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_aso_meter(struct mlx5hws_context *ctx,
+ u32 obj_id,
+ u8 return_reg_id,
+ u32 flags)
+{
+ return hws_action_create_aso(ctx, MLX5HWS_ACTION_TYP_ASO_METER,
+ obj_id, return_reg_id, flags);
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_counter(struct mlx5hws_context *ctx,
+ u32 obj_id,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_CTR);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, obj_id);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_vport(struct mlx5hws_context *ctx,
+ u16 vport_num,
+ bool vhca_id_valid,
+ u16 vhca_id,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ if (!(flags & MLX5HWS_ACTION_FLAG_HWS_FDB)) {
+ mlx5hws_err(ctx, "Vport action is supported for FDB only\n");
+ return NULL;
+ }
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_VPORT);
+ if (!action)
+ return NULL;
+
+ if (!ctx->caps->merged_eswitch && vhca_id_valid && vhca_id != ctx->caps->vhca_id) {
+ mlx5hws_err(ctx, "Non merged eswitch cannot send to other vhca\n");
+ goto free_action;
+ }
+
+ action->vport.vport_num = vport_num;
+ action->vport.esw_owner_vhca_id_valid = vhca_id_valid;
+
+ if (vhca_id_valid)
+ action->vport.esw_owner_vhca_id = vhca_id;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed creating stc for vport %d\n", vport_num);
+ goto free_action;
+ }
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_PUSH_VLAN);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed creating stc for push vlan\n");
+ goto free_action;
+ }
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_pop_vlan(struct mlx5hws_context *ctx, u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_POP_VLAN);
+ if (!action)
+ return NULL;
+
+ ret = hws_action_get_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create remove stc for reformat\n");
+ goto free_action;
+ }
+
+ ret = hws_action_create_stcs(action, 0);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed creating stc for pop vlan\n");
+ goto free_shared;
+ }
+
+ return action;
+
+free_shared:
+ hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP);
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+static int
+hws_action_handle_insert_with_ptr(struct mlx5hws_action *action,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_sz)
+{
+ size_t max_sz = 0;
+ u32 arg_id;
+ int ret, i;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ if (hdrs[i].sz % W_SIZE != 0) {
+ mlx5hws_err(action->ctx,
+ "Header data size should be in WORD granularity\n");
+ return -EINVAL;
+ }
+ max_sz = max(hdrs[i].sz, max_sz);
+ }
+
+ /* Allocate single shared arg object for all headers */
+ ret = mlx5hws_arg_create(action->ctx,
+ hdrs->data,
+ max_sz,
+ log_bulk_sz,
+ action->flags & MLX5HWS_ACTION_FLAG_SHARED,
+ &arg_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ action[i].reformat.arg_id = arg_id;
+ action[i].reformat.header_size = hdrs[i].sz;
+ action[i].reformat.num_of_hdrs = num_of_hdrs;
+ action[i].reformat.max_hdr_sz = max_sz;
+ action[i].reformat.require_reparse = true;
+
+ if (action[i].type == MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2 ||
+ action[i].type == MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3) {
+ action[i].reformat.anchor = MLX5_HEADER_ANCHOR_PACKET_START;
+ action[i].reformat.offset = 0;
+ action[i].reformat.encap = 1;
+ }
+
+ ret = hws_action_create_stcs(&action[i], 0);
+ if (ret) {
+ mlx5hws_err(action->ctx, "Failed to create stc for reformat\n");
+ goto free_stc;
+ }
+ }
+
+ return 0;
+
+free_stc:
+ while (i--)
+ hws_action_destroy_stcs(&action[i]);
+
+ mlx5hws_arg_destroy(action->ctx, arg_id);
+ return ret;
+}
+
+static int
+hws_action_handle_l2_to_tunnel_l3(struct mlx5hws_action *action,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_sz)
+{
+ int ret;
+
+ /* The action is remove-l2-header + insert-l3-header */
+ ret = hws_action_get_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3);
+ if (ret) {
+ mlx5hws_err(action->ctx, "Failed to create remove stc for reformat\n");
+ return ret;
+ }
+
+ /* Reuse the insert with pointer for the L2L3 header */
+ ret = hws_action_handle_insert_with_ptr(action,
+ num_of_hdrs,
+ hdrs,
+ log_bulk_sz);
+ if (ret)
+ goto put_shared_stc;
+
+ return 0;
+
+put_shared_stc:
+ hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3);
+ return ret;
+}
+
+static void hws_action_prepare_decap_l3_actions(size_t data_sz,
+ u8 *mh_data,
+ int *num_of_actions)
+{
+ int actions;
+ u32 i;
+
+ /* Remove L2L3 outer headers */
+ MLX5_SET(stc_ste_param_remove, mh_data, action_type,
+ MLX5_MODIFICATION_TYPE_REMOVE);
+ MLX5_SET(stc_ste_param_remove, mh_data, decap, 0x1);
+ MLX5_SET(stc_ste_param_remove, mh_data, remove_start_anchor,
+ MLX5_HEADER_ANCHOR_PACKET_START);
+ MLX5_SET(stc_ste_param_remove, mh_data, remove_end_anchor,
+ MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4);
+ mh_data += MLX5HWS_ACTION_DOUBLE_SIZE; /* Assume every action is 2 dw */
+ actions = 1;
+
+ /* Add the new header using inline actions, 4 bytes at a time. The header
+ * is added in reverse order to the beginning of the packet to avoid
+ * incorrect parsing by the HW. Since the header is 14B or 18B, an extra
+ * two bytes are padded and later removed.
+ */
+ for (i = 0; i < data_sz / MLX5HWS_ACTION_INLINE_DATA_SIZE + 1; i++) {
+ MLX5_SET(stc_ste_param_insert, mh_data, action_type,
+ MLX5_MODIFICATION_TYPE_INSERT);
+ MLX5_SET(stc_ste_param_insert, mh_data, inline_data, 0x1);
+ MLX5_SET(stc_ste_param_insert, mh_data, insert_anchor,
+ MLX5_HEADER_ANCHOR_PACKET_START);
+ MLX5_SET(stc_ste_param_insert, mh_data, insert_size, 2);
+ mh_data += MLX5HWS_ACTION_DOUBLE_SIZE;
+ actions++;
+ }
+
+ /* Remove first 2 extra bytes */
+ MLX5_SET(stc_ste_param_remove_words, mh_data, action_type,
+ MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
+ MLX5_SET(stc_ste_param_remove_words, mh_data, remove_start_anchor,
+ MLX5_HEADER_ANCHOR_PACKET_START);
+ /* The hardware expects the size here in words (2 bytes each) */
+ MLX5_SET(stc_ste_param_remove_words, mh_data, remove_size, 1);
+ actions++;
+
+ *num_of_actions = actions;
+}
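+
+/*
+ * Worked example: for a 14-byte (no VLAN) L2 header, the sequence built
+ * above is one remove action, four 4-byte inline inserts (16 bytes, i.e.
+ * two padding bytes) and one final remove of that 2-byte padding, so
+ * *num_of_actions is set to 6.
+ */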
+
+static int
+hws_action_handle_tunnel_l3_to_l2(struct mlx5hws_action *action,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_sz)
+{
+ u8 mh_data[MLX5HWS_ACTION_REFORMAT_DATA_SIZE] = {0};
+ struct mlx5hws_context *ctx = action->ctx;
+ u32 arg_id, pat_id;
+ int num_of_actions;
+ int mh_data_size;
+ int ret, i;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ if (hdrs[i].sz != MLX5HWS_ACTION_HDR_LEN_L2 &&
+ hdrs[i].sz != MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN) {
+ mlx5hws_err(ctx, "Data size is not supported for decap-l3\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Create a full modify header action list in case the action is shared */
+ hws_action_prepare_decap_l3_actions(hdrs->sz, mh_data, &num_of_actions);
+ if (action->flags & MLX5HWS_ACTION_FLAG_SHARED)
+ mlx5hws_action_prepare_decap_l3_data(hdrs->data, mh_data, num_of_actions);
+
+ /* All DecapL3 cases require the same max arg size */
+ ret = mlx5hws_arg_create_modify_header_arg(ctx,
+ (__be64 *)mh_data,
+ num_of_actions,
+ log_bulk_sz,
+ action->flags & MLX5HWS_ACTION_FLAG_SHARED,
+ &arg_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ memset(mh_data, 0, MLX5HWS_ACTION_REFORMAT_DATA_SIZE);
+ hws_action_prepare_decap_l3_actions(hdrs[i].sz, mh_data, &num_of_actions);
+ mh_data_size = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE;
+
+ ret = mlx5hws_pat_get_pattern(ctx, (__be64 *)mh_data, mh_data_size, &pat_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate pattern for DecapL3\n");
+ goto free_stc_and_pat;
+ }
+
+ action[i].modify_header.max_num_of_actions = num_of_actions;
+ action[i].modify_header.num_of_actions = num_of_actions;
+ action[i].modify_header.num_of_patterns = num_of_hdrs;
+ action[i].modify_header.arg_id = arg_id;
+ action[i].modify_header.pat_id = pat_id;
+ action[i].modify_header.require_reparse =
+ mlx5hws_pat_require_reparse((__be64 *)mh_data, num_of_actions);
+
+ ret = hws_action_create_stcs(&action[i], 0);
+ if (ret) {
+ mlx5hws_pat_put_pattern(ctx, pat_id);
+ goto free_stc_and_pat;
+ }
+ }
+
+ return 0;
+
+free_stc_and_pat:
+ while (i--) {
+ hws_action_destroy_stcs(&action[i]);
+ mlx5hws_pat_put_pattern(ctx, action[i].modify_header.pat_id);
+ }
+
+ mlx5hws_arg_destroy(action->ctx, arg_id);
+ return ret;
+}
+
+static int
+hws_action_create_reformat_hws(struct mlx5hws_action *action,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 bulk_size)
+{
+ int ret;
+
+ switch (action->type) {
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+ ret = hws_action_create_stcs(action, 0);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ ret = hws_action_handle_insert_with_ptr(action, num_of_hdrs, hdrs, bulk_size);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ ret = hws_action_handle_l2_to_tunnel_l3(action, num_of_hdrs, hdrs, bulk_size);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ ret = hws_action_handle_tunnel_l3_to_l2(action, num_of_hdrs, hdrs, bulk_size);
+ break;
+ default:
+ mlx5hws_err(action->ctx, "Invalid HWS reformat action type\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_reformat(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type reformat_type,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_reformat_header *hdrs,
+ u32 log_bulk_size,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ if (!num_of_hdrs) {
+ mlx5hws_err(ctx, "Reformat num_of_hdrs cannot be zero\n");
+ return NULL;
+ }
+
+ action = hws_action_create_generic_bulk(ctx, flags, reformat_type, num_of_hdrs);
+ if (!action)
+ return NULL;
+
+ if ((flags & MLX5HWS_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_hdrs > 1)) {
+ mlx5hws_err(ctx, "Reformat flags don't fit HWS (flags: 0x%x)\n", flags);
+ goto free_action;
+ }
+
+ ret = hws_action_create_reformat_hws(action, num_of_hdrs, hdrs, log_bulk_size);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create HWS reformat action\n");
+ goto free_action;
+ }
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+static int
+hws_action_create_modify_header_hws(struct mlx5hws_action *action,
+ u8 num_of_patterns,
+ struct mlx5hws_action_mh_pattern *pattern,
+ u32 log_bulk_size)
+{
+ struct mlx5hws_context *ctx = action->ctx;
+ u16 num_actions, max_mh_actions = 0;
+ int i, ret, size_in_bytes;
+ u32 pat_id, arg_id = 0;
+ __be64 *new_pattern;
+ size_t pat_max_sz;
+
+ pat_max_sz = MLX5HWS_ARG_CHUNK_SIZE_MAX * MLX5HWS_ARG_DATA_SIZE;
+ size_in_bytes = pat_max_sz * sizeof(__be64);
+ new_pattern = kcalloc(num_of_patterns, size_in_bytes, GFP_KERNEL);
+ if (!new_pattern)
+ return -ENOMEM;
+
+ /* Calculate maximum number of mh actions for shared arg allocation */
+ for (i = 0; i < num_of_patterns; i++) {
+ size_t new_num_actions;
+ size_t cur_num_actions;
+ u32 nope_location;
+
+ cur_num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE;
+
+ mlx5hws_pat_calc_nope(pattern[i].data, cur_num_actions,
+ pat_max_sz / MLX5HWS_MODIFY_ACTION_SIZE,
+ &new_num_actions, &nope_location,
+ &new_pattern[i * pat_max_sz]);
+
+ action[i].modify_header.nope_locations = nope_location;
+ action[i].modify_header.num_of_actions = new_num_actions;
+
+ max_mh_actions = max(max_mh_actions, new_num_actions);
+ }
+
+ if (mlx5hws_arg_get_arg_log_size(max_mh_actions) >= MLX5HWS_ARG_CHUNK_SIZE_MAX) {
+ mlx5hws_err(ctx, "Num of actions (%d) bigger than allowed\n",
+ max_mh_actions);
+ ret = -EINVAL;
+ goto free_new_pat;
+ }
+
+ /* Allocate single shared arg for all patterns based on the max size */
+ if (max_mh_actions > 1) {
+ ret = mlx5hws_arg_create_modify_header_arg(ctx,
+ pattern->data,
+ max_mh_actions,
+ log_bulk_size,
+ action->flags &
+ MLX5HWS_ACTION_FLAG_SHARED,
+ &arg_id);
+ if (ret)
+ goto free_new_pat;
+ }
+
+ for (i = 0; i < num_of_patterns; i++) {
+ if (!mlx5hws_pat_verify_actions(ctx, pattern[i].data, pattern[i].sz)) {
+ mlx5hws_err(ctx, "Fail to verify pattern modify actions\n");
+ ret = -EINVAL;
+ goto free_stc_and_pat;
+ }
+ num_actions = pattern[i].sz / MLX5HWS_MODIFY_ACTION_SIZE;
+ action[i].modify_header.num_of_patterns = num_of_patterns;
+ action[i].modify_header.max_num_of_actions = max_mh_actions;
+
+ action[i].modify_header.require_reparse =
+ mlx5hws_pat_require_reparse(pattern[i].data, num_actions);
+
+ if (num_actions == 1) {
+ pat_id = 0;
+ /* Optimize single modify action to be used inline */
+ action[i].modify_header.single_action = pattern[i].data[0];
+ action[i].modify_header.single_action_type =
+ MLX5_GET(set_action_in, pattern[i].data, action_type);
+ } else {
+ /* Multiple modify actions require a pattern */
+ if (unlikely(action[i].modify_header.nope_locations)) {
+ size_t pattern_sz;
+
+ pattern_sz = action[i].modify_header.num_of_actions *
+ MLX5HWS_MODIFY_ACTION_SIZE;
+ ret =
+ mlx5hws_pat_get_pattern(ctx,
+ &new_pattern[i * pat_max_sz],
+ pattern_sz, &pat_id);
+ } else {
+ ret = mlx5hws_pat_get_pattern(ctx,
+ pattern[i].data,
+ pattern[i].sz,
+ &pat_id);
+ }
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Failed to allocate pattern for modify header\n");
+ goto free_stc_and_pat;
+ }
+
+ action[i].modify_header.arg_id = arg_id;
+ action[i].modify_header.pat_id = pat_id;
+ }
+ /* Allocate STC for each action representing a header */
+ ret = hws_action_create_stcs(&action[i], 0);
+ if (ret) {
+ if (pat_id)
+ mlx5hws_pat_put_pattern(ctx, pat_id);
+ goto free_stc_and_pat;
+ }
+ }
+
+ kfree(new_pattern);
+ return 0;
+
+free_stc_and_pat:
+ while (i--) {
+ hws_action_destroy_stcs(&action[i]);
+ if (action[i].modify_header.pat_id)
+ mlx5hws_pat_put_pattern(ctx, action[i].modify_header.pat_id);
+ }
+
+ if (arg_id)
+ mlx5hws_arg_destroy(ctx, arg_id);
+free_new_pat:
+ kfree(new_pattern);
+ return ret;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_modify_header(struct mlx5hws_context *ctx,
+ u8 num_of_patterns,
+ struct mlx5hws_action_mh_pattern *patterns,
+ u32 log_bulk_size,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+ int ret;
+
+ if (!num_of_patterns) {
+ mlx5hws_err(ctx, "Invalid number of patterns\n");
+ return NULL;
+ }
+ action = hws_action_create_generic_bulk(ctx, flags,
+ MLX5HWS_ACTION_TYP_MODIFY_HDR,
+ num_of_patterns);
+ if (!action)
+ return NULL;
+
+ if ((flags & MLX5HWS_ACTION_FLAG_SHARED) && (log_bulk_size || num_of_patterns > 1)) {
+ mlx5hws_err(ctx, "Action cannot be shared with requested pattern or size\n");
+ goto free_action;
+ }
+
+ ret = hws_action_create_modify_header_hws(action,
+ num_of_patterns,
+ patterns,
+ log_bulk_size);
+ if (ret)
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
+ size_t num_dest,
+ struct mlx5hws_action_dest_attr *dests,
+ bool ignore_flow_level,
+ u32 flow_source,
+ u32 flags)
+{
+ struct mlx5hws_cmd_set_fte_dest *dest_list = NULL;
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ struct mlx5hws_action *action;
+ u32 i /*, packet_reformat_id*/;
+ int ret;
+
+ if (num_dest <= 1) {
+ mlx5hws_err(ctx, "Action must have multiple dests\n");
+ return NULL;
+ }
+
+ if (flags == (MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED)) {
+ ft_attr.type = FS_FT_FDB;
+ ft_attr.level = ctx->caps->fdb_ft.max_level - 1;
+ } else {
+ mlx5hws_err(ctx, "Action flags not supported\n");
+ return NULL;
+ }
+
+ dest_list = kcalloc(num_dest, sizeof(*dest_list), GFP_KERNEL);
+ if (!dest_list)
+ return NULL;
+
+ for (i = 0; i < num_dest; i++) {
+ enum mlx5hws_action_type action_type = dests[i].dest->type;
+ struct mlx5hws_action *reformat_action = dests[i].reformat;
+
+ switch (action_type) {
+ case MLX5HWS_ACTION_TYP_TBL:
+ dest_list[i].destination_type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest_list[i].destination_id = dests[i].dest->dest_obj.obj_id;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.ignore_flow_level = ignore_flow_level;
+ /* ToDo: In SW steering we have a handling of 'go to WIRE'
+ * destination here by upper layer setting 'is_wire_ft' flag
+ * if the destination is wire.
+ * This is because uplink should be last dest in the list.
+ */
+ break;
+ case MLX5HWS_ACTION_TYP_VPORT:
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest_list[i].destination_id = dests[i].dest->vport.vport_num;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (ctx->caps->merged_eswitch) {
+ dest_list[i].ext_flags |=
+ MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID;
+ dest_list[i].esw_owner_vhca_id =
+ dests[i].dest->vport.esw_owner_vhca_id;
+ }
+ break;
+ default:
+ mlx5hws_err(ctx, "Unsupported action in dest_array\n");
+ goto free_dest_list;
+ }
+
+ if (reformat_action) {
+ mlx5hws_err(ctx, "dest_array with reformat action - unsupported\n");
+ goto free_dest_list;
+ }
+ }
+
+ fte_attr.dests_num = num_dest;
+ fte_attr.dests = dest_list;
+
+ fw_island = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr);
+ if (!fw_island)
+ goto free_dest_list;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_DEST_ARRAY);
+ if (!action)
+ goto destroy_fw_island;
+
+ ret = hws_action_create_stcs(action, fw_island->ft_id);
+ if (ret)
+ goto free_action;
+
+ action->dest_array.fw_island = fw_island;
+ action->dest_array.num_dest = num_dest;
+ action->dest_array.dest_list = dest_list;
+
+ return action;
+
+free_action:
+ kfree(action);
+destroy_fw_island:
+ mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, fw_island);
+free_dest_list:
+ for (i = 0; i < num_dest; i++) {
+ if (dest_list[i].ext_reformat_id)
+ mlx5hws_cmd_packet_reformat_destroy(ctx->mdev,
+ dest_list[i].ext_reformat_id);
+ }
+ kfree(dest_list);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_insert_header(struct mlx5hws_context *ctx,
+ u8 num_of_hdrs,
+ struct mlx5hws_action_insert_header *hdrs,
+ u32 log_bulk_size,
+ u32 flags)
+{
+ struct mlx5hws_action_reformat_header *reformat_hdrs;
+ struct mlx5hws_action *action;
+ int ret;
+ int i;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_INSERT_HEADER);
+ if (!action)
+ return NULL;
+
+ reformat_hdrs = kcalloc(num_of_hdrs, sizeof(*reformat_hdrs), GFP_KERNEL);
+ if (!reformat_hdrs)
+ goto free_action;
+
+ for (i = 0; i < num_of_hdrs; i++) {
+ if (hdrs[i].offset % W_SIZE != 0) {
+ mlx5hws_err(ctx, "Header offset should be in WORD granularity\n");
+ goto free_reformat_hdrs;
+ }
+
+ action[i].reformat.anchor = hdrs[i].anchor;
+ action[i].reformat.encap = hdrs[i].encap;
+ action[i].reformat.offset = hdrs[i].offset;
+
+ reformat_hdrs[i].sz = hdrs[i].hdr.sz;
+ reformat_hdrs[i].data = hdrs[i].hdr.data;
+ }
+
+ ret = hws_action_handle_insert_with_ptr(action, num_of_hdrs,
+ reformat_hdrs, log_bulk_size);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create HWS reformat action\n");
+ goto free_reformat_hdrs;
+ }
+
+ kfree(reformat_hdrs);
+
+ return action;
+
+free_reformat_hdrs:
+ kfree(reformat_hdrs);
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_remove_header(struct mlx5hws_context *ctx,
+ struct mlx5hws_action_remove_header_attr *attr,
+ u32 flags)
+{
+ struct mlx5hws_action *action;
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_REMOVE_HEADER);
+ if (!action)
+ return NULL;
+
+ /* Only header removal by start anchor and size is supported */
+ if (attr->size % W_SIZE != 0) {
+ mlx5hws_err(ctx,
+ "Invalid size, HW supports header remove in WORD granularity\n");
+ goto free_action;
+ }
+
+ if (attr->size > MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE) {
+ mlx5hws_err(ctx, "Header removal size limited to %u bytes\n",
+ MLX5HWS_ACTION_REMOVE_HEADER_MAX_SIZE);
+ goto free_action;
+ }
+
+ action->remove_header.anchor = attr->anchor;
+ action->remove_header.size = attr->size / W_SIZE;
+
+ if (hws_action_create_stcs(action, 0))
+ goto free_action;
+
+ return action;
+
+free_action:
+ kfree(action);
+ return NULL;
+}
+
+static struct mlx5hws_definer *
+hws_action_create_dest_match_range_definer(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_definer *definer;
+ __be32 *tag;
+ int ret;
+
+ definer = kzalloc(sizeof(*definer), GFP_KERNEL);
+ if (!definer)
+ return NULL;
+
+ definer->dw_selector[0] = MLX5_IFC_DEFINER_FORMAT_OFFSET_OUTER_ETH_PKT_LEN / 4;
+ /* Set DW0 tag mask */
+ tag = (__force __be32 *)definer->mask.jumbo;
+ tag[MLX5HWS_RULE_JUMBO_MATCH_TAG_OFFSET_DW0] = htonl(0xffffUL << 16);
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ ret = mlx5hws_definer_get_obj(ctx, definer);
+ if (ret < 0) {
+ mutex_unlock(&ctx->ctrl_lock);
+ kfree(definer);
+ return NULL;
+ }
+
+ mutex_unlock(&ctx->ctrl_lock);
+ definer->obj_id = ret;
+
+ return definer;
+}
+
+static struct mlx5hws_matcher_action_ste *
+hws_action_create_dest_match_range_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer,
+ u32 miss_ft_id)
+{
+ struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
+ struct mlx5hws_action_default_stc *default_stc;
+ struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_pool_attr pool_attr = {0};
+ struct mlx5hws_pool *ste_pool, *stc_pool;
+ struct mlx5hws_pool_chunk *ste;
+ u32 *rtc_0_id, *rtc_1_id;
+ u32 obj_id;
+ int ret;
+
+ /* Check if STE range is supported */
+ if (!IS_BIT_SET(ctx->caps->supp_ste_format_gen_wqe, MLX5_IFC_RTC_STE_FORMAT_RANGE)) {
+ mlx5hws_err(ctx, "Range STE format not supported\n");
+ return NULL;
+ }
+
+ table_ste = kzalloc(sizeof(*table_ste), GFP_KERNEL);
+ if (!table_ste)
+ return NULL;
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ pool_attr.table_type = MLX5HWS_TABLE_TYPE_FDB;
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
+ pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
+ pool_attr.alloc_log_sz = 1;
+ table_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!table_ste->pool) {
+ mlx5hws_err(ctx, "Failed to allocate memory ste pool\n");
+ goto free_ste;
+ }
+
+ /* Allocate RTC */
+ rtc_0_id = &table_ste->rtc_0_id;
+ rtc_1_id = &table_ste->rtc_1_id;
+ ste_pool = table_ste->pool;
+ ste = &table_ste->ste;
+ ste->order = 1;
+
+ rtc_attr.log_size = 0;
+ rtc_attr.log_depth = 0;
+ rtc_attr.miss_ft_id = miss_ft_id;
+ rtc_attr.num_hash_definer = 1;
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
+ rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
+ rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
+ rtc_attr.fw_gen_wqe = true;
+ rtc_attr.is_scnd_range = true;
+
+ obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+
+ rtc_attr.pd = ctx->pd_num;
+ rtc_attr.ste_base = obj_id;
+ rtc_attr.ste_offset = ste->offset;
+ rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
+ rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, false);
+
+ /* STC is a single resource (obj_id), use any STC for the ID */
+ stc_pool = ctx->stc_pool[MLX5HWS_TABLE_TYPE_FDB];
+ default_stc = ctx->common_res[MLX5HWS_TABLE_TYPE_FDB].default_stc;
+ obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
+ rtc_attr.stc_base = obj_id;
+
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create RTC");
+ goto pool_destroy;
+ }
+
+ /* Create mirror RTC */
+ obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ rtc_attr.ste_base = obj_id;
+ rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(MLX5HWS_TABLE_TYPE_FDB, true);
+
+ obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit);
+ rtc_attr.stc_base = obj_id;
+
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create mirror RTC");
+ goto destroy_rtc_0;
+ }
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return table_ste;
+
+destroy_rtc_0:
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id);
+pool_destroy:
+ mlx5hws_pool_destroy(table_ste->pool);
+free_ste:
+ mutex_unlock(&ctx->ctrl_lock);
+ kfree(table_ste);
+ return NULL;
+}
+
+static void
+hws_action_destroy_dest_match_range_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_matcher_action_ste *table_ste)
+{
+ mutex_lock(&ctx->ctrl_lock);
+
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, table_ste->rtc_1_id);
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, table_ste->rtc_0_id);
+ mlx5hws_pool_destroy(table_ste->pool);
+ kfree(table_ste);
+
+ mutex_unlock(&ctx->ctrl_lock);
+}
+
+static int
+hws_action_create_dest_match_range_fill_table(struct mlx5hws_context *ctx,
+ struct mlx5hws_matcher_action_ste *table_ste,
+ struct mlx5hws_action *hit_ft_action,
+ struct mlx5hws_definer *range_definer,
+ u32 min, u32 max)
+{
+ struct mlx5hws_wqe_gta_data_seg_ste match_wqe_data = {0};
+ struct mlx5hws_wqe_gta_data_seg_ste range_wqe_data = {0};
+ struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl = {0};
+ u32 no_use, used_rtc_0_id, used_rtc_1_id, ret;
+ struct mlx5hws_context_common_res *common_res;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_engine *queue;
+ __be32 *wqe_data_arr;
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ /* Get the control queue */
+ queue = &ctx->send_queue[ctx->queues - 1];
+ if (unlikely(mlx5hws_send_engine_err(queue))) {
+ ret = -EIO;
+ goto error;
+ }
+
+ /* Init default send STE attributes */
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.send_attr.user_data = &no_use;
+ ste_attr.send_attr.rule = NULL;
+ ste_attr.send_attr.fence = 1;
+ ste_attr.send_attr.notify_hw = true;
+ ste_attr.rtc_0 = table_ste->rtc_0_id;
+ ste_attr.rtc_1 = table_ste->rtc_1_id;
+ ste_attr.used_id_rtc_0 = &used_rtc_0_id;
+ ste_attr.used_id_rtc_1 = &used_rtc_1_id;
+
+ common_res = &ctx->common_res[MLX5HWS_TABLE_TYPE_FDB];
+
+ /* init an empty match STE which will always hit */
+ ste_attr.wqe_ctrl = &wqe_ctrl;
+ ste_attr.wqe_data = &match_wqe_data;
+ ste_attr.send_attr.match_definer_id = ctx->caps->trivial_match_definer;
+
+ /* Fill WQE control data */
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] =
+ htonl(common_res->default_stc->nop_ctr.offset);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+ htonl(common_res->default_stc->nop_dw5.offset);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] =
+ htonl(common_res->default_stc->nop_dw6.offset);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] =
+ htonl(common_res->default_stc->nop_dw7.offset);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |=
+ htonl(MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 << 29);
+ wqe_ctrl.stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] =
+ htonl(hit_ft_action->stc[MLX5HWS_TABLE_TYPE_FDB].offset);
+
+ wqe_data_arr = (__force __be32 *)&range_wqe_data;
+
+ ste_attr.range_wqe_data = &range_wqe_data;
+ ste_attr.send_attr.len += MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.send_attr.range_definer_id = mlx5hws_definer_get_id(range_definer);
+
+ /* Fill the range matching fields:
+ * min/max_value_2 correspond to match_dw_0 in its definer;
+ * min_value_2 is set in DW0 of the STE and max_value_2 in DW1 of the STE.
+ */
+ wqe_data_arr[MLX5HWS_MATCHER_OFFSET_TAG_DW0] = htonl(min << 16);
+ wqe_data_arr[MLX5HWS_MATCHER_OFFSET_TAG_DW1] = htonl(max << 16);
+
+ /* Send WQEs to FW */
+ mlx5hws_send_stes_fw(ctx, queue, &ste_attr);
+
+ /* Poll for completion */
+ ret = mlx5hws_send_queue_action(ctx, ctx->queues - 1,
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to drain control queue");
+ goto error;
+ }
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+error:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_dest_match_range(struct mlx5hws_context *ctx,
+ u32 field,
+ struct mlx5_flow_table *hit_ft,
+ struct mlx5_flow_table *miss_ft,
+ u32 min, u32 max, u32 flags)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_action *hit_ft_action;
+ struct mlx5hws_definer *definer;
+ struct mlx5hws_action *action;
+ u32 miss_ft_id = miss_ft->id;
+ u32 hit_ft_id = hit_ft->id;
+ int ret;
+
+ if (field != MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN ||
+ min > 0xffff || max > 0xffff) {
+ mlx5hws_err(ctx, "Invalid match range parameters\n");
+ return NULL;
+ }
+
+ action = hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_RANGE);
+ if (!action)
+ return NULL;
+
+ definer = hws_action_create_dest_match_range_definer(ctx);
+ if (!definer)
+ goto free_action;
+
+ table_ste = hws_action_create_dest_match_range_table(ctx, definer, miss_ft_id);
+ if (!table_ste)
+ goto destroy_definer;
+
+ hit_ft_action = mlx5hws_action_create_dest_table_num(ctx, hit_ft_id, flags);
+ if (!hit_ft_action)
+ goto destroy_table_ste;
+
+ ret = hws_action_create_dest_match_range_fill_table(ctx, table_ste,
+ hit_ft_action,
+ definer, min, max);
+ if (ret)
+ goto destroy_hit_ft_action;
+
+ action->range.table_ste = table_ste;
+ action->range.definer = definer;
+ action->range.hit_ft_action = hit_ft_action;
+
+ /* Allocate STC for jumps to STE */
+ mutex_lock(&ctx->ctrl_lock);
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ stc_attr.ste_table.ste = table_ste->ste;
+ stc_attr.ste_table.ste_pool = table_ste->pool;
+ stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
+
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, MLX5HWS_TABLE_TYPE_FDB,
+ &action->stc[MLX5HWS_TABLE_TYPE_FDB]);
+ if (ret)
+ goto error_unlock;
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return action;
+
+error_unlock:
+ mutex_unlock(&ctx->ctrl_lock);
+destroy_hit_ft_action:
+ mlx5hws_action_destroy(hit_ft_action);
+destroy_table_ste:
+ hws_action_destroy_dest_match_range_table(ctx, table_ste);
+destroy_definer:
+ mlx5hws_definer_free(ctx, definer);
+free_action:
+ kfree(action);
+ mlx5hws_err(ctx, "Failed to create action dest match range");
+ return NULL;
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_last(struct mlx5hws_context *ctx, u32 flags)
+{
+ return hws_action_create_generic(ctx, flags, MLX5HWS_ACTION_TYP_LAST);
+}
+
+struct mlx5hws_action *
+mlx5hws_action_create_flow_sampler(struct mlx5hws_context *ctx,
+ u32 sampler_id, u32 flags)
+{
+ mlx5hws_err(ctx, "Flow sampler action - unsupported\n");
+ return NULL;
+}
+
+static void hws_action_destroy_hws(struct mlx5hws_action *action)
+{
+ u32 ext_reformat_id;
+ bool shared_arg;
+ u32 obj_id;
+ u32 i;
+
+ switch (action->type) {
+ case MLX5HWS_ACTION_TYP_MISS:
+ case MLX5HWS_ACTION_TYP_TAG:
+ case MLX5HWS_ACTION_TYP_DROP:
+ case MLX5HWS_ACTION_TYP_CTR:
+ case MLX5HWS_ACTION_TYP_TBL:
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ case MLX5HWS_ACTION_TYP_PUSH_VLAN:
+ case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
+ case MLX5HWS_ACTION_TYP_VPORT:
+ hws_action_destroy_stcs(action);
+ break;
+ case MLX5HWS_ACTION_TYP_POP_VLAN:
+ hws_action_destroy_stcs(action);
+ hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP);
+ break;
+ case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ hws_action_destroy_stcs(action);
+ mlx5hws_cmd_forward_tbl_destroy(action->ctx->mdev, action->dest_array.fw_island);
+ for (i = 0; i < action->dest_array.num_dest; i++) {
+ ext_reformat_id = action->dest_array.dest_list[i].ext_reformat_id;
+ if (ext_reformat_id)
+ mlx5hws_cmd_packet_reformat_destroy(action->ctx->mdev,
+ ext_reformat_id);
+ }
+ kfree(action->dest_array.dest_list);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ case MLX5HWS_ACTION_TYP_MODIFY_HDR:
+ shared_arg = false;
+ for (i = 0; i < action->modify_header.num_of_patterns; i++) {
+ hws_action_destroy_stcs(&action[i]);
+ if (action[i].modify_header.num_of_actions > 1) {
+ mlx5hws_pat_put_pattern(action[i].ctx,
+ action[i].modify_header.pat_id);
+ /* Save shared arg object to be freed after */
+ obj_id = action[i].modify_header.arg_id;
+ shared_arg = true;
+ }
+ }
+ if (shared_arg)
+ mlx5hws_arg_destroy(action->ctx, obj_id);
+ break;
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ hws_action_put_shared_stc(action, MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3);
+ for (i = 0; i < action->reformat.num_of_hdrs; i++)
+ hws_action_destroy_stcs(&action[i]);
+ mlx5hws_arg_destroy(action->ctx, action->reformat.arg_id);
+ break;
+ case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ for (i = 0; i < action->reformat.num_of_hdrs; i++)
+ hws_action_destroy_stcs(&action[i]);
+ mlx5hws_arg_destroy(action->ctx, action->reformat.arg_id);
+ break;
+ case MLX5HWS_ACTION_TYP_RANGE:
+ hws_action_destroy_stcs(action);
+ hws_action_destroy_dest_match_range_table(action->ctx, action->range.table_ste);
+ mlx5hws_definer_free(action->ctx, action->range.definer);
+ mlx5hws_action_destroy(action->range.hit_ft_action);
+ break;
+ case MLX5HWS_ACTION_TYP_LAST:
+ break;
+ default:
+ pr_warn("HWS: Invalid action type: %d\n", action->type);
+ }
+}
+
+int mlx5hws_action_destroy(struct mlx5hws_action *action)
+{
+ hws_action_destroy_hws(action);
+
+ kfree(action);
+ return 0;
+}
+
+int mlx5hws_action_get_default_stc(struct mlx5hws_context *ctx, u8 tbl_type)
+__must_hold(&ctx->ctrl_lock)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_action_default_stc *default_stc;
+ int ret;
+
+ if (ctx->common_res[tbl_type].default_stc) {
+ ctx->common_res[tbl_type].default_stc->refcount++;
+ return 0;
+ }
+
+ default_stc = kzalloc(sizeof(*default_stc), GFP_KERNEL);
+ if (!default_stc)
+ return -ENOMEM;
+
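+	/* The default STCs are NOPs for the counter slot (DW0) and the
+	 * single/double action slots (DW5-DW7), plus a default "allow" hit;
+	 * setters use them to fill unused action fields of an STE.
+	 */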
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_NOP;
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW0;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->nop_ctr);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default counter STC\n");
+ goto free_default_stc;
+ }
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW5;
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->nop_dw5);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default NOP DW5 STC\n");
+ goto free_nop_ctr;
+ }
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW6;
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->nop_dw6);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default NOP DW6 STC\n");
+ goto free_nop_dw5;
+ }
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_DW7;
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->nop_dw7);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default NOP DW7 STC\n");
+ goto free_nop_dw6;
+ }
+
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_ALLOW;
+
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
+ &default_stc->default_hit);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate default allow STC\n");
+ goto free_nop_dw7;
+ }
+
+ ctx->common_res[tbl_type].default_stc = default_stc;
+ ctx->common_res[tbl_type].default_stc->refcount++;
+
+ return 0;
+
+free_nop_dw7:
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7);
+free_nop_dw6:
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6);
+free_nop_dw5:
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
+free_nop_ctr:
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
+free_default_stc:
+ kfree(default_stc);
+ return ret;
+}
+
+void mlx5hws_action_put_default_stc(struct mlx5hws_context *ctx, u8 tbl_type)
+__must_hold(&ctx->ctrl_lock)
+{
+ struct mlx5hws_action_default_stc *default_stc;
+
+	default_stc = ctx->common_res[tbl_type].default_stc;
+ if (--default_stc->refcount)
+ return;
+
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->default_hit);
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw7);
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw6);
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_dw5);
+ mlx5hws_action_free_single_stc(ctx, tbl_type, &default_stc->nop_ctr);
+ kfree(default_stc);
+ ctx->common_res[tbl_type].default_stc = NULL;
+}
+
+static void hws_action_modify_write(struct mlx5hws_send_engine *queue,
+ u32 arg_idx,
+ u8 *arg_data,
+ u16 num_of_actions,
+ u32 nope_locations)
+{
+ u8 *new_arg_data = NULL;
+ int i, j;
+
+ if (unlikely(nope_locations)) {
+ new_arg_data = kcalloc(num_of_actions,
+ MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
+ if (unlikely(!new_arg_data))
+ return;
+
+ for (i = 0, j = 0; i < num_of_actions; i++, j++) {
+ memcpy(&new_arg_data[j], arg_data, MLX5HWS_MODIFY_ACTION_SIZE);
+ if (BIT(i) & nope_locations)
+ j++;
+ }
+ }
+
+ mlx5hws_arg_write(queue, NULL, arg_idx,
+ new_arg_data ? new_arg_data : arg_data,
+ num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE);
+
+ kfree(new_arg_data);
+}
+
+void mlx5hws_action_prepare_decap_l3_data(u8 *src, u8 *dst, u16 num_of_actions)
+{
+ u8 *e_src;
+ int i;
+
+	/* num_of_actions = remove l3l2 + 4/5 inserts + remove extra 2 bytes.
+	 * Copy from the end of src to the start of dst;
+	 * move towards the end, where 2 is the leftover from 14B or 18B.
+	 */
+ if (num_of_actions == DECAP_L3_NUM_ACTIONS_W_NO_VLAN)
+ e_src = src + MLX5HWS_ACTION_HDR_LEN_L2;
+ else
+ e_src = src + MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN;
+
+ /* Move dst over the first remove action + zero data */
+ dst += MLX5HWS_ACTION_DOUBLE_SIZE;
+ /* Move dst over the first insert ctrl action */
+ dst += MLX5HWS_ACTION_DOUBLE_SIZE / 2;
+ /* Actions:
+ * no vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b.
+ * with vlan: r_h-insert_4b-insert_4b-insert_4b-insert_4b-insert_4b-remove_2b.
+ * the loop is without the last insertion.
+ */
+ for (i = 0; i < num_of_actions - 3; i++) {
+ e_src -= MLX5HWS_ACTION_INLINE_DATA_SIZE;
+ memcpy(dst, e_src, MLX5HWS_ACTION_INLINE_DATA_SIZE); /* data */
+ dst += MLX5HWS_ACTION_DOUBLE_SIZE;
+ }
+ /* Copy the last 2 bytes after a gap of 2 bytes which will be removed */
+ e_src -= MLX5HWS_ACTION_INLINE_DATA_SIZE / 2;
+ dst += MLX5HWS_ACTION_INLINE_DATA_SIZE / 2;
+ memcpy(dst, e_src, 2);
+}
+
+static int
+hws_action_get_shared_stc_offset(struct mlx5hws_context_common_res *common_res,
+ enum mlx5hws_context_shared_stc_type stc_type)
+{
+ return common_res->shared_stc[stc_type]->stc_chunk.offset;
+}
+
+static struct mlx5hws_actions_wqe_setter *
+hws_action_setter_find_first(struct mlx5hws_actions_wqe_setter *setter,
+ u8 req_flags)
+{
+ /* Use a new setter if requested flags are taken */
+ while (setter->flags & req_flags)
+ setter++;
+
+	/* Use the current setter if the required flags are not used */
+ return setter;
+}
+
+static void
+hws_action_apply_stc(struct mlx5hws_actions_apply_data *apply,
+ enum mlx5hws_action_stc_idx stc_idx,
+ u8 action_idx)
+{
+ struct mlx5hws_action *action = apply->rule_action[action_idx].action;
+
+ apply->wqe_ctrl->stc_ix[stc_idx] =
+ htonl(action->stc[apply->tbl_type].offset);
+}
+
+static void
+hws_action_setter_push_vlan(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = rule_action->push_vlan.vlan_hdr;
+
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW6, setter->idx_double);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+}
+
+static void
+hws_action_setter_modify_header(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+ struct mlx5hws_action *action;
+ u32 arg_sz, arg_idx;
+ u8 *single_action;
+ __be32 stc_idx;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+ action = rule_action->action;
+
+ stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+
+ if (action->modify_header.num_of_actions == 1) {
+ if (action->modify_header.single_action_type ==
+ MLX5_MODIFICATION_TYPE_COPY ||
+ action->modify_header.single_action_type ==
+ MLX5_MODIFICATION_TYPE_ADD_FIELD) {
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
+ return;
+ }
+
+ if (action->flags & MLX5HWS_ACTION_FLAG_SHARED)
+ single_action = (u8 *)&action->modify_header.single_action;
+ else
+ single_action = rule_action->modify_header.data;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] =
+ *(__be32 *)MLX5_ADDR_OF(set_action_in, single_action, data);
+ } else {
+		/* Multiply the argument offset by the number of args required for these actions */
+ arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions);
+ arg_idx = rule_action->modify_header.offset * arg_sz;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+ if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+ apply->require_dep = 1;
+ hws_action_modify_write(apply->queue,
+ action->modify_header.arg_id + arg_idx,
+ rule_action->modify_header.data,
+ action->modify_header.num_of_actions,
+ action->modify_header.nope_locations);
+ }
+ }
+}
+
+static void
+hws_action_setter_insert_ptr(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+ struct mlx5hws_action *action;
+ u32 arg_idx, arg_sz;
+ __be32 stc_idx;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+ action = rule_action->action + rule_action->reformat.hdr_idx;
+
+	/* Multiply the argument offset by the number of args required for the header size */
+ arg_sz = mlx5hws_arg_data_size_to_arg_size(action->reformat.max_hdr_sz);
+ arg_idx = rule_action->reformat.offset * arg_sz;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+ stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+
+ if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+ apply->require_dep = 1;
+ mlx5hws_arg_write(apply->queue, NULL,
+ action->reformat.arg_id + arg_idx,
+ rule_action->reformat.data,
+ action->reformat.header_size);
+ }
+}
+
+static void
+hws_action_setter_tnl_l3_to_l2(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+ struct mlx5hws_action *action;
+ u32 arg_sz, arg_idx;
+ __be32 stc_idx;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+ action = rule_action->action + rule_action->reformat.hdr_idx;
+
+	/* Multiply the argument offset by the number of args required for the number of actions */
+ arg_sz = mlx5hws_arg_get_arg_size(action->modify_header.max_num_of_actions);
+ arg_idx = rule_action->reformat.offset * arg_sz;
+
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(arg_idx);
+
+ stc_idx = htonl(action->stc[apply->tbl_type].offset);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = stc_idx;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+
+ if (!(action->flags & MLX5HWS_ACTION_FLAG_SHARED)) {
+ apply->require_dep = 1;
+ mlx5hws_arg_decapl3_write(apply->queue,
+ action->modify_header.arg_id + arg_idx,
+ rule_action->reformat.data,
+ action->modify_header.num_of_actions);
+ }
+}
+
+static void
+hws_action_setter_aso(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+ u32 exe_aso_ctrl;
+ u32 offset;
+
+ rule_action = &apply->rule_action[setter->idx_double];
+
+ switch (rule_action->action->type) {
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ /* exe_aso_ctrl format:
+ * [STC only and reserved bits 29b][init_color 2b][meter_id 1b]
+ */
+ offset = rule_action->aso_meter.offset / MLX5_ASO_METER_NUM_PER_OBJ;
+ exe_aso_ctrl = rule_action->aso_meter.offset % MLX5_ASO_METER_NUM_PER_OBJ;
+ exe_aso_ctrl |= rule_action->aso_meter.init_color <<
+ MLX5HWS_ACTION_METER_INIT_COLOR_OFFSET;
+ break;
+ default:
+ mlx5hws_err(rule_action->action->ctx,
+ "Unsupported ASO action type: %d\n", rule_action->action->type);
+ return;
+ }
+
+ /* aso_object_offset format: [24B] */
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = htonl(offset);
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = htonl(exe_aso_ctrl);
+
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW6, setter->idx_double);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+}
+
+static void
+hws_action_setter_tag(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+
+ rule_action = &apply->rule_action[setter->idx_single];
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = htonl(rule_action->tag.value);
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW5, setter->idx_single);
+}
+
+static void
+hws_action_setter_ctrl_ctr(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ struct mlx5hws_rule_action *rule_action;
+
+ rule_action = &apply->rule_action[setter->idx_ctr];
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW0] = htonl(rule_action->counter.offset);
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_CTRL, setter->idx_ctr);
+}
+
+static void
+hws_action_setter_single(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_DW5, setter->idx_single);
+}
+
+static void
+hws_action_setter_single_double_pop(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+ htonl(hws_action_get_shared_stc_offset(apply->common_res,
+ MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP));
+}
+
+static void
+hws_action_setter_hit(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0;
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_HIT, setter->idx_hit);
+}
+
+static void
+hws_action_setter_default_hit(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] =
+ htonl(apply->common_res->default_stc->default_hit.offset);
+}
+
+static void
+hws_action_setter_hit_next_action(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = htonl(apply->next_direct_idx << 6);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_HIT] = htonl(apply->jump_to_action_stc);
+}
+
+static void
+hws_action_setter_common_decap(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+ htonl(hws_action_get_shared_stc_offset(apply->common_res,
+ MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3));
+}
+
+static void
+hws_action_setter_range(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ /* Always jump to index zero */
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_HIT_LSB] = 0;
+ hws_action_apply_stc(apply, MLX5HWS_ACTION_STC_IDX_HIT, setter->idx_hit);
+}
+
+int mlx5hws_action_template_process(struct mlx5hws_action_template *at)
+{
+ struct mlx5hws_actions_wqe_setter *start_setter = at->setters + 1;
+ enum mlx5hws_action_type *action_type = at->action_type_arr;
+ struct mlx5hws_actions_wqe_setter *setter = at->setters;
+ struct mlx5hws_actions_wqe_setter *pop_setter = NULL;
+ struct mlx5hws_actions_wqe_setter *last_setter;
+ int i;
+
+ /* Note: Given action combination must be valid */
+
+	/* Check if actions were already processed */
+ if (at->num_of_action_stes)
+ return 0;
+
+ for (i = 0; i < MLX5HWS_ACTION_MAX_STE; i++)
+ setter[i].set_hit = &hws_action_setter_hit_next_action;
+
+ /* The same action template setters can be used with jumbo or match
+ * STE, to support both cases we reserve the first setter for cases
+ * with jumbo STE to allow jump to the first action STE.
+ * This extra setter can be reduced in some cases on rule creation.
+ */
+ setter = start_setter;
+ last_setter = start_setter;
+
+ for (i = 0; i < at->num_actions; i++) {
+ switch (action_type[i]) {
+ case MLX5HWS_ACTION_TYP_DROP:
+ case MLX5HWS_ACTION_TYP_TBL:
+ case MLX5HWS_ACTION_TYP_DEST_ARRAY:
+ case MLX5HWS_ACTION_TYP_VPORT:
+ case MLX5HWS_ACTION_TYP_MISS:
+ /* Hit action */
+ last_setter->flags |= ASF_HIT;
+ last_setter->set_hit = &hws_action_setter_hit;
+ last_setter->idx_hit = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_RANGE:
+ last_setter->flags |= ASF_HIT;
+ last_setter->set_hit = &hws_action_setter_range;
+ last_setter->idx_hit = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_POP_VLAN:
+ /* Single remove header to header */
+ if (pop_setter) {
+ /* We have 2 pops, use the shared */
+ pop_setter->set_single = &hws_action_setter_single_double_pop;
+ break;
+ }
+ setter = hws_action_setter_find_first(last_setter,
+ ASF_SINGLE1 | ASF_MODIFY |
+ ASF_INSERT);
+ setter->flags |= ASF_SINGLE1 | ASF_REMOVE;
+ setter->set_single = &hws_action_setter_single;
+ setter->idx_single = i;
+ pop_setter = setter;
+ break;
+
+ case MLX5HWS_ACTION_TYP_PUSH_VLAN:
+ /* Double insert inline */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+ setter->flags |= ASF_DOUBLE | ASF_INSERT;
+ setter->set_double = &hws_action_setter_push_vlan;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_MODIFY_HDR:
+ /* Double modify header list */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+ setter->flags |= ASF_DOUBLE | ASF_MODIFY;
+ setter->set_double = &hws_action_setter_modify_header;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ /* Double ASO action */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE);
+ setter->flags |= ASF_DOUBLE;
+ setter->set_double = &hws_action_setter_aso;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_REMOVE_HEADER:
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
+ /* Single remove header to header */
+ setter = hws_action_setter_find_first(last_setter,
+ ASF_SINGLE1 | ASF_MODIFY);
+ setter->flags |= ASF_SINGLE1 | ASF_REMOVE;
+ setter->set_single = &hws_action_setter_single;
+ setter->idx_single = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_INSERT_HEADER:
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ /* Double insert header with pointer */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+ setter->flags |= ASF_DOUBLE | ASF_INSERT;
+ setter->set_double = &hws_action_setter_insert_ptr;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ /* Single remove + Double insert header with pointer */
+ setter = hws_action_setter_find_first(last_setter,
+ ASF_SINGLE1 | ASF_DOUBLE);
+ setter->flags |= ASF_SINGLE1 | ASF_DOUBLE;
+ setter->set_double = &hws_action_setter_insert_ptr;
+ setter->idx_double = i;
+ setter->set_single = &hws_action_setter_common_decap;
+ setter->idx_single = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
+ /* Double modify header list with remove and push inline */
+ setter = hws_action_setter_find_first(last_setter, ASF_DOUBLE | ASF_REMOVE);
+ setter->flags |= ASF_DOUBLE | ASF_MODIFY | ASF_INSERT;
+ setter->set_double = &hws_action_setter_tnl_l3_to_l2;
+ setter->idx_double = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_TAG:
+ /* Single TAG action, search for any room from the start */
+ setter = hws_action_setter_find_first(start_setter, ASF_SINGLE1);
+ setter->flags |= ASF_SINGLE1;
+ setter->set_single = &hws_action_setter_tag;
+ setter->idx_single = i;
+ break;
+
+ case MLX5HWS_ACTION_TYP_CTR:
+ /* Control counter action
+			 * TODO: Currently the counter is executed first. Support
+			 * is needed for a single-action counter that is done last.
+ * Example: Decap + CTR
+ */
+ setter = hws_action_setter_find_first(start_setter, ASF_CTR);
+ setter->flags |= ASF_CTR;
+ setter->set_ctr = &hws_action_setter_ctrl_ctr;
+ setter->idx_ctr = i;
+ break;
+ default:
+			pr_warn("HWS: Invalid action type while processing action template: action_type[%d]=%d\n",
+ i, action_type[i]);
+ return -EOPNOTSUPP;
+ }
+
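+		/* Track the furthest setter used so far */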
+ last_setter = max(setter, last_setter);
+ }
+
+ /* Set default hit on the last STE if no hit action provided */
+ if (!(last_setter->flags & ASF_HIT))
+ last_setter->set_hit = &hws_action_setter_default_hit;
+
+ at->num_of_action_stes = last_setter - start_setter + 1;
+
+ /* Check if action template doesn't require any action DWs */
+ at->only_term = (at->num_of_action_stes == 1) &&
+ !(last_setter->flags & ~(ASF_CTR | ASF_HIT));
+
+ return 0;
+}
+
+struct mlx5hws_action_template *
+mlx5hws_action_template_create(enum mlx5hws_action_type action_type[])
+{
+ struct mlx5hws_action_template *at;
+ u8 num_actions = 0;
+ int i;
+
+ at = kzalloc(sizeof(*at), GFP_KERNEL);
+ if (!at)
+ return NULL;
+
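+	/* Count the given actions, including the terminating MLX5HWS_ACTION_TYP_LAST */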
+ while (action_type[num_actions++] != MLX5HWS_ACTION_TYP_LAST)
+ ;
+
+ at->num_actions = num_actions - 1;
+ at->action_type_arr = kcalloc(num_actions, sizeof(*action_type), GFP_KERNEL);
+ if (!at->action_type_arr)
+ goto free_at;
+
+ for (i = 0; i < num_actions; i++)
+ at->action_type_arr[i] = action_type[i];
+
+ return at;
+
+free_at:
+ kfree(at);
+ return NULL;
+}
+
+int mlx5hws_action_template_destroy(struct mlx5hws_action_template *at)
+{
+ kfree(at->action_type_arr);
+ kfree(at);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h
new file mode 100644
index 000000000000..bf5c1b241006
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_action.h
@@ -0,0 +1,307 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_ACTION_H_
+#define MLX5HWS_ACTION_H_
+
+/* Max number of STEs needed for a rule (including match) */
+#define MLX5HWS_ACTION_MAX_STE 20
+
+/* Max number of internal subactions of ipv6_ext */
+#define MLX5HWS_ACTION_IPV6_EXT_MAX_SA 4
+
+enum mlx5hws_action_stc_idx {
+ MLX5HWS_ACTION_STC_IDX_CTRL = 0,
+ MLX5HWS_ACTION_STC_IDX_HIT = 1,
+ MLX5HWS_ACTION_STC_IDX_DW5 = 2,
+ MLX5HWS_ACTION_STC_IDX_DW6 = 3,
+ MLX5HWS_ACTION_STC_IDX_DW7 = 4,
+ MLX5HWS_ACTION_STC_IDX_MAX = 5,
+	/* STC Jumbo STE combo: CTR, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_JUMBO_STE = 1,
+ /* STC combo1: CTR, SINGLE, DOUBLE, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO1 = 3,
+ /* STC combo2: CTR, 3 x SINGLE, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO2 = 4,
+	/* STC combo3: CTR, TRIPLE, Hit */
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO3 = 2,
+};
+
+enum mlx5hws_action_offset {
+ MLX5HWS_ACTION_OFFSET_DW0 = 0,
+ MLX5HWS_ACTION_OFFSET_DW5 = 5,
+ MLX5HWS_ACTION_OFFSET_DW6 = 6,
+ MLX5HWS_ACTION_OFFSET_DW7 = 7,
+ MLX5HWS_ACTION_OFFSET_HIT = 3,
+ MLX5HWS_ACTION_OFFSET_HIT_LSB = 4,
+};
+
+enum {
+ MLX5HWS_ACTION_DOUBLE_SIZE = 8,
+ MLX5HWS_ACTION_INLINE_DATA_SIZE = 4,
+ MLX5HWS_ACTION_HDR_LEN_L2_MACS = 12,
+ MLX5HWS_ACTION_HDR_LEN_L2_VLAN = 4,
+ MLX5HWS_ACTION_HDR_LEN_L2_ETHER = 2,
+ MLX5HWS_ACTION_HDR_LEN_L2 = (MLX5HWS_ACTION_HDR_LEN_L2_MACS +
+ MLX5HWS_ACTION_HDR_LEN_L2_ETHER),
+ MLX5HWS_ACTION_HDR_LEN_L2_W_VLAN = (MLX5HWS_ACTION_HDR_LEN_L2 +
+ MLX5HWS_ACTION_HDR_LEN_L2_VLAN),
+ MLX5HWS_ACTION_REFORMAT_DATA_SIZE = 64,
+ DECAP_L3_NUM_ACTIONS_W_NO_VLAN = 6,
+ DECAP_L3_NUM_ACTIONS_W_VLAN = 7,
+};
+
+enum mlx5hws_action_setter_flag {
+ ASF_SINGLE1 = 1 << 0,
+ ASF_SINGLE2 = 1 << 1,
+ ASF_SINGLE3 = 1 << 2,
+ ASF_DOUBLE = ASF_SINGLE2 | ASF_SINGLE3,
+ ASF_TRIPLE = ASF_SINGLE1 | ASF_DOUBLE,
+ ASF_INSERT = 1 << 3,
+ ASF_REMOVE = 1 << 4,
+ ASF_MODIFY = 1 << 5,
+ ASF_CTR = 1 << 6,
+ ASF_HIT = 1 << 7,
+};
+
+struct mlx5hws_action_default_stc {
+ struct mlx5hws_pool_chunk nop_ctr;
+ struct mlx5hws_pool_chunk nop_dw5;
+ struct mlx5hws_pool_chunk nop_dw6;
+ struct mlx5hws_pool_chunk nop_dw7;
+ struct mlx5hws_pool_chunk default_hit;
+ u32 refcount;
+};
+
+struct mlx5hws_action_shared_stc {
+ struct mlx5hws_pool_chunk stc_chunk;
+ u32 refcount;
+};
+
+struct mlx5hws_actions_apply_data {
+ struct mlx5hws_send_engine *queue;
+ struct mlx5hws_rule_action *rule_action;
+ __be32 *wqe_data;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ u32 jump_to_action_stc;
+ struct mlx5hws_context_common_res *common_res;
+ enum mlx5hws_table_type tbl_type;
+ u32 next_direct_idx;
+ u8 require_dep;
+};
+
+struct mlx5hws_actions_wqe_setter;
+
+typedef void (*mlx5hws_action_setter_fp)(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter);
+
+struct mlx5hws_actions_wqe_setter {
+ mlx5hws_action_setter_fp set_single;
+ mlx5hws_action_setter_fp set_double;
+ mlx5hws_action_setter_fp set_triple;
+ mlx5hws_action_setter_fp set_hit;
+ mlx5hws_action_setter_fp set_ctr;
+ u8 idx_single;
+ u8 idx_double;
+ u8 idx_triple;
+ u8 idx_ctr;
+ u8 idx_hit;
+ u8 stage_idx;
+ u8 flags;
+};
+
+struct mlx5hws_action_template {
+ struct mlx5hws_actions_wqe_setter setters[MLX5HWS_ACTION_MAX_STE];
+ enum mlx5hws_action_type *action_type_arr;
+ u8 num_of_action_stes;
+ u8 num_actions;
+ u8 only_term;
+};
+
+struct mlx5hws_action {
+ u8 type;
+ u8 flags;
+ struct mlx5hws_context *ctx;
+ union {
+ struct {
+ struct mlx5hws_pool_chunk stc[MLX5HWS_TABLE_TYPE_MAX];
+ union {
+ struct {
+ u32 pat_id;
+ u32 arg_id;
+ __be64 single_action;
+ u32 nope_locations;
+ u8 num_of_patterns;
+ u8 single_action_type;
+ u8 num_of_actions;
+ u8 max_num_of_actions;
+ u8 require_reparse;
+ } modify_header;
+ struct {
+ u32 arg_id;
+ u32 header_size;
+ u16 max_hdr_sz;
+ u8 num_of_hdrs;
+ u8 anchor;
+ u8 e_anchor;
+ u8 offset;
+ bool encap;
+ u8 require_reparse;
+ } reformat;
+	/* Fill the range matching fields:
+	 * min/max_value_2 correspond to match_dw_0 in the definer;
+	 * min_value_2 is set in DW0 of the STE and max_value_2 in DW1.
+	 */
+ struct {
+ u16 vport_num;
+ u16 esw_owner_vhca_id;
+ bool esw_owner_vhca_id_valid;
+ } vport;
+ struct {
+ u32 obj_id;
+ } dest_obj;
+ struct {
+ struct mlx5hws_cmd_forward_tbl *fw_island;
+ size_t num_dest;
+ struct mlx5hws_cmd_set_fte_dest *dest_list;
+ } dest_array;
+ struct {
+ u8 type;
+ u8 start_anchor;
+ u8 end_anchor;
+ u8 num_of_words;
+ bool decap;
+ } insert_hdr;
+ struct {
+ /* PRM start anchor from which header will be removed */
+ u8 anchor;
+ /* Header remove offset in bytes, from the start
+ * anchor to the location where remove header starts.
+ */
+ u8 offset;
+ /* Indicates the removed header size in bytes */
+ size_t size;
+ } remove_header;
+ struct {
+ struct mlx5hws_matcher_action_ste *table_ste;
+ struct mlx5hws_action *hit_ft_action;
+ struct mlx5hws_definer *definer;
+ } range;
+ };
+ };
+
+ struct ibv_flow_action *flow_action;
+ u32 obj_id;
+ struct ibv_qp *qp;
+ };
+};
+
+const char *mlx5hws_action_type_to_str(enum mlx5hws_action_type action_type);
+
+int mlx5hws_action_get_default_stc(struct mlx5hws_context *ctx,
+ u8 tbl_type);
+
+void mlx5hws_action_put_default_stc(struct mlx5hws_context *ctx,
+ u8 tbl_type);
+
+void mlx5hws_action_prepare_decap_l3_data(u8 *src, u8 *dst,
+ u16 num_of_actions);
+
+int mlx5hws_action_template_process(struct mlx5hws_action_template *at);
+
+bool mlx5hws_action_check_combo(struct mlx5hws_context *ctx,
+ enum mlx5hws_action_type *user_actions,
+ enum mlx5hws_table_type table_type);
+
+int mlx5hws_action_alloc_single_stc(struct mlx5hws_context *ctx,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+ u32 table_type,
+ struct mlx5hws_pool_chunk *stc);
+
+void mlx5hws_action_free_single_stc(struct mlx5hws_context *ctx,
+ u32 table_type,
+ struct mlx5hws_pool_chunk *stc);
+
+static inline void
+mlx5hws_action_setter_default_single(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] =
+ htonl(apply->common_res->default_stc->nop_dw5.offset);
+}
+
+static inline void
+mlx5hws_action_setter_default_double(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] =
+ htonl(apply->common_res->default_stc->nop_dw6.offset);
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] =
+ htonl(apply->common_res->default_stc->nop_dw7.offset);
+}
+
+static inline void
+mlx5hws_action_setter_default_ctr(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter)
+{
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW0] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] =
+ htonl(apply->common_res->default_stc->nop_ctr.offset);
+}
+
+static inline void
+mlx5hws_action_apply_setter(struct mlx5hws_actions_apply_data *apply,
+ struct mlx5hws_actions_wqe_setter *setter,
+ bool is_jumbo)
+{
+ u8 num_of_actions;
+
+ /* Set control counter */
+ if (setter->set_ctr)
+ setter->set_ctr(apply, setter);
+ else
+ mlx5hws_action_setter_default_ctr(apply, setter);
+
+ if (!is_jumbo) {
+ if (unlikely(setter->set_triple)) {
+ /* Set triple on match */
+ setter->set_triple(apply, setter);
+ num_of_actions = MLX5HWS_ACTION_STC_IDX_LAST_COMBO3;
+ } else {
+ /* Set single and double on match */
+ if (setter->set_single)
+ setter->set_single(apply, setter);
+ else
+ mlx5hws_action_setter_default_single(apply, setter);
+
+ if (setter->set_double)
+ setter->set_double(apply, setter);
+ else
+ mlx5hws_action_setter_default_double(apply, setter);
+
+ num_of_actions = setter->set_double ?
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO1 :
+ MLX5HWS_ACTION_STC_IDX_LAST_COMBO2;
+ }
+ } else {
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW5] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW6] = 0;
+ apply->wqe_data[MLX5HWS_ACTION_OFFSET_DW7] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW5] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW6] = 0;
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_DW7] = 0;
+ num_of_actions = MLX5HWS_ACTION_STC_IDX_LAST_JUMBO_STE;
+ }
+
+ /* Set next/final hit action */
+ setter->set_hit(apply, setter);
+
+ /* Set number of actions */
+ apply->wqe_ctrl->stc_ix[MLX5HWS_ACTION_STC_IDX_CTRL] |=
+ htonl(num_of_actions << 29);
+}
+
+#endif /* MLX5HWS_ACTION_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c
new file mode 100644
index 000000000000..e6ed66202a40
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+#include "mlx5hws_buddy.h"
+
+static int hws_buddy_init(struct mlx5hws_buddy_mem *buddy, u32 max_order)
+{
+ int i, s, ret = 0;
+
+ buddy->max_order = max_order;
+
+ buddy->bitmap = kcalloc(buddy->max_order + 1,
+ sizeof(*buddy->bitmap),
+ GFP_KERNEL);
+ if (!buddy->bitmap)
+ return -ENOMEM;
+
+ buddy->num_free = kcalloc(buddy->max_order + 1,
+ sizeof(*buddy->num_free),
+ GFP_KERNEL);
+ if (!buddy->num_free) {
+ ret = -ENOMEM;
+ goto err_out_free_bits;
+ }
+
+ for (i = 0; i <= (int)buddy->max_order; ++i) {
+ s = 1 << (buddy->max_order - i);
+
+ buddy->bitmap[i] = bitmap_zalloc(s, GFP_KERNEL);
+ if (!buddy->bitmap[i]) {
+ ret = -ENOMEM;
+ goto err_out_free_num_free;
+ }
+ }
+
+ bitmap_set(buddy->bitmap[buddy->max_order], 0, 1);
+ buddy->num_free[buddy->max_order] = 1;
+
+ return 0;
+
+err_out_free_num_free:
+ for (i = 0; i <= (int)buddy->max_order; ++i)
+ bitmap_free(buddy->bitmap[i]);
+
+ kfree(buddy->num_free);
+
+err_out_free_bits:
+ kfree(buddy->bitmap);
+ return ret;
+}
+
+struct mlx5hws_buddy_mem *mlx5hws_buddy_create(u32 max_order)
+{
+ struct mlx5hws_buddy_mem *buddy;
+
+ buddy = kzalloc(sizeof(*buddy), GFP_KERNEL);
+ if (!buddy)
+ return NULL;
+
+ if (hws_buddy_init(buddy, max_order))
+ goto free_buddy;
+
+ return buddy;
+
+free_buddy:
+ kfree(buddy);
+ return NULL;
+}
+
+void mlx5hws_buddy_cleanup(struct mlx5hws_buddy_mem *buddy)
+{
+ int i;
+
+ for (i = 0; i <= (int)buddy->max_order; ++i)
+ bitmap_free(buddy->bitmap[i]);
+
+ kfree(buddy->num_free);
+ kfree(buddy->bitmap);
+}
+
+static int hws_buddy_find_free_seg(struct mlx5hws_buddy_mem *buddy,
+ u32 start_order,
+ u32 *segment,
+ u32 *order)
+{
+ unsigned int seg, order_iter, m;
+
+ for (order_iter = start_order;
+ order_iter <= buddy->max_order; ++order_iter) {
+ if (!buddy->num_free[order_iter])
+ continue;
+
+ m = 1 << (buddy->max_order - order_iter);
+ seg = find_first_bit(buddy->bitmap[order_iter], m);
+
+ if (WARN(seg >= m,
+ "ICM Buddy: failed finding free mem for order %d\n",
+ order_iter))
+ return -ENOMEM;
+
+ break;
+ }
+
+ if (order_iter > buddy->max_order)
+ return -ENOMEM;
+
+ *segment = seg;
+ *order = order_iter;
+ return 0;
+}
+
+int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order)
+{
+ u32 seg, order_iter, err;
+
+ err = hws_buddy_find_free_seg(buddy, order, &seg, &order_iter);
+ if (err)
+ return err;
+
+ bitmap_clear(buddy->bitmap[order_iter], seg, 1);
+ --buddy->num_free[order_iter];
+
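+	/* Split the found block down to the requested order, marking the
+	 * buddy half free at each smaller order along the way.
+	 */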
+ while (order_iter > order) {
+ --order_iter;
+ seg <<= 1;
+ bitmap_set(buddy->bitmap[order_iter], seg ^ 1, 1);
+ ++buddy->num_free[order_iter];
+ }
+
+ seg <<= order;
+
+ return seg;
+}
+
+void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order)
+{
+ seg >>= order;
+
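+	/* Merge with the buddy block (seg ^ 1) as long as it is also free,
+	 * moving up one order at a time.
+	 */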
+ while (test_bit(seg ^ 1, buddy->bitmap[order])) {
+ bitmap_clear(buddy->bitmap[order], seg ^ 1, 1);
+ --buddy->num_free[order];
+ seg >>= 1;
+ ++order;
+ }
+
+ bitmap_set(buddy->bitmap[order], seg, 1);
+ ++buddy->num_free[order];
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h
new file mode 100644
index 000000000000..338c44bbedaf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_buddy.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_BUDDY_H_
+#define MLX5HWS_BUDDY_H_
+
+struct mlx5hws_buddy_mem {
+ unsigned long **bitmap;
+ unsigned int *num_free;
+ u32 max_order;
+};
+
+struct mlx5hws_buddy_mem *mlx5hws_buddy_create(u32 max_order);
+
+void mlx5hws_buddy_cleanup(struct mlx5hws_buddy_mem *buddy);
+
+int mlx5hws_buddy_alloc_mem(struct mlx5hws_buddy_mem *buddy, u32 order);
+
+void mlx5hws_buddy_free_mem(struct mlx5hws_buddy_mem *buddy, u32 seg, u32 order);
+
+#endif /* MLX5HWS_BUDDY_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c
new file mode 100644
index 000000000000..bd52b05db367
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.c
@@ -0,0 +1,997 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+static u16 hws_bwc_gen_queue_idx(struct mlx5hws_context *ctx)
+{
+ /* assign random queue */
+ return get_random_u8() % mlx5hws_bwc_queues(ctx);
+}
+
+static u16
+hws_bwc_get_burst_th(struct mlx5hws_context *ctx, u16 queue_id)
+{
+ return min(ctx->send_queue[queue_id].num_entries / 2,
+ MLX5HWS_BWC_MATCHER_REHASH_BURST_TH);
+}
+
+static struct mutex *
+hws_bwc_get_queue_lock(struct mlx5hws_context *ctx, u16 idx)
+{
+ return &ctx->bwc_send_queue_locks[idx];
+}
+
+static void hws_bwc_lock_all_queues(struct mlx5hws_context *ctx)
+{
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mutex *queue_lock; /* Protect the queue */
+ int i;
+
+ for (i = 0; i < bwc_queues; i++) {
+ queue_lock = hws_bwc_get_queue_lock(ctx, i);
+ mutex_lock(queue_lock);
+ }
+}
+
+static void hws_bwc_unlock_all_queues(struct mlx5hws_context *ctx)
+{
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mutex *queue_lock; /* Protect the queue */
+ int i = bwc_queues;
+
+ while (i--) {
+ queue_lock = hws_bwc_get_queue_lock(ctx, i);
+ mutex_unlock(queue_lock);
+ }
+}
+
+static void hws_bwc_matcher_init_attr(struct mlx5hws_matcher_attr *attr,
+ u32 priority,
+ u8 size_log)
+{
+ memset(attr, 0, sizeof(*attr));
+
+ attr->priority = priority;
+ attr->optimize_using_rule_idx = 0;
+ attr->mode = MLX5HWS_MATCHER_RESOURCE_MODE_RULE;
+ attr->optimize_flow_src = MLX5HWS_MATCHER_FLOW_SRC_ANY;
+ attr->insert_mode = MLX5HWS_MATCHER_INSERT_BY_HASH;
+ attr->distribute_mode = MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH;
+ attr->rule.num_log = size_log;
+ attr->resizable = true;
+ attr->max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
+}
+
+int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask,
+ enum mlx5hws_action_type action_types[])
+{
+ enum mlx5hws_action_type init_action_types[1] = { MLX5HWS_ACTION_TYP_LAST };
+ struct mlx5hws_context *ctx = table->ctx;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_matcher_attr attr = {0};
+ int i;
+
+ bwc_matcher->rules = kcalloc(bwc_queues, sizeof(*bwc_matcher->rules), GFP_KERNEL);
+ if (!bwc_matcher->rules)
+ goto err;
+
+ for (i = 0; i < bwc_queues; i++)
+ INIT_LIST_HEAD(&bwc_matcher->rules[i]);
+
+ hws_bwc_matcher_init_attr(&attr,
+ priority,
+ MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG);
+
+ bwc_matcher->priority = priority;
+ bwc_matcher->size_log = MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG;
+
+ /* create dummy action template */
+ bwc_matcher->at[0] =
+ mlx5hws_action_template_create(action_types ?
+ action_types : init_action_types);
+ if (!bwc_matcher->at[0]) {
+ mlx5hws_err(table->ctx, "BWC matcher: failed creating action template\n");
+ goto free_bwc_matcher_rules;
+ }
+
+ bwc_matcher->num_of_at = 1;
+
+ bwc_matcher->mt = mlx5hws_match_template_create(ctx,
+ mask->match_buf,
+ mask->match_sz,
+ match_criteria_enable);
+ if (!bwc_matcher->mt) {
+ mlx5hws_err(table->ctx, "BWC matcher: failed creating match template\n");
+ goto free_at;
+ }
+
+ bwc_matcher->matcher = mlx5hws_matcher_create(table,
+ &bwc_matcher->mt, 1,
+ &bwc_matcher->at[0],
+ bwc_matcher->num_of_at,
+ &attr);
+ if (!bwc_matcher->matcher) {
+ mlx5hws_err(table->ctx, "BWC matcher: failed creating HWS matcher\n");
+ goto free_mt;
+ }
+
+ return 0;
+
+free_mt:
+ mlx5hws_match_template_destroy(bwc_matcher->mt);
+free_at:
+ mlx5hws_action_template_destroy(bwc_matcher->at[0]);
+free_bwc_matcher_rules:
+ kfree(bwc_matcher->rules);
+err:
+ return -EINVAL;
+}
+
+struct mlx5hws_bwc_matcher *
+mlx5hws_bwc_matcher_create(struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher;
+ bool is_complex;
+ int ret;
+
+ if (!mlx5hws_context_bwc_supported(table->ctx)) {
+ mlx5hws_err(table->ctx,
+ "BWC matcher: context created w/o BWC API compatibility\n");
+ return NULL;
+ }
+
+ bwc_matcher = kzalloc(sizeof(*bwc_matcher), GFP_KERNEL);
+ if (!bwc_matcher)
+ return NULL;
+
+	/* Check if all the required match params can be matched
+	 * in a single STE; otherwise a complex matcher is needed.
+	 */
+
+ is_complex = mlx5hws_bwc_match_params_is_complex(table->ctx, match_criteria_enable, mask);
+ if (is_complex)
+ ret = mlx5hws_bwc_matcher_create_complex(bwc_matcher,
+ table,
+ priority,
+ match_criteria_enable,
+ mask);
+ else
+ ret = mlx5hws_bwc_matcher_create_simple(bwc_matcher,
+ table,
+ priority,
+ match_criteria_enable,
+ mask,
+ NULL);
+ if (ret)
+ goto free_bwc_matcher;
+
+ return bwc_matcher;
+
+free_bwc_matcher:
+ kfree(bwc_matcher);
+
+ return NULL;
+}
+
+int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ int i;
+
+ mlx5hws_matcher_destroy(bwc_matcher->matcher);
+ bwc_matcher->matcher = NULL;
+
+ for (i = 0; i < bwc_matcher->num_of_at; i++)
+ mlx5hws_action_template_destroy(bwc_matcher->at[i]);
+
+ mlx5hws_match_template_destroy(bwc_matcher->mt);
+ kfree(bwc_matcher->rules);
+
+ return 0;
+}
+
+int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ if (bwc_matcher->num_of_rules)
+ mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
+ "BWC matcher destroy: matcher still has %d rules\n",
+ bwc_matcher->num_of_rules);
+
+ mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
+
+ kfree(bwc_matcher);
+ return 0;
+}
+
+static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 *pending_rules,
+ bool drain)
+{
+ struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH];
+ u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id);
+ bool got_comp = *pending_rules >= burst_th;
+ bool queue_full;
+ int err = 0;
+ int ret;
+ int i;
+
+ /* Check if there are any completions at all */
+ if (!got_comp && !drain)
+ return 0;
+
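+	/* Keep polling while the queue is full, or while we are draining /
+	 * expecting completions and there are still pending rules.
+	 */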
+ queue_full = mlx5hws_send_engine_full(&ctx->send_queue[queue_id]);
+ while (queue_full || ((got_comp || drain) && *pending_rules)) {
+ ret = mlx5hws_send_queue_poll(ctx, queue_id, comp, burst_th);
+ if (unlikely(ret < 0)) {
+ mlx5hws_err(ctx, "BWC poll error: polling queue %d returned %d\n",
+ queue_id, ret);
+ return -EINVAL;
+ }
+
+ if (ret) {
+ (*pending_rules) -= ret;
+ for (i = 0; i < ret; i++) {
+ if (unlikely(comp[i].status != MLX5HWS_FLOW_OP_SUCCESS)) {
+ mlx5hws_err(ctx,
+ "BWC poll error: polling queue %d returned completion with error\n",
+ queue_id);
+ err = -EINVAL;
+ }
+ }
+ queue_full = false;
+ }
+
+ got_comp = !!ret;
+ }
+
+ return err;
+}
+
+void
+mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
+ u16 bwc_queue_idx,
+ u32 flow_source,
+ struct mlx5hws_rule_attr *rule_attr)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+
+ /* no use of INSERT_BY_INDEX in bwc rule */
+ rule_attr->rule_idx = 0;
+
+ /* notify HW at each rule insertion/deletion */
+ rule_attr->burst = 0;
+
+ /* We don't need user data, but the API requires it to exist */
+ rule_attr->user_data = (void *)0xFACADE;
+
+ rule_attr->queue_id = mlx5hws_bwc_get_queue_id(ctx, bwc_queue_idx);
+ rule_attr->flow_source = flow_source;
+}
+
+struct mlx5hws_bwc_rule *
+mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_bwc_rule *bwc_rule;
+
+ bwc_rule = kzalloc(sizeof(*bwc_rule), GFP_KERNEL);
+ if (unlikely(!bwc_rule))
+ goto out_err;
+
+ bwc_rule->rule = kzalloc(sizeof(*bwc_rule->rule), GFP_KERNEL);
+ if (unlikely(!bwc_rule->rule))
+ goto free_rule;
+
+ bwc_rule->bwc_matcher = bwc_matcher;
+ return bwc_rule;
+
+free_rule:
+ kfree(bwc_rule);
+out_err:
+ return NULL;
+}
+
+void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ if (likely(bwc_rule->rule))
+ kfree(bwc_rule->rule);
+ kfree(bwc_rule);
+}
+
+static void hws_bwc_rule_list_add(struct mlx5hws_bwc_rule *bwc_rule, u16 idx)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+
+ bwc_matcher->num_of_rules++;
+ bwc_rule->bwc_queue_idx = idx;
+ list_add(&bwc_rule->list_node, &bwc_matcher->rules[idx]);
+}
+
+static void hws_bwc_rule_list_remove(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+
+ bwc_matcher->num_of_rules--;
+ list_del_init(&bwc_rule->list_node);
+}
+
+static int
+hws_bwc_rule_destroy_hws_async(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ return mlx5hws_rule_destroy(bwc_rule->rule, attr);
+}
+
+static int
+hws_bwc_rule_destroy_hws_sync(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_attr *rule_attr)
+{
+ struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_flow_op_result completion;
+ int ret;
+
+ ret = hws_bwc_rule_destroy_hws_async(bwc_rule, rule_attr);
+ if (unlikely(ret))
+ return ret;
+
+ do {
+ ret = mlx5hws_send_queue_poll(ctx, rule_attr->queue_id, &completion, 1);
+ } while (ret != 1);
+
+ if (unlikely(completion.status != MLX5HWS_FLOW_OP_SUCCESS ||
+ (bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETED &&
+ bwc_rule->rule->status != MLX5HWS_RULE_STATUS_DELETING))) {
+ mlx5hws_err(ctx, "Failed destroying BWC rule: completion %d, rule status %d\n",
+ completion.status, bwc_rule->rule->status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ u16 idx = bwc_rule->bwc_queue_idx;
+ struct mlx5hws_rule_attr attr;
+ struct mutex *queue_lock; /* Protect the queue */
+ int ret;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &attr);
+
+ queue_lock = hws_bwc_get_queue_lock(ctx, idx);
+
+ mutex_lock(queue_lock);
+
+ ret = hws_bwc_rule_destroy_hws_sync(bwc_rule, &attr);
+ hws_bwc_rule_list_remove(bwc_rule);
+
+ mutex_unlock(queue_lock);
+
+ return ret;
+}
+
+int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ int ret;
+
+ ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
+
+ mlx5hws_bwc_rule_free(bwc_rule);
+ return ret;
+}
+
+static int
+hws_bwc_rule_create_async(struct mlx5hws_bwc_rule *bwc_rule,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *rule_attr)
+{
+ return mlx5hws_rule_create(bwc_rule->bwc_matcher->matcher,
+ 0, /* only one match template supported */
+ match_param,
+ at_idx,
+ rule_actions,
+ rule_attr,
+ bwc_rule->rule);
+}
+
+static int
+hws_bwc_rule_create_sync(struct mlx5hws_bwc_rule *bwc_rule,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *rule_attr)
+
+{
+ struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
+ u32 expected_completions = 1;
+ int ret;
+
+ ret = hws_bwc_rule_create_async(bwc_rule, match_param,
+ at_idx, rule_actions,
+ rule_attr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+
+ return ret;
+}
+
+static int
+hws_bwc_rule_update_sync(struct mlx5hws_bwc_rule *bwc_rule,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *rule_attr)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ u32 expected_completions = 1;
+ int ret;
+
+ ret = mlx5hws_rule_action_update(bwc_rule->rule,
+ at_idx,
+ rule_actions,
+ rule_attr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_bwc_queue_poll(ctx, rule_attr->queue_id, &expected_completions, true);
+ if (unlikely(ret))
+ mlx5hws_err(ctx, "Failed updating BWC rule (%d)\n", ret);
+
+ return ret;
+}
+
+static bool
+hws_bwc_matcher_size_maxed_out(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_cmd_query_caps *caps = bwc_matcher->matcher->tbl->ctx->caps;
+
+ return bwc_matcher->size_log + MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH >=
+ caps->ste_alloc_log_max - 1;
+}
+
+static bool
+hws_bwc_matcher_rehash_size_needed(struct mlx5hws_bwc_matcher *bwc_matcher,
+ u32 num_of_rules)
+{
+ if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher)))
+ return false;
+
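+	/* Rehash once the number of rules reaches REHASH_PERCENT_TH percent
+	 * of the current matcher size (2^size_log).
+	 */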
+ if (unlikely((num_of_rules * 100 / MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH) >=
+ (1UL << bwc_matcher->size_log)))
+ return true;
+
+ return false;
+}
+
+static void
+hws_bwc_rule_actions_to_action_types(struct mlx5hws_rule_action rule_actions[],
+ enum mlx5hws_action_type action_types[])
+{
+ int i = 0;
+
+ for (i = 0;
+ rule_actions[i].action && (rule_actions[i].action->type != MLX5HWS_ACTION_TYP_LAST);
+ i++) {
+ action_types[i] = (enum mlx5hws_action_type)rule_actions[i].action->type;
+ }
+
+ action_types[i] = MLX5HWS_ACTION_TYP_LAST;
+}
+
+static int
+hws_bwc_matcher_extend_at(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ enum mlx5hws_action_type action_types[MLX5HWS_BWC_MAX_ACTS];
+
+ hws_bwc_rule_actions_to_action_types(rule_actions, action_types);
+
+ bwc_matcher->at[bwc_matcher->num_of_at] =
+ mlx5hws_action_template_create(action_types);
+
+ if (unlikely(!bwc_matcher->at[bwc_matcher->num_of_at]))
+ return -ENOMEM;
+
+ bwc_matcher->num_of_at++;
+ return 0;
+}
+
+static int
+hws_bwc_matcher_extend_size(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+ if (unlikely(hws_bwc_matcher_size_maxed_out(bwc_matcher))) {
+ mlx5hws_err(ctx, "Can't resize matcher: depth exceeds limit %d\n",
+ caps->rtc_log_depth_max);
+ return -ENOMEM;
+ }
+
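+	/* Grow by SIZE_LOG_STEP, capped so that the matcher depth stays
+	 * within the device's STE allocation limit.
+	 */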
+ bwc_matcher->size_log =
+ min(bwc_matcher->size_log + MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
+ caps->ste_alloc_log_max - MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH);
+
+ return 0;
+}
+
+static int
+hws_bwc_matcher_find_at(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ enum mlx5hws_action_type *action_type_arr;
+ int i, j;
+
+ /* start from index 1 - first action template is a dummy */
+ for (i = 1; i < bwc_matcher->num_of_at; i++) {
+ j = 0;
+ action_type_arr = bwc_matcher->at[i]->action_type_arr;
+
+ while (rule_actions[j].action &&
+ rule_actions[j].action->type != MLX5HWS_ACTION_TYP_LAST) {
+ if (action_type_arr[j] != rule_actions[j].action->type)
+ break;
+ j++;
+ }
+
+ if (action_type_arr[j] == MLX5HWS_ACTION_TYP_LAST &&
+ (!rule_actions[j].action ||
+ rule_actions[j].action->type == MLX5HWS_ACTION_TYP_LAST))
+ return i;
+ }
+
+ return -1;
+}
+
+static int hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_bwc_rule **bwc_rules;
+ struct mlx5hws_rule_attr rule_attr;
+ u32 *pending_rules;
+ int i, j, ret = 0;
+ bool all_done;
+ u16 burst_th;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
+
+ pending_rules = kcalloc(bwc_queues, sizeof(*pending_rules), GFP_KERNEL);
+ if (!pending_rules)
+ return -ENOMEM;
+
+ bwc_rules = kcalloc(bwc_queues, sizeof(*bwc_rules), GFP_KERNEL);
+ if (!bwc_rules) {
+ ret = -ENOMEM;
+ goto free_pending_rules;
+ }
+
+ for (i = 0; i < bwc_queues; i++) {
+ if (list_empty(&bwc_matcher->rules[i]))
+ bwc_rules[i] = NULL;
+ else
+ bwc_rules[i] = list_first_entry(&bwc_matcher->rules[i],
+ struct mlx5hws_bwc_rule,
+ list_node);
+ }
+
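+	/* Walk all the queues' rule lists in parallel, moving rules in
+	 * bursts and polling for completions, until every list is done.
+	 */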
+ do {
+ all_done = true;
+
+ for (i = 0; i < bwc_queues; i++) {
+ rule_attr.queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+ burst_th = hws_bwc_get_burst_th(ctx, rule_attr.queue_id);
+
+ for (j = 0; j < burst_th && bwc_rules[i]; j++) {
+ rule_attr.burst = !!((j + 1) % burst_th);
+ ret = mlx5hws_matcher_resize_rule_move(bwc_matcher->matcher,
+ bwc_rules[i]->rule,
+ &rule_attr);
+ if (unlikely(ret)) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule failed during rehash (%d)\n",
+ ret);
+ goto free_bwc_rules;
+ }
+
+ all_done = false;
+ pending_rules[i]++;
+ bwc_rules[i] = list_is_last(&bwc_rules[i]->list_node,
+ &bwc_matcher->rules[i]) ?
+ NULL : list_next_entry(bwc_rules[i], list_node);
+
+ ret = hws_bwc_queue_poll(ctx, rule_attr.queue_id,
+ &pending_rules[i], false);
+ if (unlikely(ret))
+ goto free_bwc_rules;
+ }
+ }
+ } while (!all_done);
+
+ /* drain all the bwc queues */
+ for (i = 0; i < bwc_queues; i++) {
+ if (pending_rules[i]) {
+ u16 queue_id = mlx5hws_bwc_get_queue_id(ctx, i);
+
+ mlx5hws_send_engine_flush_queue(&ctx->send_queue[queue_id]);
+ ret = hws_bwc_queue_poll(ctx, queue_id,
+ &pending_rules[i], true);
+ if (unlikely(ret))
+ goto free_bwc_rules;
+ }
+ }
+
+free_bwc_rules:
+ kfree(bwc_rules);
+free_pending_rules:
+ kfree(pending_rules);
+
+ return ret;
+}
+
+static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ return hws_bwc_matcher_move_all_simple(bwc_matcher);
+}
+
+static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_matcher_attr matcher_attr = {0};
+ struct mlx5hws_matcher *old_matcher;
+ struct mlx5hws_matcher *new_matcher;
+ int ret;
+
+ hws_bwc_matcher_init_attr(&matcher_attr,
+ bwc_matcher->priority,
+ bwc_matcher->size_log);
+
+ old_matcher = bwc_matcher->matcher;
+ new_matcher = mlx5hws_matcher_create(old_matcher->tbl,
+ &bwc_matcher->mt, 1,
+ bwc_matcher->at,
+ bwc_matcher->num_of_at,
+ &matcher_attr);
+ if (!new_matcher) {
+ mlx5hws_err(ctx, "Rehash error: matcher creation failed\n");
+ return -ENOMEM;
+ }
+
+ ret = mlx5hws_matcher_resize_set_target(old_matcher, new_matcher);
+ if (ret) {
+ mlx5hws_err(ctx, "Rehash error: failed setting resize target\n");
+ return ret;
+ }
+
+ ret = hws_bwc_matcher_move_all(bwc_matcher);
+ if (ret) {
+ mlx5hws_err(ctx, "Rehash error: moving rules failed\n");
+ return -ENOMEM;
+ }
+
+ bwc_matcher->matcher = new_matcher;
+ mlx5hws_matcher_destroy(old_matcher);
+
+ return 0;
+}
+
+static int
+hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ u32 num_of_rules;
+ int ret;
+
+ /* If the current matcher size is already at its max size, we can't
+ * do the rehash. Skip it and try adding the rule again - perhaps
+ * there was some change.
+ */
+ if (hws_bwc_matcher_size_maxed_out(bwc_matcher))
+ return 0;
+
+	/* It is possible that another rule has already performed a rehash.
+	 * Check again whether a rehash is really needed; if the reason was
+	 * size, but that is no longer the case - skip the rehash.
+ */
+ num_of_rules = __atomic_load_n(&bwc_matcher->num_of_rules, __ATOMIC_RELAXED);
+ if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))
+ return 0;
+
+ /* Now we're done all the checking - do the rehash:
+ * - extend match RTC size
+ * - create new matcher
+ * - move all the rules to the new matcher
+ * - destroy the old matcher
+ */
+
+ ret = hws_bwc_matcher_extend_size(bwc_matcher);
+ if (ret)
+ return ret;
+
+ return hws_bwc_matcher_move(bwc_matcher);
+}
+
+static int
+hws_bwc_matcher_rehash_at(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ /* Rehash by action template doesn't require any additional checking.
+ * The bwc_matcher already contains the new action template.
+ * Just do the usual rehash:
+ * - create new matcher
+ * - move all the rules to the new matcher
+ * - destroy the old matcher
+ */
+ return hws_bwc_matcher_move(bwc_matcher);
+}
+
+int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
+ u32 *match_param,
+ struct mlx5hws_rule_action rule_actions[],
+ u32 flow_source,
+ u16 bwc_queue_idx)
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_rule_attr rule_attr;
+ struct mutex *queue_lock; /* Protect the queue */
+ u32 num_of_rules;
+ int ret = 0;
+ int at_idx;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, bwc_queue_idx, flow_source, &rule_attr);
+
+ queue_lock = hws_bwc_get_queue_lock(ctx, bwc_queue_idx);
+
+ mutex_lock(queue_lock);
+
+ /* check if rehash needed due to missing action template */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (unlikely(at_idx < 0)) {
+ /* we need to extend BWC matcher action templates array */
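+ /* Extending the AT array modifies shared matcher state, so drop
+ * this queue's lock and take all the queue locks instead.
+ */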
+ mutex_unlock(queue_lock);
+ hws_bwc_lock_all_queues(ctx);
+
+ ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
+ if (unlikely(ret)) {
+ hws_bwc_unlock_all_queues(ctx);
+ return ret;
+ }
+
+ /* action templates array was extended, we need the last idx */
+ at_idx = bwc_matcher->num_of_at - 1;
+
+ ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
+ bwc_matcher->at[at_idx]);
+ if (unlikely(ret)) {
+ /* Action template attach failed, possibly due to
+ * requiring more action STEs.
+ * Need to attempt creating new matcher with all
+ * the action templates, including the new one.
+ */
+ ret = hws_bwc_matcher_rehash_at(bwc_matcher);
+ if (unlikely(ret)) {
+ mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
+ bwc_matcher->at[at_idx] = NULL;
+ bwc_matcher->num_of_at--;
+
+ hws_bwc_unlock_all_queues(ctx);
+
+ mlx5hws_err(ctx,
+ "BWC rule insertion: rehash AT failed (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ hws_bwc_unlock_all_queues(ctx);
+ mutex_lock(queue_lock);
+ }
+
+ /* check if the number of rules requires a rehash */
+ num_of_rules = bwc_matcher->num_of_rules;
+
+ if (unlikely(hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))) {
+ mutex_unlock(queue_lock);
+
+ hws_bwc_lock_all_queues(ctx);
+ ret = hws_bwc_matcher_rehash_size(bwc_matcher);
+ hws_bwc_unlock_all_queues(ctx);
+
+ if (ret) {
+ mlx5hws_err(ctx, "BWC rule insertion: rehash size [%d -> %d] failed (%d)\n",
+ bwc_matcher->size_log - MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP,
+ bwc_matcher->size_log,
+ ret);
+ return ret;
+ }
+
+ mutex_lock(queue_lock);
+ }
+
+ ret = hws_bwc_rule_create_sync(bwc_rule,
+ match_param,
+ at_idx,
+ rule_actions,
+ &rule_attr);
+ if (likely(!ret)) {
+ hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
+ mutex_unlock(queue_lock);
+ return 0; /* rule inserted successfully */
+ }
+
+ /* At this point the rule wasn't added.
+ * It could be because of a collision, or some other problem.
+ * Without diving deeper than the API, the only thing we know is
+ * that the completion returned an error status.
+ * Try rehash by size and insert the rule again - last chance.
+ */
+
+ mutex_unlock(queue_lock);
+
+ hws_bwc_lock_all_queues(ctx);
+ ret = hws_bwc_matcher_rehash_size(bwc_matcher);
+ hws_bwc_unlock_all_queues(ctx);
+
+ if (ret) {
+ mlx5hws_err(ctx, "BWC rule insertion: rehash failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* Rehash done, but we still have that pesky rule to add */
+ mutex_lock(queue_lock);
+
+ ret = hws_bwc_rule_create_sync(bwc_rule,
+ match_param,
+ at_idx,
+ rule_actions,
+ &rule_attr);
+
+ if (unlikely(ret)) {
+ mutex_unlock(queue_lock);
+ mlx5hws_err(ctx, "BWC rule insertion failed (%d)\n", ret);
+ return ret;
+ }
+
+ hws_bwc_rule_list_add(bwc_rule, bwc_queue_idx);
+ mutex_unlock(queue_lock);
+
+ return 0;
+}
+
+struct mlx5hws_bwc_rule *
+mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_bwc_rule *bwc_rule;
+ u16 bwc_queue_idx;
+ int ret;
+
+ if (unlikely(!mlx5hws_context_bwc_supported(ctx))) {
+ mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n");
+ return NULL;
+ }
+
+ bwc_rule = mlx5hws_bwc_rule_alloc(bwc_matcher);
+ if (unlikely(!bwc_rule))
+ return NULL;
+
+ bwc_queue_idx = hws_bwc_gen_queue_idx(ctx);
+
+ ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
+ params->match_buf,
+ rule_actions,
+ flow_source,
+ bwc_queue_idx);
+ if (unlikely(ret)) {
+ mlx5hws_bwc_rule_free(bwc_rule);
+ return NULL;
+ }
+
+ return bwc_rule;
+}
+
+static int
+hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_rule_attr rule_attr;
+ struct mutex *queue_lock; /* Protect the queue */
+ int at_idx, ret;
+ u16 idx;
+
+ idx = bwc_rule->bwc_queue_idx;
+
+ mlx5hws_bwc_rule_fill_attr(bwc_matcher, idx, 0, &rule_attr);
+ queue_lock = hws_bwc_get_queue_lock(ctx, idx);
+
+ mutex_lock(queue_lock);
+
+ /* check if rehash needed due to missing action template */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (unlikely(at_idx < 0)) {
+ /* we need to extend BWC matcher action templates array */
+ mutex_unlock(queue_lock);
+ hws_bwc_lock_all_queues(ctx);
+
+ /* check again - perhaps another thread already did extend_at */
+ at_idx = hws_bwc_matcher_find_at(bwc_matcher, rule_actions);
+ if (likely(at_idx < 0)) {
+ ret = hws_bwc_matcher_extend_at(bwc_matcher, rule_actions);
+ if (unlikely(ret)) {
+ hws_bwc_unlock_all_queues(ctx);
+ mlx5hws_err(ctx, "BWC rule update: failed extending AT (%d)", ret);
+ return -EINVAL;
+ }
+
+ /* action templates array was extended, we need the last idx */
+ at_idx = bwc_matcher->num_of_at - 1;
+
+ ret = mlx5hws_matcher_attach_at(bwc_matcher->matcher,
+ bwc_matcher->at[at_idx]);
+ if (unlikely(ret)) {
+ /* Action template attach failed, possibly due to
+ * requiring more action STEs.
+ * Need to attempt creating new matcher with all
+ * the action templates, including the new one.
+ */
+ ret = hws_bwc_matcher_rehash_at(bwc_matcher);
+ if (unlikely(ret)) {
+ mlx5hws_action_template_destroy(bwc_matcher->at[at_idx]);
+ bwc_matcher->at[at_idx] = NULL;
+ bwc_matcher->num_of_at--;
+
+ hws_bwc_unlock_all_queues(ctx);
+
+ mlx5hws_err(ctx,
+ "BWC rule update: rehash AT failed (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ hws_bwc_unlock_all_queues(ctx);
+ mutex_lock(queue_lock);
+ }
+
+ ret = hws_bwc_rule_update_sync(bwc_rule,
+ at_idx,
+ rule_actions,
+ &rule_attr);
+ mutex_unlock(queue_lock);
+
+ if (unlikely(ret))
+ mlx5hws_err(ctx, "BWC rule: update failed (%d)\n", ret);
+
+ return ret;
+}
+
+int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+
+ if (unlikely(!mlx5hws_context_bwc_supported(ctx))) {
+ mlx5hws_err(ctx, "BWC rule: Context created w/o BWC API compatibility\n");
+ return -EINVAL;
+ }
+
+ return hws_bwc_rule_action_update(bwc_rule, rule_actions);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h
new file mode 100644
index 000000000000..4fe8c32d8fbe
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_BWC_H_
+#define MLX5HWS_BWC_H_
+
+#define MLX5HWS_BWC_MATCHER_INIT_SIZE_LOG 1
+#define MLX5HWS_BWC_MATCHER_SIZE_LOG_STEP 1
+#define MLX5HWS_BWC_MATCHER_REHASH_PERCENT_TH 70
+#define MLX5HWS_BWC_MATCHER_REHASH_BURST_TH 32
+#define MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM 255
+
+#define MLX5HWS_BWC_MAX_ACTS 16
+
+struct mlx5hws_bwc_matcher {
+ struct mlx5hws_matcher *matcher;
+ struct mlx5hws_match_template *mt;
+ struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM];
+ u8 num_of_at;
+ u16 priority;
+ u8 size_log;
+ u32 num_of_rules; /* atomically accessed */
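+ /* per-queue rule lists, one list per BWC queue */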
+ struct list_head *rules;
+};
+
+struct mlx5hws_bwc_rule {
+ struct mlx5hws_bwc_matcher *bwc_matcher;
+ struct mlx5hws_rule *rule;
+ u16 bwc_queue_idx;
+ struct list_head list_node;
+};
+
+int
+mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask,
+ enum mlx5hws_action_type action_types[]);
+
+int mlx5hws_bwc_matcher_destroy_simple(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+struct mlx5hws_bwc_rule *mlx5hws_bwc_rule_alloc(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+void mlx5hws_bwc_rule_free(struct mlx5hws_bwc_rule *bwc_rule);
+
+int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
+ u32 *match_param,
+ struct mlx5hws_rule_action rule_actions[],
+ u32 flow_source,
+ u16 bwc_queue_idx);
+
+int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule);
+
+void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
+ u16 bwc_queue_idx,
+ u32 flow_source,
+ struct mlx5hws_rule_attr *rule_attr);
+
+static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
+{
+ /* Besides the control queue, half of the queues are
+ * regular HWS queues, and the other half are BWC queues.
+ */
+ return (ctx->queues - 1) / 2;
+}
+
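+/* Map a BWC queue index to its HWS send queue ID:
+ * BWC queues follow the regular HWS queues.
+ */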
+static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
+{
+ return idx + mlx5hws_bwc_queues(ctx);
+}
+
+#endif /* MLX5HWS_BWC_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
new file mode 100644
index 000000000000..bb563f50ef09
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask)
+{
+ struct mlx5hws_definer match_layout = {0};
+ struct mlx5hws_match_template *mt;
+ bool is_complex = false;
+ int ret;
+
+ if (!match_criteria_enable)
+ return false; /* empty matcher */
+
+ mt = mlx5hws_match_template_create(ctx,
+ mask->match_buf,
+ mask->match_sz,
+ match_criteria_enable);
+ if (!mt) {
+ mlx5hws_err(ctx, "BWC: failed creating match template\n");
+ return false;
+ }
+
+ ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
+ if (ret) {
+ /* The only case that we're interested in is E2BIG,
+ * which means that the match parameters need to be
+ * split into a complex matcher.
+ * For all other cases (good or bad) - just return false
+ * and let the usual match creation path handle them.
+ */
+ if (ret == E2BIG) {
+ is_complex = true;
+ mlx5hws_dbg(ctx, "Matcher definer layout: need complex matcher\n");
+ } else {
+ mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
+ }
+ }
+
+ mlx5hws_match_template_destroy(mt);
+
+ return is_complex;
+}
+
+int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask)
+{
+ mlx5hws_err(table->ctx, "Complex matcher is not supported yet\n");
+ return -EOPNOTSUPP;
+}
+
+void
+mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ /* nothing to do here */
+}
+
+int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[],
+ u16 bwc_queue_idx)
+{
+ mlx5hws_err(bwc_rule->bwc_matcher->matcher->tbl->ctx,
+ "Complex rule is not supported yet\n");
+ return -EOPNOTSUPP;
+}
+
+int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule)
+{
+ return 0;
+}
+
+int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ mlx5hws_err(bwc_matcher->matcher->tbl->ctx,
+ "Moving complex rule is not supported yet\n");
+ return -EOPNOTSUPP;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h
new file mode 100644
index 000000000000..068ee8118609
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_bwc_complex.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_BWC_COMPLEX_H_
+#define MLX5HWS_BWC_COMPLEX_H_
+
+bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask);
+
+int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
+ struct mlx5hws_table *table,
+ u32 priority,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask);
+
+void mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[],
+ u16 bwc_queue_idx);
+
+int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule);
+
+#endif /* MLX5HWS_BWC_COMPLEX_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c
new file mode 100644
index 000000000000..2c7b14172049
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.c
@@ -0,0 +1,1300 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+static enum mlx5_ifc_flow_destination_type
+hws_cmd_dest_type_to_ifc_dest_type(enum mlx5_flow_destination_type type)
+{
+ switch (type) {
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ case MLX5_FLOW_DESTINATION_TYPE_TIR:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
+ case MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE:
+ return MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ case MLX5_FLOW_DESTINATION_TYPE_NONE:
+ case MLX5_FLOW_DESTINATION_TYPE_PORT:
+ case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+ case MLX5_FLOW_DESTINATION_TYPE_RANGE:
+ default:
+ pr_warn("HWS: unknown flow dest type %d\n", type);
+ return 0;
+ }
+}
+
+static int hws_cmd_general_obj_destroy(struct mlx5_core_dev *mdev,
+ u32 object_type,
+ u32 object_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, object_type);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, object_id);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr,
+ u32 *table_id)
+{
+ u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
+ void *ft_ctx;
+ int ret;
+
+ MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
+ MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type);
+
+ ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
+ MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
+ MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid);
+ MLX5_SET(flow_table_context, ft_ctx, reformat_en, ft_attr->reformat_en);
+ MLX5_SET(flow_table_context, ft_ctx, decap_en, ft_attr->decap_en);
+
+ ret = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
+ if (ret)
+ return ret;
+
+ *table_id = MLX5_GET(create_flow_table_out, out, table_id);
+
+ return 0;
+}
+
+int mlx5hws_cmd_flow_table_modify(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_modify_attr *ft_attr,
+ u32 table_id)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
+ void *ft_ctx;
+
+ MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE);
+ MLX5_SET(modify_flow_table_in, in, table_type, ft_attr->type);
+ MLX5_SET(modify_flow_table_in, in, modify_field_select, ft_attr->modify_fs);
+ MLX5_SET(modify_flow_table_in, in, table_id, table_id);
+
+ ft_ctx = MLX5_ADDR_OF(modify_flow_table_in, in, flow_table_context);
+
+ MLX5_SET(flow_table_context, ft_ctx, table_miss_action, ft_attr->table_miss_action);
+ MLX5_SET(flow_table_context, ft_ctx, table_miss_id, ft_attr->table_miss_id);
+ MLX5_SET(flow_table_context, ft_ctx, hws.rtc_id_0, ft_attr->rtc_id_0);
+ MLX5_SET(flow_table_context, ft_ctx, hws.rtc_id_1, ft_attr->rtc_id_1);
+
+ return mlx5_cmd_exec_in(mdev, modify_flow_table, in);
+}
+
+int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev,
+ u32 table_id,
+ struct mlx5hws_cmd_ft_query_attr *ft_attr,
+ u64 *icm_addr_0, u64 *icm_addr_1)
+{
+ u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {0};
+ void *ft_ctx;
+ int ret;
+
+ MLX5_SET(query_flow_table_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_TABLE);
+ MLX5_SET(query_flow_table_in, in, table_type, ft_attr->type);
+ MLX5_SET(query_flow_table_in, in, table_id, table_id);
+
+ ret = mlx5_cmd_exec_inout(mdev, query_flow_table, in, out);
+ if (ret)
+ return ret;
+
+ ft_ctx = MLX5_ADDR_OF(query_flow_table_out, out, flow_table_context);
+ *icm_addr_0 = MLX5_GET64(flow_table_context, ft_ctx, sws.sw_owner_icm_root_0);
+ *icm_addr_1 = MLX5_GET64(flow_table_context, ft_ctx, sws.sw_owner_icm_root_1);
+
+ return ret;
+}
+
+int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
+ u8 fw_ft_type, u32 table_id)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
+
+ MLX5_SET(destroy_flow_table_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+ MLX5_SET(destroy_flow_table_in, in, table_type, fw_ft_type);
+ MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
+
+ return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
+}
+
+void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
+ u32 table_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_FT_ALIAS, table_id);
+}
+
+static int hws_cmd_flow_group_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_fg_attr *fg_attr,
+ u32 *group_id)
+{
+ u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ u32 *in;
+ int ret;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
+ MLX5_SET(create_flow_group_in, in, table_type, fg_attr->table_type);
+ MLX5_SET(create_flow_group_in, in, table_id, fg_attr->table_id);
+
+ ret = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
+ if (ret)
+ goto out;
+
+ *group_id = MLX5_GET(create_flow_group_out, out, group_id);
+
+out:
+ kvfree(in);
+ return ret;
+}
+
+static int hws_cmd_flow_group_destroy(struct mlx5_core_dev *mdev,
+ u32 ft_id, u32 fg_id, u8 ft_type)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
+
+ MLX5_SET(destroy_flow_group_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ MLX5_SET(destroy_flow_group_in, in, table_type, ft_type);
+ MLX5_SET(destroy_flow_group_in, in, table_id, ft_id);
+ MLX5_SET(destroy_flow_group_in, in, group_id, fg_id);
+
+ return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
+}
+
+int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 group_id,
+ struct mlx5hws_cmd_set_fte_attr *fte_attr)
+{
+ u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
+ void *in_flow_context;
+ u32 dest_entry_sz;
+ u32 total_dest_sz;
+ u32 action_flags;
+ u8 *in_dests;
+ u32 inlen;
+ u32 *in;
+ int ret;
+ u32 i;
+
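+ /* The command length is variable: it includes the destination list,
+ * with each destination in either regular or extended format.
+ */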
+ dest_entry_sz = fte_attr->extended_dest ?
+ MLX5_ST_SZ_BYTES(extended_dest_format) :
+ MLX5_ST_SZ_BYTES(dest_format);
+ total_dest_sz = dest_entry_sz * fte_attr->dests_num;
+ inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE);
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+ MLX5_SET(set_fte_in, in, table_type, table_type);
+ MLX5_SET(set_fte_in, in, table_id, table_id);
+
+ in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+ MLX5_SET(flow_context, in_flow_context, group_id, group_id);
+ MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
+ MLX5_SET(flow_context, in_flow_context, extended_destination, fte_attr->extended_dest);
+ MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
+
+ action_flags = fte_attr->action_flags;
+ MLX5_SET(flow_context, in_flow_context, action, action_flags);
+
+ if (action_flags & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+ MLX5_SET(flow_context, in_flow_context,
+ packet_reformat_id, fte_attr->packet_reformat_id);
+ }
+
+ if (action_flags & (MLX5_FLOW_CONTEXT_ACTION_DECRYPT | MLX5_FLOW_CONTEXT_ACTION_ENCRYPT)) {
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_type, fte_attr->encrypt_decrypt_type);
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_obj_id, fte_attr->encrypt_decrypt_obj_id);
+ }
+
+ if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ in_dests = (u8 *)MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+
+ for (i = 0; i < fte_attr->dests_num; i++) {
+ struct mlx5hws_cmd_set_fte_dest *dest = &fte_attr->dests[i];
+ enum mlx5_ifc_flow_destination_type ifc_dest_type =
+ hws_cmd_dest_type_to_ifc_dest_type(dest->destination_type);
+
+ switch (dest->destination_type) {
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ if (dest->ext_flags & MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID) {
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id_valid, 1);
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id,
+ dest->esw_owner_vhca_id);
+ }
+ fallthrough;
+ case MLX5_FLOW_DESTINATION_TYPE_TIR:
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ MLX5_SET(dest_format, in_dests, destination_type, ifc_dest_type);
+ MLX5_SET(dest_format, in_dests, destination_id,
+ dest->destination_id);
+ if (dest->ext_flags & MLX5HWS_CMD_EXT_DEST_REFORMAT) {
+ MLX5_SET(dest_format, in_dests, packet_reformat, 1);
+ MLX5_SET(extended_dest_format, in_dests, packet_reformat_id,
+ dest->ext_reformat_id);
+ }
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ in_dests = in_dests + dest_entry_sz;
+ }
+ MLX5_SET(flow_context, in_flow_context, destination_list_size, fte_attr->dests_num);
+ }
+
+ ret = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed creating FLOW_TABLE_ENTRY\n");
+
+out:
+ kfree(in);
+ return ret;
+}
+
+int mlx5hws_cmd_delete_fte(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id)
+{
+ u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
+
+ MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+ MLX5_SET(delete_fte_in, in, table_type, table_type);
+ MLX5_SET(delete_fte_in, in, table_id, table_id);
+
+ return mlx5_cmd_exec_in(mdev, delete_fte, in);
+}
+
+struct mlx5hws_cmd_forward_tbl *
+mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr,
+ struct mlx5hws_cmd_set_fte_attr *fte_attr)
+{
+ struct mlx5hws_cmd_fg_attr fg_attr = {0};
+ struct mlx5hws_cmd_forward_tbl *tbl;
+ int ret;
+
+ tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+ if (!tbl)
+ return NULL;
+
+ ret = mlx5hws_cmd_flow_table_create(mdev, ft_attr, &tbl->ft_id);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create FT\n");
+ goto free_tbl;
+ }
+
+ fg_attr.table_id = tbl->ft_id;
+ fg_attr.table_type = ft_attr->type;
+
+ ret = hws_cmd_flow_group_create(mdev, &fg_attr, &tbl->fg_id);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create FG\n");
+ goto free_ft;
+ }
+
+ ret = mlx5hws_cmd_set_fte(mdev, ft_attr->type,
+ tbl->ft_id, tbl->fg_id, fte_attr);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create FTE\n");
+ goto free_fg;
+ }
+
+ tbl->type = ft_attr->type;
+ return tbl;
+
+free_fg:
+ hws_cmd_flow_group_destroy(mdev, tbl->ft_id, tbl->fg_id, ft_attr->type);
+free_ft:
+ mlx5hws_cmd_flow_table_destroy(mdev, ft_attr->type, tbl->ft_id);
+free_tbl:
+ kfree(tbl);
+ return NULL;
+}
+
+void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_forward_tbl *tbl)
+{
+ mlx5hws_cmd_delete_fte(mdev, tbl->type, tbl->ft_id);
+ hws_cmd_flow_group_destroy(mdev, tbl->ft_id, tbl->fg_id, tbl->type);
+ mlx5hws_cmd_flow_table_destroy(mdev, tbl->type, tbl->ft_id);
+ kfree(tbl);
+}
+
+void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
+ u32 fw_ft_type,
+ enum mlx5hws_table_type type,
+ struct mlx5hws_cmd_ft_modify_attr *ft_attr)
+{
+ u32 default_miss_tbl;
+
+ if (type != MLX5HWS_TABLE_TYPE_FDB)
+ return;
+
+ ft_attr->modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
+ ft_attr->type = fw_ft_type;
+ ft_attr->table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
+
+ default_miss_tbl = ctx->common_res[type].default_miss->ft_id;
+ if (!default_miss_tbl) {
+ pr_warn("HWS: no flow table ID for default miss\n");
+ return;
+ }
+
+ ft_attr->table_miss_id = default_miss_tbl;
+}
+
+int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
+ u32 *rtc_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_rtc_in)] = {0};
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_rtc_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_RTC);
+
+ attr = MLX5_ADDR_OF(create_rtc_in, in, rtc);
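+ /* Jumbo matches need the larger 11DW STE format */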
+ MLX5_SET(rtc, attr, ste_format_0, rtc_attr->is_frst_jumbo ?
+ MLX5_IFC_RTC_STE_FORMAT_11DW :
+ MLX5_IFC_RTC_STE_FORMAT_8DW);
+
+ if (rtc_attr->is_scnd_range) {
+ MLX5_SET(rtc, attr, ste_format_1, MLX5_IFC_RTC_STE_FORMAT_RANGE);
+ MLX5_SET(rtc, attr, num_match_ste, 2);
+ }
+
+ MLX5_SET(rtc, attr, pd, rtc_attr->pd);
+ MLX5_SET(rtc, attr, update_method, rtc_attr->fw_gen_wqe);
+ MLX5_SET(rtc, attr, update_index_mode, rtc_attr->update_index_mode);
+ MLX5_SET(rtc, attr, access_index_mode, rtc_attr->access_index_mode);
+ MLX5_SET(rtc, attr, num_hash_definer, rtc_attr->num_hash_definer);
+ MLX5_SET(rtc, attr, log_depth, rtc_attr->log_depth);
+ MLX5_SET(rtc, attr, log_hash_size, rtc_attr->log_size);
+ MLX5_SET(rtc, attr, table_type, rtc_attr->table_type);
+ MLX5_SET(rtc, attr, match_definer_0, rtc_attr->match_definer_0);
+ MLX5_SET(rtc, attr, match_definer_1, rtc_attr->match_definer_1);
+ MLX5_SET(rtc, attr, stc_id, rtc_attr->stc_base);
+ MLX5_SET(rtc, attr, ste_table_base_id, rtc_attr->ste_base);
+ MLX5_SET(rtc, attr, ste_table_offset, rtc_attr->ste_offset);
+ MLX5_SET(rtc, attr, miss_flow_table_id, rtc_attr->miss_ft_id);
+ MLX5_SET(rtc, attr, reparse_mode, rtc_attr->reparse_mode);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create RTC\n");
+ goto out;
+ }
+
+ *rtc_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_rtc_destroy(struct mlx5_core_dev *mdev, u32 rtc_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_RTC, rtc_id);
+}
+
+int mlx5hws_cmd_stc_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_stc_create_attr *stc_attr,
+ u32 *stc_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_STC);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, op_param.create.log_obj_range, stc_attr->log_obj_range);
+
+ attr = MLX5_ADDR_OF(create_stc_in, in, stc);
+ MLX5_SET(stc, attr, table_type, stc_attr->table_type);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create STC\n");
+ goto out;
+ }
+
+ *stc_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_stc_destroy(struct mlx5_core_dev *mdev, u32 stc_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_STC, stc_id);
+}
+
+static int
+hws_cmd_stc_modify_set_stc_param(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr,
+ void *stc_param)
+{
+ switch (stc_attr->action_type) {
+ case MLX5_IFC_STC_ACTION_TYPE_COUNTER:
+ MLX5_SET(stc_ste_param_flow_counter, stc_param, flow_counter_id, stc_attr->id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR:
+ MLX5_SET(stc_ste_param_tir, stc_param, tirn, stc_attr->dest_tir_num);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT:
+ MLX5_SET(stc_ste_param_table, stc_param, table_id, stc_attr->dest_table_id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST:
+ MLX5_SET(stc_ste_param_header_modify_list, stc_param,
+ header_modify_pattern_id, stc_attr->modify_header.pattern_id);
+ MLX5_SET(stc_ste_param_header_modify_list, stc_param,
+ header_modify_argument_id, stc_attr->modify_header.arg_id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE:
+ MLX5_SET(stc_ste_param_remove, stc_param, action_type,
+ MLX5_MODIFICATION_TYPE_REMOVE);
+ MLX5_SET(stc_ste_param_remove, stc_param, decap,
+ stc_attr->remove_header.decap);
+ MLX5_SET(stc_ste_param_remove, stc_param, remove_start_anchor,
+ stc_attr->remove_header.start_anchor);
+ MLX5_SET(stc_ste_param_remove, stc_param, remove_end_anchor,
+ stc_attr->remove_header.end_anchor);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT:
+ MLX5_SET(stc_ste_param_insert, stc_param, action_type,
+ MLX5_MODIFICATION_TYPE_INSERT);
+ MLX5_SET(stc_ste_param_insert, stc_param, encap,
+ stc_attr->insert_header.encap);
+ MLX5_SET(stc_ste_param_insert, stc_param, inline_data,
+ stc_attr->insert_header.is_inline);
+ MLX5_SET(stc_ste_param_insert, stc_param, insert_anchor,
+ stc_attr->insert_header.insert_anchor);
+ /* HW gets the next 2 sizes in words */
+ MLX5_SET(stc_ste_param_insert, stc_param, insert_size,
+ stc_attr->insert_header.header_size / W_SIZE);
+ MLX5_SET(stc_ste_param_insert, stc_param, insert_offset,
+ stc_attr->insert_header.insert_offset / W_SIZE);
+ MLX5_SET(stc_ste_param_insert, stc_param, insert_argument,
+ stc_attr->insert_header.arg_id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_COPY:
+ case MLX5_IFC_STC_ACTION_TYPE_SET:
+ case MLX5_IFC_STC_ACTION_TYPE_ADD:
+ case MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD:
+ *(__be64 *)stc_param = stc_attr->modify_action.data;
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT:
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK:
+ MLX5_SET(stc_ste_param_vport, stc_param, vport_number,
+ stc_attr->vport.vport_num);
+ MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id,
+ stc_attr->vport.esw_owner_vhca_id);
+ MLX5_SET(stc_ste_param_vport, stc_param, eswitch_owner_vhca_id_valid,
+ stc_attr->vport.eswitch_owner_vhca_id_valid);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_DROP:
+ case MLX5_IFC_STC_ACTION_TYPE_NOP:
+ case MLX5_IFC_STC_ACTION_TYPE_TAG:
+ case MLX5_IFC_STC_ACTION_TYPE_ALLOW:
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_ASO:
+ MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_object_id,
+ stc_attr->aso.devx_obj_id);
+ MLX5_SET(stc_ste_param_execute_aso, stc_param, return_reg_id,
+ stc_attr->aso.return_reg_id);
+ MLX5_SET(stc_ste_param_execute_aso, stc_param, aso_type,
+ stc_attr->aso.aso_type);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE:
+ MLX5_SET(stc_ste_param_ste_table, stc_param, ste_obj_id,
+ stc_attr->ste_table.ste_obj_id);
+ MLX5_SET(stc_ste_param_ste_table, stc_param, match_definer_id,
+ stc_attr->ste_table.match_definer_id);
+ MLX5_SET(stc_ste_param_ste_table, stc_param, log_hash_size,
+ stc_attr->ste_table.log_hash_size);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS:
+ MLX5_SET(stc_ste_param_remove_words, stc_param, action_type,
+ MLX5_MODIFICATION_TYPE_REMOVE_WORDS);
+ MLX5_SET(stc_ste_param_remove_words, stc_param, remove_start_anchor,
+ stc_attr->remove_words.start_anchor);
+ MLX5_SET(stc_ste_param_remove_words, stc_param,
+ remove_size, stc_attr->remove_words.num_of_words);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_ENCRYPTION:
+ MLX5_SET(stc_ste_param_ipsec_encrypt, stc_param, ipsec_object_id,
+ stc_attr->id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_DECRYPTION:
+ MLX5_SET(stc_ste_param_ipsec_decrypt, stc_param, ipsec_object_id,
+ stc_attr->id);
+ break;
+ case MLX5_IFC_STC_ACTION_TYPE_TRAILER:
+ MLX5_SET(stc_ste_param_trailer, stc_param, command,
+ stc_attr->reformat_trailer.op);
+ MLX5_SET(stc_ste_param_trailer, stc_param, type,
+ stc_attr->reformat_trailer.type);
+ MLX5_SET(stc_ste_param_trailer, stc_param, length,
+ stc_attr->reformat_trailer.size);
+ break;
+ default:
+ mlx5_core_err(mdev, "Not supported type %d\n", stc_attr->action_type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int mlx5hws_cmd_stc_modify(struct mlx5_core_dev *mdev,
+ u32 stc_id,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_stc_in)] = {0};
+ void *stc_param;
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_stc_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_STC);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, stc_id);
+ MLX5_SET(general_obj_in_cmd_hdr, in,
+ op_param.query.obj_offset, stc_attr->stc_offset);
+
+ attr = MLX5_ADDR_OF(create_stc_in, in, stc);
+ MLX5_SET(stc, attr, ste_action_offset, stc_attr->action_offset);
+ MLX5_SET(stc, attr, action_type, stc_attr->action_type);
+ MLX5_SET(stc, attr, reparse_mode, stc_attr->reparse_mode);
+ MLX5_SET64(stc, attr, modify_field_select,
+ MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC);
+
+ /* Set destination TIRN, TAG, FT ID, STE ID */
+ stc_param = MLX5_ADDR_OF(stc, attr, stc_param);
+ ret = hws_cmd_stc_modify_set_stc_param(mdev, stc_attr, stc_param);
+ if (ret)
+ return ret;
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed to modify STC FW action_type %d\n",
+ stc_attr->action_type);
+
+ return ret;
+}
+
+int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
+ u16 log_obj_range,
+ u32 pd,
+ u32 *arg_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_arg_in)] = {0};
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_arg_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, op_param.create.log_obj_range, log_obj_range);
+
+ attr = MLX5_ADDR_OF(create_arg_in, in, arg);
+ MLX5_SET(arg, attr, access_pd, pd);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create ARG\n");
+ goto out;
+ }
+
+ *arg_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_arg_destroy(struct mlx5_core_dev *mdev,
+ u32 arg_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT, arg_id);
+}
+
+int mlx5hws_cmd_header_modify_pattern_create(struct mlx5_core_dev *mdev,
+ u32 pattern_length,
+ u8 *actions,
+ u32 *ptrn_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_header_modify_pattern_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ int num_of_actions;
+ u64 *pattern_data;
+ void *pattern;
+ void *attr;
+ int ret;
+ int i;
+
+ if (pattern_length > MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY) {
+ mlx5_core_err(mdev, "Pattern length %d exceeds limit %d\n",
+ pattern_length, MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY);
+ return -EINVAL;
+ }
+
+ attr = MLX5_ADDR_OF(create_header_modify_pattern_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN);
+
+ pattern = MLX5_ADDR_OF(create_header_modify_pattern_in, in, pattern);
+ /* pattern_length for the FW object is expressed in double dwords */
+ MLX5_SET(header_modify_pattern_in, pattern, pattern_length, pattern_length / (2 * DW_SIZE));
+
+ pattern_data = (u64 *)MLX5_ADDR_OF(header_modify_pattern_in, pattern, pattern_data);
+ memcpy(pattern_data, actions, pattern_length);
+
+ num_of_actions = pattern_length / MLX5HWS_MODIFY_ACTION_SIZE;
+ for (i = 0; i < num_of_actions; i++) {
+ int type;
+
+ type = MLX5_GET(set_action_in, &pattern_data[i], action_type);
+ if (type != MLX5_MODIFICATION_TYPE_COPY &&
+ type != MLX5_MODIFICATION_TYPE_ADD_FIELD)
+ /* Action type copy uses all bytes for control */
+ MLX5_SET(set_action_in, &pattern_data[i], data, 0);
+ }
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create header_modify_pattern\n");
+ goto out;
+ }
+
+ *ptrn_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_header_modify_pattern_destroy(struct mlx5_core_dev *mdev,
+ u32 ptrn_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN, ptrn_id);
+}
+
+int mlx5hws_cmd_ste_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ste_create_attr *ste_attr,
+ u32 *ste_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_ste_in)] = {0};
+ void *attr;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_ste_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, MLX5_OBJ_TYPE_STE);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, op_param.create.log_obj_range, ste_attr->log_obj_range);
+
+ attr = MLX5_ADDR_OF(create_ste_in, in, ste);
+ MLX5_SET(ste, attr, table_type, ste_attr->table_type);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create STE\n");
+ goto out;
+ }
+
+ *ste_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_ste_destroy(struct mlx5_core_dev *mdev, u32 ste_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_STE, ste_id);
+}
+
+int mlx5hws_cmd_definer_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_definer_create_attr *def_attr,
+ u32 *definer_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_definer_in)] = {0};
+ void *ptr;
+ int ret;
+
+ MLX5_SET(general_obj_in_cmd_hdr,
+ in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ in, obj_type, MLX5_OBJ_TYPE_MATCH_DEFINER);
+
+ ptr = MLX5_ADDR_OF(create_definer_in, in, definer);
+ MLX5_SET(definer, ptr, format_id, MLX5_IFC_DEFINER_FORMAT_ID_SELECT);
+
+ MLX5_SET(definer, ptr, format_select_dw0, def_attr->dw_selector[0]);
+ MLX5_SET(definer, ptr, format_select_dw1, def_attr->dw_selector[1]);
+ MLX5_SET(definer, ptr, format_select_dw2, def_attr->dw_selector[2]);
+ MLX5_SET(definer, ptr, format_select_dw3, def_attr->dw_selector[3]);
+ MLX5_SET(definer, ptr, format_select_dw4, def_attr->dw_selector[4]);
+ MLX5_SET(definer, ptr, format_select_dw5, def_attr->dw_selector[5]);
+ MLX5_SET(definer, ptr, format_select_dw6, def_attr->dw_selector[6]);
+ MLX5_SET(definer, ptr, format_select_dw7, def_attr->dw_selector[7]);
+ MLX5_SET(definer, ptr, format_select_dw8, def_attr->dw_selector[8]);
+
+ MLX5_SET(definer, ptr, format_select_byte0, def_attr->byte_selector[0]);
+ MLX5_SET(definer, ptr, format_select_byte1, def_attr->byte_selector[1]);
+ MLX5_SET(definer, ptr, format_select_byte2, def_attr->byte_selector[2]);
+ MLX5_SET(definer, ptr, format_select_byte3, def_attr->byte_selector[3]);
+ MLX5_SET(definer, ptr, format_select_byte4, def_attr->byte_selector[4]);
+ MLX5_SET(definer, ptr, format_select_byte5, def_attr->byte_selector[5]);
+ MLX5_SET(definer, ptr, format_select_byte6, def_attr->byte_selector[6]);
+ MLX5_SET(definer, ptr, format_select_byte7, def_attr->byte_selector[7]);
+
+ ptr = MLX5_ADDR_OF(definer, ptr, match_mask);
+ memcpy(ptr, def_attr->match_mask, MLX5_FLD_SZ_BYTES(definer, match_mask));
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create Definer\n");
+ goto out;
+ }
+
+ *definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+void mlx5hws_cmd_definer_destroy(struct mlx5_core_dev *mdev,
+ u32 definer_id)
+{
+ hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_MATCH_DEFINER, definer_id);
+}
+
+int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_packet_reformat_create_attr *attr,
+ u32 *reformat_id)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0};
+ size_t insz, cmd_data_sz, cmd_total_sz;
+ void *prctx;
+ void *pdata;
+ void *in;
+ int ret;
+
+ cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
+ cmd_total_sz += MLX5_ST_SZ_BYTES(packet_reformat_context_in);
+ cmd_data_sz = MLX5_FLD_SZ_BYTES(packet_reformat_context_in, reformat_data);
+ insz = align(cmd_total_sz + attr->data_sz - cmd_data_sz, DW_SIZE);
+ in = kzalloc(insz, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
+
+ prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in,
+ packet_reformat_context);
+ pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
+
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_type, attr->type);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, attr->reformat_param_0);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, attr->data_sz);
+ memcpy(pdata, attr->data, attr->data_sz);
+
+ ret = mlx5_cmd_exec(mdev, in, insz, out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create packet reformat\n");
+ goto out;
+ }
+
+ *reformat_id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id);
+out:
+ kfree(in);
+ return ret;
+}
+
+int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev,
+ u32 reformat_id)
+{
+ u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_in)] = {0};
+ int ret;
+
+ MLX5_SET(dealloc_packet_reformat_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
+ MLX5_SET(dealloc_packet_reformat_in, in,
+ packet_reformat_id, reformat_id);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed to destroy packet_reformat\n");
+
+ return ret;
+}
+
+int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
+ void *sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ int ret;
+
+ MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
+ MLX5_SET(modify_sq_in, in, sqn, sqn);
+ MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed to modify SQ\n");
+
+ return ret;
+}
+
+int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_allow_other_vhca_access_attr *attr)
+{
+ u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {0};
+ void *key;
+ int ret;
+
+ MLX5_SET(allow_other_vhca_access_in,
+ in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
+ MLX5_SET(allow_other_vhca_access_in,
+ in, object_type_to_be_accessed, attr->obj_type);
+ MLX5_SET(allow_other_vhca_access_in,
+ in, object_id_to_be_accessed, attr->obj_id);
+
+ key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
+ memcpy(key, attr->access_key, sizeof(attr->access_key));
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ mlx5_core_err(mdev, "Failed to execute ALLOW_OTHER_VHCA_ACCESS command\n");
+
+ return ret;
+}
+
+int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
+ u32 *obj_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {0};
+ void *attr;
+ void *key;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, alias_attr->obj_type);
+ MLX5_SET(general_obj_in_cmd_hdr, attr, op_param.create.alias_object, 1);
+
+ attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
+ MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
+ MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);
+
+ key = MLX5_ADDR_OF(alias_context, attr, access_key);
+ memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to create ALIAS OBJ\n");
+ goto out;
+ }
+
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+out:
+ return ret;
+}
+
+int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
+ u16 obj_type,
+ u32 obj_id)
+{
+ return hws_cmd_general_obj_destroy(mdev, obj_type, obj_id);
+}
+
+int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_generate_wqe_attr *attr,
+ struct mlx5_cqe64 *ret_cqe)
+{
+ u32 out[MLX5_ST_SZ_DW(generate_wqe_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(generate_wqe_in)] = {0};
+ u8 status;
+ void *ptr;
+ int ret;
+
+ MLX5_SET(generate_wqe_in, in, opcode, MLX5_CMD_OP_GENERATE_WQE);
+ MLX5_SET(generate_wqe_in, in, pdn, attr->pdn);
+
+ ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_ctrl);
+ memcpy(ptr, attr->wqe_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_ctrl));
+
+ ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_ctrl);
+ memcpy(ptr, attr->gta_ctrl, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_ctrl));
+
+ ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_0);
+ memcpy(ptr, attr->gta_data_0, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_0));
+
+ if (attr->gta_data_1) {
+ ptr = MLX5_ADDR_OF(generate_wqe_in, in, wqe_gta_data_1);
+ memcpy(ptr, attr->gta_data_1, MLX5_FLD_SZ_BYTES(generate_wqe_in, wqe_gta_data_1));
+ }
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to write GTA WQE using FW\n");
+ return ret;
+ }
+
+ status = MLX5_GET(generate_wqe_out, out, status);
+ if (status) {
+ mlx5_core_err(mdev, "Invalid FW CQE status %d\n", status);
+ return -EINVAL;
+ }
+
+ ptr = MLX5_ADDR_OF(generate_wqe_out, out, cqe_data);
+ memcpy(ret_cqe, ptr, sizeof(*ret_cqe));
+
+ return ret;
+}
+
+int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_query_caps *caps)
+{
+ u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
+ u32 out_size;
+ u32 *out;
+ int ret;
+
+ out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ out = kzalloc(out_size, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query device caps\n");
+ goto out;
+ }
+
+ caps->wqe_based_update =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.wqe_based_flow_table_update_cap);
+
+ caps->eswitch_manager = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.eswitch_manager);
+
+ caps->flex_protocols = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.flex_parser_protocols);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED)
+ caps->flex_parser_id_geneve_tlv_option_0 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.flex_parser_id_geneve_tlv_option_0);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)
+ caps->flex_parser_id_mpls_over_gre =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.flex_parser_id_outer_first_mpls_over_gre);
+
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
+ caps->flex_parser_id_mpls_over_udp =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.flex_parser_id_outer_first_mpls_over_udp_label);
+
+ caps->log_header_modify_argument_granularity =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.log_header_modify_argument_granularity);
+
+ caps->log_header_modify_argument_granularity -=
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.log_header_modify_argument_granularity_offset);
+
+ caps->log_header_modify_argument_max_alloc =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.log_header_modify_argument_max_alloc);
+
+ caps->definer_format_sup =
+ MLX5_GET64(query_hca_cap_out, out,
+ capability.cmd_hca_cap.match_definer_format_supported);
+
+ caps->vhca_id = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.vhca_id);
+
+ caps->sq_ts_format = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.sq_ts_format);
+
+ caps->ipsec_offload = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.ipsec_offload);
+
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query device caps 2\n");
+ goto out;
+ }
+
+ caps->full_dw_jumbo_support =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_8_6_ext);
+
+ caps->format_select_gtpu_dw_0 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_0);
+
+ caps->format_select_gtpu_dw_1 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_1);
+
+ caps->format_select_gtpu_dw_2 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_gtpu_dw_2);
+
+ caps->format_select_gtpu_ext_dw_0 =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.format_select_dw_gtpu_first_ext_dw_0);
+
+ caps->supp_type_gen_wqe =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.generate_wqe_type);
+
+ caps->flow_table_hash_type =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap_2.flow_table_hash_type);
+
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query flow table caps\n");
+ goto out;
+ }
+
+ caps->nic_ft.max_level =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.max_ft_level);
+
+ caps->nic_ft.reparse =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.reparse);
+
+ caps->nic_ft.ignore_flow_level_rtc_valid =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.ignore_flow_level_rtc_valid);
+
+ caps->flex_parser_ok_bits_supp =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist);
+
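+ /* WQE-based flow table caps are only queried when the device
+ * reports wqe_based_update support.
+ */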
+ if (caps->wqe_based_update) {
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query WQE based FT caps\n");
+ goto out;
+ }
+
+ caps->rtc_reparse_mode =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_reparse_mode);
+
+ caps->ste_format =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.ste_format);
+
+ caps->rtc_index_mode =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_index_mode);
+
+ caps->rtc_log_depth_max =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_log_depth_max);
+
+ caps->ste_alloc_log_max =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.ste_alloc_log_max);
+
+ caps->ste_alloc_log_gran =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.ste_alloc_log_granularity);
+
+ caps->trivial_match_definer =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.trivial_match_definer);
+
+ caps->stc_alloc_log_max =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.stc_alloc_log_max);
+
+ caps->stc_alloc_log_gran =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.stc_alloc_log_granularity);
+
+ caps->rtc_hash_split_table =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_hash_split_table);
+
+ caps->rtc_linear_lookup_table =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_linear_lookup_table);
+
+ caps->access_index_mode =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.access_index_mode);
+
+ caps->linear_match_definer =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.linear_match_definer_reg_c3);
+
+ caps->rtc_max_hash_def_gen_wqe =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.rtc_max_num_hash_definer_gen_wqe);
+
+ caps->supp_ste_format_gen_wqe =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.ste_format_gen_wqe);
+
+ caps->fdb_tir_stc =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.wqe_based_flow_table_cap.fdb_jump_to_tir_stc);
+ }
+
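+ /* FDB and eswitch caps are only queried on the eswitch manager */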
+ if (caps->eswitch_manager) {
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query flow table esw caps\n");
+ goto out;
+ }
+
+ caps->fdb_ft.max_level =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.max_ft_level);
+
+ caps->fdb_ft.reparse =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.flow_table_nic_cap.flow_table_properties_nic_receive.reparse);
+
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_ESW | HCA_CAP_OPMOD_GET_CUR);
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query eswitch capabilities\n");
+ goto out;
+ }
+
+ if (MLX5_GET(query_hca_cap_out, out,
+ capability.esw_cap.esw_manager_vport_number_valid))
+ caps->eswitch_manager_vport_number =
+ MLX5_GET(query_hca_cap_out, out,
+ capability.esw_cap.esw_manager_vport_number);
+
+ caps->merged_eswitch = MLX5_GET(query_hca_cap_out, out,
+ capability.esw_cap.merged_eswitch);
+ }
+
+ ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to query device attributes\n");
+ goto out;
+ }
+
+ snprintf(caps->fw_ver, sizeof(caps->fw_ver), "%d.%d.%d",
+ fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev));
+
+ caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);
+
+out:
+ kfree(out);
+ return ret;
+}
+
+int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
+ u16 vport_number, u16 *gvmi)
+{
+ bool ec_vf_func = other_function ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false;
+ u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
+ int out_size;
+ void *out;
+ int err;
+
+ out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ out = kzalloc(out_size, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, other_function, other_function);
+ MLX5_SET(query_hca_cap_in, in, function_id,
+ mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func));
+ MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
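+ /* op_mod: capability type in the upper bits, bit 0 selects current rather than max caps */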
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 | HCA_CAP_OPMOD_GET_CUR);
+
+ err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
+ if (err) {
+ kfree(out);
+ return err;
+ }
+
+ *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
+
+ kfree(out);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h
new file mode 100644
index 000000000000..2fbcf4ff571a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_cmd.h
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_CMD_H_
+#define MLX5HWS_CMD_H_
+
+#define WIRE_PORT 0xFFFF
+
+#define ACCESS_KEY_LEN 32
+
+enum mlx5hws_cmd_ext_dest_flags {
+ MLX5HWS_CMD_EXT_DEST_REFORMAT = 1 << 0,
+ MLX5HWS_CMD_EXT_DEST_ESW_OWNER_VHCA_ID = 1 << 1,
+};
+
+struct mlx5hws_cmd_set_fte_dest {
+ u8 destination_type;
+ u32 destination_id;
+ enum mlx5hws_cmd_ext_dest_flags ext_flags;
+ u32 ext_reformat_id;
+ u16 esw_owner_vhca_id;
+};
+
+struct mlx5hws_cmd_set_fte_attr {
+ u32 action_flags;
+ bool ignore_flow_level;
+ u8 flow_source;
+ u8 extended_dest;
+ u8 encrypt_decrypt_type;
+ u32 encrypt_decrypt_obj_id;
+ u32 packet_reformat_id;
+ u32 dests_num;
+ struct mlx5hws_cmd_set_fte_dest *dests;
+};
+
+struct mlx5hws_cmd_ft_create_attr {
+ u8 type;
+ u8 level;
+ bool rtc_valid;
+ bool decap_en;
+ bool reformat_en;
+};
+
+struct mlx5hws_cmd_ft_modify_attr {
+ u8 type;
+ u32 rtc_id_0;
+ u32 rtc_id_1;
+ u32 table_miss_id;
+ u8 table_miss_action;
+ u64 modify_fs;
+};
+
+struct mlx5hws_cmd_ft_query_attr {
+ u8 type;
+};
+
+struct mlx5hws_cmd_fg_attr {
+ u32 table_id;
+ u32 table_type;
+};
+
+struct mlx5hws_cmd_forward_tbl {
+ u8 type;
+ u32 ft_id;
+ u32 fg_id;
+ u32 refcount;
+};
+
+struct mlx5hws_cmd_rtc_create_attr {
+ u32 pd;
+ u32 stc_base;
+ u32 ste_base;
+ u32 ste_offset;
+ u32 miss_ft_id;
+ bool fw_gen_wqe;
+ u8 update_index_mode;
+ u8 access_index_mode;
+ u8 num_hash_definer;
+ u8 log_depth;
+ u8 log_size;
+ u8 table_type;
+ u8 match_definer_0;
+ u8 match_definer_1;
+ u8 reparse_mode;
+ bool is_frst_jumbo;
+ bool is_scnd_range;
+};
+
+struct mlx5hws_cmd_alias_obj_create_attr {
+ u32 obj_id;
+ u16 vhca_id;
+ u16 obj_type;
+ u8 access_key[ACCESS_KEY_LEN];
+};
+
+struct mlx5hws_cmd_stc_create_attr {
+ u8 log_obj_range;
+ u8 table_type;
+};
+
+struct mlx5hws_cmd_stc_modify_attr {
+ u32 stc_offset;
+ u8 action_offset;
+ u8 reparse_mode;
+ enum mlx5_ifc_stc_action_type action_type;
+ union {
+ u32 id; /* TIRN, TAG, FT ID, STE ID, CRYPTO */
+ struct {
+ u8 decap;
+ u16 start_anchor;
+ u16 end_anchor;
+ } remove_header;
+ struct {
+ u32 arg_id;
+ u32 pattern_id;
+ } modify_header;
+ struct {
+ __be64 data;
+ } modify_action;
+ struct {
+ u32 arg_id;
+ u32 header_size;
+ u8 is_inline;
+ u8 encap;
+ u16 insert_anchor;
+ u16 insert_offset;
+ } insert_header;
+ struct {
+ u8 aso_type;
+ u32 devx_obj_id;
+ u8 return_reg_id;
+ } aso;
+ struct {
+ u16 vport_num;
+ u16 esw_owner_vhca_id;
+ u8 eswitch_owner_vhca_id_valid;
+ } vport;
+ struct {
+ struct mlx5hws_pool_chunk ste;
+ struct mlx5hws_pool *ste_pool;
+ u32 ste_obj_id; /* Internal */
+ u32 match_definer_id;
+ u8 log_hash_size;
+ bool ignore_tx;
+ } ste_table;
+ struct {
+ u16 start_anchor;
+ u16 num_of_words;
+ } remove_words;
+ struct {
+ u8 type;
+ u8 op;
+ u8 size;
+ } reformat_trailer;
+
+ u32 dest_table_id;
+ u32 dest_tir_num;
+ };
+};
+
+struct mlx5hws_cmd_ste_create_attr {
+ u8 log_obj_range;
+ u8 table_type;
+};
+
+struct mlx5hws_cmd_definer_create_attr {
+ u8 *dw_selector;
+ u8 *byte_selector;
+ u8 *match_mask;
+};
+
+struct mlx5hws_cmd_allow_other_vhca_access_attr {
+ u16 obj_type;
+ u32 obj_id;
+ u8 access_key[ACCESS_KEY_LEN];
+};
+
+struct mlx5hws_cmd_packet_reformat_create_attr {
+ u8 type;
+ size_t data_sz;
+ void *data;
+ u8 reformat_param_0;
+};
+
+struct mlx5hws_cmd_query_ft_caps {
+ u8 max_level;
+ u8 reparse;
+ u8 ignore_flow_level_rtc_valid;
+};
+
+struct mlx5hws_cmd_generate_wqe_attr {
+ u8 *wqe_ctrl;
+ u8 *gta_ctrl;
+ u8 *gta_data_0;
+ u8 *gta_data_1;
+ u32 pdn;
+};
+
+struct mlx5hws_cmd_query_caps {
+ u32 flex_protocols;
+ u8 wqe_based_update;
+ u8 rtc_reparse_mode;
+ u16 ste_format;
+ u8 rtc_index_mode;
+ u8 ste_alloc_log_max;
+ u8 ste_alloc_log_gran;
+ u8 stc_alloc_log_max;
+ u8 stc_alloc_log_gran;
+ u8 rtc_log_depth_max;
+ u8 format_select_gtpu_dw_0;
+ u8 format_select_gtpu_dw_1;
+ u8 flow_table_hash_type;
+ u8 format_select_gtpu_dw_2;
+ u8 format_select_gtpu_ext_dw_0;
+ u8 access_index_mode;
+ u32 linear_match_definer;
+ bool full_dw_jumbo_support;
+ bool rtc_hash_split_table;
+ bool rtc_linear_lookup_table;
+ u32 supp_type_gen_wqe;
+ u8 rtc_max_hash_def_gen_wqe;
+ u16 supp_ste_format_gen_wqe;
+ struct mlx5hws_cmd_query_ft_caps nic_ft;
+ struct mlx5hws_cmd_query_ft_caps fdb_ft;
+ bool eswitch_manager;
+ bool merged_eswitch;
+ u32 eswitch_manager_vport_number;
+ u8 log_header_modify_argument_granularity;
+ u8 log_header_modify_argument_max_alloc;
+ u8 sq_ts_format;
+ u8 fdb_tir_stc;
+ u64 definer_format_sup;
+ u32 trivial_match_definer;
+ u32 vhca_id;
+ u32 shared_vhca_id;
+ char fw_ver[64];
+ bool ipsec_offload;
+ bool is_ecpf;
+ u8 flex_parser_ok_bits_supp;
+ u8 flex_parser_id_geneve_tlv_option_0;
+ u8 flex_parser_id_mpls_over_gre;
+ u8 flex_parser_id_mpls_over_udp;
+};
+
+int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr,
+ u32 *table_id);
+
+int mlx5hws_cmd_flow_table_modify(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_modify_attr *ft_attr,
+ u32 table_id);
+
+int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev,
+ u32 obj_id,
+ struct mlx5hws_cmd_ft_query_attr *ft_attr,
+ u64 *icm_addr_0, u64 *icm_addr_1);
+
+int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
+ u8 fw_ft_type, u32 table_id);
+
+void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
+ u32 table_id);
+
+int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
+ u32 *rtc_id);
+
+void mlx5hws_cmd_rtc_destroy(struct mlx5_core_dev *mdev, u32 rtc_id);
+
+int mlx5hws_cmd_stc_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_stc_create_attr *stc_attr,
+ u32 *stc_id);
+
+int mlx5hws_cmd_stc_modify(struct mlx5_core_dev *mdev,
+ u32 stc_id,
+ struct mlx5hws_cmd_stc_modify_attr *stc_attr);
+
+void mlx5hws_cmd_stc_destroy(struct mlx5_core_dev *mdev, u32 stc_id);
+
+int mlx5hws_cmd_generate_wqe(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_generate_wqe_attr *attr,
+ struct mlx5_cqe64 *ret_cqe);
+
+int mlx5hws_cmd_ste_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ste_create_attr *ste_attr,
+ u32 *ste_id);
+
+void mlx5hws_cmd_ste_destroy(struct mlx5_core_dev *mdev, u32 ste_id);
+
+int mlx5hws_cmd_definer_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_definer_create_attr *def_attr,
+ u32 *definer_id);
+
+void mlx5hws_cmd_definer_destroy(struct mlx5_core_dev *mdev,
+ u32 definer_id);
+
+int mlx5hws_cmd_arg_create(struct mlx5_core_dev *mdev,
+ u16 log_obj_range,
+ u32 pd,
+ u32 *arg_id);
+
+void mlx5hws_cmd_arg_destroy(struct mlx5_core_dev *mdev,
+ u32 arg_id);
+
+int mlx5hws_cmd_header_modify_pattern_create(struct mlx5_core_dev *mdev,
+ u32 pattern_length,
+ u8 *actions,
+ u32 *ptrn_id);
+
+void mlx5hws_cmd_header_modify_pattern_destroy(struct mlx5_core_dev *mdev,
+ u32 ptrn_id);
+
+int mlx5hws_cmd_packet_reformat_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_packet_reformat_create_attr *attr,
+ u32 *reformat_id);
+
+int mlx5hws_cmd_packet_reformat_destroy(struct mlx5_core_dev *mdev,
+ u32 reformat_id);
+
+int mlx5hws_cmd_set_fte(struct mlx5_core_dev *mdev,
+ u32 table_type,
+ u32 table_id,
+ u32 group_id,
+ struct mlx5hws_cmd_set_fte_attr *fte_attr);
+
+int mlx5hws_cmd_delete_fte(struct mlx5_core_dev *mdev,
+ u32 table_type, u32 table_id);
+
+struct mlx5hws_cmd_forward_tbl *
+mlx5hws_cmd_forward_tbl_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr,
+ struct mlx5hws_cmd_set_fte_attr *fte_attr);
+
+void mlx5hws_cmd_forward_tbl_destroy(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_forward_tbl *tbl);
+
+int mlx5hws_cmd_alias_obj_create(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_alias_obj_create_attr *alias_attr,
+ u32 *obj_id);
+
+int mlx5hws_cmd_alias_obj_destroy(struct mlx5_core_dev *mdev,
+ u16 obj_type,
+ u32 obj_id);
+
+int mlx5hws_cmd_sq_modify_rdy(struct mlx5_core_dev *mdev, u32 sqn);
+
+int mlx5hws_cmd_query_caps(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_query_caps *caps);
+
+void mlx5hws_cmd_set_attr_connect_miss_tbl(struct mlx5hws_context *ctx,
+ u32 fw_ft_type,
+ enum mlx5hws_table_type type,
+ struct mlx5hws_cmd_ft_modify_attr *ft_attr);
+
+int mlx5hws_cmd_allow_other_vhca_access(struct mlx5_core_dev *mdev,
+ struct mlx5hws_cmd_allow_other_vhca_access_attr *attr);
+
+int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
+ u16 vport_number, u16 *gvmi);
+
+#endif /* MLX5HWS_CMD_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c
new file mode 100644
index 000000000000..00e4fdf4a558
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA CORPORATION. All rights reserved. */
+
+#include "mlx5hws_internal.h"
+
+bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx)
+{
+ return IS_BIT_SET(ctx->caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_BY_STC);
+}
+
+u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx)
+{
+ /* Prefer dynamic reparse, where only specific actions trigger a reparse */
+ if (mlx5hws_context_cap_dynamic_reparse(ctx))
+ return MLX5_IFC_RTC_REPARSE_NEVER;
+
+ /* Otherwise fall back to the less efficient static (always) reparse */
+ return MLX5_IFC_RTC_REPARSE_ALWAYS;
+}
+
+static int hws_context_pools_init(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_pool_attr pool_attr = {0};
+ u8 max_log_sz;
+ int ret;
+ int i;
+
+ ret = mlx5hws_pat_init_pattern_cache(&ctx->pattern_cache);
+ if (ret)
+ return ret;
+
+ ret = mlx5hws_definer_init_cache(&ctx->definer_cache);
+ if (ret)
+ goto uninit_pat_cache;
+
+ /* Create an STC pool per FT type */
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STC;
+ pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STC_POOL;
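+ /* Clamp the STC pool size to the device maximum, but never below the allocation granularity */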
+ max_log_sz = min(MLX5HWS_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max);
+ pool_attr.alloc_log_sz = max(max_log_sz, ctx->caps->stc_alloc_log_gran);
+
+ for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
+ pool_attr.table_type = i;
+ ctx->stc_pool[i] = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!ctx->stc_pool[i]) {
+ mlx5hws_err(ctx, "Failed to allocate STC pool [%d]", i);
+ ret = -ENOMEM;
+ goto free_stc_pools;
+ }
+ }
+
+ return 0;
+
+free_stc_pools:
+ for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++)
+ if (ctx->stc_pool[i])
+ mlx5hws_pool_destroy(ctx->stc_pool[i]);
+
+ mlx5hws_definer_uninit_cache(ctx->definer_cache);
+uninit_pat_cache:
+ mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
+ return ret;
+}
+
+static void hws_context_pools_uninit(struct mlx5hws_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
+ if (ctx->stc_pool[i])
+ mlx5hws_pool_destroy(ctx->stc_pool[i]);
+ }
+
+ mlx5hws_definer_uninit_cache(ctx->definer_cache);
+ mlx5hws_pat_uninit_pattern_cache(ctx->pattern_cache);
+}
+
+static int hws_context_init_pd(struct mlx5hws_context *ctx)
+{
+ int ret = 0;
+
+ ret = mlx5_core_alloc_pd(ctx->mdev, &ctx->pd_num);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate PD\n");
+ return ret;
+ }
+
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_PRIVATE_PD;
+
+ return 0;
+}
+
+static int hws_context_uninit_pd(struct mlx5hws_context *ctx)
+{
+ if (ctx->flags & MLX5HWS_CONTEXT_FLAG_PRIVATE_PD)
+ mlx5_core_dealloc_pd(ctx->mdev, ctx->pd_num);
+
+ return 0;
+}
+
+static void hws_context_check_hws_supp(struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+ /* HWS not supported on device / FW */
+ if (!caps->wqe_based_update) {
+ mlx5hws_err(ctx, "Required HWS WQE based insertion cap not supported\n");
+ return;
+ }
+
+ if (!caps->eswitch_manager) {
+ mlx5hws_err(ctx, "HWS is not supported for non eswitch manager port\n");
+ return;
+ }
+
+ /* The current solution requires all rules to set the reparse bit */
+ if ((!caps->nic_ft.reparse ||
+ (!caps->fdb_ft.reparse && caps->eswitch_manager)) ||
+ !IS_BIT_SET(caps->rtc_reparse_mode, MLX5_IFC_RTC_REPARSE_ALWAYS)) {
+ mlx5hws_err(ctx, "Required HWS reparse cap not supported\n");
+ return;
+ }
+
+ /* FW/HW must support 8DW STE */
+ if (!IS_BIT_SET(caps->ste_format, MLX5_IFC_RTC_STE_FORMAT_8DW)) {
+ mlx5hws_err(ctx, "Required HWS STE format not supported\n");
+ return;
+ }
+
+ /* Both insert-by-hash and insert-by-offset RTC update modes are required */
+ if (!IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH) ||
+ !IS_BIT_SET(caps->rtc_index_mode, MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET)) {
+ mlx5hws_err(ctx, "Required HWS RTC update mode not supported\n");
+ return;
+ }
+
+ /* Support for SELECT definer ID is required */
+ if (!IS_BIT_SET(caps->definer_format_sup, MLX5_IFC_DEFINER_FORMAT_ID_SELECT)) {
+ mlx5hws_err(ctx, "Required HWS Dynamic definer not supported\n");
+ return;
+ }
+
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT;
+}
+
+static int hws_context_init_hws(struct mlx5hws_context *ctx,
+ struct mlx5hws_context_attr *attr)
+{
+ int ret;
+
+ hws_context_check_hws_supp(ctx);
+
+ if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
+ return 0;
+
+ ret = hws_context_init_pd(ctx);
+ if (ret)
+ return ret;
+
+ ret = hws_context_pools_init(ctx);
+ if (ret)
+ goto uninit_pd;
+
+ if (attr->bwc)
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
+
+ ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size);
+ if (ret)
+ goto pools_uninit;
+
+ INIT_LIST_HEAD(&ctx->tbl_list);
+
+ return 0;
+
+pools_uninit:
+ hws_context_pools_uninit(ctx);
+uninit_pd:
+ hws_context_uninit_pd(ctx);
+ return ret;
+}
+
+static void hws_context_uninit_hws(struct mlx5hws_context *ctx)
+{
+ if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT))
+ return;
+
+ mlx5hws_send_queues_close(ctx);
+ hws_context_pools_uninit(ctx);
+ hws_context_uninit_pd(ctx);
+}
+
+struct mlx5hws_context *mlx5hws_context_open(struct mlx5_core_dev *mdev,
+ struct mlx5hws_context_attr *attr)
+{
+ struct mlx5hws_context *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ ctx->mdev = mdev;
+
+ mutex_init(&ctx->ctrl_lock);
+ xa_init(&ctx->peer_ctx_xa);
+
+ ctx->caps = kzalloc(sizeof(*ctx->caps), GFP_KERNEL);
+ if (!ctx->caps)
+ goto free_ctx;
+
+ ret = mlx5hws_cmd_query_caps(mdev, ctx->caps);
+ if (ret)
+ goto free_caps;
+
+ ret = mlx5hws_vport_init_vports(ctx);
+ if (ret)
+ goto free_caps;
+
+ ret = hws_context_init_hws(ctx, attr);
+ if (ret)
+ goto uninit_vports;
+
+ mlx5hws_debug_init_dump(ctx);
+
+ return ctx;
+
+uninit_vports:
+ mlx5hws_vport_uninit_vports(ctx);
+free_caps:
+ kfree(ctx->caps);
+free_ctx:
+ xa_destroy(&ctx->peer_ctx_xa);
+ mutex_destroy(&ctx->ctrl_lock);
+ kfree(ctx);
+ return NULL;
+}
+
+int mlx5hws_context_close(struct mlx5hws_context *ctx)
+{
+ mlx5hws_debug_uninit_dump(ctx);
+ hws_context_uninit_hws(ctx);
+ mlx5hws_vport_uninit_vports(ctx);
+ kfree(ctx->caps);
+ xa_destroy(&ctx->peer_ctx_xa);
+ mutex_destroy(&ctx->ctrl_lock);
+ kfree(ctx);
+ return 0;
+}
+
+void mlx5hws_context_set_peer(struct mlx5hws_context *ctx,
+ struct mlx5hws_context *peer_ctx,
+ u16 peer_vhca_id)
+{
+ mutex_lock(&ctx->ctrl_lock);
+
+ if (xa_err(xa_store(&ctx->peer_ctx_xa, peer_vhca_id, peer_ctx, GFP_KERNEL)))
+ pr_warn("HWS: failed storing peer vhca ID in peer xarray\n");
+
+ mutex_unlock(&ctx->ctrl_lock);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h
new file mode 100644
index 000000000000..e5a7ce604334
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_context.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_CONTEXT_H_
+#define MLX5HWS_CONTEXT_H_
+
+enum mlx5hws_context_flags {
+ MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT = 1 << 0,
+ MLX5HWS_CONTEXT_FLAG_PRIVATE_PD = 1 << 1,
+ MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT = 1 << 2,
+};
+
+enum mlx5hws_context_shared_stc_type {
+ MLX5HWS_CONTEXT_SHARED_STC_DECAP_L3 = 0,
+ MLX5HWS_CONTEXT_SHARED_STC_DOUBLE_POP = 1,
+ MLX5HWS_CONTEXT_SHARED_STC_MAX = 2,
+};
+
+struct mlx5hws_context_common_res {
+ struct mlx5hws_action_default_stc *default_stc;
+ struct mlx5hws_action_shared_stc *shared_stc[MLX5HWS_CONTEXT_SHARED_STC_MAX];
+ struct mlx5hws_cmd_forward_tbl *default_miss;
+};
+
+struct mlx5hws_context_debug_info {
+ struct dentry *steering_debugfs;
+ struct dentry *fdb_debugfs;
+};
+
+struct mlx5hws_context_vports {
+ u16 esw_manager_gvmi;
+ u16 uplink_gvmi;
+ struct xarray vport_gvmi_xa;
+};
+
+struct mlx5hws_context {
+ struct mlx5_core_dev *mdev;
+ struct mlx5hws_cmd_query_caps *caps;
+ u32 pd_num;
+ struct mlx5hws_pool *stc_pool[MLX5HWS_TABLE_TYPE_MAX];
+ struct mlx5hws_context_common_res common_res[MLX5HWS_TABLE_TYPE_MAX];
+ struct mlx5hws_pattern_cache *pattern_cache;
+ struct mlx5hws_definer_cache *definer_cache;
+ struct mutex ctrl_lock; /* control lock to protect the whole context */
+ enum mlx5hws_context_flags flags;
+ struct mlx5hws_send_engine *send_queue;
+ size_t queues;
+ struct mutex *bwc_send_queue_locks; /* protect BWC queues */
+ struct list_head tbl_list;
+ struct mlx5hws_context_debug_info debug_info;
+ struct xarray peer_ctx_xa;
+ struct mlx5hws_context_vports vports;
+};
+
+static inline bool mlx5hws_context_bwc_supported(struct mlx5hws_context *ctx)
+{
+ return ctx->flags & MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
+}
+
+bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx);
+
+u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx);
+
+#endif /* MLX5HWS_CONTEXT_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c
new file mode 100644
index 000000000000..2b8c5a4e1c4c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/version.h>
+#include "mlx5hws_internal.h"
+
+static int
+hws_debug_dump_matcher_template_definer(struct seq_file *f,
+ void *parent_obj,
+ struct mlx5hws_definer *definer,
+ enum mlx5hws_debug_res_type type)
+{
+ int i;
+
+ if (!definer)
+ return 0;
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,",
+ type,
+ HWS_PTR_TO_ID(definer),
+ HWS_PTR_TO_ID(parent_obj),
+ definer->obj_id,
+ definer->type);
+
+ for (i = 0; i < DW_SELECTORS; i++)
+ seq_printf(f, "0x%x%s", definer->dw_selector[i],
+ (i == DW_SELECTORS - 1) ? "," : "-");
+
+ for (i = 0; i < BYTE_SELECTORS; i++)
+ seq_printf(f, "0x%x%s", definer->byte_selector[i],
+ (i == BYTE_SELECTORS - 1) ? "," : "-");
+
+ for (i = 0; i < MLX5HWS_JUMBO_TAG_SZ; i++)
+ seq_printf(f, "%02x", definer->mask.jumbo[i]);
+
+ seq_puts(f, "\n");
+
+ return 0;
+}
+
+static int
+hws_debug_dump_matcher_match_template(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+ enum mlx5hws_debug_res_type type;
+ int i, ret;
+
+ for (i = 0; i < matcher->num_of_mt; i++) {
+ struct mlx5hws_match_template *mt = &matcher->mt[i];
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_MATCH_TEMPLATE,
+ HWS_PTR_TO_ID(mt),
+ HWS_PTR_TO_ID(matcher),
+ mt->fc_sz,
+ 0, 0);
+
+ type = MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_MATCH_DEFINER;
+ ret = hws_debug_dump_matcher_template_definer(f, mt, mt->definer, type);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+hws_debug_dump_matcher_action_template(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+ enum mlx5hws_action_type action_type;
+ int i, j;
+
+ for (i = 0; i < matcher->num_of_at; i++) {
+ struct mlx5hws_action_template *at = &matcher->at[i];
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d",
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE,
+ HWS_PTR_TO_ID(at),
+ HWS_PTR_TO_ID(matcher),
+ at->only_term,
+ at->num_of_action_stes,
+ at->num_actions);
+
+ for (j = 0; j < at->num_actions; j++) {
+ action_type = at->action_type_arr[j];
+ seq_printf(f, ",%s", mlx5hws_action_type_to_str(action_type));
+ }
+
+ seq_puts(f, "\n");
+ }
+
+ return 0;
+}
+
+static int
+hws_debug_dump_matcher_attr(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_ATTR,
+ HWS_PTR_TO_ID(matcher),
+ attr->priority,
+ attr->mode,
+ attr->table.sz_row_log,
+ attr->table.sz_col_log,
+ attr->optimize_using_rule_idx,
+ attr->optimize_flow_src,
+ attr->insert_mode,
+ attr->distribute_mode);
+
+ return 0;
+}
+
+static int hws_debug_dump_matcher(struct seq_file *f, struct mlx5hws_matcher *matcher)
+{
+ enum mlx5hws_table_type tbl_type = matcher->tbl->type;
+ struct mlx5hws_cmd_ft_query_attr ft_attr = {0};
+ struct mlx5hws_pool_chunk *ste;
+ struct mlx5hws_pool *ste_pool;
+ u64 icm_addr_0 = 0;
+ u64 icm_addr_1 = 0;
+ u32 ste_0_id = -1;
+ u32 ste_1_id = -1;
+ int ret;
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,0x%llx",
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER,
+ HWS_PTR_TO_ID(matcher),
+ HWS_PTR_TO_ID(matcher->tbl),
+ matcher->num_of_mt,
+ matcher->end_ft_id,
+ matcher->col_matcher ? HWS_PTR_TO_ID(matcher->col_matcher) : 0);
+
+ ste = &matcher->match_ste.ste;
+ ste_pool = matcher->match_ste.pool;
+ if (ste_pool) {
+ ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+ if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
+ ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ }
+
+ seq_printf(f, ",%d,%d,%d,%d",
+ matcher->match_ste.rtc_0_id,
+ (int)ste_0_id,
+ matcher->match_ste.rtc_1_id,
+ (int)ste_1_id);
+
+ ste = &matcher->action_ste[0].ste;
+ ste_pool = matcher->action_ste[0].pool;
+ if (ste_pool) {
+ ste_0_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+ if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
+ ste_1_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ else
+ ste_1_id = -1;
+ } else {
+ ste_0_id = -1;
+ ste_1_id = -1;
+ }
+
+ ft_attr.type = matcher->tbl->fw_ft_type;
+ ret = mlx5hws_cmd_flow_table_query(matcher->tbl->ctx->mdev,
+ matcher->end_ft_id,
+ &ft_attr,
+ &icm_addr_0,
+ &icm_addr_1);
+ if (ret)
+ return ret;
+
+ seq_printf(f, ",%d,%d,%d,%d,%d,0x%llx,0x%llx\n",
+ matcher->action_ste[0].rtc_0_id,
+ (int)ste_0_id,
+ matcher->action_ste[0].rtc_1_id,
+ (int)ste_1_id,
+ 0,
+ mlx5hws_debug_icm_to_idx(icm_addr_0),
+ mlx5hws_debug_icm_to_idx(icm_addr_1));
+
+ ret = hws_debug_dump_matcher_attr(f, matcher);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_matcher_match_template(f, matcher);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_matcher_action_template(f, matcher);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int hws_debug_dump_table(struct seq_file *f, struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_cmd_ft_query_attr ft_attr = {0};
+ struct mlx5hws_matcher *matcher;
+ u64 local_icm_addr_0 = 0;
+ u64 local_icm_addr_1 = 0;
+ u64 icm_addr_0 = 0;
+ u64 icm_addr_1 = 0;
+ int ret;
+
+ seq_printf(f, "%d,0x%llx,0x%llx,%d,%d,%d,%d,%d",
+ MLX5HWS_DEBUG_RES_TYPE_TABLE,
+ HWS_PTR_TO_ID(tbl),
+ HWS_PTR_TO_ID(tbl->ctx),
+ tbl->ft_id,
+ MLX5HWS_TABLE_TYPE_BASE + tbl->type,
+ tbl->fw_ft_type,
+ tbl->level,
+ 0);
+
+ ft_attr.type = tbl->fw_ft_type;
+ ret = mlx5hws_cmd_flow_table_query(tbl->ctx->mdev,
+ tbl->ft_id,
+ &ft_attr,
+ &icm_addr_0,
+ &icm_addr_1);
+ if (ret)
+ return ret;
+
+ seq_printf(f, ",0x%llx,0x%llx,0x%llx,0x%llx,0x%llx\n",
+ mlx5hws_debug_icm_to_idx(icm_addr_0),
+ mlx5hws_debug_icm_to_idx(icm_addr_1),
+ mlx5hws_debug_icm_to_idx(local_icm_addr_0),
+ mlx5hws_debug_icm_to_idx(local_icm_addr_1),
+ HWS_PTR_TO_ID(tbl->default_miss.miss_tbl));
+
+ list_for_each_entry(matcher, &tbl->matchers_list, list_node) {
+ ret = hws_debug_dump_matcher(f, matcher);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+hws_debug_dump_context_send_engine(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_send_engine *send_queue;
+ struct mlx5hws_send_ring *send_ring;
+ struct mlx5hws_send_ring_cq *cq;
+ struct mlx5hws_send_ring_sq *sq;
+ int i;
+
+ for (i = 0; i < (int)ctx->queues; i++) {
+ send_queue = &ctx->send_queue[i];
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_ENGINE,
+ HWS_PTR_TO_ID(ctx),
+ i,
+ send_queue->used_entries,
+ send_queue->num_entries,
+ 1, /* one send ring per queue */
+ send_queue->num_entries,
+ send_queue->err,
+ send_queue->completed.ci,
+ send_queue->completed.pi,
+ send_queue->completed.mask);
+
+ send_ring = &send_queue->send_ring;
+ cq = &send_ring->send_cq;
+ sq = &send_ring->send_sq;
+
+ seq_printf(f, "%d,0x%llx,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_RING,
+ HWS_PTR_TO_ID(ctx),
+ 0, /* one send ring per send queue */
+ i,
+ cq->mcq.cqn,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ cq->mcq.cqe_sz,
+ sq->sqn,
+ 0,
+ 0,
+ 0);
+ }
+
+ return 0;
+}
+
+static int hws_debug_dump_context_caps(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_cmd_query_caps *caps = ctx->caps;
+
+ seq_printf(f, "%d,0x%llx,%s,%d,%d,%d,%d,",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_CAPS,
+ HWS_PTR_TO_ID(ctx),
+ caps->fw_ver,
+ caps->wqe_based_update,
+ caps->ste_format,
+ caps->ste_alloc_log_max,
+ caps->log_header_modify_argument_max_alloc);
+
+ seq_printf(f, "%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%s\n",
+ caps->flex_protocols,
+ caps->rtc_reparse_mode,
+ caps->rtc_index_mode,
+ caps->ste_alloc_log_gran,
+ caps->stc_alloc_log_max,
+ caps->stc_alloc_log_gran,
+ caps->rtc_log_depth_max,
+ caps->format_select_gtpu_dw_0,
+ caps->format_select_gtpu_dw_1,
+ caps->format_select_gtpu_dw_2,
+ caps->format_select_gtpu_ext_dw_0,
+ caps->nic_ft.max_level,
+ caps->nic_ft.reparse,
+ caps->fdb_ft.max_level,
+ caps->fdb_ft.reparse,
+ caps->log_header_modify_argument_granularity,
+ caps->linear_match_definer,
+ "regc_3");
+
+ return 0;
+}
+
+static int hws_debug_dump_context_attr(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ seq_printf(f, "%u,0x%llx,%d,%zu,%d,%s,%d,%d\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_ATTR,
+ HWS_PTR_TO_ID(ctx),
+ ctx->pd_num,
+ ctx->queues,
+ ctx->send_queue->num_entries,
+ "None", /* no shared gvmi */
+ ctx->caps->vhca_id,
+ 0xffff); /* no shared gvmi */
+
+ return 0;
+}
+
+static int hws_debug_dump_context_info(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5_core_dev *dev = ctx->mdev;
+ int ret;
+
+ seq_printf(f, "%d,0x%llx,%d,%s,%s.KERNEL_%u_%u_%u\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT,
+ HWS_PTR_TO_ID(ctx),
+ ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT,
+ pci_name(dev->pdev),
+ HWS_DEBUG_FORMAT_VERSION,
+ LINUX_VERSION_MAJOR,
+ LINUX_VERSION_PATCHLEVEL,
+ LINUX_VERSION_SUBLEVEL);
+
+ ret = hws_debug_dump_context_attr(f, ctx);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_context_caps(f, ctx);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int hws_debug_dump_context_stc_resource(struct seq_file *f,
+ struct mlx5hws_context *ctx,
+ u32 tbl_type,
+ struct mlx5hws_pool_resource *resource)
+{
+ seq_printf(f, "%d,0x%llx,%u,%u\n",
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC,
+ HWS_PTR_TO_ID(ctx),
+ tbl_type,
+ resource->base_id);
+
+ return 0;
+}
+
+static int hws_debug_dump_context_stc(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_pool *stc_pool;
+ u32 table_type;
+ int ret;
+ int i;
+
+ for (i = 0; i < MLX5HWS_TABLE_TYPE_MAX; i++) {
+ stc_pool = ctx->stc_pool[i];
+ table_type = MLX5HWS_TABLE_TYPE_BASE + i;
+
+ if (!stc_pool)
+ continue;
+
+ if (stc_pool->resource[0]) {
+ ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
+ stc_pool->resource[0]);
+ if (ret)
+ return ret;
+ }
+
+ if (i == MLX5HWS_TABLE_TYPE_FDB && stc_pool->mirror_resource[0]) {
+ ret = hws_debug_dump_context_stc_resource(f, ctx, table_type,
+ stc_pool->mirror_resource[0]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hws_debug_dump_context(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ struct mlx5hws_table *tbl;
+ int ret;
+
+ ret = hws_debug_dump_context_info(f, ctx);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_context_send_engine(f, ctx);
+ if (ret)
+ return ret;
+
+ ret = hws_debug_dump_context_stc(f, ctx);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(tbl, &ctx->tbl_list, tbl_list_node) {
+ ret = hws_debug_dump_table(f, tbl);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+hws_debug_dump(struct seq_file *f, struct mlx5hws_context *ctx)
+{
+ int ret;
+
+ if (!f || !ctx)
+ return -EINVAL;
+
+ mutex_lock(&ctx->ctrl_lock);
+ ret = hws_debug_dump_context(f, ctx);
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return ret;
+}
+
+static int hws_dump_show(struct seq_file *file, void *priv)
+{
+ return hws_debug_dump(file, file->private);
+}
+DEFINE_SHOW_ATTRIBUTE(hws_dump);
+
+void mlx5hws_debug_init_dump(struct mlx5hws_context *ctx)
+{
+ struct mlx5_core_dev *dev = ctx->mdev;
+ char file_name[128];
+
+ ctx->debug_info.steering_debugfs =
+ debugfs_create_dir("steering", mlx5_debugfs_get_dev_root(dev));
+ ctx->debug_info.fdb_debugfs =
+ debugfs_create_dir("fdb", ctx->debug_info.steering_debugfs);
+
+ sprintf(file_name, "ctx_%p", ctx);
+ debugfs_create_file(file_name, 0444, ctx->debug_info.fdb_debugfs,
+ ctx, &hws_dump_fops);
+}
+
+void mlx5hws_debug_uninit_dump(struct mlx5hws_context *ctx)
+{
+ debugfs_remove_recursive(ctx->debug_info.steering_debugfs);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h
new file mode 100644
index 000000000000..b93a536035d9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_debug.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_DEBUG_H_
+#define MLX5HWS_DEBUG_H_
+
+#define HWS_DEBUG_FORMAT_VERSION "1.0"
+
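+/* Use the low 32 bits of a kernel pointer as a stable object ID in dump records */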
+#define HWS_PTR_TO_ID(p) ((u64)(uintptr_t)(p) & 0xFFFFFFFFULL)
+
+enum mlx5hws_debug_res_type {
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT = 4000,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_ATTR = 4001,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_CAPS = 4002,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_ENGINE = 4003,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_SEND_RING = 4004,
+ MLX5HWS_DEBUG_RES_TYPE_CONTEXT_STC = 4005,
+
+ MLX5HWS_DEBUG_RES_TYPE_TABLE = 4100,
+
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER = 4200,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_ATTR = 4201,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_MATCH_TEMPLATE = 4202,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_MATCH_DEFINER = 4203,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE = 4204,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_HASH_DEFINER = 4205,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_RANGE_DEFINER = 4206,
+ MLX5HWS_DEBUG_RES_TYPE_MATCHER_TEMPLATE_COMPARE_MATCH_DEFINER = 4207,
+};
+
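+/* ICM addresses are reported as indexes: drop the low 6 bits and keep the low 32 bits of the result */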
+static inline u64
+mlx5hws_debug_icm_to_idx(u64 icm_addr)
+{
+ return (icm_addr >> 6) & 0xffffffff;
+}
+
+void mlx5hws_debug_init_dump(struct mlx5hws_context *ctx);
+void mlx5hws_debug_uninit_dump(struct mlx5hws_context *ctx);
+
+#endif /* MLX5HWS_DEBUG_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c
new file mode 100644
index 000000000000..3bdb5c90efff
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.c
@@ -0,0 +1,2146 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+/* Pattern tunnel Layer bits. */
+#define MLX5_FLOW_LAYER_VXLAN BIT(12)
+#define MLX5_FLOW_LAYER_VXLAN_GPE BIT(13)
+#define MLX5_FLOW_LAYER_GRE BIT(14)
+#define MLX5_FLOW_LAYER_MPLS BIT(15)
+
+/* Pattern tunnel Layer bits (continued). */
+#define MLX5_FLOW_LAYER_IPIP BIT(23)
+#define MLX5_FLOW_LAYER_IPV6_ENCAP BIT(24)
+#define MLX5_FLOW_LAYER_NVGRE BIT(25)
+#define MLX5_FLOW_LAYER_GENEVE BIT(26)
+
+#define MLX5_FLOW_ITEM_FLEX_TUNNEL BIT_ULL(39)
+
+/* Tunnel Masks. */
+#define MLX5_FLOW_LAYER_TUNNEL \
+ (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
+ MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
+ MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
+ MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
+ MLX5_FLOW_ITEM_FLEX_TUNNEL)
+
+#define GTP_PDU_SC 0x85
+#define BAD_PORT 0xBAD
+#define ETH_TYPE_IPV4_VXLAN 0x0800
+#define ETH_TYPE_IPV6_VXLAN 0x86DD
+#define UDP_GTPU_PORT 2152
+#define UDP_PORT_MPLS 6635
+#define UDP_GENEVE_PORT 6081
+#define UDP_ROCEV2_PORT 4791
+#define HWS_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS)
+
+#define STE_NO_VLAN 0x0
+#define STE_SVLAN 0x1
+#define STE_CVLAN 0x2
+#define STE_NO_L3 0x0
+#define STE_IPV4 0x1
+#define STE_IPV6 0x2
+#define STE_NO_L4 0x0
+#define STE_TCP 0x1
+#define STE_UDP 0x2
+#define STE_ICMP 0x3
+#define STE_ESP 0x3
+
+#define IPV4 0x4
+#define IPV6 0x6
+
+/* Setter function based on bit offset and mask, for 32bit DW */
+#define _HWS_SET32(p, v, byte_off, bit_off, mask) \
+ do { \
+ u32 _v = v; \
+ *((__be32 *)(p) + ((byte_off) / 4)) = \
+ cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + \
+ ((byte_off) / 4))) & \
+ (~((mask) << (bit_off)))) | \
+ (((_v) & (mask)) << \
+ (bit_off))); \
+ } while (0)
+
+/* Setter function based on bit offset and mask, for unaligned 32bit DW */
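+/* A negative bit_off means the field straddles a DW boundary: the high part is written
+ * at byte_off and the remaining low bits at byte_off + DW_SIZE.
+ */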
+#define HWS_SET32(p, v, byte_off, bit_off, mask) \
+ do { \
+ if (unlikely((bit_off) < 0)) { \
+ u32 _bit_off = -1 * (bit_off); \
+ u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \
+ _HWS_SET32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \
+ _HWS_SET32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \
+ (bit_off) % BITS_IN_DW, second_dw_mask); \
+ } else { \
+ _HWS_SET32(p, v, byte_off, (bit_off), (mask)); \
+ } \
+ } while (0)
+
+/* Getter for a field contained within a single aligned 32bit DW */
+#define HWS_GET32(p, byte_off, bit_off, mask) \
+ ((be32_to_cpu(*((__be32 *)(p) + ((byte_off) / 4))) >> (bit_off)) & (mask))
+
+#define HWS_CALC_FNAME(field, inner) \
+ ((inner) ? MLX5HWS_DEFINER_FNAME_##field##_I : \
+ MLX5HWS_DEFINER_FNAME_##field##_O)
+
+#define HWS_GET_MATCH_PARAM(match_param, hdr) \
+ MLX5_GET(fte_match_param, match_param, hdr)
+
+#define HWS_IS_FLD_SET(match_param, hdr) \
+ (!!(HWS_GET_MATCH_PARAM(match_param, hdr)))
+
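+/* Check whether any DW of a multi-DW match_param field is non-zero; the field size must be a multiple of 32 bits */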
+#define HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) ({ \
+ BUILD_BUG_ON((sz_in_bits) % 32); \
+ u32 sz = sz_in_bits; \
+ u32 res = 0; \
+ u32 dw_off = __mlx5_dw_off(fte_match_param, hdr); \
+ while (!res && sz >= 32) { \
+ res = *((match_param) + (dw_off++)); \
+ sz -= 32; \
+ } \
+ res; \
+ })
+
+#define HWS_IS_FLD_SET_SZ(match_param, hdr, sz_in_bits) \
+ (((sz_in_bits) > 32) ? HWS_IS_FLD_SET_DW_ARR(match_param, hdr, sz_in_bits) : \
+ !!(HWS_GET_MATCH_PARAM(match_param, hdr)))
+
+#define HWS_GET64_MATCH_PARAM(match_param, hdr) \
+ MLX5_GET64(fte_match_param, match_param, hdr)
+
+#define HWS_IS_FLD64_SET(match_param, hdr) \
+ (!!(HWS_GET64_MATCH_PARAM(match_param, hdr)))
+
+#define HWS_CALC_HDR_SRC(fc, s_hdr) \
+ do { \
+ (fc)->s_bit_mask = __mlx5_mask(fte_match_param, s_hdr); \
+ (fc)->s_bit_off = __mlx5_dw_bit_off(fte_match_param, s_hdr); \
+ (fc)->s_byte_off = MLX5_BYTE_OFF(fte_match_param, s_hdr); \
+ } while (0)
+
+#define HWS_CALC_HDR_DST(fc, d_hdr) \
+ do { \
+ (fc)->bit_mask = __mlx5_mask(definer_hl, d_hdr); \
+ (fc)->bit_off = __mlx5_dw_bit_off(definer_hl, d_hdr); \
+ (fc)->byte_off = MLX5_BYTE_OFF(definer_hl, d_hdr); \
+ } while (0)
+
+#define HWS_CALC_HDR(fc, s_hdr, d_hdr) \
+ do { \
+ HWS_CALC_HDR_SRC(fc, s_hdr); \
+ HWS_CALC_HDR_DST(fc, d_hdr); \
+ (fc)->tag_set = &hws_definer_generic_set; \
+ } while (0)
+
+#define HWS_SET_HDR(fc_arr, match_param, fname, s_hdr, d_hdr) \
+ do { \
+ if (HWS_IS_FLD_SET(match_param, s_hdr)) \
+ HWS_CALC_HDR(&(fc_arr)[MLX5HWS_DEFINER_FNAME_##fname], s_hdr, d_hdr); \
+ } while (0)
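+/* HWS_SET_HDR maps a match_param source field to a definer header-layout destination and
+ * installs the generic tag setter, but only when the field is actually set in match_param.
+ */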
+
+struct mlx5hws_definer_sel_ctrl {
+ u8 allowed_full_dw; /* Full DW selectors cover all offsets */
+ u8 allowed_lim_dw; /* Limited DW selectors cover offset < 64 */
+ u8 allowed_bytes; /* Byte selectors, covering offsets up to 255 */
+ u8 used_full_dw;
+ u8 used_lim_dw;
+ u8 used_bytes;
+ u8 full_dw_selector[DW_SELECTORS];
+ u8 lim_dw_selector[DW_SELECTORS_LIMITED];
+ u8 byte_selector[BYTE_SELECTORS];
+};
+
+struct mlx5hws_definer_conv_data {
+ struct mlx5hws_context *ctx;
+ struct mlx5hws_definer_fc *fc;
+ /* enum mlx5hws_definer_match_flag */
+ u32 match_flags;
+};
+
+static void
+hws_definer_ones_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ HWS_SET32(tag, -1, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_generic_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ /* Generic copy of a masked match_param field into the tag; could be optimized */
+ u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask);
+
+ HWS_SET32(tag, val, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_outer_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag))
+ HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else if (HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag))
+ HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else
+ HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_inner_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag))
+ HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else if (HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag))
+ HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else
+ HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag,
+ bool inner)
+{
+ u32 second_cvlan_tag = inner ?
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) :
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag);
+ u32 second_svlan_tag = inner ?
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag) :
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag);
+
+ if (second_cvlan_tag)
+ HWS_SET32(tag, STE_CVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else if (second_svlan_tag)
+ HWS_SET32(tag, STE_SVLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else
+ HWS_SET32(tag, STE_NO_VLAN, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_inner_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ hws_definer_second_vlan_type_set(fc, match_param, tag, true);
+}
+
+static void
+hws_definer_outer_second_vlan_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ hws_definer_second_vlan_type_set(fc, match_param, tag, false);
+}
+
+static void hws_definer_icmp_dw1_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_code);
+ u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmp_type);
+ u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) |
+ (code << __mlx5_dw_bit_off(header_icmp, code));
+
+ HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_icmpv6_dw1_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ u32 code = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_code);
+ u32 type = HWS_GET_MATCH_PARAM(match_param, misc_parameters_3.icmpv6_type);
+ u32 dw = (type << __mlx5_dw_bit_off(header_icmp, type)) |
+ (code << __mlx5_dw_bit_off(header_icmp, code));
+
+ HWS_SET32(tag, dw, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_l3_type_set(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ u32 val = HWS_GET32(match_param, fc->s_byte_off, fc->s_bit_off, fc->s_bit_mask);
+
+ if (val == IPV4)
+ HWS_SET32(tag, STE_IPV4, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else if (val == IPV6)
+ HWS_SET32(tag, STE_IPV6, fc->byte_off, fc->bit_off, fc->bit_mask);
+ else
+ HWS_SET32(tag, STE_NO_L3, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_set_source_port_gvmi(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag,
+ struct mlx5hws_context *peer_ctx)
+{
+ u16 source_port = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port);
+ u16 vport_gvmi = 0;
+ int ret;
+
+ ret = mlx5hws_vport_get_gvmi(peer_ctx, source_port, &vport_gvmi);
+ if (ret) {
+ HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask);
+ mlx5hws_err(fc->ctx, "Vport 0x%x is disabled or invalid\n", source_port);
+ return;
+ }
+
+ if (vport_gvmi)
+ HWS_SET32(tag, vport_gvmi, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+hws_definer_set_source_gvmi_vhca_id(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+__must_hold(&fc->ctx->ctrl_lock)
+{
+ int id = HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_eswitch_owner_vhca_id);
+ struct mlx5hws_context *peer_ctx;
+
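+ /* The local context serves its own vhca_id; any other id must have been registered via mlx5hws_context_set_peer() */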
+ if (id == fc->ctx->caps->vhca_id)
+ peer_ctx = fc->ctx;
+ else
+ peer_ctx = xa_load(&fc->ctx->peer_ctx_xa, id);
+
+ if (!peer_ctx) {
+ HWS_SET32(tag, BAD_PORT, fc->byte_off, fc->bit_off, fc->bit_mask);
+ mlx5hws_err(fc->ctx, "Invalid vhca_id provided 0x%x\n", id);
+ return;
+ }
+
+ hws_definer_set_source_port_gvmi(fc, match_param, tag, peer_ctx);
+}
+
+static void
+hws_definer_set_source_gvmi(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag)
+{
+ hws_definer_set_source_port_gvmi(fc, match_param, tag, fc->ctx);
+}
+
+static struct mlx5hws_definer_fc *
+hws_definer_flex_parser_steering_ok_bits_handler(struct mlx5hws_definer_conv_data *cd,
+ u8 parser_id)
+{
+ struct mlx5hws_definer_fc *fc;
+
+ switch (parser_id) {
+ case 0:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser0_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 1:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser1_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 2:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser2_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 3:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser3_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 4:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser4_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 5:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser5_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 6:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser6_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 7:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK];
+ HWS_CALC_HDR_DST(fc, oks1.flex_parser7_steering_ok);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ default:
+ mlx5hws_err(cd->ctx, "Unsupported flex parser steering ok index %u\n", parser_id);
+ return NULL;
+ }
+
+ return fc;
+}
+
+static struct mlx5hws_definer_fc *
+hws_definer_flex_parser_handler(struct mlx5hws_definer_conv_data *cd,
+ u8 parser_id)
+{
+ struct mlx5hws_definer_fc *fc;
+
+ switch (parser_id) {
+ case 0:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_0);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 1:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_1);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 2:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_2);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 3:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_3);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 4:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_4);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 5:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_5);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 6:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_6);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ case 7:
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7];
+ HWS_CALC_HDR_DST(fc, flex_parser.flex_parser_7);
+ fc->tag_set = &hws_definer_generic_set;
+ break;
+ default:
+ mlx5hws_err(cd->ctx, "Unsupported flex parser %u\n", parser_id);
+ return NULL;
+ }
+
+ return fc;
+}
+
+static struct mlx5hws_definer_fc *
+hws_definer_misc4_fields_handler(struct mlx5hws_definer_conv_data *cd,
+ bool *parser_is_used,
+ u32 id,
+ u32 value)
+{
+ if (id || value) {
+ if (id >= HWS_NUM_OF_FLEX_PARSERS) {
+ mlx5hws_err(cd->ctx, "Unsupported parser id\n");
+ return NULL;
+ }
+
+ if (parser_is_used[id]) {
+ mlx5hws_err(cd->ctx, "Parser id have been used\n");
+ return NULL;
+ }
+ }
+
+ parser_is_used[id] = true;
+
+ return hws_definer_flex_parser_handler(cd, id);
+}
+
+static int
+hws_definer_check_match_flags(struct mlx5hws_definer_conv_data *cd)
+{
+ u32 flags;
+
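+ /* Within each group below the flags are mutually exclusive; flags & (flags - 1) is non-zero when more than one bit is set */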
+ flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1);
+ if (flags & (flags - 1))
+ goto err_conflict;
+
+ flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2);
+
+ if (flags & (flags - 1))
+ goto err_conflict;
+
+ flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP);
+ if (flags & (flags - 1))
+ goto err_conflict;
+
+ flags = cd->match_flags & (MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4 |
+ MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6 |
+ MLX5HWS_DEFINER_MATCH_FLAG_TCP_O |
+ MLX5HWS_DEFINER_MATCH_FLAG_TCP_I);
+ if (flags & (flags - 1))
+ goto err_conflict;
+
+ return 0;
+
+err_conflict:
+ mlx5hws_err(cd->ctx, "Invalid definer fields combination\n");
+ return -EINVAL;
+}
+
+static int
+hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+ u32 *s_ipv6, *d_ipv6;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type, 0x2) ||
+ HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c2, 0xe) ||
+ HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c4, 0x4)) {
+ mlx5hws_err(cd->ctx, "Unsupported outer parameters set\n");
+ return -EINVAL;
+ }
+
+ /* L2 Check ethertype */
+ HWS_SET_HDR(fc, match_param, ETH_TYPE_O,
+ outer_headers.ethertype,
+ eth_l2_outer.l3_ethertype);
+ /* L2 Check SMAC 47_16 */
+ HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_O,
+ outer_headers.smac_47_16, eth_l2_src_outer.smac_47_16);
+ /* L2 Check SMAC 15_0 */
+ HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_O,
+ outer_headers.smac_15_0, eth_l2_src_outer.smac_15_0);
+ /* L2 Check DMAC 47_16 */
+ HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_O,
+ outer_headers.dmac_47_16, eth_l2_outer.dmac_47_16);
+ /* L2 Check DMAC 15_0 */
+ HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_O,
+ outer_headers.dmac_15_0, eth_l2_outer.dmac_15_0);
+
+ /* L2 VLAN */
+ HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_O,
+ outer_headers.first_prio, eth_l2_outer.first_priority);
+ HWS_SET_HDR(fc, match_param, VLAN_CFI_O,
+ outer_headers.first_cfi, eth_l2_outer.first_cfi);
+ HWS_SET_HDR(fc, match_param, VLAN_ID_O,
+ outer_headers.first_vid, eth_l2_outer.first_vlan_id);
+
+ /* L2 CVLAN and SVLAN */
+ if (HWS_GET_MATCH_PARAM(match_param, outer_headers.cvlan_tag) ||
+ HWS_GET_MATCH_PARAM(match_param, outer_headers.svlan_tag)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.first_vlan_qualifier);
+ curr_fc->tag_set = &hws_definer_outer_vlan_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ }
+
+ /* L3 Check IP header */
+ HWS_SET_HDR(fc, match_param, IP_PROTOCOL_O,
+ outer_headers.ip_protocol,
+ eth_l3_outer.protocol_next_header);
+ HWS_SET_HDR(fc, match_param, IP_TTL_O,
+ outer_headers.ttl_hoplimit,
+ eth_l3_outer.time_to_live_hop_limit);
+
+ /* L3 Check IPv4/IPv6 addresses */
+ s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout);
+ d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout);
+
+ /* Treat the address as IPv6 if any of its upper 96 bits are set */
+ is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
+ is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+
+ if (is_s_ipv6) {
+ /* Handle IPv6 source address */
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
+ ipv6_src_outer.ipv6_address_127_96);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64,
+ ipv6_src_outer.ipv6_address_95_64);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32,
+ ipv6_src_outer.ipv6_address_63_32);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv6_src_outer.ipv6_address_31_0);
+ } else {
+ /* Handle IPv4 source address */
+ HWS_SET_HDR(fc, match_param, IPV4_SRC_O,
+ outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_outer.source_address);
+ }
+ if (is_d_ipv6) {
+ /* Handle IPv6 destination address */
+ HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
+ ipv6_dst_outer.ipv6_address_127_96);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64,
+ ipv6_dst_outer.ipv6_address_95_64);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32,
+ ipv6_dst_outer.ipv6_address_63_32);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv6_dst_outer.ipv6_address_31_0);
+ } else {
+ /* Handle IPv4 destination address */
+ HWS_SET_HDR(fc, match_param, IPV4_DST_O,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_outer.destination_address);
+ }
+
+ /* L4 Handle TCP/UDP */
+ HWS_SET_HDR(fc, match_param, L4_SPORT_O,
+ outer_headers.tcp_sport, eth_l4_outer.source_port);
+ HWS_SET_HDR(fc, match_param, L4_DPORT_O,
+ outer_headers.tcp_dport, eth_l4_outer.destination_port);
+ HWS_SET_HDR(fc, match_param, L4_SPORT_O,
+ outer_headers.udp_sport, eth_l4_outer.source_port);
+ HWS_SET_HDR(fc, match_param, L4_DPORT_O,
+ outer_headers.udp_dport, eth_l4_outer.destination_port);
+ HWS_SET_HDR(fc, match_param, TCP_FLAGS_O,
+ outer_headers.tcp_flags, eth_l4_outer.tcp_flags);
+
+ /* L3 Handle DSCP, ECN and IHL */
+ HWS_SET_HDR(fc, match_param, IP_DSCP_O,
+ outer_headers.ip_dscp, eth_l3_outer.dscp);
+ HWS_SET_HDR(fc, match_param, IP_ECN_O,
+ outer_headers.ip_ecn, eth_l3_outer.ecn);
+ HWS_SET_HDR(fc, match_param, IPV4_IHL_O,
+ outer_headers.ipv4_ihl, eth_l3_outer.ihl);
+
+ /* Set IP fragmented bit */
+ if (HWS_IS_FLD_SET(match_param, outer_headers.frag)) {
+ smac_set = HWS_IS_FLD_SET(match_param, outer_headers.smac_15_0) ||
+ HWS_IS_FLD_SET(match_param, outer_headers.smac_47_16);
+ dmac_set = HWS_IS_FLD_SET(match_param, outer_headers.dmac_15_0) ||
+ HWS_IS_FLD_SET(match_param, outer_headers.dmac_47_16);
+ if (smac_set == dmac_set) {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_O,
+ outer_headers.frag, eth_l4_outer.ip_fragmented);
+ } else {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_O,
+ outer_headers.frag, eth_l2_src_outer.ip_fragmented);
+ }
+ }
+
+ /* L3_type set */
+ if (HWS_IS_FLD_SET(match_param, outer_headers.ip_version)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.l3_type);
+ curr_fc->tag_set = &hws_definer_l3_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ HWS_CALC_HDR_SRC(curr_fc, outer_headers.ip_version);
+ }
+
+ return 0;
+}
+
+static int
+hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ bool is_s_ipv6, is_d_ipv6, smac_set, dmac_set;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+ u32 *s_ipv6, *d_ipv6;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type, 0x2) ||
+ HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c2, 0xe) ||
+ HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c4, 0x4)) {
+ mlx5hws_err(cd->ctx, "Unsupported inner parameters set\n");
+ return -EINVAL;
+ }
+
+ /* L2 Check ethertype */
+ HWS_SET_HDR(fc, match_param, ETH_TYPE_I,
+ inner_headers.ethertype,
+ eth_l2_inner.l3_ethertype);
+ /* L2 Check SMAC 47_16 */
+ HWS_SET_HDR(fc, match_param, ETH_SMAC_47_16_I,
+ inner_headers.smac_47_16, eth_l2_src_inner.smac_47_16);
+ /* L2 Check SMAC 15_0 */
+ HWS_SET_HDR(fc, match_param, ETH_SMAC_15_0_I,
+ inner_headers.smac_15_0, eth_l2_src_inner.smac_15_0);
+ /* L2 Check DMAC 47_16 */
+ HWS_SET_HDR(fc, match_param, ETH_DMAC_47_16_I,
+ inner_headers.dmac_47_16, eth_l2_inner.dmac_47_16);
+ /* L2 Check DMAC 15_0 */
+ HWS_SET_HDR(fc, match_param, ETH_DMAC_15_0_I,
+ inner_headers.dmac_15_0, eth_l2_inner.dmac_15_0);
+
+ /* L2 VLAN */
+ HWS_SET_HDR(fc, match_param, VLAN_FIRST_PRIO_I,
+ inner_headers.first_prio, eth_l2_inner.first_priority);
+ HWS_SET_HDR(fc, match_param, VLAN_CFI_I,
+ inner_headers.first_cfi, eth_l2_inner.first_cfi);
+ HWS_SET_HDR(fc, match_param, VLAN_ID_I,
+ inner_headers.first_vid, eth_l2_inner.first_vlan_id);
+
+ /* L2 CVLAN and SVLAN */
+ if (HWS_GET_MATCH_PARAM(match_param, inner_headers.cvlan_tag) ||
+ HWS_GET_MATCH_PARAM(match_param, inner_headers.svlan_tag)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.first_vlan_qualifier);
+ curr_fc->tag_set = &hws_definer_inner_vlan_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ }
+ /* L3 Check IP header */
+ HWS_SET_HDR(fc, match_param, IP_PROTOCOL_I,
+ inner_headers.ip_protocol,
+ eth_l3_inner.protocol_next_header);
+ HWS_SET_HDR(fc, match_param, IP_VERSION_I,
+ inner_headers.ip_version,
+ eth_l3_inner.ip_version);
+ HWS_SET_HDR(fc, match_param, IP_TTL_I,
+ inner_headers.ttl_hoplimit,
+ eth_l3_inner.time_to_live_hop_limit);
+
+ /* L3 Check IPv4/IPv6 addresses */
+ s_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+ inner_headers.src_ipv4_src_ipv6.ipv6_layout);
+ d_ipv6 = MLX5_ADDR_OF(fte_match_param, match_param,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_layout);
+
+ /* Treat the address as IPv6 if any of its upper 96 bits are set */
+ is_s_ipv6 = s_ipv6[0] || s_ipv6[1] || s_ipv6[2];
+ is_d_ipv6 = d_ipv6[0] || d_ipv6[1] || d_ipv6[2];
+
+ if (is_s_ipv6) {
+ /* Handle IPv6 source address */
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_127_96_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96,
+ ipv6_src_inner.ipv6_address_127_96);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_95_64_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64,
+ ipv6_src_inner.ipv6_address_95_64);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_63_32_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32,
+ ipv6_src_inner.ipv6_address_63_32);
+ HWS_SET_HDR(fc, match_param, IPV6_SRC_31_0_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv6_src_inner.ipv6_address_31_0);
+ } else {
+ /* Handle IPv4 source address */
+ HWS_SET_HDR(fc, match_param, IPV4_SRC_I,
+ inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_inner.source_address);
+ }
+ if (is_d_ipv6) {
+ /* Handle IPv6 destination address */
+ HWS_SET_HDR(fc, match_param, IPV6_DST_127_96_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96,
+ ipv6_dst_inner.ipv6_address_127_96);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_95_64_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64,
+ ipv6_dst_inner.ipv6_address_95_64);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_63_32_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32,
+ ipv6_dst_inner.ipv6_address_63_32);
+ HWS_SET_HDR(fc, match_param, IPV6_DST_31_0_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv6_dst_inner.ipv6_address_31_0);
+ } else {
+ /* Handle IPv4 destination address */
+ HWS_SET_HDR(fc, match_param, IPV4_DST_I,
+ inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0,
+ ipv4_src_dest_inner.destination_address);
+ }
+
+ /* L4 Handle TCP/UDP */
+ HWS_SET_HDR(fc, match_param, L4_SPORT_I,
+ inner_headers.tcp_sport, eth_l4_inner.source_port);
+ HWS_SET_HDR(fc, match_param, L4_DPORT_I,
+ inner_headers.tcp_dport, eth_l4_inner.destination_port);
+ HWS_SET_HDR(fc, match_param, L4_SPORT_I,
+ inner_headers.udp_sport, eth_l4_inner.source_port);
+ HWS_SET_HDR(fc, match_param, L4_DPORT_I,
+ inner_headers.udp_dport, eth_l4_inner.destination_port);
+ HWS_SET_HDR(fc, match_param, TCP_FLAGS_I,
+ inner_headers.tcp_flags, eth_l4_inner.tcp_flags);
+
+ /* L3 Handle DSCP, ECN and IHL */
+ HWS_SET_HDR(fc, match_param, IP_DSCP_I,
+ inner_headers.ip_dscp, eth_l3_inner.dscp);
+ HWS_SET_HDR(fc, match_param, IP_ECN_I,
+ inner_headers.ip_ecn, eth_l3_inner.ecn);
+ HWS_SET_HDR(fc, match_param, IPV4_IHL_I,
+ inner_headers.ipv4_ihl, eth_l3_inner.ihl);
+
+ /* Set IP fragmented bit */
+ if (HWS_IS_FLD_SET(match_param, inner_headers.frag)) {
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.vxlan_vni)) {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_I,
+ inner_headers.frag, eth_l2_inner.ip_fragmented);
+ } else {
+ smac_set = HWS_IS_FLD_SET(match_param, inner_headers.smac_15_0) ||
+ HWS_IS_FLD_SET(match_param, inner_headers.smac_47_16);
+ dmac_set = HWS_IS_FLD_SET(match_param, inner_headers.dmac_15_0) ||
+ HWS_IS_FLD_SET(match_param, inner_headers.dmac_47_16);
+ if (smac_set == dmac_set) {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_I,
+ inner_headers.frag, eth_l4_inner.ip_fragmented);
+ } else {
+ HWS_SET_HDR(fc, match_param, IP_FRAG_I,
+ inner_headers.frag, eth_l2_src_inner.ip_fragmented);
+ }
+ }
+ }
+
+ /* L3_type set */
+ if (HWS_IS_FLD_SET(match_param, inner_headers.ip_version)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.l3_type);
+ curr_fc->tag_set = &hws_definer_l3_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ HWS_CALC_HDR_SRC(curr_fc, inner_headers.ip_version);
+ }
+
+ return 0;
+}
+
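+/* Convert the misc_parameters section: GRE, GENEVE and VXLAN tunnel fields,
+ * second VLAN headers, IPv6 flow labels, source SQN and source port (GVMI).
+ * Each tunnel match also records a flag in cd->match_flags so conflicting
+ * combinations can be rejected later in hws_definer_check_match_flags().
+ */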
+static int
+hws_definer_conv_misc(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1, 0x1) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_64, 0xc) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_d8, 0x6) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_e0, 0xc) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_100, 0xc) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_120, 0xa) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_140, 0x8) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.bth_dst_qp) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.bth_opcode) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.inner_esp_spi) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.outer_esp_spi) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters.source_vhca_port) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters.reserved_at_1a0, 0x60)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc parameters set\n");
+ return -EINVAL;
+ }
+
+ /* Check GRE related fields */
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_c_present)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_C];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.gre_c_present,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_gre, gre_c_present);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_c_present);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_k_present)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_K];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.gre_k_present,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_gre, gre_k_present);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_k_present);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_s_present)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_S];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.gre_s_present,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_gre, gre_s_present);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_s_present);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_protocol)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.gre_protocol,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_gre, gre_protocol);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.gre_key.key)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE |
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY;
+ HWS_SET_HDR(fc, match_param, GRE_OPT_KEY,
+ misc_parameters.gre_key.key, tunnel_header.tunnel_header_2);
+ }
+
+ /* Check GENEVE related fields */
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_vni)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_VNI];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.geneve_vni,
+ tunnel_header.tunnel_header_1);
+ curr_fc->bit_mask = __mlx5_mask(header_geneve, vni);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, vni);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_opt_len)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.geneve_opt_len,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_geneve, opt_len);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, opt_len);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_protocol_type)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_PROTO];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.geneve_protocol_type,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_geneve, protocol_type);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, protocol_type);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.geneve_oam)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GENEVE_OAM];
+ HWS_CALC_HDR(curr_fc,
+ misc_parameters.geneve_oam,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_geneve, o_flag);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_geneve, o_flag);
+ }
+
+ HWS_SET_HDR(fc, match_param, SOURCE_QP,
+ misc_parameters.source_sqn, source_qp_gvmi.source_qp);
+ HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_O,
+ misc_parameters.outer_ipv6_flow_label, eth_l3_outer.flow_label);
+ HWS_SET_HDR(fc, match_param, IPV6_FLOW_LABEL_I,
+ misc_parameters.inner_ipv6_flow_label, eth_l3_inner.flow_label);
+
+ /* L2 Second VLAN */
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_O,
+ misc_parameters.outer_second_prio, eth_l2_outer.second_priority);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_PRIO_I,
+ misc_parameters.inner_second_prio, eth_l2_inner.second_priority);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_O,
+ misc_parameters.outer_second_cfi, eth_l2_outer.second_cfi);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_CFI_I,
+ misc_parameters.inner_second_cfi, eth_l2_inner.second_cfi);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_O,
+ misc_parameters.outer_second_vid, eth_l2_outer.second_vlan_id);
+ HWS_SET_HDR(fc, match_param, VLAN_SECOND_ID_I,
+ misc_parameters.inner_second_vid, eth_l2_inner.second_vlan_id);
+
+ /* L2 Second CVLAN and SVLAN */
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_cvlan_tag) ||
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.outer_second_svlan_tag)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_outer.second_vlan_qualifier);
+ curr_fc->tag_set = &hws_definer_outer_second_vlan_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ }
+
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_cvlan_tag) ||
+ HWS_GET_MATCH_PARAM(match_param, misc_parameters.inner_second_svlan_tag)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I];
+ HWS_CALC_HDR_DST(curr_fc, eth_l2_inner.second_vlan_qualifier);
+ curr_fc->tag_set = &hws_definer_inner_second_vlan_type_set;
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ }
+
+ /* VXLAN VNI */
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.vxlan_vni)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN;
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_VNI];
+ HWS_CALC_HDR(curr_fc, misc_parameters.vxlan_vni, tunnel_header.tunnel_header_1);
+ curr_fc->bit_mask = __mlx5_mask(header_vxlan, vni);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan, vni);
+ }
+
+ /* Flex protocol steering ok bits */
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.geneve_tlv_option_0_exist)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+
+ if (!caps->flex_parser_ok_bits_supp) {
+ mlx5hws_err(cd->ctx, "Unsupported flex_parser_ok_bits_supp capability\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = hws_definer_flex_parser_steering_ok_bits_handler(
+ cd, caps->flex_parser_id_geneve_tlv_option_0);
+ if (!curr_fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters.geneve_tlv_option_0_exist);
+ }
+
+ if (HWS_GET_MATCH_PARAM(match_param, misc_parameters.source_port)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_SOURCE_GVMI];
+ HWS_CALC_HDR_DST(curr_fc, source_qp_gvmi.source_gvmi);
+ curr_fc->tag_mask_set = &hws_definer_ones_set;
+ curr_fc->tag_set = HWS_IS_FLD_SET(match_param,
+ misc_parameters.source_eswitch_owner_vhca_id) ?
+ &hws_definer_set_source_gvmi_vhca_id :
+ &hws_definer_set_source_gvmi;
+ } else {
+ if (HWS_IS_FLD_SET(match_param, misc_parameters.source_eswitch_owner_vhca_id)) {
+ mlx5hws_err(cd->ctx,
+ "Unsupported source_eswitch_owner_vhca_id field usage\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int
+hws_definer_conv_misc2(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1a0, 0x8) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1b8, 0x8) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1c0, 0x40) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_2.macsec_syndrome) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_2.ipsec_syndrome)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc2 parameters set\n");
+ return -EINVAL;
+ }
+
+ HWS_SET_HDR(fc, match_param, MPLS0_O,
+ misc_parameters_2.outer_first_mpls, mpls_outer.mpls0_label);
+ HWS_SET_HDR(fc, match_param, MPLS0_I,
+ misc_parameters_2.inner_first_mpls, mpls_inner.mpls0_label);
+ HWS_SET_HDR(fc, match_param, REG_0,
+ misc_parameters_2.metadata_reg_c_0, registers.register_c_0);
+ HWS_SET_HDR(fc, match_param, REG_1,
+ misc_parameters_2.metadata_reg_c_1, registers.register_c_1);
+ HWS_SET_HDR(fc, match_param, REG_2,
+ misc_parameters_2.metadata_reg_c_2, registers.register_c_2);
+ HWS_SET_HDR(fc, match_param, REG_3,
+ misc_parameters_2.metadata_reg_c_3, registers.register_c_3);
+ HWS_SET_HDR(fc, match_param, REG_4,
+ misc_parameters_2.metadata_reg_c_4, registers.register_c_4);
+ HWS_SET_HDR(fc, match_param, REG_5,
+ misc_parameters_2.metadata_reg_c_5, registers.register_c_5);
+ HWS_SET_HDR(fc, match_param, REG_6,
+ misc_parameters_2.metadata_reg_c_6, registers.register_c_6);
+ HWS_SET_HDR(fc, match_param, REG_7,
+ misc_parameters_2.metadata_reg_c_7, registers.register_c_7);
+ HWS_SET_HDR(fc, match_param, REG_A,
+ misc_parameters_2.metadata_reg_a, metadata.general_purpose);
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_gre)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over gre parameters set\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_gre);
+ if (!curr_fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_2.outer_first_mpls_over_gre);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_2.outer_first_mpls_over_udp)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc2 first mpls over udp parameters set\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = hws_definer_flex_parser_handler(cd, caps->flex_parser_id_mpls_over_udp);
+ if (!curr_fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_2.outer_first_mpls_over_udp);
+ }
+
+ return 0;
+}
+
+static int
+hws_definer_conv_misc3(struct mlx5hws_definer_conv_data *cd, u32 *match_param)
+{
+ struct mlx5hws_cmd_query_caps *caps = cd->ctx->caps;
+ struct mlx5hws_definer_fc *fc = cd->fc;
+ struct mlx5hws_definer_fc *curr_fc;
+ bool vxlan_gpe_flex_parser_enabled;
+
+ /* Check reserved and unsupported fields */
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_80, 0x8) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_b0, 0x10) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_170, 0x10) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_3.reserved_at_1e0, 0x20)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc3 parameters set\n");
+ return -EINVAL;
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_seq_num) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.inner_tcp_ack_num)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_I;
+ HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM,
+ misc_parameters_3.inner_tcp_seq_num, tcp_icmp.tcp_seq);
+ HWS_SET_HDR(fc, match_param, TCP_ACK_NUM,
+ misc_parameters_3.inner_tcp_ack_num, tcp_icmp.tcp_ack);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_seq_num) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_tcp_ack_num)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TCP_O;
+ HWS_SET_HDR(fc, match_param, TCP_SEQ_NUM,
+ misc_parameters_3.outer_tcp_seq_num, tcp_icmp.tcp_seq);
+ HWS_SET_HDR(fc, match_param, TCP_ACK_NUM,
+ misc_parameters_3.outer_tcp_ack_num, tcp_icmp.tcp_ack);
+ }
+
+ vxlan_gpe_flex_parser_enabled = caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_vni)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
+
+ if (!vxlan_gpe_flex_parser_enabled) {
+ mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI];
+ HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_vni,
+ tunnel_header.tunnel_header_1);
+ curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, vni);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, vni);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_next_protocol)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
+
+ if (!vxlan_gpe_flex_parser_enabled) {
+ mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO];
+ HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_next_protocol,
+ tunnel_header.tunnel_header_0);
+ curr_fc->byte_off += MLX5_BYTE_OFF(header_vxlan_gpe, protocol);
+ curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, protocol);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, protocol);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.outer_vxlan_gpe_flags)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE;
+
+ if (!vxlan_gpe_flex_parser_enabled) {
+ mlx5hws_err(cd->ctx, "Unsupported VXLAN GPE flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS];
+ HWS_CALC_HDR(curr_fc, misc_parameters_3.outer_vxlan_gpe_flags,
+ tunnel_header.tunnel_header_0);
+ curr_fc->bit_mask = __mlx5_mask(header_vxlan_gpe, flags);
+ curr_fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, flags);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_header_data) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported ICMPv4 flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ HWS_SET_HDR(fc, match_param, ICMP_DW3,
+ misc_parameters_3.icmp_header_data, tcp_icmp.icmp_dw3);
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_type) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmp_code)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1];
+ HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1);
+ curr_fc->tag_set = &hws_definer_icmp_dw1_set;
+ }
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_header_data) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported ICMPv6 parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ HWS_SET_HDR(fc, match_param, ICMP_DW3,
+ misc_parameters_3.icmpv6_header_data, tcp_icmp.icmp_dw3);
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_type) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_3.icmpv6_code)) {
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_ICMP_DW1];
+ HWS_CALC_HDR_DST(curr_fc, tcp_icmp.icmp_dw1);
+ curr_fc->tag_set = &hws_definer_icmpv6_dw1_set;
+ }
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.geneve_tlv_option_0_data)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE;
+
+ curr_fc =
+ hws_definer_flex_parser_handler(cd,
+ caps->flex_parser_id_geneve_tlv_option_0);
+ if (!curr_fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_3.geneve_tlv_option_0_data);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_teid)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU TEID flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_TEID];
+ fc->tag_set = &hws_definer_generic_set;
+ fc->bit_mask = __mlx5_mask(header_gtp, teid);
+ fc->byte_off = caps->format_select_gtpu_dw_1 * DW_SIZE;
+ HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_teid);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_type)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE];
+ fc->tag_set = &hws_definer_generic_set;
+ fc->bit_mask = __mlx5_mask(header_gtp, msg_type);
+ fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_type);
+ fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
+ HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_msg_type);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_msg_flags)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ fc = &cd->fc[MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE];
+ fc->tag_set = &hws_definer_generic_set;
+ fc->bit_mask = __mlx5_mask(header_gtp, msg_flags);
+ fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_flags);
+ fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
+ HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_msg_flags);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_2)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU DW2 flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW2];
+ curr_fc->tag_set = &hws_definer_generic_set;
+ curr_fc->bit_mask = -1;
+ curr_fc->byte_off = caps->format_select_gtpu_dw_2 * DW_SIZE;
+ HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_dw_2);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_first_ext_dw_0)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU first EXT DW0 flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0];
+ curr_fc->tag_set = &hws_definer_generic_set;
+ curr_fc->bit_mask = -1;
+ curr_fc->byte_off = caps->format_select_gtpu_ext_dw_0 * DW_SIZE;
+ HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_first_ext_dw_0);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_3.gtpu_dw_0)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU;
+
+ if (!(caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)) {
+ mlx5hws_err(cd->ctx, "Unsupported GTPU DW0 flex parser\n");
+ return -EOPNOTSUPP;
+ }
+
+ curr_fc = &fc[MLX5HWS_DEFINER_FNAME_GTPU_DW0];
+ curr_fc->tag_set = &hws_definer_generic_set;
+ curr_fc->bit_mask = -1;
+ curr_fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
+ HWS_CALC_HDR_SRC(fc, misc_parameters_3.gtpu_dw_0);
+ }
+
+ return 0;
+}
+
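+/* misc_parameters_4 carries up to four programmable sample field id/value
+ * pairs. Each pair is resolved to a field-copy entry by
+ * hws_definer_misc4_fields_handler(), which is also given parser_is_used so
+ * it can keep track of flex parser usage across the four pairs.
+ */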
+static int
+hws_definer_conv_misc4(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ bool parser_is_used[HWS_NUM_OF_FLEX_PARSERS] = {};
+ struct mlx5hws_definer_fc *fc;
+ u32 id, value;
+
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_4.reserved_at_100, 0x100)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc4 parameters set\n");
+ return -EINVAL;
+ }
+
+ id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_0);
+ value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_0);
+ fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+ if (!fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_0);
+
+ id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_1);
+ value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_1);
+ fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+ if (!fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_1);
+
+ id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_2);
+ value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_2);
+ fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+ if (!fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_2);
+
+ id = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_id_3);
+ value = HWS_GET_MATCH_PARAM(match_param, misc_parameters_4.prog_sample_field_value_3);
+ fc = hws_definer_misc4_fields_handler(cd, parser_is_used, id, value);
+ if (!fc)
+ return -EINVAL;
+
+ HWS_CALC_HDR_SRC(fc, misc_parameters_4.prog_sample_field_value_3);
+
+ return 0;
+}
+
+static int
+hws_definer_conv_misc5(struct mlx5hws_definer_conv_data *cd,
+ u32 *match_param)
+{
+ struct mlx5hws_definer_fc *fc = cd->fc;
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_0) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_1) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_2) ||
+ HWS_IS_FLD_SET(match_param, misc_parameters_5.macsec_tag_3) ||
+ HWS_IS_FLD_SET_SZ(match_param, misc_parameters_5.reserved_at_100, 0x100)) {
+ mlx5hws_err(cd->ctx, "Unsupported misc5 parameters set\n");
+ return -EINVAL;
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_0)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1;
+ HWS_SET_HDR(fc, match_param, TNL_HDR_0,
+ misc_parameters_5.tunnel_header_0, tunnel_header.tunnel_header_0);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_1)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1;
+ HWS_SET_HDR(fc, match_param, TNL_HDR_1,
+ misc_parameters_5.tunnel_header_1, tunnel_header.tunnel_header_1);
+ }
+
+ if (HWS_IS_FLD_SET(match_param, misc_parameters_5.tunnel_header_2)) {
+ cd->match_flags |= MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2;
+ HWS_SET_HDR(fc, match_param, TNL_HDR_2,
+ misc_parameters_5.tunnel_header_2, tunnel_header.tunnel_header_2);
+ }
+
+ HWS_SET_HDR(fc, match_param, TNL_HDR_3,
+ misc_parameters_5.tunnel_header_3, tunnel_header.tunnel_header_3);
+
+ return 0;
+}
+
+static int hws_definer_get_fc_size(struct mlx5hws_definer_fc *fc)
+{
+ u32 fc_sz = 0;
+ int i;
+
+ /* For empty matcher, ZERO_SIZE_PTR is returned */
+ if (fc == ZERO_SIZE_PTR)
+ return 0;
+
+ for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++)
+ if (fc[i].tag_set)
+ fc_sz++;
+ return fc_sz;
+}
+
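+/* Build a dense copy of the field-copy array: only entries whose tag_set
+ * callback is populated are kept, each stamped with its fname index before
+ * being copied into the compressed array.
+ */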
+static struct mlx5hws_definer_fc *
+hws_definer_alloc_compressed_fc(struct mlx5hws_definer_fc *fc)
+{
+ struct mlx5hws_definer_fc *compressed_fc = NULL;
+ u32 definer_size = hws_definer_get_fc_size(fc);
+ u32 fc_sz = 0;
+ int i;
+
+ compressed_fc = kcalloc(definer_size, sizeof(*compressed_fc), GFP_KERNEL);
+ if (!compressed_fc)
+ return NULL;
+
+ /* For empty matcher, ZERO_SIZE_PTR is returned */
+ if (!definer_size)
+ return compressed_fc;
+
+ for (i = 0, fc_sz = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) {
+ if (!fc[i].tag_set)
+ continue;
+
+ fc[i].fname = i;
+ memcpy(&compressed_fc[fc_sz++], &fc[i], sizeof(*compressed_fc));
+ }
+
+ return compressed_fc;
+}
+
+static void
+hws_definer_set_hl(u8 *hl, struct mlx5hws_definer_fc *fc)
+{
+ int i;
+
+ /* nothing to do for empty matcher */
+ if (fc == ZERO_SIZE_PTR)
+ return;
+
+ for (i = 0; i < MLX5HWS_DEFINER_FNAME_MAX; i++) {
+ if (!fc[i].tag_set)
+ continue;
+
+ HWS_SET32(hl, -1, fc[i].byte_off, fc[i].bit_off, fc[i].bit_mask);
+ }
+}
+
+static struct mlx5hws_definer_fc *
+hws_definer_alloc_fc(struct mlx5hws_context *ctx,
+ size_t len)
+{
+ struct mlx5hws_definer_fc *fc;
+ int i;
+
+ fc = kcalloc(len, sizeof(*fc), GFP_KERNEL);
+ if (!fc)
+ return NULL;
+
+ for (i = 0; i < len; i++)
+ fc[i].ctx = ctx;
+
+ return fc;
+}
+
+static int
+hws_definer_conv_match_params_to_hl(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt,
+ u8 *hl)
+{
+ struct mlx5hws_definer_conv_data cd = {0};
+ struct mlx5hws_definer_fc *fc;
+ int ret;
+
+ fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
+ if (!fc)
+ return -ENOMEM;
+
+ cd.fc = fc;
+ cd.ctx = ctx;
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC6) {
+ mlx5hws_err(ctx, "Unsupported match_criteria_enable provided\n");
+ ret = -EOPNOTSUPP;
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
+ ret = hws_definer_conv_outer(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
+ ret = hws_definer_conv_inner(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
+ ret = hws_definer_conv_misc(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
+ ret = hws_definer_conv_misc2(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
+ ret = hws_definer_conv_misc3(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
+ ret = hws_definer_conv_misc4(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (mt->match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
+ ret = hws_definer_conv_misc5(&cd, mt->match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ /* Check that no conflicting fields are set together */
+ ret = hws_definer_check_match_flags(&cd);
+ if (ret)
+ goto err_free_fc;
+
+ /* Allocate fc array on mt */
+ mt->fc = hws_definer_alloc_compressed_fc(fc);
+ if (!mt->fc) {
+ mlx5hws_err(ctx,
+ "Convert match params: failed to set field copy to match template\n");
+ ret = -ENOMEM;
+ goto err_free_fc;
+ }
+ mt->fc_sz = hws_definer_get_fc_size(fc);
+
+ /* Fill in headers layout */
+ hws_definer_set_hl(hl, fc);
+
+ kfree(fc);
+ return 0;
+
+err_free_fc:
+ kfree(fc);
+ return ret;
+}
+
+struct mlx5hws_definer_fc *
+mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ u32 *match_param,
+ int *fc_sz)
+{
+ struct mlx5hws_definer_fc *compressed_fc = NULL;
+ struct mlx5hws_definer_conv_data cd = {0};
+ struct mlx5hws_definer_fc *fc;
+ int ret;
+
+ fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
+ if (!fc)
+ return NULL;
+
+ cd.fc = fc;
+ cd.ctx = ctx;
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
+ ret = hws_definer_conv_outer(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
+ ret = hws_definer_conv_inner(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
+ ret = hws_definer_conv_misc(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
+ ret = hws_definer_conv_misc2(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
+ ret = hws_definer_conv_misc3(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
+ ret = hws_definer_conv_misc4(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
+ ret = hws_definer_conv_misc5(&cd, match_param);
+ if (ret)
+ goto err_free_fc;
+ }
+
+ /* Allocate fc array on mt */
+ compressed_fc = hws_definer_alloc_compressed_fc(fc);
+ if (!compressed_fc) {
+ mlx5hws_err(ctx,
+ "Convert to compressed fc: failed to set field copy to match template\n");
+ goto err_free_fc;
+ }
+ *fc_sz = hws_definer_get_fc_size(fc);
+
+err_free_fc:
+ kfree(fc);
+ return compressed_fc;
+}
+
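+/* Translate a byte offset in the header layout into the byte offset of the
+ * same data inside the match tag: the DW selectors are scanned first (the
+ * tag stores selected DWs in reverse selector order), then the byte
+ * selectors, which follow the DW area and are likewise reversed.
+ */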
+static int
+hws_definer_find_byte_in_tag(struct mlx5hws_definer *definer,
+ u32 hl_byte_off,
+ u32 *tag_byte_off)
+{
+ int i, dw_to_scan;
+ u8 byte_offset;
+
+ /* Avoid accessing unused DW selectors */
+ dw_to_scan = mlx5hws_definer_is_jumbo(definer) ?
+ DW_SELECTORS : DW_SELECTORS_MATCH;
+
+ /* Add offset since each DW covers multiple BYTEs */
+ byte_offset = hl_byte_off % DW_SIZE;
+ for (i = 0; i < dw_to_scan; i++) {
+ if (definer->dw_selector[i] == hl_byte_off / DW_SIZE) {
+ *tag_byte_off = byte_offset + DW_SIZE * (DW_SELECTORS - i - 1);
+ return 0;
+ }
+ }
+
+ /* Add offset to skip DWs in definer */
+ byte_offset = DW_SIZE * DW_SELECTORS;
+ /* Iterate in reverse since the code uses bytes from 7 -> 0 */
+ for (i = BYTE_SELECTORS; i-- > 0;) {
+ if (definer->byte_selector[i] == hl_byte_off) {
+ *tag_byte_off = byte_offset + (BYTE_SELECTORS - i - 1);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
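+/* Rebase every field-copy entry from header-layout coordinates to tag
+ * coordinates: the byte offset is replaced with its position in the tag and
+ * the bit offset is shifted by the byte difference within the DW.
+ */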
+static int
+hws_definer_fc_bind(struct mlx5hws_definer *definer,
+ struct mlx5hws_definer_fc *fc,
+ u32 fc_sz)
+{
+ u32 tag_offset = 0;
+ int ret, byte_diff;
+ u32 i;
+
+ for (i = 0; i < fc_sz; i++) {
+ /* Map header layout byte offset to byte offset in tag */
+ ret = hws_definer_find_byte_in_tag(definer, fc->byte_off, &tag_offset);
+ if (ret)
+ return ret;
+
+ /* Move setter based on the location in the definer */
+ byte_diff = fc->byte_off % DW_SIZE - tag_offset % DW_SIZE;
+ fc->bit_off = fc->bit_off + byte_diff * BITS_IN_BYTE;
+
+ /* Update offset in headers layout to offset in tag */
+ fc->byte_off = tag_offset;
+ fc++;
+ }
+
+ return 0;
+}
+
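+/* Recursive backtracking search for a selector assignment covering every
+ * non-zero DW of the header layout. For each set DW it tries, in order: a
+ * limited DW selector (only for DW index < 64), a full DW selector, and
+ * finally individual byte selectors (only for byte offsets up to 255),
+ * undoing each choice if the rest of the layout cannot be covered.
+ */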
+static bool
+hws_definer_best_hl_fit_recu(struct mlx5hws_definer_sel_ctrl *ctrl,
+ u32 cur_dw,
+ u32 *data)
+{
+ u8 bytes_set;
+ int byte_idx;
+ bool ret;
+ int i;
+
+ /* Reached end, nothing left to do */
+ if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
+ return true;
+
+ /* No data set, can skip to next DW */
+ while (!*data) {
+ cur_dw++;
+ data++;
+
+ /* Reached end, nothing left to do */
+ if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
+ return true;
+ }
+
+ /* Used all DW selectors and Byte selectors, no possible solution */
+ if (ctrl->allowed_full_dw == ctrl->used_full_dw &&
+ ctrl->allowed_lim_dw == ctrl->used_lim_dw &&
+ ctrl->allowed_bytes == ctrl->used_bytes)
+ return false;
+
+ /* Try to use limited DW selectors */
+ if (ctrl->allowed_lim_dw > ctrl->used_lim_dw && cur_dw < 64) {
+ ctrl->lim_dw_selector[ctrl->used_lim_dw++] = cur_dw;
+
+ ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
+ if (ret)
+ return ret;
+
+ ctrl->lim_dw_selector[--ctrl->used_lim_dw] = 0;
+ }
+
+ /* Try to use DW selectors */
+ if (ctrl->allowed_full_dw > ctrl->used_full_dw) {
+ ctrl->full_dw_selector[ctrl->used_full_dw++] = cur_dw;
+
+ ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
+ if (ret)
+ return ret;
+
+ ctrl->full_dw_selector[--ctrl->used_full_dw] = 0;
+ }
+
+ /* No byte selector for offset bigger than 255 */
+ if (cur_dw * DW_SIZE > 255)
+ return false;
+
+ bytes_set = !!(0x000000ff & *data) +
+ !!(0x0000ff00 & *data) +
+ !!(0x00ff0000 & *data) +
+ !!(0xff000000 & *data);
+
+ /* Check if there are enough byte selectors left */
+ if (bytes_set + ctrl->used_bytes > ctrl->allowed_bytes)
+ return false;
+
+ /* Try to use Byte selectors */
+ for (i = 0; i < DW_SIZE; i++)
+ if ((0xff000000 >> (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) {
+ /* Use byte selectors high to low */
+ byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
+ ctrl->byte_selector[byte_idx] = cur_dw * DW_SIZE + i;
+ ctrl->used_bytes++;
+ }
+
+ ret = hws_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < DW_SIZE; i++)
+ if ((0xff << (i * BITS_IN_BYTE)) & be32_to_cpu((__force __be32)*data)) {
+ ctrl->used_bytes--;
+ byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
+ ctrl->byte_selector[byte_idx] = 0;
+ }
+
+ return false;
+}
+
+static void
+hws_definer_copy_sel_ctrl(struct mlx5hws_definer_sel_ctrl *ctrl,
+ struct mlx5hws_definer *definer)
+{
+ memcpy(definer->byte_selector, ctrl->byte_selector, ctrl->allowed_bytes);
+ memcpy(definer->dw_selector, ctrl->full_dw_selector, ctrl->allowed_full_dw);
+ memcpy(definer->dw_selector + ctrl->allowed_full_dw,
+ ctrl->lim_dw_selector, ctrl->allowed_lim_dw);
+}
+
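+/* Pick a definer layout for the header-layout union: a regular match definer
+ * (DW_SELECTORS_MATCH full DWs plus BYTE_SELECTORS bytes) is tried first; if
+ * that fails, a jumbo definer is tried, using all DW selectors when
+ * full_dw_jumbo_support is set or six full plus three limited selectors
+ * otherwise. A positive E2BIG is returned when neither layout fits.
+ */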
+static int
+hws_definer_find_best_match_fit(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer,
+ u8 *hl)
+{
+ struct mlx5hws_definer_sel_ctrl ctrl = {0};
+ bool found;
+
+ /* Try to create a match definer */
+ ctrl.allowed_full_dw = DW_SELECTORS_MATCH;
+ ctrl.allowed_lim_dw = 0;
+ ctrl.allowed_bytes = BYTE_SELECTORS;
+
+ found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl);
+ if (found) {
+ hws_definer_copy_sel_ctrl(&ctrl, definer);
+ definer->type = MLX5HWS_DEFINER_TYPE_MATCH;
+ return 0;
+ }
+
+ /* Try to create a full/limited jumbo definer */
+ ctrl.allowed_full_dw = ctx->caps->full_dw_jumbo_support ? DW_SELECTORS :
+ DW_SELECTORS_MATCH;
+ ctrl.allowed_lim_dw = ctx->caps->full_dw_jumbo_support ? 0 :
+ DW_SELECTORS_LIMITED;
+ ctrl.allowed_bytes = BYTE_SELECTORS;
+
+ found = hws_definer_best_hl_fit_recu(&ctrl, 0, (u32 *)hl);
+ if (found) {
+ hws_definer_copy_sel_ctrl(&ctrl, definer);
+ definer->type = MLX5HWS_DEFINER_TYPE_JUMBO;
+ return 0;
+ }
+
+ return E2BIG;
+}
+
+static void
+hws_definer_create_tag_mask(u32 *match_param,
+ struct mlx5hws_definer_fc *fc,
+ u32 fc_sz,
+ u8 *tag)
+{
+ u32 i;
+
+ for (i = 0; i < fc_sz; i++) {
+ if (fc->tag_mask_set)
+ fc->tag_mask_set(fc, match_param, tag);
+ else
+ fc->tag_set(fc, match_param, tag);
+ fc++;
+ }
+}
+
+void mlx5hws_definer_create_tag(u32 *match_param,
+ struct mlx5hws_definer_fc *fc,
+ u32 fc_sz,
+ u8 *tag)
+{
+ u32 i;
+
+ for (i = 0; i < fc_sz; i++) {
+ fc->tag_set(fc, match_param, tag);
+ fc++;
+ }
+}
+
+int mlx5hws_definer_get_id(struct mlx5hws_definer *definer)
+{
+ return definer->obj_id;
+}
+
+int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a,
+ struct mlx5hws_definer *definer_b)
+{
+ int i;
+
+ /* Future: Optimize by comparing selectors with valid mask only */
+ for (i = 0; i < BYTE_SELECTORS; i++)
+ if (definer_a->byte_selector[i] != definer_b->byte_selector[i])
+ return 1;
+
+ for (i = 0; i < DW_SELECTORS; i++)
+ if (definer_a->dw_selector[i] != definer_b->dw_selector[i])
+ return 1;
+
+ for (i = 0; i < MLX5HWS_JUMBO_TAG_SZ; i++)
+ if (definer_a->mask.jumbo[i] != definer_b->mask.jumbo[i])
+ return 1;
+
+ return 0;
+}
+
+int
+mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt,
+ struct mlx5hws_definer *match_definer)
+{
+ u8 *match_hl;
+ int ret;
+
+ /* Union header-layout (hl) is used for creating a single definer
+ * field layout used with different bitmasks for hash and match.
+ */
+ match_hl = kzalloc(MLX5_ST_SZ_BYTES(definer_hl), GFP_KERNEL);
+ if (!match_hl)
+ return -ENOMEM;
+
+ /* Convert all mt items to header layout (hl)
+ * and allocate the match field copy array (fc).
+ */
+ ret = hws_definer_conv_match_params_to_hl(ctx, mt, match_hl);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to convert items to header layout\n");
+ goto free_fc;
+ }
+
+ /* Find the match definer layout for header layout match union */
+ ret = hws_definer_find_best_match_fit(ctx, match_definer, match_hl);
+ if (ret) {
+ if (ret == E2BIG)
+ mlx5hws_dbg(ctx,
+ "Failed to create match definer from header layout - E2BIG\n");
+ else
+ mlx5hws_err(ctx,
+ "Failed to create match definer from header layout (%d)\n",
+ ret);
+ goto free_fc;
+ }
+
+ kfree(match_hl);
+ return 0;
+
+free_fc:
+ kfree(mt->fc);
+
+ kfree(match_hl);
+ return ret;
+}
+
+int mlx5hws_definer_init_cache(struct mlx5hws_definer_cache **cache)
+{
+ struct mlx5hws_definer_cache *new_cache;
+
+ new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL);
+ if (!new_cache)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&new_cache->list_head);
+ *cache = new_cache;
+
+ return 0;
+}
+
+void mlx5hws_definer_uninit_cache(struct mlx5hws_definer_cache *cache)
+{
+ kfree(cache);
+}
+
+int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer)
+{
+ struct mlx5hws_definer_cache *cache = ctx->definer_cache;
+ struct mlx5hws_cmd_definer_create_attr def_attr = {0};
+ struct mlx5hws_definer_cache_item *cached_definer;
+ u32 obj_id;
+ int ret;
+
+ /* Search definer cache for requested definer */
+ list_for_each_entry(cached_definer, &cache->list_head, list_node) {
+ if (mlx5hws_definer_compare(&cached_definer->definer, definer))
+ continue;
+
+ /* Reuse definer and set LRU (move to be first in the list) */
+ list_del_init(&cached_definer->list_node);
+ list_add(&cached_definer->list_node, &cache->list_head);
+ cached_definer->refcount++;
+ return cached_definer->definer.obj_id;
+ }
+
+ /* Allocate and create definer based on the bitmask tag */
+ def_attr.match_mask = definer->mask.jumbo;
+ def_attr.dw_selector = definer->dw_selector;
+ def_attr.byte_selector = definer->byte_selector;
+
+ ret = mlx5hws_cmd_definer_create(ctx->mdev, &def_attr, &obj_id);
+ if (ret)
+ return -1;
+
+ cached_definer = kzalloc(sizeof(*cached_definer), GFP_KERNEL);
+ if (!cached_definer)
+ goto free_definer_obj;
+
+ memcpy(&cached_definer->definer, definer, sizeof(*definer));
+ cached_definer->definer.obj_id = obj_id;
+ cached_definer->refcount = 1;
+ list_add(&cached_definer->list_node, &cache->list_head);
+
+ return obj_id;
+
+free_definer_obj:
+ mlx5hws_cmd_definer_destroy(ctx->mdev, obj_id);
+ return -1;
+}
+
+static void
+hws_definer_put_obj(struct mlx5hws_context *ctx, u32 obj_id)
+{
+ struct mlx5hws_definer_cache_item *cached_definer;
+
+ list_for_each_entry(cached_definer, &ctx->definer_cache->list_head, list_node) {
+ if (cached_definer->definer.obj_id != obj_id)
+ continue;
+
+ /* Object found */
+ if (--cached_definer->refcount)
+ return;
+
+ list_del_init(&cached_definer->list_node);
+ mlx5hws_cmd_definer_destroy(ctx->mdev, cached_definer->definer.obj_id);
+ kfree(cached_definer);
+ return;
+ }
+
+ /* Programming error, object must be part of cache */
+ pr_warn("HWS: failed putting definer object\n");
+}
+
+static struct mlx5hws_definer *
+hws_definer_alloc(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer_fc *fc,
+ int fc_sz,
+ u32 *match_param,
+ struct mlx5hws_definer *layout,
+ bool bind_fc)
+{
+ struct mlx5hws_definer *definer;
+ int ret;
+
+ definer = kmemdup(layout, sizeof(*definer), GFP_KERNEL);
+ if (!definer)
+ return NULL;
+
+ /* Align field copy array based on given layout */
+ if (bind_fc) {
+ ret = hws_definer_fc_bind(definer, fc, fc_sz);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to bind field copy to definer\n");
+ goto free_definer;
+ }
+ }
+
+ /* Create the tag mask used for definer creation */
+ hws_definer_create_tag_mask(match_param, fc, fc_sz, definer->mask.jumbo);
+
+ ret = mlx5hws_definer_get_obj(ctx, definer);
+ if (ret < 0)
+ goto free_definer;
+
+ definer->obj_id = ret;
+ return definer;
+
+free_definer:
+ kfree(definer);
+ return NULL;
+}
+
+void mlx5hws_definer_free(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer)
+{
+ hws_definer_put_obj(ctx, definer->obj_id);
+ kfree(definer);
+}
+
+static int
+hws_definer_mt_match_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt,
+ struct mlx5hws_definer *match_layout)
+{
+ /* Create mandatory match definer */
+ mt->definer = hws_definer_alloc(ctx,
+ mt->fc,
+ mt->fc_sz,
+ mt->match_param,
+ match_layout,
+ true);
+ if (!mt->definer) {
+ mlx5hws_err(ctx, "Failed to create match definer\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+hws_definer_mt_match_uninit(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt)
+{
+ mlx5hws_definer_free(ctx, mt->definer);
+}
+
+int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt)
+{
+ struct mlx5hws_definer match_layout = {0};
+ int ret;
+
+ ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
+ return ret;
+ }
+
+ /* Calculate definers needed for exact match */
+ ret = hws_definer_mt_match_init(ctx, mt, &match_layout);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to init match definers\n");
+ goto free_fc;
+ }
+
+ return 0;
+
+free_fc:
+ kfree(mt->fc);
+ return ret;
+}
+
+void mlx5hws_definer_mt_uninit(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt)
+{
+ hws_definer_mt_match_uninit(ctx, mt);
+ kfree(mt->fc);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h
new file mode 100644
index 000000000000..2f6a7df4021c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_definer.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_DEFINER_H_
+#define MLX5HWS_DEFINER_H_
+
+/* Max available selectors */
+#define DW_SELECTORS 9
+#define BYTE_SELECTORS 8
+
+/* Selectors based on match TAG */
+#define DW_SELECTORS_MATCH 6
+#define DW_SELECTORS_LIMITED 3
+
+/* Selectors based on range TAG */
+#define DW_SELECTORS_RANGE 2
+#define BYTE_SELECTORS_RANGE 8
+
+#define HWS_NUM_OF_FLEX_PARSERS 8
+
+enum mlx5hws_definer_fname {
+ MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O,
+ MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I,
+ MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O,
+ MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I,
+ MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O,
+ MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I,
+ MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O,
+ MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I,
+ MLX5HWS_DEFINER_FNAME_ETH_TYPE_O,
+ MLX5HWS_DEFINER_FNAME_ETH_TYPE_I,
+ MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O,
+ MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_CFI_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_CFI_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_ID_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_ID_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_I,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_O,
+ MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_I,
+ MLX5HWS_DEFINER_FNAME_IPV4_IHL_O,
+ MLX5HWS_DEFINER_FNAME_IPV4_IHL_I,
+ MLX5HWS_DEFINER_FNAME_IP_DSCP_O,
+ MLX5HWS_DEFINER_FNAME_IP_DSCP_I,
+ MLX5HWS_DEFINER_FNAME_IP_ECN_O,
+ MLX5HWS_DEFINER_FNAME_IP_ECN_I,
+ MLX5HWS_DEFINER_FNAME_IP_TTL_O,
+ MLX5HWS_DEFINER_FNAME_IP_TTL_I,
+ MLX5HWS_DEFINER_FNAME_IPV4_DST_O,
+ MLX5HWS_DEFINER_FNAME_IPV4_DST_I,
+ MLX5HWS_DEFINER_FNAME_IPV4_SRC_O,
+ MLX5HWS_DEFINER_FNAME_IPV4_SRC_I,
+ MLX5HWS_DEFINER_FNAME_IP_VERSION_O,
+ MLX5HWS_DEFINER_FNAME_IP_VERSION_I,
+ MLX5HWS_DEFINER_FNAME_IP_FRAG_O,
+ MLX5HWS_DEFINER_FNAME_IP_FRAG_I,
+ MLX5HWS_DEFINER_FNAME_IP_LEN_O,
+ MLX5HWS_DEFINER_FNAME_IP_LEN_I,
+ MLX5HWS_DEFINER_FNAME_IP_TOS_O,
+ MLX5HWS_DEFINER_FNAME_IP_TOS_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I,
+ MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I,
+ MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_O,
+ MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_I,
+ MLX5HWS_DEFINER_FNAME_L4_SPORT_O,
+ MLX5HWS_DEFINER_FNAME_L4_SPORT_I,
+ MLX5HWS_DEFINER_FNAME_L4_DPORT_O,
+ MLX5HWS_DEFINER_FNAME_L4_DPORT_I,
+ MLX5HWS_DEFINER_FNAME_TCP_FLAGS_I,
+ MLX5HWS_DEFINER_FNAME_TCP_FLAGS_O,
+ MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM,
+ MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM,
+ MLX5HWS_DEFINER_FNAME_GTP_TEID,
+ MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE,
+ MLX5HWS_DEFINER_FNAME_GTP_EXT_FLAG,
+ MLX5HWS_DEFINER_FNAME_GTP_NEXT_EXT_HDR,
+ MLX5HWS_DEFINER_FNAME_GTP_EXT_HDR_PDU,
+ MLX5HWS_DEFINER_FNAME_GTP_EXT_HDR_QFI,
+ MLX5HWS_DEFINER_FNAME_GTPU_DW0,
+ MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0,
+ MLX5HWS_DEFINER_FNAME_GTPU_DW2,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7,
+ MLX5HWS_DEFINER_FNAME_VPORT_REG_C_0,
+ MLX5HWS_DEFINER_FNAME_VXLAN_FLAGS,
+ MLX5HWS_DEFINER_FNAME_VXLAN_VNI,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD0,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI,
+ MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD1,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OAM,
+ MLX5HWS_DEFINER_FNAME_GENEVE_PROTO,
+ MLX5HWS_DEFINER_FNAME_GENEVE_VNI,
+ MLX5HWS_DEFINER_FNAME_SOURCE_QP,
+ MLX5HWS_DEFINER_FNAME_SOURCE_GVMI,
+ MLX5HWS_DEFINER_FNAME_REG_0,
+ MLX5HWS_DEFINER_FNAME_REG_1,
+ MLX5HWS_DEFINER_FNAME_REG_2,
+ MLX5HWS_DEFINER_FNAME_REG_3,
+ MLX5HWS_DEFINER_FNAME_REG_4,
+ MLX5HWS_DEFINER_FNAME_REG_5,
+ MLX5HWS_DEFINER_FNAME_REG_6,
+ MLX5HWS_DEFINER_FNAME_REG_7,
+ MLX5HWS_DEFINER_FNAME_REG_8,
+ MLX5HWS_DEFINER_FNAME_REG_9,
+ MLX5HWS_DEFINER_FNAME_REG_10,
+ MLX5HWS_DEFINER_FNAME_REG_11,
+ MLX5HWS_DEFINER_FNAME_REG_A,
+ MLX5HWS_DEFINER_FNAME_REG_B,
+ MLX5HWS_DEFINER_FNAME_GRE_KEY_PRESENT,
+ MLX5HWS_DEFINER_FNAME_GRE_C,
+ MLX5HWS_DEFINER_FNAME_GRE_K,
+ MLX5HWS_DEFINER_FNAME_GRE_S,
+ MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL,
+ MLX5HWS_DEFINER_FNAME_GRE_OPT_KEY,
+ MLX5HWS_DEFINER_FNAME_GRE_OPT_SEQ,
+ MLX5HWS_DEFINER_FNAME_GRE_OPT_CHECKSUM,
+ MLX5HWS_DEFINER_FNAME_INTEGRITY_O,
+ MLX5HWS_DEFINER_FNAME_INTEGRITY_I,
+ MLX5HWS_DEFINER_FNAME_ICMP_DW1,
+ MLX5HWS_DEFINER_FNAME_ICMP_DW2,
+ MLX5HWS_DEFINER_FNAME_ICMP_DW3,
+ MLX5HWS_DEFINER_FNAME_IPSEC_SPI,
+ MLX5HWS_DEFINER_FNAME_IPSEC_SEQUENCE_NUMBER,
+ MLX5HWS_DEFINER_FNAME_IPSEC_SYNDROME,
+ MLX5HWS_DEFINER_FNAME_MPLS0_O,
+ MLX5HWS_DEFINER_FNAME_MPLS1_O,
+ MLX5HWS_DEFINER_FNAME_MPLS2_O,
+ MLX5HWS_DEFINER_FNAME_MPLS3_O,
+ MLX5HWS_DEFINER_FNAME_MPLS4_O,
+ MLX5HWS_DEFINER_FNAME_MPLS0_I,
+ MLX5HWS_DEFINER_FNAME_MPLS1_I,
+ MLX5HWS_DEFINER_FNAME_MPLS2_I,
+ MLX5HWS_DEFINER_FNAME_MPLS3_I,
+ MLX5HWS_DEFINER_FNAME_MPLS4_I,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK,
+ MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS0_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS1_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS2_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS3_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS4_O,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS0_I,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS1_I,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS2_I,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS3_I,
+ MLX5HWS_DEFINER_FNAME_OKS2_MPLS4_I,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_0,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_1,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_2,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_3,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_4,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_5,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_6,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_OK_7,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_0,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_1,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_2,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_3,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_4,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_5,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_6,
+ MLX5HWS_DEFINER_FNAME_GENEVE_OPT_DW_7,
+ MLX5HWS_DEFINER_FNAME_IB_L4_OPCODE,
+ MLX5HWS_DEFINER_FNAME_IB_L4_QPN,
+ MLX5HWS_DEFINER_FNAME_IB_L4_A,
+ MLX5HWS_DEFINER_FNAME_RANDOM_NUM,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L2_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L2_I,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L3_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L3_I,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L4_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L4_I,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L4_EXT_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_L4_EXT_I,
+ MLX5HWS_DEFINER_FNAME_PTYPE_FRAG_O,
+ MLX5HWS_DEFINER_FNAME_PTYPE_FRAG_I,
+ MLX5HWS_DEFINER_FNAME_TNL_HDR_0,
+ MLX5HWS_DEFINER_FNAME_TNL_HDR_1,
+ MLX5HWS_DEFINER_FNAME_TNL_HDR_2,
+ MLX5HWS_DEFINER_FNAME_TNL_HDR_3,
+ MLX5HWS_DEFINER_FNAME_MAX,
+};
+
+enum mlx5hws_definer_match_criteria {
+ MLX5HWS_DEFINER_MATCH_CRITERIA_EMPTY = 0,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER = 1 << 0,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC = 1 << 1,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_INNER = 1 << 2,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2 = 1 << 3,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3 = 1 << 4,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4 = 1 << 5,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5 = 1 << 6,
+ MLX5HWS_DEFINER_MATCH_CRITERIA_MISC6 = 1 << 7,
+};
+
+enum mlx5hws_definer_type {
+ MLX5HWS_DEFINER_TYPE_MATCH,
+ MLX5HWS_DEFINER_TYPE_JUMBO,
+};
+
+enum mlx5hws_definer_match_flag {
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN_GPE = 1 << 0,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GENEVE = 1 << 1,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GTPU = 1 << 2,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE = 1 << 3,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_VXLAN = 1 << 4,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_0_1 = 1 << 5,
+
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_GRE_OPT_KEY = 1 << 6,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_HEADER_2 = 1 << 7,
+
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_GRE = 1 << 8,
+ MLX5HWS_DEFINER_MATCH_FLAG_TNL_MPLS_OVER_UDP = 1 << 9,
+
+ MLX5HWS_DEFINER_MATCH_FLAG_ICMPV4 = 1 << 10,
+ MLX5HWS_DEFINER_MATCH_FLAG_ICMPV6 = 1 << 11,
+ MLX5HWS_DEFINER_MATCH_FLAG_TCP_O = 1 << 12,
+ MLX5HWS_DEFINER_MATCH_FLAG_TCP_I = 1 << 13,
+};
+
+struct mlx5hws_definer_fc {
+ struct mlx5hws_context *ctx;
+ /* Source */
+ u32 s_byte_off;
+ int s_bit_off;
+ u32 s_bit_mask;
+ /* Destination */
+ u32 byte_off;
+ int bit_off;
+ u32 bit_mask;
+ enum mlx5hws_definer_fname fname;
+ void (*tag_set)(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag);
+ void (*tag_mask_set)(struct mlx5hws_definer_fc *fc,
+ void *match_param,
+ u8 *tag);
+};
+
+struct mlx5_ifc_definer_hl_eth_l2_bits {
+ u8 dmac_47_16[0x20];
+ u8 dmac_15_0[0x10];
+ u8 l3_ethertype[0x10];
+ u8 reserved_at_40[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 ip_fragmented[0x1];
+ u8 qp_type[0x2];
+ u8 encap_type[0x2];
+ u8 port_number[0x2];
+ u8 l3_type[0x2];
+ u8 l4_type_bwc[0x2];
+ u8 first_vlan_qualifier[0x2];
+ u8 first_priority[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vlan_id[0xc];
+ u8 l4_type[0x4];
+ u8 reserved_at_64[0x2];
+ u8 ipsec_layer[0x2];
+ u8 l2_type[0x2];
+ u8 force_lb[0x1];
+ u8 l2_ok[0x1];
+ u8 l3_ok[0x1];
+ u8 l4_ok[0x1];
+ u8 second_vlan_qualifier[0x2];
+ u8 second_priority[0x3];
+ u8 second_cfi[0x1];
+ u8 second_vlan_id[0xc];
+};
+
+struct mlx5_ifc_definer_hl_eth_l2_src_bits {
+ u8 smac_47_16[0x20];
+ u8 smac_15_0[0x10];
+ u8 loopback_syndrome[0x8];
+ u8 l3_type[0x2];
+ u8 l4_type_bwc[0x2];
+ u8 first_vlan_qualifier[0x2];
+ u8 ip_fragmented[0x1];
+ u8 functional_lb[0x1];
+};
+
+struct mlx5_ifc_definer_hl_ib_l2_bits {
+ u8 sx_sniffer[0x1];
+ u8 force_lb[0x1];
+ u8 functional_lb[0x1];
+ u8 reserved_at_3[0x3];
+ u8 port_number[0x2];
+ u8 sl[0x4];
+ u8 qp_type[0x2];
+ u8 lnh[0x2];
+ u8 dlid[0x10];
+ u8 vl[0x4];
+ u8 lrh_packet_length[0xc];
+ u8 slid[0x10];
+};
+
+struct mlx5_ifc_definer_hl_eth_l3_bits {
+ u8 ip_version[0x4];
+ u8 ihl[0x4];
+ union {
+ u8 tos[0x8];
+ struct {
+ u8 dscp[0x6];
+ u8 ecn[0x2];
+ };
+ };
+ u8 time_to_live_hop_limit[0x8];
+ u8 protocol_next_header[0x8];
+ u8 identification[0x10];
+ union {
+ u8 ipv4_frag[0x10];
+ struct {
+ u8 flags[0x3];
+ u8 fragment_offset[0xd];
+ };
+ };
+ u8 ipv4_total_length[0x10];
+ u8 checksum[0x10];
+ u8 reserved_at_60[0xc];
+ u8 flow_label[0x14];
+ u8 packet_length[0x10];
+ u8 ipv6_payload_length[0x10];
+};
+
+struct mlx5_ifc_definer_hl_eth_l4_bits {
+ u8 source_port[0x10];
+ u8 destination_port[0x10];
+ u8 data_offset[0x4];
+ u8 l4_ok[0x1];
+ u8 l3_ok[0x1];
+ u8 ip_fragmented[0x1];
+ u8 tcp_ns[0x1];
+ union {
+ u8 tcp_flags[0x8];
+ struct {
+ u8 tcp_cwr[0x1];
+ u8 tcp_ece[0x1];
+ u8 tcp_urg[0x1];
+ u8 tcp_ack[0x1];
+ u8 tcp_psh[0x1];
+ u8 tcp_rst[0x1];
+ u8 tcp_syn[0x1];
+ u8 tcp_fin[0x1];
+ };
+ };
+ u8 first_fragment[0x1];
+ u8 reserved_at_31[0xf];
+};
+
+struct mlx5_ifc_definer_hl_src_qp_gvmi_bits {
+ u8 loopback_syndrome[0x8];
+ u8 l3_type[0x2];
+ u8 l4_type_bwc[0x2];
+ u8 first_vlan_qualifier[0x2];
+ u8 reserved_at_e[0x1];
+ u8 functional_lb[0x1];
+ u8 source_gvmi[0x10];
+ u8 force_lb[0x1];
+ u8 ip_fragmented[0x1];
+ u8 source_is_requestor[0x1];
+ u8 reserved_at_23[0x5];
+ u8 source_qp[0x18];
+};
+
+struct mlx5_ifc_definer_hl_ib_l4_bits {
+ u8 opcode[0x8];
+ u8 qp[0x18];
+ u8 se[0x1];
+ u8 migreq[0x1];
+ u8 ackreq[0x1];
+ u8 fecn[0x1];
+ u8 becn[0x1];
+ u8 bth[0x1];
+ u8 deth[0x1];
+ u8 dcceth[0x1];
+ u8 reserved_at_28[0x2];
+ u8 pad_count[0x2];
+ u8 tver[0x4];
+ u8 p_key[0x10];
+ u8 reserved_at_40[0x8];
+ u8 deth_source_qp[0x18];
+};
+
+enum mlx5hws_integrity_ok1_bits {
+ MLX5HWS_DEFINER_OKS1_FIRST_L4_OK = 24,
+ MLX5HWS_DEFINER_OKS1_FIRST_L3_OK = 25,
+ MLX5HWS_DEFINER_OKS1_SECOND_L4_OK = 26,
+ MLX5HWS_DEFINER_OKS1_SECOND_L3_OK = 27,
+ MLX5HWS_DEFINER_OKS1_FIRST_L4_CSUM_OK = 28,
+ MLX5HWS_DEFINER_OKS1_FIRST_IPV4_CSUM_OK = 29,
+ MLX5HWS_DEFINER_OKS1_SECOND_L4_CSUM_OK = 30,
+ MLX5HWS_DEFINER_OKS1_SECOND_IPV4_CSUM_OK = 31,
+};
+
+struct mlx5_ifc_definer_hl_oks1_bits {
+ union {
+ u8 oks1_bits[0x20];
+ struct {
+ u8 second_ipv4_checksum_ok[0x1];
+ u8 second_l4_checksum_ok[0x1];
+ u8 first_ipv4_checksum_ok[0x1];
+ u8 first_l4_checksum_ok[0x1];
+ u8 second_l3_ok[0x1];
+ u8 second_l4_ok[0x1];
+ u8 first_l3_ok[0x1];
+ u8 first_l4_ok[0x1];
+ u8 flex_parser7_steering_ok[0x1];
+ u8 flex_parser6_steering_ok[0x1];
+ u8 flex_parser5_steering_ok[0x1];
+ u8 flex_parser4_steering_ok[0x1];
+ u8 flex_parser3_steering_ok[0x1];
+ u8 flex_parser2_steering_ok[0x1];
+ u8 flex_parser1_steering_ok[0x1];
+ u8 flex_parser0_steering_ok[0x1];
+ u8 second_ipv6_extension_header_vld[0x1];
+ u8 first_ipv6_extension_header_vld[0x1];
+ u8 l3_tunneling_ok[0x1];
+ u8 l2_tunneling_ok[0x1];
+ u8 second_tcp_ok[0x1];
+ u8 second_udp_ok[0x1];
+ u8 second_ipv4_ok[0x1];
+ u8 second_ipv6_ok[0x1];
+ u8 second_l2_ok[0x1];
+ u8 vxlan_ok[0x1];
+ u8 gre_ok[0x1];
+ u8 first_tcp_ok[0x1];
+ u8 first_udp_ok[0x1];
+ u8 first_ipv4_ok[0x1];
+ u8 first_ipv6_ok[0x1];
+ u8 first_l2_ok[0x1];
+ };
+ };
+};
+
+struct mlx5_ifc_definer_hl_oks2_bits {
+ u8 reserved_at_0[0xa];
+ u8 second_mpls_ok[0x1];
+ u8 second_mpls4_s_bit[0x1];
+ u8 second_mpls4_qualifier[0x1];
+ u8 second_mpls3_s_bit[0x1];
+ u8 second_mpls3_qualifier[0x1];
+ u8 second_mpls2_s_bit[0x1];
+ u8 second_mpls2_qualifier[0x1];
+ u8 second_mpls1_s_bit[0x1];
+ u8 second_mpls1_qualifier[0x1];
+ u8 second_mpls0_s_bit[0x1];
+ u8 second_mpls0_qualifier[0x1];
+ u8 first_mpls_ok[0x1];
+ u8 first_mpls4_s_bit[0x1];
+ u8 first_mpls4_qualifier[0x1];
+ u8 first_mpls3_s_bit[0x1];
+ u8 first_mpls3_qualifier[0x1];
+ u8 first_mpls2_s_bit[0x1];
+ u8 first_mpls2_qualifier[0x1];
+ u8 first_mpls1_s_bit[0x1];
+ u8 first_mpls1_qualifier[0x1];
+ u8 first_mpls0_s_bit[0x1];
+ u8 first_mpls0_qualifier[0x1];
+};
+
+struct mlx5_ifc_definer_hl_voq_bits {
+ u8 reserved_at_0[0x18];
+ u8 ecn_ok[0x1];
+ u8 congestion[0x1];
+ u8 profile[0x2];
+ u8 internal_prio[0x4];
+};
+
+struct mlx5_ifc_definer_hl_ipv4_src_dst_bits {
+ u8 source_address[0x20];
+ u8 destination_address[0x20];
+};
+
+struct mlx5_ifc_definer_hl_random_number_bits {
+ u8 random_number[0x10];
+ u8 reserved[0x10];
+};
+
+struct mlx5_ifc_definer_hl_ipv6_addr_bits {
+ u8 ipv6_address_127_96[0x20];
+ u8 ipv6_address_95_64[0x20];
+ u8 ipv6_address_63_32[0x20];
+ u8 ipv6_address_31_0[0x20];
+};
+
+struct mlx5_ifc_definer_tcp_icmp_header_bits {
+ union {
+ struct {
+ u8 icmp_dw1[0x20];
+ u8 icmp_dw2[0x20];
+ u8 icmp_dw3[0x20];
+ };
+ struct {
+ u8 tcp_seq[0x20];
+ u8 tcp_ack[0x20];
+ u8 tcp_win_urg[0x20];
+ };
+ };
+};
+
+struct mlx5_ifc_definer_hl_tunnel_header_bits {
+ u8 tunnel_header_0[0x20];
+ u8 tunnel_header_1[0x20];
+ u8 tunnel_header_2[0x20];
+ u8 tunnel_header_3[0x20];
+};
+
+struct mlx5_ifc_definer_hl_ipsec_bits {
+ u8 spi[0x20];
+ u8 sequence_number[0x20];
+ u8 reserved[0x10];
+ u8 ipsec_syndrome[0x8];
+ u8 next_header[0x8];
+};
+
+struct mlx5_ifc_definer_hl_metadata_bits {
+ u8 metadata_to_cqe[0x20];
+ u8 general_purpose[0x20];
+ u8 acomulated_hash[0x20];
+};
+
+struct mlx5_ifc_definer_hl_flex_parser_bits {
+ u8 flex_parser_7[0x20];
+ u8 flex_parser_6[0x20];
+ u8 flex_parser_5[0x20];
+ u8 flex_parser_4[0x20];
+ u8 flex_parser_3[0x20];
+ u8 flex_parser_2[0x20];
+ u8 flex_parser_1[0x20];
+ u8 flex_parser_0[0x20];
+};
+
+struct mlx5_ifc_definer_hl_registers_bits {
+ u8 register_c_10[0x20];
+ u8 register_c_11[0x20];
+ u8 register_c_8[0x20];
+ u8 register_c_9[0x20];
+ u8 register_c_6[0x20];
+ u8 register_c_7[0x20];
+ u8 register_c_4[0x20];
+ u8 register_c_5[0x20];
+ u8 register_c_2[0x20];
+ u8 register_c_3[0x20];
+ u8 register_c_0[0x20];
+ u8 register_c_1[0x20];
+};
+
+struct mlx5_ifc_definer_hl_mpls_bits {
+ u8 mpls0_label[0x20];
+ u8 mpls1_label[0x20];
+ u8 mpls2_label[0x20];
+ u8 mpls3_label[0x20];
+ u8 mpls4_label[0x20];
+};
+
+struct mlx5_ifc_definer_hl_bits {
+ struct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_outer;
+ struct mlx5_ifc_definer_hl_eth_l2_bits eth_l2_inner;
+ struct mlx5_ifc_definer_hl_eth_l2_src_bits eth_l2_src_outer;
+ struct mlx5_ifc_definer_hl_eth_l2_src_bits eth_l2_src_inner;
+ struct mlx5_ifc_definer_hl_ib_l2_bits ib_l2;
+ struct mlx5_ifc_definer_hl_eth_l3_bits eth_l3_outer;
+ struct mlx5_ifc_definer_hl_eth_l3_bits eth_l3_inner;
+ struct mlx5_ifc_definer_hl_eth_l4_bits eth_l4_outer;
+ struct mlx5_ifc_definer_hl_eth_l4_bits eth_l4_inner;
+ struct mlx5_ifc_definer_hl_src_qp_gvmi_bits source_qp_gvmi;
+ struct mlx5_ifc_definer_hl_ib_l4_bits ib_l4;
+ struct mlx5_ifc_definer_hl_oks1_bits oks1;
+ struct mlx5_ifc_definer_hl_oks2_bits oks2;
+ struct mlx5_ifc_definer_hl_voq_bits voq;
+ u8 reserved_at_480[0x380];
+ struct mlx5_ifc_definer_hl_ipv4_src_dst_bits ipv4_src_dest_outer;
+ struct mlx5_ifc_definer_hl_ipv4_src_dst_bits ipv4_src_dest_inner;
+ struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_dst_outer;
+ struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_dst_inner;
+ struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_src_outer;
+ struct mlx5_ifc_definer_hl_ipv6_addr_bits ipv6_src_inner;
+ u8 unsupported_dest_ib_l3[0x80];
+ u8 unsupported_source_ib_l3[0x80];
+ u8 unsupported_udp_misc_outer[0x20];
+ u8 unsupported_udp_misc_inner[0x20];
+ struct mlx5_ifc_definer_tcp_icmp_header_bits tcp_icmp;
+ struct mlx5_ifc_definer_hl_tunnel_header_bits tunnel_header;
+ struct mlx5_ifc_definer_hl_mpls_bits mpls_outer;
+ struct mlx5_ifc_definer_hl_mpls_bits mpls_inner;
+ u8 unsupported_config_headers_outer[0x80];
+ u8 unsupported_config_headers_inner[0x80];
+ struct mlx5_ifc_definer_hl_random_number_bits random_number;
+ struct mlx5_ifc_definer_hl_ipsec_bits ipsec;
+ struct mlx5_ifc_definer_hl_metadata_bits metadata;
+ u8 unsupported_utc_timestamp[0x40];
+ u8 unsupported_free_running_timestamp[0x40];
+ struct mlx5_ifc_definer_hl_flex_parser_bits flex_parser;
+ struct mlx5_ifc_definer_hl_registers_bits registers;
+ /* Reserved in case the header layout changes on future HW */
+ u8 unsupported_reserved[0xd40];
+};
+
+enum mlx5hws_definer_gtp {
+ MLX5HWS_DEFINER_GTP_EXT_HDR_BIT = 0x04,
+};
+
+struct mlx5_ifc_header_gtp_bits {
+ u8 version[0x3];
+ u8 proto_type[0x1];
+ u8 reserved1[0x1];
+ union {
+ u8 msg_flags[0x3];
+ struct {
+ u8 ext_hdr_flag[0x1];
+ u8 seq_num_flag[0x1];
+ u8 pdu_flag[0x1];
+ };
+ };
+ u8 msg_type[0x8];
+ u8 msg_len[0x8];
+ u8 teid[0x20];
+};
+
+struct mlx5_ifc_header_opt_gtp_bits {
+ u8 seq_num[0x10];
+ u8 pdu_num[0x8];
+ u8 next_ext_hdr_type[0x8];
+};
+
+struct mlx5_ifc_header_gtp_psc_bits {
+ u8 len[0x8];
+ u8 pdu_type[0x4];
+ u8 flags[0x4];
+ u8 qfi[0x8];
+ u8 reserved2[0x8];
+};
+
+struct mlx5_ifc_header_ipv6_vtc_bits {
+ u8 version[0x4];
+ union {
+ u8 tos[0x8];
+ struct {
+ u8 dscp[0x6];
+ u8 ecn[0x2];
+ };
+ };
+ u8 flow_label[0x14];
+};
+
+struct mlx5_ifc_header_ipv6_routing_ext_bits {
+ u8 next_hdr[0x8];
+ u8 hdr_len[0x8];
+ u8 type[0x8];
+ u8 segments_left[0x8];
+ union {
+ u8 flags[0x20];
+ struct {
+ u8 last_entry[0x8];
+ u8 flag[0x8];
+ u8 tag[0x10];
+ };
+ };
+};
+
+struct mlx5_ifc_header_vxlan_bits {
+ u8 flags[0x8];
+ u8 reserved1[0x18];
+ u8 vni[0x18];
+ u8 reserved2[0x8];
+};
+
+struct mlx5_ifc_header_vxlan_gpe_bits {
+ u8 flags[0x8];
+ u8 rsvd0[0x10];
+ u8 protocol[0x8];
+ u8 vni[0x18];
+ u8 rsvd1[0x8];
+};
+
+struct mlx5_ifc_header_gre_bits {
+ union {
+ u8 c_rsvd0_ver[0x10];
+ struct {
+ u8 gre_c_present[0x1];
+ u8 reserved_at_1[0x1];
+ u8 gre_k_present[0x1];
+ u8 gre_s_present[0x1];
+ u8 reserved_at_4[0x9];
+ u8 version[0x3];
+ };
+ };
+ u8 gre_protocol[0x10];
+ u8 checksum[0x10];
+ u8 reserved_at_30[0x10];
+};
+
+struct mlx5_ifc_header_geneve_bits {
+ union {
+ u8 ver_opt_len_o_c_rsvd[0x10];
+ struct {
+ u8 version[0x2];
+ u8 opt_len[0x6];
+ u8 o_flag[0x1];
+ u8 c_flag[0x1];
+ u8 reserved_at_a[0x6];
+ };
+ };
+ u8 protocol_type[0x10];
+ u8 vni[0x18];
+ u8 reserved_at_38[0x8];
+};
+
+struct mlx5_ifc_header_geneve_opt_bits {
+ u8 class[0x10];
+ u8 type[0x8];
+ u8 reserved[0x3];
+ u8 len[0x5];
+};
+
+struct mlx5_ifc_header_icmp_bits {
+ union {
+ u8 icmp_dw1[0x20];
+ struct {
+ u8 type[0x8];
+ u8 code[0x8];
+ u8 cksum[0x10];
+ };
+ };
+ union {
+ u8 icmp_dw2[0x20];
+ struct {
+ u8 ident[0x10];
+ u8 seq_nb[0x10];
+ };
+ };
+};
+
+struct mlx5hws_definer {
+ enum mlx5hws_definer_type type;
+ u8 dw_selector[DW_SELECTORS];
+ u8 byte_selector[BYTE_SELECTORS];
+ struct mlx5hws_rule_match_tag mask;
+ u32 obj_id;
+};
+
+struct mlx5hws_definer_cache {
+ struct list_head list_head;
+};
+
+struct mlx5hws_definer_cache_item {
+ struct mlx5hws_definer definer;
+ u32 refcount;
+ struct list_head list_node;
+};
+
+static inline bool
+mlx5hws_definer_is_jumbo(struct mlx5hws_definer *definer)
+{
+ return (definer->type == MLX5HWS_DEFINER_TYPE_JUMBO);
+}
+
+void mlx5hws_definer_create_tag(u32 *match_param,
+ struct mlx5hws_definer_fc *fc,
+ u32 fc_sz,
+ u8 *tag);
+
+int mlx5hws_definer_get_id(struct mlx5hws_definer *definer);
+
+int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt);
+
+void mlx5hws_definer_mt_uninit(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt);
+
+int mlx5hws_definer_init_cache(struct mlx5hws_definer_cache **cache);
+
+void mlx5hws_definer_uninit_cache(struct mlx5hws_definer_cache *cache);
+
+int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a,
+ struct mlx5hws_definer *definer_b);
+
+int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer);
+
+void mlx5hws_definer_free(struct mlx5hws_context *ctx,
+ struct mlx5hws_definer *definer);
+
+int mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
+ struct mlx5hws_match_template *mt,
+ struct mlx5hws_definer *match_definer);
+
+struct mlx5hws_definer_fc *
+mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ u32 *match_param,
+ int *fc_sz);
+
+#endif /* MLX5HWS_DEFINER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h
new file mode 100644
index 000000000000..5643be1cd5bf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_internal.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_INTERNAL_H_
+#define MLX5HWS_INTERNAL_H_
+
+#include <linux/mlx5/transobj.h>
+#include <linux/mlx5/vport.h>
+#include "fs_core.h"
+#include "wq.h"
+#include "lib/mlx5.h"
+
+#include "mlx5hws_prm.h"
+#include "mlx5hws.h"
+#include "mlx5hws_pool.h"
+#include "mlx5hws_vport.h"
+#include "mlx5hws_context.h"
+#include "mlx5hws_table.h"
+#include "mlx5hws_send.h"
+#include "mlx5hws_rule.h"
+#include "mlx5hws_cmd.h"
+#include "mlx5hws_action.h"
+#include "mlx5hws_definer.h"
+#include "mlx5hws_matcher.h"
+#include "mlx5hws_debug.h"
+#include "mlx5hws_pat_arg.h"
+#include "mlx5hws_bwc.h"
+#include "mlx5hws_bwc_complex.h"
+
+#define W_SIZE 2
+#define DW_SIZE 4
+#define BITS_IN_BYTE 8
+#define BITS_IN_DW (BITS_IN_BYTE * DW_SIZE)
+
+#define IS_BIT_SET(_value, _bit) ((_value) & (1ULL << (_bit)))
+
+#define mlx5hws_err(ctx, arg...) mlx5_core_err((ctx)->mdev, ##arg)
+#define mlx5hws_info(ctx, arg...) mlx5_core_info((ctx)->mdev, ##arg)
+#define mlx5hws_dbg(ctx, arg...) mlx5_core_dbg((ctx)->mdev, ##arg)
+
+#define MLX5HWS_TABLE_TYPE_BASE 2
+#define MLX5HWS_ACTION_STE_IDX_ANY 0
+
+static inline bool is_mem_zero(const u8 *mem, size_t size)
+{
+ if (unlikely(!size)) {
+ pr_warn("HWS: invalid buffer of size 0 in %s\n", __func__);
+ return true;
+ }
+
+ return (*mem == 0) && memcmp(mem, mem + 1, size - 1) == 0;
+}
+
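+/* Round @val up to a multiple of @align; the bitmask arithmetic below
+ * assumes @align is a power of two.
+ */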
+static inline unsigned long align(unsigned long val, unsigned long align)
+{
+ return (val + align - 1) & ~(align - 1);
+}
+
+#endif /* MLX5HWS_INTERNAL_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c
new file mode 100644
index 000000000000..33d2b31e4b46
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.c
@@ -0,0 +1,1216 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+enum mlx5hws_matcher_rtc_type {
+ HWS_MATCHER_RTC_TYPE_MATCH,
+ HWS_MATCHER_RTC_TYPE_STE_ARRAY,
+ HWS_MATCHER_RTC_TYPE_MAX,
+};
+
+static const char * const mlx5hws_matcher_rtc_type_str[] = {
+ [HWS_MATCHER_RTC_TYPE_MATCH] = "MATCH",
+ [HWS_MATCHER_RTC_TYPE_STE_ARRAY] = "STE_ARRAY",
+ [HWS_MATCHER_RTC_TYPE_MAX] = "UNKNOWN",
+};
+
+static const char *hws_matcher_rtc_type_to_str(enum mlx5hws_matcher_rtc_type rtc_type)
+{
+ if (rtc_type > HWS_MATCHER_RTC_TYPE_MAX)
+ rtc_type = HWS_MATCHER_RTC_TYPE_MAX;
+ return mlx5hws_matcher_rtc_type_str[rtc_type];
+}
+
+static bool hws_matcher_requires_col_tbl(u8 log_num_of_rules)
+{
+ /* Collision table concatenation is done only for large rule tables */
+ return log_num_of_rules > MLX5HWS_MATCHER_ASSURED_RULES_TH;
+}
+
+static u8 hws_matcher_rules_to_tbl_depth(u8 log_num_of_rules)
+{
+ if (hws_matcher_requires_col_tbl(log_num_of_rules))
+ return MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH;
+
+ /* For small rule tables we use a single deep table to assure insertion */
+ return min(log_num_of_rules, MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH);
+}
+
+static void hws_matcher_destroy_end_ft(struct mlx5hws_matcher *matcher)
+{
+ mlx5hws_table_destroy_default_ft(matcher->tbl, matcher->end_ft_id);
+}
+
+static int hws_matcher_create_end_ft(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ int ret;
+
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &matcher->end_ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to create matcher end flow table\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int hws_matcher_connect(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ struct mlx5hws_matcher *prev = NULL;
+ struct mlx5hws_matcher *next = NULL;
+ struct mlx5hws_matcher *tmp_matcher;
+ int ret;
+
+ /* Find location in matcher list */
+ if (list_empty(&tbl->matchers_list)) {
+ list_add(&matcher->list_node, &tbl->matchers_list);
+ goto connect;
+ }
+
+ list_for_each_entry(tmp_matcher, &tbl->matchers_list, list_node) {
+ if (tmp_matcher->attr.priority > matcher->attr.priority) {
+ next = tmp_matcher;
+ break;
+ }
+ prev = tmp_matcher;
+ }
+
+ if (next)
+ /* insert before next */
+ list_add_tail(&matcher->list_node, &next->list_node);
+ else
+ /* insert after prev */
+ list_add(&matcher->list_node, &prev->list_node);
+
+connect:
+ if (next) {
+ /* Connect to next RTC */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ matcher->end_ft_id,
+ tbl->fw_ft_type,
+ next->match_ste.rtc_0_id,
+ next->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to connect new matcher to next RTC\n");
+ goto remove_from_list;
+ }
+ } else {
+ /* Connect last matcher to next miss_tbl if it exists */
+ ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to connect new matcher to miss_tbl\n");
+ goto remove_from_list;
+ }
+ }
+
+ /* Connect to previous FT */
+ ret = mlx5hws_table_ft_set_next_rtc(ctx,
+ prev ? prev->end_ft_id : tbl->ft_id,
+ tbl->fw_ft_type,
+ matcher->match_ste.rtc_0_id,
+ matcher->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to connect new matcher to previous FT\n");
+ goto remove_from_list;
+ }
+
+ /* Reset prev matcher FT default miss (drop refcount) */
+ ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev ? prev->end_ft_id : tbl->ft_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to reset matcher ft default miss\n");
+ goto remove_from_list;
+ }
+
+ if (!prev) {
+ /* First matcher in the table: update the miss of connected tables to point at it */
+ ret = mlx5hws_table_update_connected_miss_tables(tbl);
+ if (ret) {
+ mlx5hws_err(ctx, "Fatal error, failed to update connected miss table\n");
+ goto remove_from_list;
+ }
+ }
+
+ return 0;
+
+remove_from_list:
+ list_del_init(&matcher->list_node);
+ return ret;
+}
+
+static int hws_matcher_disconnect(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher *next = NULL, *prev = NULL;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ u32 prev_ft_id = tbl->ft_id;
+ int ret;
+
+ if (!list_is_first(&matcher->list_node, &tbl->matchers_list)) {
+ prev = list_prev_entry(matcher, list_node);
+ prev_ft_id = prev->end_ft_id;
+ }
+
+ if (!list_is_last(&matcher->list_node, &tbl->matchers_list))
+ next = list_next_entry(matcher, list_node);
+
+ list_del_init(&matcher->list_node);
+
+ if (next) {
+ /* Connect previous end FT to next RTC */
+ ret = mlx5hws_table_ft_set_next_rtc(tbl->ctx,
+ prev_ft_id,
+ tbl->fw_ft_type,
+ next->match_ste.rtc_0_id,
+ next->match_ste.rtc_1_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to disconnect matcher\n");
+ goto matcher_reconnect;
+ }
+ } else {
+ ret = mlx5hws_table_connect_to_miss_table(tbl, tbl->default_miss.miss_tbl);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to disconnect last matcher\n");
+ goto matcher_reconnect;
+ }
+ }
+
+ /* Removing the first matcher, update connected miss tables if they exist */
+ if (prev_ft_id == tbl->ft_id) {
+ ret = mlx5hws_table_update_connected_miss_tables(tbl);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Fatal error, failed to update connected miss table\n");
+ goto matcher_reconnect;
+ }
+ }
+
+ ret = mlx5hws_table_ft_set_default_next_ft(tbl, prev_ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Fatal error, failed to restore matcher ft default miss\n");
+ goto matcher_reconnect;
+ }
+
+ return 0;
+
+matcher_reconnect:
+ if (list_empty(&tbl->matchers_list) || !prev)
+ list_add(&matcher->list_node, &tbl->matchers_list);
+ else
+ /* insert after prev matcher */
+ list_add(&matcher->list_node, &prev->list_node);
+
+ return ret;
+}
+
+static void hws_matcher_set_rtc_attr_sz(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
+ enum mlx5hws_matcher_rtc_type rtc_type,
+ bool is_mirror)
+{
+ struct mlx5hws_pool_chunk *ste = &matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].ste;
+ enum mlx5hws_matcher_flow_src flow_src = matcher->attr.optimize_flow_src;
+ bool is_match_rtc = rtc_type == HWS_MATCHER_RTC_TYPE_MATCH;
+
+ if ((flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT && !is_mirror) ||
+ (flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE && is_mirror)) {
+ /* Optimize FDB RTC */
+ rtc_attr->log_size = 0;
+ rtc_attr->log_depth = 0;
+ } else {
+ /* Keep original values */
+ rtc_attr->log_size = is_match_rtc ? matcher->attr.table.sz_row_log : ste->order;
+ rtc_attr->log_depth = is_match_rtc ? matcher->attr.table.sz_col_log : 0;
+ }
+}
+
+static int hws_matcher_create_rtc(struct mlx5hws_matcher *matcher,
+ enum mlx5hws_matcher_rtc_type rtc_type,
+ u8 action_ste_selector)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+ struct mlx5hws_cmd_rtc_create_attr rtc_attr = {0};
+ struct mlx5hws_match_template *mt = matcher->mt;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ struct mlx5hws_action_default_stc *default_stc;
+ struct mlx5hws_matcher_action_ste *action_ste;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_pool *ste_pool, *stc_pool;
+ struct mlx5hws_pool_chunk *ste;
+ u32 *rtc_0_id, *rtc_1_id;
+ u32 obj_id;
+ int ret;
+
+ switch (rtc_type) {
+ case HWS_MATCHER_RTC_TYPE_MATCH:
+ rtc_0_id = &matcher->match_ste.rtc_0_id;
+ rtc_1_id = &matcher->match_ste.rtc_1_id;
+ ste_pool = matcher->match_ste.pool;
+ ste = &matcher->match_ste.ste;
+ ste->order = attr->table.sz_col_log + attr->table.sz_row_log;
+
+ rtc_attr.log_size = attr->table.sz_row_log;
+ rtc_attr.log_depth = attr->table.sz_col_log;
+ rtc_attr.is_frst_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+ rtc_attr.is_scnd_range = 0;
+ rtc_attr.miss_ft_id = matcher->end_ft_id;
+
+ if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH) {
+ /* The usual Hash Table */
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH;
+
+ /* The first mt is used since all share the same definer */
+ rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
+ } else if (attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX) {
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
+ rtc_attr.num_hash_definer = 1;
+
+ if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+ /* Hash Split Table */
+ rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH;
+ rtc_attr.match_definer_0 = mlx5hws_definer_get_id(mt->definer);
+ } else if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
+ /* Linear Lookup Table */
+ rtc_attr.access_index_mode = MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR;
+ rtc_attr.match_definer_0 = ctx->caps->linear_match_definer;
+ }
+ }
+
+ /* Match pool requires implicit allocation */
+ ret = mlx5hws_pool_chunk_alloc(ste_pool, ste);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate STE for %s RTC",
+ hws_matcher_rtc_type_to_str(rtc_type));
+ return ret;
+ }
+ break;
+
+ case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
+ action_ste = &matcher->action_ste[action_ste_selector];
+
+ rtc_0_id = &action_ste->rtc_0_id;
+ rtc_1_id = &action_ste->rtc_1_id;
+ ste_pool = action_ste->pool;
+ ste = &action_ste->ste;
+ ste->order = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
+ attr->table.sz_row_log;
+ rtc_attr.log_size = ste->order;
+ rtc_attr.log_depth = 0;
+ rtc_attr.update_index_mode = MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET;
+ /* The action STEs use the default always hit definer */
+ rtc_attr.match_definer_0 = ctx->caps->trivial_match_definer;
+ rtc_attr.is_frst_jumbo = false;
+ rtc_attr.miss_ft_id = 0;
+ break;
+
+ default:
+ mlx5hws_err(ctx, "HWS Invalid RTC type\n");
+ return -EINVAL;
+ }
+
+ obj_id = mlx5hws_pool_chunk_get_base_id(ste_pool, ste);
+
+ rtc_attr.pd = ctx->pd_num;
+ rtc_attr.ste_base = obj_id;
+ rtc_attr.ste_offset = ste->offset;
+ rtc_attr.reparse_mode = mlx5hws_context_get_reparse_mode(ctx);
+ rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, false);
+ hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, false);
+
+ /* STC is a single resource (obj_id), use any STC for the ID */
+ stc_pool = ctx->stc_pool[tbl->type];
+ default_stc = ctx->common_res[tbl->type].default_stc;
+ obj_id = mlx5hws_pool_chunk_get_base_id(stc_pool, &default_stc->default_hit);
+ rtc_attr.stc_base = obj_id;
+
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_0_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create matcher RTC of type %s",
+ hws_matcher_rtc_type_to_str(rtc_type));
+ goto free_ste;
+ }
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ obj_id = mlx5hws_pool_chunk_get_base_mirror_id(ste_pool, ste);
+ rtc_attr.ste_base = obj_id;
+ rtc_attr.table_type = mlx5hws_table_get_res_fw_ft_type(tbl->type, true);
+
+ obj_id = mlx5hws_pool_chunk_get_base_mirror_id(stc_pool, &default_stc->default_hit);
+ rtc_attr.stc_base = obj_id;
+ hws_matcher_set_rtc_attr_sz(matcher, &rtc_attr, rtc_type, true);
+
+ ret = mlx5hws_cmd_rtc_create(ctx->mdev, &rtc_attr, rtc_1_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create peer matcher RTC of type %s",
+ hws_matcher_rtc_type_to_str(rtc_type));
+ goto destroy_rtc_0;
+ }
+ }
+
+ return 0;
+
+destroy_rtc_0:
+ mlx5hws_cmd_rtc_destroy(ctx->mdev, *rtc_0_id);
+free_ste:
+ if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH)
+ mlx5hws_pool_chunk_free(ste_pool, ste);
+ return ret;
+}
+
+static void hws_matcher_destroy_rtc(struct mlx5hws_matcher *matcher,
+ enum mlx5hws_matcher_rtc_type rtc_type,
+ u8 action_ste_selector)
+{
+ struct mlx5hws_matcher_action_ste *action_ste;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_pool_chunk *ste;
+ struct mlx5hws_pool *ste_pool;
+ u32 rtc_0_id, rtc_1_id;
+
+ switch (rtc_type) {
+ case HWS_MATCHER_RTC_TYPE_MATCH:
+ rtc_0_id = matcher->match_ste.rtc_0_id;
+ rtc_1_id = matcher->match_ste.rtc_1_id;
+ ste_pool = matcher->match_ste.pool;
+ ste = &matcher->match_ste.ste;
+ break;
+ case HWS_MATCHER_RTC_TYPE_STE_ARRAY:
+ action_ste = &matcher->action_ste[action_ste_selector];
+ rtc_0_id = action_ste->rtc_0_id;
+ rtc_1_id = action_ste->rtc_1_id;
+ ste_pool = action_ste->pool;
+ ste = &action_ste->ste;
+ break;
+ default:
+ return;
+ }
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_1_id);
+
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev, rtc_0_id);
+ if (rtc_type == HWS_MATCHER_RTC_TYPE_MATCH)
+ mlx5hws_pool_chunk_free(ste_pool, ste);
+}
+
+static int
+hws_matcher_check_attr_sz(struct mlx5hws_cmd_query_caps *caps,
+ struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+
+ if (attr->table.sz_col_log > caps->rtc_log_depth_max) {
+ mlx5hws_err(matcher->tbl->ctx, "Matcher depth exceeds limit %d\n",
+ caps->rtc_log_depth_max);
+ return -EOPNOTSUPP;
+ }
+
+ if (attr->table.sz_col_log + attr->table.sz_row_log > caps->ste_alloc_log_max) {
+ mlx5hws_err(matcher->tbl->ctx, "Total matcher size exceeds limit %d\n",
+ caps->ste_alloc_log_max);
+ return -EOPNOTSUPP;
+ }
+
+ if (attr->table.sz_col_log + attr->table.sz_row_log < caps->ste_alloc_log_gran) {
+ mlx5hws_err(matcher->tbl->ctx, "Total matcher size below minimum %d\n",
+ caps->ste_alloc_log_gran);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void hws_matcher_set_pool_attr(struct mlx5hws_pool_attr *attr,
+ struct mlx5hws_matcher *matcher)
+{
+ switch (matcher->attr.optimize_flow_src) {
+ case MLX5HWS_MATCHER_FLOW_SRC_VPORT:
+ attr->opt_type = MLX5HWS_POOL_OPTIMIZE_ORIG;
+ break;
+ case MLX5HWS_MATCHER_FLOW_SRC_WIRE:
+ attr->opt_type = MLX5HWS_POOL_OPTIMIZE_MIRROR;
+ break;
+ default:
+ break;
+ }
+}
+
+static int hws_matcher_check_and_process_at(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_action_template *at)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ bool valid;
+ int ret;
+
+ valid = mlx5hws_action_check_combo(ctx, at->action_type_arr, matcher->tbl->type);
+ if (!valid) {
+ mlx5hws_err(ctx, "Invalid combination in action template\n");
+ return -EINVAL;
+ }
+
+ /* Process action template to setters */
+ ret = mlx5hws_action_template_process(at);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to process action template\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hws_matcher_resize_init(struct mlx5hws_matcher *src_matcher)
+{
+ struct mlx5hws_matcher_resize_data *resize_data;
+
+ resize_data = kzalloc(sizeof(*resize_data), GFP_KERNEL);
+ if (!resize_data)
+ return -ENOMEM;
+
+ resize_data->max_stes = src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
+
+ resize_data->action_ste[0].stc = src_matcher->action_ste[0].stc;
+ resize_data->action_ste[0].rtc_0_id = src_matcher->action_ste[0].rtc_0_id;
+ resize_data->action_ste[0].rtc_1_id = src_matcher->action_ste[0].rtc_1_id;
+ resize_data->action_ste[0].pool = src_matcher->action_ste[0].max_stes ?
+ src_matcher->action_ste[0].pool :
+ NULL;
+ resize_data->action_ste[1].stc = src_matcher->action_ste[1].stc;
+ resize_data->action_ste[1].rtc_0_id = src_matcher->action_ste[1].rtc_0_id;
+ resize_data->action_ste[1].rtc_1_id = src_matcher->action_ste[1].rtc_1_id;
+ resize_data->action_ste[1].pool = src_matcher->action_ste[1].max_stes ?
+ src_matcher->action_ste[1].pool :
+ NULL;
+
+ /* Place the new resized matcher on the dst matcher's list */
+ list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
+
+ /* Move all the previous resized matchers to the dst matcher's list */
+ while (!list_empty(&src_matcher->resize_data)) {
+ resize_data = list_first_entry(&src_matcher->resize_data,
+ struct mlx5hws_matcher_resize_data,
+ list_node);
+ list_del_init(&resize_data->list_node);
+ list_add(&resize_data->list_node, &src_matcher->resize_dst->resize_data);
+ }
+
+ return 0;
+}
+
+static void hws_matcher_resize_uninit(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_resize_data *resize_data;
+
+ if (!mlx5hws_matcher_is_resizable(matcher))
+ return;
+
+ while (!list_empty(&matcher->resize_data)) {
+ resize_data = list_first_entry(&matcher->resize_data,
+ struct mlx5hws_matcher_resize_data,
+ list_node);
+ list_del_init(&resize_data->list_node);
+
+ if (resize_data->max_stes) {
+ mlx5hws_action_free_single_stc(matcher->tbl->ctx,
+ matcher->tbl->type,
+ &resize_data->action_ste[1].stc);
+ mlx5hws_action_free_single_stc(matcher->tbl->ctx,
+ matcher->tbl->type,
+ &resize_data->action_ste[0].stc);
+
+ if (matcher->tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
+ resize_data->action_ste[1].rtc_1_id);
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
+ resize_data->action_ste[0].rtc_1_id);
+ }
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
+ resize_data->action_ste[1].rtc_0_id);
+ mlx5hws_cmd_rtc_destroy(matcher->tbl->ctx->mdev,
+ resize_data->action_ste[0].rtc_0_id);
+ if (resize_data->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].pool) {
+ mlx5hws_pool_destroy(resize_data->action_ste[1].pool);
+ mlx5hws_pool_destroy(resize_data->action_ste[0].pool);
+ }
+ }
+
+ kfree(resize_data);
+ }
+}
+
+static int
+hws_matcher_bind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
+{
+ struct mlx5hws_cmd_stc_modify_attr stc_attr = {0};
+ struct mlx5hws_matcher_action_ste *action_ste;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_pool_attr pool_attr = {0};
+ struct mlx5hws_context *ctx = tbl->ctx;
+ int ret;
+
+ action_ste = &matcher->action_ste[action_ste_selector];
+
+ /* Allocate action STE mempool */
+ pool_attr.table_type = tbl->type;
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
+ pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL;
+ pool_attr.alloc_log_sz = ilog2(roundup_pow_of_two(action_ste->max_stes)) +
+ matcher->attr.table.sz_row_log;
+ hws_matcher_set_pool_attr(&pool_attr, matcher);
+ action_ste->pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!action_ste->pool) {
+ mlx5hws_err(ctx, "Failed to create action ste pool\n");
+ return -EINVAL;
+ }
+
+ /* Allocate action RTC */
+ ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create action RTC\n");
+ goto free_ste_pool;
+ }
+
+ /* Allocate STC for jumps to STE */
+ stc_attr.action_offset = MLX5HWS_ACTION_OFFSET_HIT;
+ stc_attr.action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE;
+ stc_attr.reparse_mode = MLX5_IFC_STC_REPARSE_IGNORE;
+ stc_attr.ste_table.ste = action_ste->ste;
+ stc_attr.ste_table.ste_pool = action_ste->pool;
+ stc_attr.ste_table.match_definer_id = ctx->caps->trivial_match_definer;
+
+ ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl->type,
+ &action_ste->stc);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create action jump to table STC\n");
+ goto free_rtc;
+ }
+
+ return 0;
+
+free_rtc:
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+free_ste_pool:
+ mlx5hws_pool_destroy(action_ste->pool);
+ return ret;
+}
+
+static void hws_matcher_unbind_at_idx(struct mlx5hws_matcher *matcher, u8 action_ste_selector)
+{
+ struct mlx5hws_matcher_action_ste *action_ste;
+ struct mlx5hws_table *tbl = matcher->tbl;
+
+ action_ste = &matcher->action_ste[action_ste_selector];
+
+ if (!action_ste->max_stes ||
+ matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION ||
+ mlx5hws_matcher_is_in_resize(matcher))
+ return;
+
+ mlx5hws_action_free_single_stc(tbl->ctx, tbl->type, &action_ste->stc);
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_STE_ARRAY, action_ste_selector);
+ mlx5hws_pool_destroy(action_ste->pool);
+}
+
+static int hws_matcher_bind_at(struct mlx5hws_matcher *matcher)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ u32 required_stes;
+ u8 max_stes = 0;
+ int i, ret;
+
+ if (matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)
+ return 0;
+
+ for (i = 0; i < matcher->num_of_at; i++) {
+ struct mlx5hws_action_template *at = &matcher->at[i];
+
+ ret = hws_matcher_check_and_process_at(matcher, at);
+ if (ret) {
+ mlx5hws_err(ctx, "Invalid action template %d\n", i);
+ return ret;
+ }
+
+ required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
+ max_stes = max(max_stes, required_stes);
+
+ /* Future: Optimize reparse */
+ }
+
+ /* There are no additional STEs required for matcher */
+ if (!max_stes)
+ return 0;
+
+ matcher->action_ste[0].max_stes = max_stes;
+ matcher->action_ste[1].max_stes = max_stes;
+
+ ret = hws_matcher_bind_at_idx(matcher, 0);
+ if (ret)
+ return ret;
+
+ ret = hws_matcher_bind_at_idx(matcher, 1);
+ if (ret)
+ goto free_at_0;
+
+ return 0;
+
+free_at_0:
+ hws_matcher_unbind_at_idx(matcher, 0);
+ return ret;
+}
+
+static void hws_matcher_unbind_at(struct mlx5hws_matcher *matcher)
+{
+ hws_matcher_unbind_at_idx(matcher, 1);
+ hws_matcher_unbind_at_idx(matcher, 0);
+}
+
+static int hws_matcher_bind_mt(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ struct mlx5hws_pool_attr pool_attr = {0};
+ int ret;
+
+ /* Calculate match, range and hash definers */
+ if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION)) {
+ ret = mlx5hws_definer_mt_init(ctx, matcher->mt);
+ if (ret) {
+ if (ret == -E2BIG)
+ mlx5hws_err(ctx, "Failed to set matcher templates with match definers\n");
+ return ret;
+ }
+ }
+
+ /* Create an STE pool per matcher */
+ pool_attr.table_type = matcher->tbl->type;
+ pool_attr.pool_type = MLX5HWS_POOL_TYPE_STE;
+ pool_attr.flags = MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL;
+ pool_attr.alloc_log_sz = matcher->attr.table.sz_col_log +
+ matcher->attr.table.sz_row_log;
+ hws_matcher_set_pool_attr(&pool_attr, matcher);
+
+ matcher->match_ste.pool = mlx5hws_pool_create(ctx, &pool_attr);
+ if (!matcher->match_ste.pool) {
+ mlx5hws_err(ctx, "Failed to allocate matcher STE pool\n");
+ ret = -EOPNOTSUPP;
+ goto uninit_match_definer;
+ }
+
+ return 0;
+
+uninit_match_definer:
+ if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
+ mlx5hws_definer_mt_uninit(ctx, matcher->mt);
+ return ret;
+}
+
+static void hws_matcher_unbind_mt(struct mlx5hws_matcher *matcher)
+{
+ mlx5hws_pool_destroy(matcher->match_ste.pool);
+ if (!(matcher->flags & MLX5HWS_MATCHER_FLAGS_COLLISION))
+ mlx5hws_definer_mt_uninit(matcher->tbl->ctx, matcher->mt);
+}
+
+static int
+hws_matcher_validate_insert_mode(struct mlx5hws_cmd_query_caps *caps,
+ struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+
+ switch (attr->insert_mode) {
+ case MLX5HWS_MATCHER_INSERT_BY_HASH:
+ if (matcher->attr.distribute_mode != MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+ mlx5hws_err(ctx, "Invalid matcher distribute mode\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ case MLX5HWS_MATCHER_INSERT_BY_INDEX:
+ if (attr->table.sz_col_log) {
+ mlx5hws_err(ctx, "Matcher with INSERT_BY_INDEX supports only Nx1 table size\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_HASH) {
+ /* Hash Split Table */
+ if (!caps->rtc_hash_split_table) {
+ mlx5hws_err(ctx, "FW doesn't support insert by index and hash distribute\n");
+ return -EOPNOTSUPP;
+ }
+ } else if (attr->distribute_mode == MLX5HWS_MATCHER_DISTRIBUTE_BY_LINEAR) {
+ /* Linear Lookup Table */
+ if (!caps->rtc_linear_lookup_table ||
+ !IS_BIT_SET(caps->access_index_mode,
+ MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR)) {
+ mlx5hws_err(ctx, "FW doesn't support insert by index and linear distribute\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (attr->table.sz_row_log > MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX) {
+ mlx5hws_err(ctx, "Matcher with linear distribute: rows exceed limit %d",
+ MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX);
+ return -EOPNOTSUPP;
+ }
+ } else {
+ mlx5hws_err(ctx, "Matcher has unsupported distribute mode\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ mlx5hws_err(ctx, "Matcher has unsupported insert mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+hws_matcher_process_attr(struct mlx5hws_cmd_query_caps *caps,
+ struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_matcher_attr *attr = &matcher->attr;
+
+ if (hws_matcher_validate_insert_mode(caps, matcher))
+ return -EOPNOTSUPP;
+
+ if (matcher->tbl->type != MLX5HWS_TABLE_TYPE_FDB && attr->optimize_flow_src) {
+ mlx5hws_err(matcher->tbl->ctx, "NIC domain doesn't support flow_src\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Convert number of rules to the required depth */
+ if (attr->mode == MLX5HWS_MATCHER_RESOURCE_MODE_RULE &&
+ attr->insert_mode == MLX5HWS_MATCHER_INSERT_BY_HASH)
+ attr->table.sz_col_log = hws_matcher_rules_to_tbl_depth(attr->rule.num_log);
+
+ matcher->flags |= attr->resizable ? MLX5HWS_MATCHER_FLAGS_RESIZABLE : 0;
+
+ return hws_matcher_check_attr_sz(caps, matcher);
+}
+
+static int hws_matcher_create_and_connect(struct mlx5hws_matcher *matcher)
+{
+ int ret;
+
+ /* Select and create the definers for current matcher */
+ ret = hws_matcher_bind_mt(matcher);
+ if (ret)
+ return ret;
+
+ /* Calculate and verify action combination */
+ ret = hws_matcher_bind_at(matcher);
+ if (ret)
+ goto unbind_mt;
+
+ /* Create matcher end flow table anchor */
+ ret = hws_matcher_create_end_ft(matcher);
+ if (ret)
+ goto unbind_at;
+
+ /* Allocate the RTC for the new matcher */
+ ret = hws_matcher_create_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ if (ret)
+ goto destroy_end_ft;
+
+ /* Connect the matcher to the matcher list */
+ ret = hws_matcher_connect(matcher);
+ if (ret)
+ goto destroy_rtc;
+
+ return 0;
+
+destroy_rtc:
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+destroy_end_ft:
+ hws_matcher_destroy_end_ft(matcher);
+unbind_at:
+ hws_matcher_unbind_at(matcher);
+unbind_mt:
+ hws_matcher_unbind_mt(matcher);
+ return ret;
+}
+
+static void hws_matcher_destroy_and_disconnect(struct mlx5hws_matcher *matcher)
+{
+ hws_matcher_resize_uninit(matcher);
+ hws_matcher_disconnect(matcher);
+ hws_matcher_destroy_rtc(matcher, HWS_MATCHER_RTC_TYPE_MATCH, 0);
+ hws_matcher_destroy_end_ft(matcher);
+ hws_matcher_unbind_at(matcher);
+ hws_matcher_unbind_mt(matcher);
+}
+
+static int
+hws_matcher_create_col_matcher(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ struct mlx5hws_matcher *col_matcher;
+ int ret;
+
+ if (matcher->attr.mode != MLX5HWS_MATCHER_RESOURCE_MODE_RULE ||
+ matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX)
+ return 0;
+
+ if (!hws_matcher_requires_col_tbl(matcher->attr.rule.num_log))
+ return 0;
+
+ col_matcher = kzalloc(sizeof(*col_matcher), GFP_KERNEL);
+ if (!col_matcher)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&col_matcher->resize_data);
+
+ col_matcher->tbl = matcher->tbl;
+ col_matcher->mt = matcher->mt;
+ col_matcher->at = matcher->at;
+ col_matcher->num_of_at = matcher->num_of_at;
+ col_matcher->num_of_mt = matcher->num_of_mt;
+ col_matcher->attr.priority = matcher->attr.priority;
+ col_matcher->flags = matcher->flags;
+ col_matcher->flags |= MLX5HWS_MATCHER_FLAGS_COLLISION;
+ col_matcher->attr.mode = MLX5HWS_MATCHER_RESOURCE_MODE_HTABLE;
+ col_matcher->attr.optimize_flow_src = matcher->attr.optimize_flow_src;
+ col_matcher->attr.table.sz_row_log = matcher->attr.rule.num_log;
+ col_matcher->attr.table.sz_col_log = MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH;
+ if (col_matcher->attr.table.sz_row_log > MLX5HWS_MATCHER_ASSURED_ROW_RATIO)
+ col_matcher->attr.table.sz_row_log -= MLX5HWS_MATCHER_ASSURED_ROW_RATIO;
+
+ col_matcher->attr.max_num_of_at_attach = matcher->attr.max_num_of_at_attach;
+
+ ret = hws_matcher_process_attr(ctx->caps, col_matcher);
+ if (ret)
+ goto free_col_matcher;
+
+ ret = hws_matcher_create_and_connect(col_matcher);
+ if (ret)
+ goto free_col_matcher;
+
+ matcher->col_matcher = col_matcher;
+
+ return 0;
+
+free_col_matcher:
+ kfree(col_matcher);
+ mlx5hws_err(ctx, "Failed to create assured collision matcher\n");
+ return ret;
+}
+
+static void
+hws_matcher_destroy_col_matcher(struct mlx5hws_matcher *matcher)
+{
+ if (matcher->attr.mode != MLX5HWS_MATCHER_RESOURCE_MODE_RULE ||
+ matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX)
+ return;
+
+ if (matcher->col_matcher) {
+ hws_matcher_destroy_and_disconnect(matcher->col_matcher);
+ kfree(matcher->col_matcher);
+ }
+}
+
+static int hws_matcher_init(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ int ret;
+
+ INIT_LIST_HEAD(&matcher->resize_data);
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ /* Allocate matcher resource and connect to the packet pipe */
+ ret = hws_matcher_create_and_connect(matcher);
+ if (ret)
+ goto unlock_err;
+
+ /* Create additional matcher for collision handling */
+ ret = hws_matcher_create_col_matcher(matcher);
+ if (ret)
+ goto destroy_and_disconnect;
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+destroy_and_disconnect:
+ hws_matcher_destroy_and_disconnect(matcher);
+unlock_err:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static int hws_matcher_uninit(struct mlx5hws_matcher *matcher)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+
+ mutex_lock(&ctx->ctrl_lock);
+ hws_matcher_destroy_col_matcher(matcher);
+ hws_matcher_destroy_and_disconnect(matcher);
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+}
+
+int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_action_template *at)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(matcher->mt);
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ u32 required_stes;
+ int ret;
+
+ if (!matcher->attr.max_num_of_at_attach) {
+ mlx5hws_dbg(ctx, "Num of current at (%d) exceeds allowed value\n",
+ matcher->num_of_at);
+ return -EOPNOTSUPP;
+ }
+
+ ret = hws_matcher_check_and_process_at(matcher, at);
+ if (ret)
+ return ret;
+
+ required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
+ if (matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes < required_stes) {
+ mlx5hws_dbg(ctx, "Required STEs [%d] exceed initial action template STEs [%d]\n",
+ required_stes,
+ matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes);
+ return -ENOMEM;
+ }
+
+ matcher->at[matcher->num_of_at] = *at;
+ matcher->num_of_at += 1;
+ matcher->attr.max_num_of_at_attach -= 1;
+
+ if (matcher->col_matcher)
+ matcher->col_matcher->num_of_at = matcher->num_of_at;
+
+ return 0;
+}
+
+static int
+hws_matcher_set_templates(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_match_template *mt[],
+ u8 num_of_mt,
+ struct mlx5hws_action_template *at[],
+ u8 num_of_at)
+{
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ int ret = 0;
+ int i;
+
+ if (!num_of_mt || !num_of_at) {
+ mlx5hws_err(ctx, "Number of action/match templates cannot be zero\n");
+ return -EOPNOTSUPP;
+ }
+
+ matcher->mt = kcalloc(num_of_mt, sizeof(*matcher->mt), GFP_KERNEL);
+ if (!matcher->mt)
+ return -ENOMEM;
+
+ matcher->at = kcalloc(num_of_at + matcher->attr.max_num_of_at_attach,
+ sizeof(*matcher->at),
+ GFP_KERNEL);
+ if (!matcher->at) {
+ mlx5hws_err(ctx, "Failed to allocate action template array\n");
+ ret = -ENOMEM;
+ goto free_mt;
+ }
+
+ for (i = 0; i < num_of_mt; i++)
+ matcher->mt[i] = *mt[i];
+
+ for (i = 0; i < num_of_at; i++)
+ matcher->at[i] = *at[i];
+
+ matcher->num_of_mt = num_of_mt;
+ matcher->num_of_at = num_of_at;
+
+ return 0;
+
+free_mt:
+ kfree(matcher->mt);
+ return ret;
+}
+
+static void
+hws_matcher_unset_templates(struct mlx5hws_matcher *matcher)
+{
+ kfree(matcher->at);
+ kfree(matcher->mt);
+}
+
+struct mlx5hws_matcher *
+mlx5hws_matcher_create(struct mlx5hws_table *tbl,
+ struct mlx5hws_match_template *mt[],
+ u8 num_of_mt,
+ struct mlx5hws_action_template *at[],
+ u8 num_of_at,
+ struct mlx5hws_matcher_attr *attr)
+{
+ struct mlx5hws_context *ctx = tbl->ctx;
+ struct mlx5hws_matcher *matcher;
+ int ret;
+
+ matcher = kzalloc(sizeof(*matcher), GFP_KERNEL);
+ if (!matcher)
+ return NULL;
+
+ matcher->tbl = tbl;
+ matcher->attr = *attr;
+
+ ret = hws_matcher_process_attr(tbl->ctx->caps, matcher);
+ if (ret)
+ goto free_matcher;
+
+ ret = hws_matcher_set_templates(matcher, mt, num_of_mt, at, num_of_at);
+ if (ret)
+ goto free_matcher;
+
+ ret = hws_matcher_init(matcher);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to initialise matcher: %d\n", ret);
+ goto unset_templates;
+ }
+
+ return matcher;
+
+unset_templates:
+ hws_matcher_unset_templates(matcher);
+free_matcher:
+ kfree(matcher);
+ return NULL;
+}
+
+int mlx5hws_matcher_destroy(struct mlx5hws_matcher *matcher)
+{
+ hws_matcher_uninit(matcher);
+ hws_matcher_unset_templates(matcher);
+ kfree(matcher);
+ return 0;
+}
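+
+/* Illustrative call order (a sketch based only on the functions in this
+ * file; the action template and matcher attr are assumed to come from the
+ * wider HWS API, and error handling is omitted):
+ *
+ *   mt = mlx5hws_match_template_create(ctx, match_param, sz, criteria);
+ *   matcher = mlx5hws_matcher_create(tbl, &mt, 1, &at, 1, &attr);
+ *   ...
+ *   mlx5hws_matcher_destroy(matcher);
+ *   mlx5hws_match_template_destroy(mt);
+ */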
+
+struct mlx5hws_match_template *
+mlx5hws_match_template_create(struct mlx5hws_context *ctx,
+ u32 *match_param,
+ u32 match_param_sz,
+ u8 match_criteria_enable)
+{
+ struct mlx5hws_match_template *mt;
+
+ mt = kzalloc(sizeof(*mt), GFP_KERNEL);
+ if (!mt)
+ return NULL;
+
+ mt->match_param = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!mt->match_param)
+ goto free_template;
+
+ memcpy(mt->match_param, match_param, match_param_sz);
+ mt->match_criteria_enable = match_criteria_enable;
+
+ return mt;
+
+free_template:
+ kfree(mt);
+ return NULL;
+}
+
+int mlx5hws_match_template_destroy(struct mlx5hws_match_template *mt)
+{
+ kfree(mt->match_param);
+ kfree(mt);
+ return 0;
+}
+
+static int hws_matcher_resize_precheck(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_matcher *dst_matcher)
+{
+ struct mlx5hws_context *ctx = src_matcher->tbl->ctx;
+ int i;
+
+ if (src_matcher->tbl->type != dst_matcher->tbl->type) {
+ mlx5hws_err(ctx, "Table type mismatch for src/dst matchers\n");
+ return -EINVAL;
+ }
+
+ if (!mlx5hws_matcher_is_resizable(src_matcher) ||
+ !mlx5hws_matcher_is_resizable(dst_matcher)) {
+ mlx5hws_err(ctx, "Src/dst matcher is not resizable\n");
+ return -EINVAL;
+ }
+
+ if (mlx5hws_matcher_is_insert_by_idx(src_matcher) !=
+ mlx5hws_matcher_is_insert_by_idx(dst_matcher)) {
+ mlx5hws_err(ctx, "Src/dst matchers insert mode mismatch\n");
+ return -EINVAL;
+ }
+
+ if (mlx5hws_matcher_is_in_resize(src_matcher) ||
+ mlx5hws_matcher_is_in_resize(dst_matcher)) {
+ mlx5hws_err(ctx, "Src/dst matcher is already in resize\n");
+ return -EINVAL;
+ }
+
+ /* Compare match templates - make sure the definers are equivalent */
+ if (src_matcher->num_of_mt != dst_matcher->num_of_mt) {
+ mlx5hws_err(ctx, "Src/dst matcher match templates mismatch\n");
+ return -EINVAL;
+ }
+
+ if (src_matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes >
+ dst_matcher->action_ste[0].max_stes) {
+ mlx5hws_err(ctx, "Src/dst matcher max STEs mismatch\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < src_matcher->num_of_mt; i++) {
+ if (mlx5hws_definer_compare(src_matcher->mt[i].definer,
+ dst_matcher->mt[i].definer)) {
+ mlx5hws_err(ctx, "Src/dst matcher definers mismatch\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int mlx5hws_matcher_resize_set_target(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_matcher *dst_matcher)
+{
+ int ret = 0;
+
+ mutex_lock(&src_matcher->tbl->ctx->ctrl_lock);
+
+ ret = hws_matcher_resize_precheck(src_matcher, dst_matcher);
+ if (ret)
+ goto out;
+
+ src_matcher->resize_dst = dst_matcher;
+
+ ret = hws_matcher_resize_init(src_matcher);
+ if (ret)
+ src_matcher->resize_dst = NULL;
+
+out:
+ mutex_unlock(&src_matcher->tbl->ctx->ctrl_lock);
+ return ret;
+}
+
+int mlx5hws_matcher_resize_rule_move(struct mlx5hws_matcher *src_matcher,
+ struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_context *ctx = src_matcher->tbl->ctx;
+
+ if (unlikely(!mlx5hws_matcher_is_in_resize(src_matcher))) {
+ mlx5hws_err(ctx, "Matcher is not resizable or not in resize\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(src_matcher != rule->matcher)) {
+ mlx5hws_err(ctx, "Rule doesn't belong to src matcher\n");
+ return -EINVAL;
+ }
+
+ return mlx5hws_rule_move_hws_add(rule, attr);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h
new file mode 100644
index 000000000000..125391d1a114
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_matcher.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_MATCHER_H_
+#define MLX5HWS_MATCHER_H_
+
+/* We calculated that concatenating a collision table to the main table with
+ * 3% of the main table rows provides enough resources for a high insertion
+ * success probability.
+ *
+ * The calculation: log2(2^x * 3 / 100) = log2(2^x) + log2(3/100) = x - 5.05 ~ 5
+ */
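+/* For example, a matcher sized for 2^20 rules gets a collision table of
+ * roughly 2^(20 - 5) = 2^15 rows, since 3% of 2^20 is about 2^15.
+ */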
+#define MLX5HWS_MATCHER_ASSURED_ROW_RATIO 5
+/* Threshold to determine if the number of rules requires a collision table */
+#define MLX5HWS_MATCHER_ASSURED_RULES_TH 10
+/* Required depth of an assured collision table */
+#define MLX5HWS_MATCHER_ASSURED_COL_TBL_DEPTH 4
+/* Required depth of the main large table */
+#define MLX5HWS_MATCHER_ASSURED_MAIN_TBL_DEPTH 2
+
+enum mlx5hws_matcher_offset {
+ MLX5HWS_MATCHER_OFFSET_TAG_DW1 = 12,
+ MLX5HWS_MATCHER_OFFSET_TAG_DW0 = 13,
+};
+
+enum mlx5hws_matcher_flags {
+ MLX5HWS_MATCHER_FLAGS_COLLISION = 1 << 2,
+ MLX5HWS_MATCHER_FLAGS_RESIZABLE = 1 << 3,
+};
+
+struct mlx5hws_match_template {
+ struct mlx5hws_definer *definer;
+ struct mlx5hws_definer_fc *fc;
+ u32 *match_param;
+ u8 match_criteria_enable;
+ u16 fc_sz;
+};
+
+struct mlx5hws_matcher_match_ste {
+ struct mlx5hws_pool_chunk ste;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+ struct mlx5hws_pool *pool;
+};
+
+struct mlx5hws_matcher_action_ste {
+ struct mlx5hws_pool_chunk ste;
+ struct mlx5hws_pool_chunk stc;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+ struct mlx5hws_pool *pool;
+ u8 max_stes;
+};
+
+struct mlx5hws_matcher_resize_data_node {
+ struct mlx5hws_pool_chunk stc;
+ u32 rtc_0_id;
+ u32 rtc_1_id;
+ struct mlx5hws_pool *pool;
+};
+
+struct mlx5hws_matcher_resize_data {
+ struct mlx5hws_matcher_resize_data_node action_ste[2];
+ u8 max_stes;
+ struct list_head list_node;
+};
+
+struct mlx5hws_matcher {
+ struct mlx5hws_table *tbl;
+ struct mlx5hws_matcher_attr attr;
+ struct mlx5hws_match_template *mt;
+ struct mlx5hws_action_template *at;
+ u8 num_of_at;
+ u8 num_of_mt;
+ /* enum mlx5hws_matcher_flags */
+ u8 flags;
+ u32 end_ft_id;
+ struct mlx5hws_matcher *col_matcher;
+ struct mlx5hws_matcher *resize_dst;
+ struct mlx5hws_matcher_match_ste match_ste;
+ struct mlx5hws_matcher_action_ste action_ste[2];
+ struct list_head list_node;
+ struct list_head resize_data;
+};
+
+static inline bool
+mlx5hws_matcher_mt_is_jumbo(struct mlx5hws_match_template *mt)
+{
+ return mlx5hws_definer_is_jumbo(mt->definer);
+}
+
+static inline bool mlx5hws_matcher_is_resizable(struct mlx5hws_matcher *matcher)
+{
+ return !!(matcher->flags & MLX5HWS_MATCHER_FLAGS_RESIZABLE);
+}
+
+static inline bool mlx5hws_matcher_is_in_resize(struct mlx5hws_matcher *matcher)
+{
+ return !!matcher->resize_dst;
+}
+
+static inline bool mlx5hws_matcher_is_insert_by_idx(struct mlx5hws_matcher *matcher)
+{
+ return matcher->attr.insert_mode == MLX5HWS_MATCHER_INSERT_BY_INDEX;
+}
+
+#endif /* MLX5HWS_MATCHER_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c
new file mode 100644
index 000000000000..e084a5cbf81f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_data_size_to_arg_log_size(u16 data_size)
+{
+ /* Return the smallest arg chunk size that covers data_size */
+ if (data_size <= MLX5HWS_ARG_DATA_SIZE)
+ return MLX5HWS_ARG_CHUNK_SIZE_1;
+ if (data_size <= MLX5HWS_ARG_DATA_SIZE * 2)
+ return MLX5HWS_ARG_CHUNK_SIZE_2;
+ if (data_size <= MLX5HWS_ARG_DATA_SIZE * 4)
+ return MLX5HWS_ARG_CHUNK_SIZE_3;
+ if (data_size <= MLX5HWS_ARG_DATA_SIZE * 8)
+ return MLX5HWS_ARG_CHUNK_SIZE_4;
+
+ return MLX5HWS_ARG_CHUNK_SIZE_MAX;
+}
+
+u32 mlx5hws_arg_data_size_to_arg_size(u16 data_size)
+{
+ return BIT(mlx5hws_arg_data_size_to_arg_log_size(data_size));
+}
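+
+/* Example derived from the thresholds above: a data_size between
+ * 2 * MLX5HWS_ARG_DATA_SIZE and 4 * MLX5HWS_ARG_DATA_SIZE maps to
+ * MLX5HWS_ARG_CHUNK_SIZE_3, and mlx5hws_arg_data_size_to_arg_size()
+ * turns that log value into a size via BIT().
+ */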
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_get_arg_log_size(u16 num_of_actions)
+{
+ return mlx5hws_arg_data_size_to_arg_log_size(num_of_actions *
+ MLX5HWS_MODIFY_ACTION_SIZE);
+}
+
+u32 mlx5hws_arg_get_arg_size(u16 num_of_actions)
+{
+ return BIT(mlx5hws_arg_get_arg_log_size(num_of_actions));
+}
+
+bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions)
+{
+ u16 i, field;
+ u8 action_id;
+
+ for (i = 0; i < num_of_actions; i++) {
+ action_id = MLX5_GET(set_action_in, &actions[i], action_type);
+
+ switch (action_id) {
+ case MLX5_MODIFICATION_TYPE_NOP:
+ field = MLX5_MODI_OUT_NONE;
+ break;
+
+ case MLX5_MODIFICATION_TYPE_SET:
+ case MLX5_MODIFICATION_TYPE_ADD:
+ field = MLX5_GET(set_action_in, &actions[i], field);
+ break;
+
+ case MLX5_MODIFICATION_TYPE_COPY:
+ case MLX5_MODIFICATION_TYPE_ADD_FIELD:
+ field = MLX5_GET(copy_action_in, &actions[i], dst_field);
+ break;
+
+ default:
+ /* Insert/Remove/Unknown actions require reparse */
+ return true;
+ }
+
+ /* The fields below can change the packet structure and require a reparse */
+ if (field == MLX5_MODI_OUT_ETHERTYPE ||
+ field == MLX5_MODI_OUT_IPV6_NEXT_HDR)
+ return true;
+ }
+
+ return false;
+}
+
+/* Cache and cache element handling */
+int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache)
+{
+ struct mlx5hws_pattern_cache *new_cache;
+
+ new_cache = kzalloc(sizeof(*new_cache), GFP_KERNEL);
+ if (!new_cache)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&new_cache->ptrn_list);
+ mutex_init(&new_cache->lock);
+
+ *cache = new_cache;
+
+ return 0;
+}
+
+void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache)
+{
+ mutex_destroy(&cache->lock);
+ kfree(cache);
+}
+
+static bool mlx5hws_pat_compare_pattern(int cur_num_of_actions,
+ __be64 cur_actions[],
+ int num_of_actions,
+ __be64 actions[])
+{
+ int i;
+
+ if (cur_num_of_actions != num_of_actions)
+ return false;
+
+ for (i = 0; i < num_of_actions; i++) {
+ u8 action_id =
+ MLX5_GET(set_action_in, &actions[i], action_type);
+
+ if (action_id == MLX5_MODIFICATION_TYPE_COPY ||
+ action_id == MLX5_MODIFICATION_TYPE_ADD_FIELD) {
+ if (actions[i] != cur_actions[i])
+ return false;
+ } else {
+ /* Compare just the control, not the values */
+ if ((__force __be32)actions[i] !=
+ (__force __be32)cur_actions[i])
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_find_cached_pattern(struct mlx5hws_pattern_cache *cache,
+ u16 num_of_actions,
+ __be64 *actions)
+{
+ struct mlx5hws_pattern_cache_item *cached_pat = NULL;
+
+ list_for_each_entry(cached_pat, &cache->ptrn_list, ptrn_list_node) {
+ if (mlx5hws_pat_compare_pattern(cached_pat->mh_data.num_of_actions,
+ (__be64 *)cached_pat->mh_data.data,
+ num_of_actions,
+ actions))
+ return cached_pat;
+ }
+
+ return NULL;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_get_existing_cached_pattern(struct mlx5hws_pattern_cache *cache,
+ u16 num_of_actions,
+ __be64 *actions)
+{
+ struct mlx5hws_pattern_cache_item *cached_pattern;
+
+ cached_pattern = mlx5hws_pat_find_cached_pattern(cache, num_of_actions, actions);
+ if (cached_pattern) {
+ /* LRU: move it to be first in the list */
+ list_del_init(&cached_pattern->ptrn_list_node);
+ list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
+ cached_pattern->refcount++;
+ }
+
+ return cached_pattern;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_add_pattern_to_cache(struct mlx5hws_pattern_cache *cache,
+ u32 pattern_id,
+ u16 num_of_actions,
+ __be64 *actions)
+{
+ struct mlx5hws_pattern_cache_item *cached_pattern;
+
+ cached_pattern = kzalloc(sizeof(*cached_pattern), GFP_KERNEL);
+ if (!cached_pattern)
+ return NULL;
+
+ cached_pattern->mh_data.num_of_actions = num_of_actions;
+ cached_pattern->mh_data.pattern_id = pattern_id;
+ cached_pattern->mh_data.data =
+ kmemdup(actions, num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE, GFP_KERNEL);
+ if (!cached_pattern->mh_data.data)
+ goto free_cached_obj;
+
+ list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
+ cached_pattern->refcount = 1;
+
+ return cached_pattern;
+
+free_cached_obj:
+ kfree(cached_pattern);
+ return NULL;
+}
+
+static struct mlx5hws_pattern_cache_item *
+mlx5hws_pat_find_cached_pattern_by_id(struct mlx5hws_pattern_cache *cache,
+ u32 ptrn_id)
+{
+ struct mlx5hws_pattern_cache_item *cached_pattern = NULL;
+
+ list_for_each_entry(cached_pattern, &cache->ptrn_list, ptrn_list_node) {
+ if (cached_pattern->mh_data.pattern_id == ptrn_id)
+ return cached_pattern;
+ }
+
+ return NULL;
+}
+
+static void
+mlx5hws_pat_remove_pattern(struct mlx5hws_pattern_cache_item *cached_pattern)
+{
+ list_del_init(&cached_pattern->ptrn_list_node);
+
+ kfree(cached_pattern->mh_data.data);
+ kfree(cached_pattern);
+}
+
+void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx, u32 ptrn_id)
+{
+ struct mlx5hws_pattern_cache *cache = ctx->pattern_cache;
+ struct mlx5hws_pattern_cache_item *cached_pattern;
+
+ mutex_lock(&cache->lock);
+ cached_pattern = mlx5hws_pat_find_cached_pattern_by_id(cache, ptrn_id);
+ if (!cached_pattern) {
+ mlx5hws_err(ctx, "Failed to find cached pattern with provided ID\n");
+ pr_warn("HWS: pattern ID %d is not found\n", ptrn_id);
+ goto out;
+ }
+
+ if (--cached_pattern->refcount)
+ goto out;
+
+ mlx5hws_pat_remove_pattern(cached_pattern);
+ mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id);
+
+out:
+ mutex_unlock(&cache->lock);
+}
+
+int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
+ __be64 *pattern, size_t pattern_sz,
+ u32 *pattern_id)
+{
+ u16 num_of_actions = pattern_sz / MLX5HWS_MODIFY_ACTION_SIZE;
+ struct mlx5hws_pattern_cache_item *cached_pattern;
+ u32 ptrn_id = 0;
+ int ret = 0;
+
+ mutex_lock(&ctx->pattern_cache->lock);
+
+ cached_pattern = mlx5hws_pat_get_existing_cached_pattern(ctx->pattern_cache,
+ num_of_actions,
+ pattern);
+ if (cached_pattern) {
+ *pattern_id = cached_pattern->mh_data.pattern_id;
+ goto out_unlock;
+ }
+
+ ret = mlx5hws_cmd_header_modify_pattern_create(ctx->mdev,
+ pattern_sz,
+ (u8 *)pattern,
+ &ptrn_id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to create pattern FW object\n");
+ goto out_unlock;
+ }
+
+ cached_pattern = mlx5hws_pat_add_pattern_to_cache(ctx->pattern_cache,
+ ptrn_id,
+ num_of_actions,
+ pattern);
+ if (!cached_pattern) {
+ mlx5hws_err(ctx, "Failed to add pattern to cache\n");
+ ret = -EINVAL;
+ goto clean_pattern;
+ }
+
+ mutex_unlock(&ctx->pattern_cache->lock);
+ *pattern_id = ptrn_id;
+
+ return ret;
+
+clean_pattern:
+ mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id);
+out_unlock:
+ mutex_unlock(&ctx->pattern_cache->lock);
+ return ret;
+}
+
+static void
+mlx5d_arg_init_send_attr(struct mlx5hws_send_engine_post_attr *send_attr,
+ void *comp_data,
+ u32 arg_idx)
+{
+ send_attr->opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ send_attr->opmod = MLX5HWS_WQE_GTA_OPMOD_MOD_ARG;
+ send_attr->len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ send_attr->id = arg_idx;
+ send_attr->user_data = comp_data;
+}
+
+void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue,
+ u32 arg_idx,
+ u8 *arg_data,
+ u16 num_of_actions)
+{
+ struct mlx5hws_send_engine_post_attr send_attr = {0};
+ struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg = NULL;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl = NULL;
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+ size_t wqe_len;
+
+ mlx5d_arg_init_send_attr(&send_attr, NULL, arg_idx);
+
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ memset(wqe_ctrl, 0, wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
+ mlx5hws_action_prepare_decap_l3_data(arg_data, (u8 *)wqe_arg,
+ num_of_actions);
+ mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+}
+
+void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
+ void *comp_data,
+ u32 arg_idx,
+ u8 *arg_data,
+ size_t data_size)
+{
+ struct mlx5hws_send_engine_post_attr send_attr = {0};
+ struct mlx5hws_wqe_gta_data_seg_arg *wqe_arg;
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ int i, full_iter, leftover;
+ size_t wqe_len;
+
+ mlx5d_arg_init_send_attr(&send_attr, comp_data, arg_idx);
+
+ /* Each WQE can hold 64B of data; writing the argument may require multiple iterations */
+ full_iter = data_size / MLX5HWS_ARG_DATA_SIZE;
+ leftover = data_size & (MLX5HWS_ARG_DATA_SIZE - 1);
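+ /* Illustrative example: with MLX5HWS_ARG_DATA_SIZE == 64 and
+ * data_size == 96, full_iter == 1 (one full 64B WQE) and
+ * leftover == 32 (written by a second, partial WQE).
+ */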
+
+ for (i = 0; i < full_iter; i++) {
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ memset(wqe_ctrl, 0, wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
+ memcpy(wqe_arg, arg_data, wqe_len);
+ send_attr.id = arg_idx++;
+ mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+
+ /* Move to next argument data */
+ arg_data += MLX5HWS_ARG_DATA_SIZE;
+ }
+
+ if (leftover) {
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ memset(wqe_ctrl, 0, wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_arg, &wqe_len);
+ memcpy(wqe_arg, arg_data, leftover);
+ send_attr.id = arg_idx;
+ mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+ }
+}
+
+int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
+ u32 arg_idx,
+ u8 *arg_data,
+ size_t data_size)
+{
+ struct mlx5hws_send_engine *queue;
+ int ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ /* Get the control queue */
+ queue = &ctx->send_queue[ctx->queues - 1];
+
+ mlx5hws_arg_write(queue, arg_data, arg_idx, arg_data, data_size);
+
+ mlx5hws_send_engine_flush_queue(queue);
+
+ /* Poll for completion */
+ ret = mlx5hws_send_queue_action(ctx, ctx->queues - 1,
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
+
+ if (ret)
+ mlx5hws_err(ctx, "Failed to drain arg queue\n");
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return ret;
+}
+
+bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx,
+ u32 arg_size)
+{
+ if (arg_size < ctx->caps->log_header_modify_argument_granularity ||
+ arg_size > ctx->caps->log_header_modify_argument_max_alloc) {
+ return false;
+ }
+ return true;
+}
+
+int mlx5hws_arg_create(struct mlx5hws_context *ctx,
+ u8 *data,
+ size_t data_sz,
+ u32 log_bulk_sz,
+ bool write_data,
+ u32 *arg_id)
+{
+ u16 single_arg_log_sz;
+ u16 multi_arg_log_sz;
+ int ret;
+ u32 id;
+
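+ /* The allocation is a bulk of 2^log_bulk_sz arguments, so the overall
+ * log size is the per-argument log size plus log_bulk_sz.
+ */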
+ single_arg_log_sz = mlx5hws_arg_data_size_to_arg_log_size(data_sz);
+ multi_arg_log_sz = single_arg_log_sz + log_bulk_sz;
+
+ if (single_arg_log_sz >= MLX5HWS_ARG_CHUNK_SIZE_MAX) {
+ mlx5hws_err(ctx, "Requested single arg %u not supported\n", single_arg_log_sz);
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5hws_arg_is_valid_arg_request_size(ctx, multi_arg_log_sz)) {
+ mlx5hws_err(ctx, "Argument log size %d not supported by FW\n", multi_arg_log_sz);
+ return -EOPNOTSUPP;
+ }
+
+ /* Alloc bulk of args */
+ ret = mlx5hws_cmd_arg_create(ctx->mdev, multi_arg_log_sz, ctx->pd_num, &id);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed allocating arg in order: %d\n", multi_arg_log_sz);
+ return ret;
+ }
+
+ if (write_data) {
+ ret = mlx5hws_arg_write_inline_arg_data(ctx, id,
+ data, data_sz);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed writing arg data\n");
+ mlx5hws_cmd_arg_destroy(ctx->mdev, id);
+ return ret;
+ }
+ }
+
+ *arg_id = id;
+ return ret;
+}
+
+void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id)
+{
+ mlx5hws_cmd_arg_destroy(ctx->mdev, arg_id);
+}
+
+int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx,
+ __be64 *data,
+ u8 num_of_actions,
+ u32 log_bulk_sz,
+ bool write_data,
+ u32 *arg_id)
+{
+ size_t data_sz = num_of_actions * MLX5HWS_MODIFY_ACTION_SIZE;
+ int ret;
+
+ ret = mlx5hws_arg_create(ctx,
+ (u8 *)data,
+ data_sz,
+ log_bulk_sz,
+ write_data,
+ arg_id);
+ if (ret)
+ mlx5hws_err(ctx, "Failed creating modify header arg\n");
+
+ return ret;
+}
+
+static int
+hws_action_modify_check_field_limitation(u8 action_type, __be64 *pattern)
+{
+ /* Field limitation checks should be added here; for now, return OK */
+ return 0;
+}
+
+#define INVALID_FIELD 0xffff
+
+static void
+hws_action_modify_get_target_fields(u8 action_type, __be64 *pattern,
+ u16 *src_field, u16 *dst_field)
+{
+ switch (action_type) {
+ case MLX5_ACTION_TYPE_SET:
+ case MLX5_ACTION_TYPE_ADD:
+ *src_field = MLX5_GET(set_action_in, pattern, field);
+ *dst_field = INVALID_FIELD;
+ break;
+ case MLX5_ACTION_TYPE_COPY:
+ *src_field = MLX5_GET(copy_action_in, pattern, src_field);
+ *dst_field = MLX5_GET(copy_action_in, pattern, dst_field);
+ break;
+ default:
+ pr_warn("HWS: invalid modify header action type %d\n", action_type);
+ }
+}
+
+bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz)
+{
+ size_t i;
+
+ for (i = 0; i < sz / MLX5HWS_MODIFY_ACTION_SIZE; i++) {
+ u8 action_type =
+ MLX5_GET(set_action_in, &pattern[i], action_type);
+ if (action_type >= MLX5_MODIFICATION_TYPE_MAX) {
+ mlx5hws_err(ctx, "Unsupported action id %d\n", action_type);
+ return false;
+ }
+ if (hws_action_modify_check_field_limitation(action_type, &pattern[i])) {
+ mlx5hws_err(ctx, "Unsupported action number %zu\n", i);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions,
+ size_t max_actions, size_t *new_size,
+ u32 *nope_location, __be64 *new_pat)
+{
+ u16 prev_src_field = 0, prev_dst_field = 0;
+ u16 src_field, dst_field;
+ u8 action_type;
+ size_t i, j;
+
+ *new_size = num_actions;
+ *nope_location = 0;
+
+ if (num_actions == 1)
+ return;
+
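+ /* The checks below suggest that modify-header actions are executed in
+ * pairs: when the second (odd-indexed) action of a pair touches the
+ * same field as the previous action, a NOP is inserted to break the
+ * in-pair dependency. This is a descriptive note on the logic below,
+ * not a firmware guarantee stated in this patch.
+ */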
+ for (i = 0, j = 0; i < num_actions; i++, j++) {
+ action_type = MLX5_GET(set_action_in, &pattern[i], action_type);
+
+ hws_action_modify_get_target_fields(action_type, &pattern[i],
+ &src_field, &dst_field);
+ if (i % 2) {
+ if (action_type == MLX5_ACTION_TYPE_COPY &&
+ (prev_src_field == src_field ||
+ prev_dst_field == dst_field)) {
+ /* A NOP action is needed here */
+ *new_size += 1;
+ *nope_location |= BIT(i);
+ memset(&new_pat[j], 0, MLX5HWS_MODIFY_ACTION_SIZE);
+ MLX5_SET(set_action_in, &new_pat[j],
+ action_type,
+ MLX5_MODIFICATION_TYPE_NOP);
+ j++;
+ } else if (prev_src_field == src_field) {
+ /* A NOP action is needed here */
+ *new_size += 1;
+ *nope_location |= BIT(i);
+ MLX5_SET(set_action_in, &new_pat[j],
+ action_type,
+ MLX5_MODIFICATION_TYPE_NOP);
+ j++;
+ }
+ }
+ memcpy(&new_pat[j], &pattern[i], MLX5HWS_MODIFY_ACTION_SIZE);
+ /* Check whether the expanded pattern ran out of space */
+ if (j > max_actions) {
+ *new_size = num_actions;
+ *nope_location = 0;
+ return;
+ }
+
+ prev_src_field = src_field;
+ prev_dst_field = dst_field;
+ }
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h
new file mode 100644
index 000000000000..27ca93385b08
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pat_arg.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_PAT_ARG_H_
+#define MLX5HWS_PAT_ARG_H_
+
+/* Modify-header arg pool */
+enum mlx5hws_arg_chunk_size {
+ MLX5HWS_ARG_CHUNK_SIZE_1,
+ /* Keep MIN updated when changing */
+ MLX5HWS_ARG_CHUNK_SIZE_MIN = MLX5HWS_ARG_CHUNK_SIZE_1,
+ MLX5HWS_ARG_CHUNK_SIZE_2,
+ MLX5HWS_ARG_CHUNK_SIZE_3,
+ MLX5HWS_ARG_CHUNK_SIZE_4,
+ MLX5HWS_ARG_CHUNK_SIZE_MAX,
+};
+
+enum {
+ MLX5HWS_MODIFY_ACTION_SIZE = 8,
+ MLX5HWS_ARG_DATA_SIZE = 64,
+};
+
+struct mlx5hws_pattern_cache {
+ struct mutex lock; /* Protect pattern list */
+ struct list_head ptrn_list;
+};
+
+struct mlx5hws_pattern_cache_item {
+ struct {
+ u32 pattern_id;
+ u8 *data;
+ u16 num_of_actions;
+ } mh_data;
+ u32 refcount;
+ struct list_head ptrn_list_node;
+};
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_get_arg_log_size(u16 num_of_actions);
+
+u32 mlx5hws_arg_get_arg_size(u16 num_of_actions);
+
+enum mlx5hws_arg_chunk_size
+mlx5hws_arg_data_size_to_arg_log_size(u16 data_size);
+
+u32 mlx5hws_arg_data_size_to_arg_size(u16 data_size);
+
+int mlx5hws_pat_init_pattern_cache(struct mlx5hws_pattern_cache **cache);
+
+void mlx5hws_pat_uninit_pattern_cache(struct mlx5hws_pattern_cache *cache);
+
+bool mlx5hws_pat_verify_actions(struct mlx5hws_context *ctx, __be64 pattern[], size_t sz);
+
+int mlx5hws_arg_create(struct mlx5hws_context *ctx,
+ u8 *data,
+ size_t data_sz,
+ u32 log_bulk_sz,
+ bool write_data,
+ u32 *arg_id);
+
+void mlx5hws_arg_destroy(struct mlx5hws_context *ctx, u32 arg_id);
+
+int mlx5hws_arg_create_modify_header_arg(struct mlx5hws_context *ctx,
+ __be64 *data,
+ u8 num_of_actions,
+ u32 log_bulk_sz,
+ bool write_data,
+ u32 *modify_hdr_arg_id);
+
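+/* mlx5hws_pat_get_pattern() returns a FW pattern object ID for the given
+ * action pattern, reusing (and refcounting) a cached identical pattern when
+ * one exists; mlx5hws_pat_put_pattern() drops the reference and destroys the
+ * FW object once the refcount reaches zero.
+ */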
+int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
+ __be64 *pattern,
+ size_t pattern_sz,
+ u32 *ptrn_id);
+
+void mlx5hws_pat_put_pattern(struct mlx5hws_context *ctx,
+ u32 ptrn_id);
+
+bool mlx5hws_arg_is_valid_arg_request_size(struct mlx5hws_context *ctx,
+ u32 arg_size);
+
+bool mlx5hws_pat_require_reparse(__be64 *actions, u16 num_of_actions);
+
+void mlx5hws_arg_write(struct mlx5hws_send_engine *queue,
+ void *comp_data,
+ u32 arg_idx,
+ u8 *arg_data,
+ size_t data_size);
+
+void mlx5hws_arg_decapl3_write(struct mlx5hws_send_engine *queue,
+ u32 arg_idx,
+ u8 *arg_data,
+ u16 num_of_actions);
+
+int mlx5hws_arg_write_inline_arg_data(struct mlx5hws_context *ctx,
+ u32 arg_idx,
+ u8 *arg_data,
+ size_t data_size);
+
+void mlx5hws_pat_calc_nope(__be64 *pattern, size_t num_actions, size_t max_actions,
+ size_t *new_size, u32 *nope_location, __be64 *new_pat);
+#endif /* MLX5HWS_PAT_ARG_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c
new file mode 100644
index 000000000000..a8a63e3278be
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.c
@@ -0,0 +1,640 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+#include "mlx5hws_buddy.h"
+
+static void hws_pool_free_one_resource(struct mlx5hws_pool_resource *resource)
+{
+ switch (resource->pool->type) {
+ case MLX5HWS_POOL_TYPE_STE:
+ mlx5hws_cmd_ste_destroy(resource->pool->ctx->mdev, resource->base_id);
+ break;
+ case MLX5HWS_POOL_TYPE_STC:
+ mlx5hws_cmd_stc_destroy(resource->pool->ctx->mdev, resource->base_id);
+ break;
+ default:
+ break;
+ }
+
+ kfree(resource);
+}
+
+static void hws_pool_resource_free(struct mlx5hws_pool *pool,
+ int resource_idx)
+{
+ hws_pool_free_one_resource(pool->resource[resource_idx]);
+ pool->resource[resource_idx] = NULL;
+
+ if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
+ hws_pool_free_one_resource(pool->mirror_resource[resource_idx]);
+ pool->mirror_resource[resource_idx] = NULL;
+ }
+}
+
+static struct mlx5hws_pool_resource *
+hws_pool_create_one_resource(struct mlx5hws_pool *pool, u32 log_range,
+ u32 fw_ft_type)
+{
+ struct mlx5hws_cmd_ste_create_attr ste_attr;
+ struct mlx5hws_cmd_stc_create_attr stc_attr;
+ struct mlx5hws_pool_resource *resource;
+ u32 obj_id = 0;
+ int ret;
+
+ resource = kzalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
+ return NULL;
+
+ switch (pool->type) {
+ case MLX5HWS_POOL_TYPE_STE:
+ ste_attr.log_obj_range = log_range;
+ ste_attr.table_type = fw_ft_type;
+ ret = mlx5hws_cmd_ste_create(pool->ctx->mdev, &ste_attr, &obj_id);
+ break;
+ case MLX5HWS_POOL_TYPE_STC:
+ stc_attr.log_obj_range = log_range;
+ stc_attr.table_type = fw_ft_type;
+ ret = mlx5hws_cmd_stc_create(pool->ctx->mdev, &stc_attr, &obj_id);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ mlx5hws_err(pool->ctx, "Failed to allocate resource objects\n");
+ goto free_resource;
+ }
+
+ resource->pool = pool;
+ resource->range = 1 << log_range;
+ resource->base_id = obj_id;
+
+ return resource;
+
+free_resource:
+ kfree(resource);
+ return NULL;
+}
+
+static int
+hws_pool_resource_alloc(struct mlx5hws_pool *pool, u32 log_range, int idx)
+{
+ struct mlx5hws_pool_resource *resource;
+ u32 fw_ft_type, opt_log_range;
+
+ fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, false);
+ opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_ORIG ? 0 : log_range;
+ resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
+ if (!resource) {
+ mlx5hws_err(pool->ctx, "Failed allocating resource\n");
+ return -EINVAL;
+ }
+
+ pool->resource[idx] = resource;
+
+ if (pool->tbl_type == MLX5HWS_TABLE_TYPE_FDB) {
+ struct mlx5hws_pool_resource *mirror_resource;
+
+ fw_ft_type = mlx5hws_table_get_res_fw_ft_type(pool->tbl_type, true);
+ opt_log_range = pool->opt_type == MLX5HWS_POOL_OPTIMIZE_MIRROR ? 0 : log_range;
+ mirror_resource = hws_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
+ if (!mirror_resource) {
+ mlx5hws_err(pool->ctx, "Failed allocating mirrored resource\n");
+ hws_pool_free_one_resource(resource);
+ pool->resource[idx] = NULL;
+ return -EINVAL;
+ }
+ pool->mirror_resource[idx] = mirror_resource;
+ }
+
+ return 0;
+}
+
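+/* The bitmap tracks free slots: a set bit means the slot is free. All bits
+ * start set; hws_pool_element_find_seg() clears a bit on allocation and
+ * hws_onesize_element_db_put_chunk() sets it back when the chunk is freed.
+ */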
+static unsigned long *hws_pool_create_and_init_bitmap(u32 log_range)
+{
+ unsigned long *cur_bmp;
+
+ cur_bmp = bitmap_zalloc(1 << log_range, GFP_KERNEL);
+ if (!cur_bmp)
+ return NULL;
+
+ bitmap_fill(cur_bmp, 1 << log_range);
+
+ return cur_bmp;
+}
+
+static void hws_pool_buddy_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ struct mlx5hws_buddy_mem *buddy;
+
+ buddy = pool->db.buddy_manager->buddies[chunk->resource_idx];
+ if (!buddy) {
+ mlx5hws_err(pool->ctx, "No such buddy (%d)\n", chunk->resource_idx);
+ return;
+ }
+
+ mlx5hws_buddy_free_mem(buddy, chunk->offset, chunk->order);
+}
+
+static struct mlx5hws_buddy_mem *
+hws_pool_buddy_get_next_buddy(struct mlx5hws_pool *pool, int idx,
+ u32 order, bool *is_new_buddy)
+{
+ struct mlx5hws_buddy_mem *buddy;
+ u32 new_buddy_size;
+
+ buddy = pool->db.buddy_manager->buddies[idx];
+ if (buddy)
+ return buddy;
+
+ new_buddy_size = max(pool->alloc_log_sz, order);
+ *is_new_buddy = true;
+ buddy = mlx5hws_buddy_create(new_buddy_size);
+ if (!buddy) {
+ mlx5hws_err(pool->ctx, "Failed to create buddy order: %d index: %d\n",
+ new_buddy_size, idx);
+ return NULL;
+ }
+
+ if (hws_pool_resource_alloc(pool, new_buddy_size, idx) != 0) {
+ mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
+ pool->type, new_buddy_size, idx);
+ mlx5hws_buddy_cleanup(buddy);
+ return NULL;
+ }
+
+ pool->db.buddy_manager->buddies[idx] = buddy;
+
+ return buddy;
+}
+
+static int hws_pool_buddy_get_mem_chunk(struct mlx5hws_pool *pool,
+ int order,
+ u32 *buddy_idx,
+ int *seg)
+{
+ struct mlx5hws_buddy_mem *buddy;
+ bool new_mem = false;
+ int ret = 0;
+ int i;
+
+ *seg = -1;
+
+ /* Find the next free place from the buddy array */
+ while (*seg == -1) {
+ for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+ buddy = hws_pool_buddy_get_next_buddy(pool, i,
+ order,
+ &new_mem);
+ if (!buddy) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ *seg = mlx5hws_buddy_alloc_mem(buddy, order);
+ if (*seg != -1)
+ goto found;
+
+ if (pool->flags & MLX5HWS_POOL_FLAGS_ONE_RESOURCE) {
+ mlx5hws_err(pool->ctx,
+ "Fail to allocate seg for one resource pool\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (new_mem) {
+ /* A newly created pool should have had room for this order; this is an error */
+ mlx5hws_err(pool->ctx,
+ "No memory for order: %d with buddy no: %d\n",
+ order, i);
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ }
+
+found:
+ *buddy_idx = i;
+out:
+ return ret;
+}
+
+static int hws_pool_buddy_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ int ret = 0;
+
+ /* Go over the buddies and find the next free slot */
+ ret = hws_pool_buddy_get_mem_chunk(pool, chunk->order,
+ &chunk->resource_idx,
+ &chunk->offset);
+ if (ret)
+ mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
+ chunk->order);
+
+ return ret;
+}
+
+static void hws_pool_buddy_db_uninit(struct mlx5hws_pool *pool)
+{
+ struct mlx5hws_buddy_mem *buddy;
+ int i;
+
+ for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+ buddy = pool->db.buddy_manager->buddies[i];
+ if (buddy) {
+ mlx5hws_buddy_cleanup(buddy);
+ kfree(buddy);
+ pool->db.buddy_manager->buddies[i] = NULL;
+ }
+ }
+
+ kfree(pool->db.buddy_manager);
+}
+
+static int hws_pool_buddy_db_init(struct mlx5hws_pool *pool, u32 log_range)
+{
+ pool->db.buddy_manager = kzalloc(sizeof(*pool->db.buddy_manager), GFP_KERNEL);
+ if (!pool->db.buddy_manager)
+ return -ENOMEM;
+
+ if (pool->flags & MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE) {
+ bool new_buddy;
+
+ if (!hws_pool_buddy_get_next_buddy(pool, 0, log_range, &new_buddy)) {
+ mlx5hws_err(pool->ctx,
+ "Failed allocating memory on create log_sz: %d\n", log_range);
+ kfree(pool->db.buddy_manager);
+ return -ENOMEM;
+ }
+ }
+
+ pool->p_db_uninit = &hws_pool_buddy_db_uninit;
+ pool->p_get_chunk = &hws_pool_buddy_db_get_chunk;
+ pool->p_put_chunk = &hws_pool_buddy_db_put_chunk;
+
+ return 0;
+}
+
+static int hws_pool_create_resource_on_index(struct mlx5hws_pool *pool,
+ u32 alloc_size, int idx)
+{
+ int ret = hws_pool_resource_alloc(pool, alloc_size, idx);
+
+ if (ret) {
+ mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
+ pool->type, alloc_size, idx);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct mlx5hws_pool_elements *
+hws_pool_element_create_new_elem(struct mlx5hws_pool *pool, u32 order, int idx)
+{
+ struct mlx5hws_pool_elements *elem;
+ u32 alloc_size;
+
+ alloc_size = pool->alloc_log_sz;
+
+ elem = kzalloc(sizeof(*elem), GFP_KERNEL);
+ if (!elem)
+ return NULL;
+
+ /* Sharing the same resource also means that all the elements have size 1 */
+ if ((pool->flags & MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS) &&
+ !(pool->flags & MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK)) {
+ /* Currently all chunks are of size 1 */
+ elem->bitmap = hws_pool_create_and_init_bitmap(alloc_size - order);
+ if (!elem->bitmap) {
+ mlx5hws_err(pool->ctx,
+ "Failed to create bitmap type: %d: size %d index: %d\n",
+ pool->type, alloc_size, idx);
+ goto free_elem;
+ }
+
+ elem->log_size = alloc_size - order;
+ }
+
+ if (hws_pool_create_resource_on_index(pool, alloc_size, idx)) {
+ mlx5hws_err(pool->ctx, "Failed to create resource type: %d: size %d index: %d\n",
+ pool->type, alloc_size, idx);
+ goto free_db;
+ }
+
+ pool->db.element_manager->elements[idx] = elem;
+
+ return elem;
+
+free_db:
+ bitmap_free(elem->bitmap);
+free_elem:
+ kfree(elem);
+ return NULL;
+}
+
+static int hws_pool_element_find_seg(struct mlx5hws_pool_elements *elem, int *seg)
+{
+ unsigned int segment, size;
+
+ size = 1 << elem->log_size;
+
+ segment = find_first_bit(elem->bitmap, size);
+ if (segment >= size) {
+ elem->is_full = true;
+ return -ENOMEM;
+ }
+
+ bitmap_clear(elem->bitmap, segment, 1);
+ *seg = segment;
+ return 0;
+}
+
+static int
+hws_pool_onesize_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
+ u32 *idx, int *seg)
+{
+ struct mlx5hws_pool_elements *elem;
+
+ elem = pool->db.element_manager->elements[0];
+ if (!elem)
+ elem = hws_pool_element_create_new_elem(pool, order, 0);
+ if (!elem)
+ goto err_no_elem;
+
+ if (hws_pool_element_find_seg(elem, seg) != 0) {
+ mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
+ return -ENOMEM;
+ }
+
+ *idx = 0;
+ elem->num_of_elements++;
+ return 0;
+
+err_no_elem:
+ mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
+ return -ENOMEM;
+}
+
+static int
+hws_pool_general_element_get_mem_chunk(struct mlx5hws_pool *pool, u32 order,
+ u32 *idx, int *seg)
+{
+ int ret, i;
+
+ for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+ if (!pool->resource[i]) {
+ ret = hws_pool_create_resource_on_index(pool, order, i);
+ if (ret)
+ goto err_no_res;
+ *idx = i;
+ *seg = 0; /* One memory slot in that element */
+ return 0;
+ }
+ }
+
+ mlx5hws_err(pool->ctx, "No more resources (last request order: %d)\n", order);
+ return -ENOMEM;
+
+err_no_res:
+ mlx5hws_err(pool->ctx, "Failed to allocate element for order: %d\n", order);
+ return -ENOMEM;
+}
+
+static int hws_pool_general_element_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ int ret;
+
+ /* Go over all memory elements and find/allocate a free slot */
+ ret = hws_pool_general_element_get_mem_chunk(pool, chunk->order,
+ &chunk->resource_idx,
+ &chunk->offset);
+ if (ret)
+ mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
+ chunk->order);
+
+ return ret;
+}
+
+static void hws_pool_general_element_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ if (unlikely(!pool->resource[chunk->resource_idx]))
+ pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
+
+ if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE)
+ hws_pool_resource_free(pool, chunk->resource_idx);
+}
+
+static void hws_pool_general_element_db_uninit(struct mlx5hws_pool *pool)
+{
+ (void)pool;
+}
+
+/* This memory management works as follows:
+ * - At start, no memory is allocated at all.
+ * - When a new chunk request arrives:
+ *   a resource is allocated and handed out.
+ * - When that chunk is freed:
+ *   the resource is freed as well.
+ */
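+/* For example, a pool created with MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL
+ * (RELEASE_FREE_RESOURCE | RESOURCE_PER_CHUNK) selects this DB type in
+ * mlx5hws_pool_create(), so every chunk gets its own FW resource and uses
+ * offset 0 within it.
+ */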
+static int hws_pool_general_element_db_init(struct mlx5hws_pool *pool)
+{
+ pool->p_db_uninit = &hws_pool_general_element_db_uninit;
+ pool->p_get_chunk = &hws_pool_general_element_db_get_chunk;
+ pool->p_put_chunk = &hws_pool_general_element_db_put_chunk;
+
+ return 0;
+}
+
+static void hws_onesize_element_db_destroy_element(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_elements *elem,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ if (unlikely(!pool->resource[chunk->resource_idx]))
+ pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
+
+ hws_pool_resource_free(pool, chunk->resource_idx);
+ kfree(elem);
+ pool->db.element_manager->elements[chunk->resource_idx] = NULL;
+}
+
+static void hws_onesize_element_db_put_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ struct mlx5hws_pool_elements *elem;
+
+ if (unlikely(chunk->resource_idx))
+ pr_warn("HWS: invalid resource with index %d\n", chunk->resource_idx);
+
+ elem = pool->db.element_manager->elements[chunk->resource_idx];
+ if (!elem) {
+ mlx5hws_err(pool->ctx, "No such element (%d)\n", chunk->resource_idx);
+ return;
+ }
+
+ bitmap_set(elem->bitmap, chunk->offset, 1);
+ elem->is_full = false;
+ elem->num_of_elements--;
+
+ if (pool->flags & MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE &&
+ !elem->num_of_elements)
+ hws_onesize_element_db_destroy_element(pool, elem, chunk);
+}
+
+static int hws_onesize_element_db_get_chunk(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ int ret = 0;
+
+ /* Go over all memory elements and find/allocate a free slot */
+ ret = hws_pool_onesize_element_get_mem_chunk(pool, chunk->order,
+ &chunk->resource_idx,
+ &chunk->offset);
+ if (ret)
+ mlx5hws_err(pool->ctx, "Failed to get free slot for chunk with order: %d\n",
+ chunk->order);
+
+ return ret;
+}
+
+static void hws_onesize_element_db_uninit(struct mlx5hws_pool *pool)
+{
+ struct mlx5hws_pool_elements *elem;
+ int i;
+
+ for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++) {
+ elem = pool->db.element_manager->elements[i];
+ if (elem) {
+ bitmap_free(elem->bitmap);
+ kfree(elem);
+ pool->db.element_manager->elements[i] = NULL;
+ }
+ }
+ kfree(pool->db.element_manager);
+}
+
+/* This memory management works as follows:
+ * - At start, no memory is allocated at all.
+ * - When a new chunk request arrives:
+ *   allocate from the first and only memory/resource slot;
+ *   once it is exhausted, return an error.
+ */
+static int hws_pool_onesize_element_db_init(struct mlx5hws_pool *pool)
+{
+ pool->db.element_manager = kzalloc(sizeof(*pool->db.element_manager), GFP_KERNEL);
+ if (!pool->db.element_manager)
+ return -ENOMEM;
+
+ pool->p_db_uninit = &hws_onesize_element_db_uninit;
+ pool->p_get_chunk = &hws_onesize_element_db_get_chunk;
+ pool->p_put_chunk = &hws_onesize_element_db_put_chunk;
+
+ return 0;
+}
+
+static int hws_pool_db_init(struct mlx5hws_pool *pool,
+ enum mlx5hws_db_type db_type)
+{
+ int ret;
+
+ if (db_type == MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE)
+ ret = hws_pool_general_element_db_init(pool);
+ else if (db_type == MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE)
+ ret = hws_pool_onesize_element_db_init(pool);
+ else
+ ret = hws_pool_buddy_db_init(pool, pool->alloc_log_sz);
+
+ if (ret) {
+ mlx5hws_err(pool->ctx, "Failed to init pool db type %d (ret: %d)\n", db_type, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hws_pool_db_uninit(struct mlx5hws_pool *pool)
+{
+ pool->p_db_uninit(pool);
+}
+
+int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ int ret;
+
+ mutex_lock(&pool->lock);
+ ret = pool->p_get_chunk(pool, chunk);
+ mutex_unlock(&pool->lock);
+
+ return ret;
+}
+
+void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ mutex_lock(&pool->lock);
+ pool->p_put_chunk(pool, chunk);
+ mutex_unlock(&pool->lock);
+}
+
+struct mlx5hws_pool *
+mlx5hws_pool_create(struct mlx5hws_context *ctx, struct mlx5hws_pool_attr *pool_attr)
+{
+ enum mlx5hws_db_type res_db_type;
+ struct mlx5hws_pool *pool;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ pool->ctx = ctx;
+ pool->type = pool_attr->pool_type;
+ pool->alloc_log_sz = pool_attr->alloc_log_sz;
+ pool->flags = pool_attr->flags;
+ pool->tbl_type = pool_attr->table_type;
+ pool->opt_type = pool_attr->opt_type;
+
+ /* Select the DB type according to the pool flags */
+ if (pool->flags == (MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
+ MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK))
+ res_db_type = MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE;
+ else if (pool->flags == (MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
+ MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS))
+ res_db_type = MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE;
+ else
+ res_db_type = MLX5HWS_POOL_DB_TYPE_BUDDY;
+
+ pool->alloc_log_sz = pool_attr->alloc_log_sz;
+
+ if (hws_pool_db_init(pool, res_db_type))
+ goto free_pool;
+
+ mutex_init(&pool->lock);
+
+ return pool;
+
+free_pool:
+ kfree(pool);
+ return NULL;
+}
+
+int mlx5hws_pool_destroy(struct mlx5hws_pool *pool)
+{
+ int i;
+
+ mutex_destroy(&pool->lock);
+
+ for (i = 0; i < MLX5HWS_POOL_RESOURCE_ARR_SZ; i++)
+ if (pool->resource[i])
+ hws_pool_resource_free(pool, i);
+
+ hws_pool_db_uninit(pool);
+
+ kfree(pool);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h
new file mode 100644
index 000000000000..621298b352b2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_pool.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_POOL_H_
+#define MLX5HWS_POOL_H_
+
+#define MLX5HWS_POOL_STC_LOG_SZ 15
+
+#define MLX5HWS_POOL_RESOURCE_ARR_SZ 100
+
+enum mlx5hws_pool_type {
+ MLX5HWS_POOL_TYPE_STE,
+ MLX5HWS_POOL_TYPE_STC,
+};
+
+struct mlx5hws_pool_chunk {
+ u32 resource_idx;
+ /* Internal offset, relative to base index */
+ int offset;
+ int order;
+};
+
+struct mlx5hws_pool_resource {
+ struct mlx5hws_pool *pool;
+ u32 base_id;
+ u32 range;
+};
+
+enum mlx5hws_pool_flags {
+ /* Only one resource in that pool */
+ MLX5HWS_POOL_FLAGS_ONE_RESOURCE = 1 << 0,
+ MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE = 1 << 1,
+ /* Resources are not shared between chunks */
+ MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK = 1 << 2,
+ /* All objects are of the same size */
+ MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS = 1 << 3,
+ /* Managed by buddy allocator */
+ MLX5HWS_POOL_FLAGS_BUDDY_MANAGED = 1 << 4,
+ /* Allocate pool_type memory on pool creation */
+ MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE = 1 << 5,
+
+ /* These values should be used by the caller */
+ MLX5HWS_POOL_FLAGS_FOR_STC_POOL =
+ MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
+ MLX5HWS_POOL_FLAGS_FIXED_SIZE_OBJECTS,
+ MLX5HWS_POOL_FLAGS_FOR_MATCHER_STE_POOL =
+ MLX5HWS_POOL_FLAGS_RELEASE_FREE_RESOURCE |
+ MLX5HWS_POOL_FLAGS_RESOURCE_PER_CHUNK,
+ MLX5HWS_POOL_FLAGS_FOR_STE_ACTION_POOL =
+ MLX5HWS_POOL_FLAGS_ONE_RESOURCE |
+ MLX5HWS_POOL_FLAGS_BUDDY_MANAGED |
+ MLX5HWS_POOL_FLAGS_ALLOC_MEM_ON_CREATE,
+};
+
+enum mlx5hws_pool_optimize {
+ MLX5HWS_POOL_OPTIMIZE_NONE = 0x0,
+ MLX5HWS_POOL_OPTIMIZE_ORIG = 0x1,
+ MLX5HWS_POOL_OPTIMIZE_MIRROR = 0x2,
+};
+
+struct mlx5hws_pool_attr {
+ enum mlx5hws_pool_type pool_type;
+ enum mlx5hws_table_type table_type;
+ enum mlx5hws_pool_flags flags;
+ enum mlx5hws_pool_optimize opt_type;
+ /* Allocation size once memory is depleted */
+ size_t alloc_log_sz;
+};
+
+enum mlx5hws_db_type {
+ /* Used for allocating large chunks of memory; each element has its own resource in the FW */
+ MLX5HWS_POOL_DB_TYPE_GENERAL_SIZE,
+ /* One resource only; all the elements have the same single size */
+ MLX5HWS_POOL_DB_TYPE_ONE_SIZE_RESOURCE,
+ /* Many resources; memory is allocated with the buddy mechanism */
+ MLX5HWS_POOL_DB_TYPE_BUDDY,
+};
+
+struct mlx5hws_buddy_manager {
+ struct mlx5hws_buddy_mem *buddies[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+};
+
+struct mlx5hws_pool_elements {
+ u32 num_of_elements;
+ unsigned long *bitmap;
+ u32 log_size;
+ bool is_full;
+};
+
+struct mlx5hws_element_manager {
+ struct mlx5hws_pool_elements *elements[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+};
+
+struct mlx5hws_pool_db {
+ enum mlx5hws_db_type type;
+ union {
+ struct mlx5hws_element_manager *element_manager;
+ struct mlx5hws_buddy_manager *buddy_manager;
+ };
+};
+
+typedef int (*mlx5hws_pool_db_get_chunk)(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+typedef void (*mlx5hws_pool_db_put_chunk)(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+typedef void (*mlx5hws_pool_uninit_db)(struct mlx5hws_pool *pool);
+
+struct mlx5hws_pool {
+ struct mlx5hws_context *ctx;
+ enum mlx5hws_pool_type type;
+ enum mlx5hws_pool_flags flags;
+ struct mutex lock; /* protect the pool */
+ size_t alloc_log_sz;
+ enum mlx5hws_table_type tbl_type;
+ enum mlx5hws_pool_optimize opt_type;
+ struct mlx5hws_pool_resource *resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+ struct mlx5hws_pool_resource *mirror_resource[MLX5HWS_POOL_RESOURCE_ARR_SZ];
+ /* DB */
+ struct mlx5hws_pool_db db;
+ /* Functions */
+ mlx5hws_pool_uninit_db p_db_uninit;
+ mlx5hws_pool_db_get_chunk p_get_chunk;
+ mlx5hws_pool_db_put_chunk p_put_chunk;
+};
+
+struct mlx5hws_pool *
+mlx5hws_pool_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_pool_attr *pool_attr);
+
+int mlx5hws_pool_destroy(struct mlx5hws_pool *pool);
+
+int mlx5hws_pool_chunk_alloc(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+
+void mlx5hws_pool_chunk_free(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk);
+
+static inline u32
+mlx5hws_pool_chunk_get_base_id(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ return pool->resource[chunk->resource_idx]->base_id;
+}
+
+static inline u32
+mlx5hws_pool_chunk_get_base_mirror_id(struct mlx5hws_pool *pool,
+ struct mlx5hws_pool_chunk *chunk)
+{
+ return pool->mirror_resource[chunk->resource_idx]->base_id;
+}
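+
+/* Illustrative usage sketch (not part of this patch): create a pool, allocate
+ * a chunk of a given order, derive the FW object ID from the resource base ID
+ * plus the chunk offset, then free the chunk when done:
+ *
+ * struct mlx5hws_pool_chunk chunk = { .order = 0 };
+ *
+ * if (!mlx5hws_pool_chunk_alloc(pool, &chunk)) {
+ * u32 obj_id = mlx5hws_pool_chunk_get_base_id(pool, &chunk) + chunk.offset;
+ * ... use obj_id ...
+ * mlx5hws_pool_chunk_free(pool, &chunk);
+ * }
+ */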
+#endif /* MLX5HWS_POOL_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h
new file mode 100644
index 000000000000..de92cecbeb92
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_prm.h
@@ -0,0 +1,514 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5_PRM_H_
+#define MLX5_PRM_H_
+
+#define MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY 512
+
+/* Action type of header modification. */
+enum {
+ MLX5_MODIFICATION_TYPE_SET = 0x1,
+ MLX5_MODIFICATION_TYPE_ADD = 0x2,
+ MLX5_MODIFICATION_TYPE_COPY = 0x3,
+ MLX5_MODIFICATION_TYPE_INSERT = 0x4,
+ MLX5_MODIFICATION_TYPE_REMOVE = 0x5,
+ MLX5_MODIFICATION_TYPE_NOP = 0x6,
+ MLX5_MODIFICATION_TYPE_REMOVE_WORDS = 0x7,
+ MLX5_MODIFICATION_TYPE_ADD_FIELD = 0x8,
+ MLX5_MODIFICATION_TYPE_MAX,
+};
+
+/* The packet field to be modified. */
+enum mlx5_modification_field {
+ MLX5_MODI_OUT_NONE = -1,
+ MLX5_MODI_OUT_SMAC_47_16 = 1,
+ MLX5_MODI_OUT_SMAC_15_0,
+ MLX5_MODI_OUT_ETHERTYPE,
+ MLX5_MODI_OUT_DMAC_47_16,
+ MLX5_MODI_OUT_DMAC_15_0,
+ MLX5_MODI_OUT_IP_DSCP,
+ MLX5_MODI_OUT_TCP_FLAGS,
+ MLX5_MODI_OUT_TCP_SPORT,
+ MLX5_MODI_OUT_TCP_DPORT,
+ MLX5_MODI_OUT_IPV4_TTL,
+ MLX5_MODI_OUT_UDP_SPORT,
+ MLX5_MODI_OUT_UDP_DPORT,
+ MLX5_MODI_OUT_SIPV6_127_96,
+ MLX5_MODI_OUT_SIPV6_95_64,
+ MLX5_MODI_OUT_SIPV6_63_32,
+ MLX5_MODI_OUT_SIPV6_31_0,
+ MLX5_MODI_OUT_DIPV6_127_96,
+ MLX5_MODI_OUT_DIPV6_95_64,
+ MLX5_MODI_OUT_DIPV6_63_32,
+ MLX5_MODI_OUT_DIPV6_31_0,
+ MLX5_MODI_OUT_SIPV4,
+ MLX5_MODI_OUT_DIPV4,
+ MLX5_MODI_OUT_FIRST_VID,
+ MLX5_MODI_IN_SMAC_47_16 = 0x31,
+ MLX5_MODI_IN_SMAC_15_0,
+ MLX5_MODI_IN_ETHERTYPE,
+ MLX5_MODI_IN_DMAC_47_16,
+ MLX5_MODI_IN_DMAC_15_0,
+ MLX5_MODI_IN_IP_DSCP,
+ MLX5_MODI_IN_TCP_FLAGS,
+ MLX5_MODI_IN_TCP_SPORT,
+ MLX5_MODI_IN_TCP_DPORT,
+ MLX5_MODI_IN_IPV4_TTL,
+ MLX5_MODI_IN_UDP_SPORT,
+ MLX5_MODI_IN_UDP_DPORT,
+ MLX5_MODI_IN_SIPV6_127_96,
+ MLX5_MODI_IN_SIPV6_95_64,
+ MLX5_MODI_IN_SIPV6_63_32,
+ MLX5_MODI_IN_SIPV6_31_0,
+ MLX5_MODI_IN_DIPV6_127_96,
+ MLX5_MODI_IN_DIPV6_95_64,
+ MLX5_MODI_IN_DIPV6_63_32,
+ MLX5_MODI_IN_DIPV6_31_0,
+ MLX5_MODI_IN_SIPV4,
+ MLX5_MODI_IN_DIPV4,
+ MLX5_MODI_OUT_IPV6_HOPLIMIT,
+ MLX5_MODI_IN_IPV6_HOPLIMIT,
+ MLX5_MODI_META_DATA_REG_A,
+ MLX5_MODI_META_DATA_REG_B = 0x50,
+ MLX5_MODI_META_REG_C_0,
+ MLX5_MODI_META_REG_C_1,
+ MLX5_MODI_META_REG_C_2,
+ MLX5_MODI_META_REG_C_3,
+ MLX5_MODI_META_REG_C_4,
+ MLX5_MODI_META_REG_C_5,
+ MLX5_MODI_META_REG_C_6,
+ MLX5_MODI_META_REG_C_7,
+ MLX5_MODI_OUT_TCP_SEQ_NUM,
+ MLX5_MODI_IN_TCP_SEQ_NUM,
+ MLX5_MODI_OUT_TCP_ACK_NUM,
+ MLX5_MODI_IN_TCP_ACK_NUM = 0x5C,
+ MLX5_MODI_GTP_TEID = 0x6E,
+ MLX5_MODI_OUT_IP_ECN = 0x73,
+ MLX5_MODI_TUNNEL_HDR_DW_1 = 0x75,
+ MLX5_MODI_GTPU_FIRST_EXT_DW_0 = 0x76,
+ MLX5_MODI_HASH_RESULT = 0x81,
+ MLX5_MODI_IN_MPLS_LABEL_0 = 0x8a,
+ MLX5_MODI_IN_MPLS_LABEL_1,
+ MLX5_MODI_IN_MPLS_LABEL_2,
+ MLX5_MODI_IN_MPLS_LABEL_3,
+ MLX5_MODI_IN_MPLS_LABEL_4,
+ MLX5_MODI_OUT_IP_PROTOCOL = 0x4A,
+ MLX5_MODI_OUT_IPV6_NEXT_HDR = 0x4A,
+ MLX5_MODI_META_REG_C_8 = 0x8F,
+ MLX5_MODI_META_REG_C_9 = 0x90,
+ MLX5_MODI_META_REG_C_10 = 0x91,
+ MLX5_MODI_META_REG_C_11 = 0x92,
+ MLX5_MODI_META_REG_C_12 = 0x93,
+ MLX5_MODI_META_REG_C_13 = 0x94,
+ MLX5_MODI_META_REG_C_14 = 0x95,
+ MLX5_MODI_META_REG_C_15 = 0x96,
+ MLX5_MODI_OUT_IPV4_TOTAL_LEN = 0x11D,
+ MLX5_MODI_OUT_IPV6_PAYLOAD_LEN = 0x11E,
+ MLX5_MODI_OUT_IPV4_IHL = 0x11F,
+ MLX5_MODI_OUT_TCP_DATA_OFFSET = 0x120,
+ MLX5_MODI_OUT_ESP_SPI = 0x5E,
+ MLX5_MODI_OUT_ESP_SEQ_NUM = 0x82,
+ MLX5_MODI_OUT_IPSEC_NEXT_HDR = 0x126,
+ MLX5_MODI_INVALID = INT_MAX,
+};
+
+enum {
+ MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE = 0x7 << 1,
+ MLX5_GET_HCA_CAP_OP_MOD_ESW_FLOW_TABLE = 0x8 << 1,
+ MLX5_SET_HCA_CAP_OP_MOD_ESW = 0x9 << 1,
+ MLX5_GET_HCA_CAP_OP_MOD_WQE_BASED_FLOW_TABLE = 0x1B << 1,
+ MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 = 0x20 << 1,
+};
+
+enum mlx5_ifc_rtc_update_mode {
+ MLX5_IFC_RTC_STE_UPDATE_MODE_BY_HASH = 0x0,
+ MLX5_IFC_RTC_STE_UPDATE_MODE_BY_OFFSET = 0x1,
+};
+
+enum mlx5_ifc_rtc_access_mode {
+ MLX5_IFC_RTC_STE_ACCESS_MODE_BY_HASH = 0x0,
+ MLX5_IFC_RTC_STE_ACCESS_MODE_LINEAR = 0x1,
+};
+
+enum mlx5_ifc_rtc_ste_format {
+ MLX5_IFC_RTC_STE_FORMAT_8DW = 0x4,
+ MLX5_IFC_RTC_STE_FORMAT_11DW = 0x5,
+ MLX5_IFC_RTC_STE_FORMAT_RANGE = 0x7,
+};
+
+enum mlx5_ifc_rtc_reparse_mode {
+ MLX5_IFC_RTC_REPARSE_NEVER = 0x0,
+ MLX5_IFC_RTC_REPARSE_ALWAYS = 0x1,
+ MLX5_IFC_RTC_REPARSE_BY_STC = 0x2,
+};
+
+#define MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX 16
+
+struct mlx5_ifc_rtc_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x40];
+ u8 update_index_mode[0x2];
+ u8 reparse_mode[0x2];
+ u8 num_match_ste[0x4];
+ u8 pd[0x18];
+ u8 reserved_at_a0[0x9];
+ u8 access_index_mode[0x3];
+ u8 num_hash_definer[0x4];
+ u8 update_method[0x1];
+ u8 reserved_at_b1[0x2];
+ u8 log_depth[0x5];
+ u8 log_hash_size[0x8];
+ u8 ste_format_0[0x8];
+ u8 table_type[0x8];
+ u8 ste_format_1[0x8];
+ u8 reserved_at_d8[0x8];
+ u8 match_definer_0[0x20];
+ u8 stc_id[0x20];
+ u8 ste_table_base_id[0x20];
+ u8 ste_table_offset[0x20];
+ u8 reserved_at_160[0x8];
+ u8 miss_flow_table_id[0x18];
+ u8 match_definer_1[0x20];
+ u8 reserved_at_1a0[0x260];
+};
+
+enum mlx5_ifc_stc_action_type {
+ MLX5_IFC_STC_ACTION_TYPE_NOP = 0x00,
+ MLX5_IFC_STC_ACTION_TYPE_COPY = 0x05,
+ MLX5_IFC_STC_ACTION_TYPE_SET = 0x06,
+ MLX5_IFC_STC_ACTION_TYPE_ADD = 0x07,
+ MLX5_IFC_STC_ACTION_TYPE_REMOVE_WORDS = 0x08,
+ MLX5_IFC_STC_ACTION_TYPE_HEADER_REMOVE = 0x09,
+ MLX5_IFC_STC_ACTION_TYPE_HEADER_INSERT = 0x0b,
+ MLX5_IFC_STC_ACTION_TYPE_TAG = 0x0c,
+ MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST = 0x0e,
+ MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_ENCRYPTION = 0x10,
+ MLX5_IFC_STC_ACTION_TYPE_CRYPTO_IPSEC_DECRYPTION = 0x11,
+ MLX5_IFC_STC_ACTION_TYPE_ASO = 0x12,
+ MLX5_IFC_STC_ACTION_TYPE_TRAILER = 0x13,
+ MLX5_IFC_STC_ACTION_TYPE_COUNTER = 0x14,
+ MLX5_IFC_STC_ACTION_TYPE_ADD_FIELD = 0x1b,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_STE_TABLE = 0x80,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_TIR = 0x81,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT = 0x82,
+ MLX5_IFC_STC_ACTION_TYPE_DROP = 0x83,
+ MLX5_IFC_STC_ACTION_TYPE_ALLOW = 0x84,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT = 0x85,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK = 0x86,
+};
+
+enum mlx5_ifc_stc_reparse_mode {
+ MLX5_IFC_STC_REPARSE_IGNORE = 0x0,
+ MLX5_IFC_STC_REPARSE_NEVER = 0x1,
+ MLX5_IFC_STC_REPARSE_ALWAYS = 0x2,
+};
+
+struct mlx5_ifc_stc_ste_param_ste_table_bits {
+ u8 ste_obj_id[0x20];
+ u8 match_definer_id[0x20];
+ u8 reserved_at_40[0x3];
+ u8 log_hash_size[0x5];
+ u8 reserved_at_48[0x38];
+};
+
+struct mlx5_ifc_stc_ste_param_tir_bits {
+ u8 reserved_at_0[0x8];
+ u8 tirn[0x18];
+ u8 reserved_at_20[0x60];
+};
+
+struct mlx5_ifc_stc_ste_param_table_bits {
+ u8 reserved_at_0[0x8];
+ u8 table_id[0x18];
+ u8 reserved_at_20[0x60];
+};
+
+struct mlx5_ifc_stc_ste_param_flow_counter_bits {
+ u8 flow_counter_id[0x20];
+};
+
+enum {
+ MLX5_ASO_CT_NUM_PER_OBJ = 1,
+ MLX5_ASO_METER_NUM_PER_OBJ = 2,
+ MLX5_ASO_IPSEC_NUM_PER_OBJ = 1,
+ MLX5_ASO_FIRST_HIT_NUM_PER_OBJ = 512,
+};
+
+struct mlx5_ifc_stc_ste_param_execute_aso_bits {
+ u8 aso_object_id[0x20];
+ u8 return_reg_id[0x4];
+ u8 aso_type[0x4];
+ u8 reserved_at_28[0x18];
+};
+
+struct mlx5_ifc_stc_ste_param_ipsec_encrypt_bits {
+ u8 ipsec_object_id[0x20];
+};
+
+struct mlx5_ifc_stc_ste_param_ipsec_decrypt_bits {
+ u8 ipsec_object_id[0x20];
+};
+
+struct mlx5_ifc_stc_ste_param_trailer_bits {
+ u8 reserved_at_0[0x8];
+ u8 command[0x4];
+ u8 reserved_at_c[0x2];
+ u8 type[0x2];
+ u8 reserved_at_10[0xa];
+ u8 length[0x6];
+};
+
+struct mlx5_ifc_stc_ste_param_header_modify_list_bits {
+ u8 header_modify_pattern_id[0x20];
+ u8 header_modify_argument_id[0x20];
+};
+
+enum mlx5_ifc_header_anchors {
+ MLX5_HEADER_ANCHOR_PACKET_START = 0x0,
+ MLX5_HEADER_ANCHOR_MAC = 0x1,
+ MLX5_HEADER_ANCHOR_FIRST_VLAN_START = 0x2,
+ MLX5_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
+ MLX5_HEADER_ANCHOR_ESP = 0x08,
+ MLX5_HEADER_ANCHOR_TCP_UDP = 0x09,
+ MLX5_HEADER_ANCHOR_TUNNEL_HEADER = 0x0a,
+ MLX5_HEADER_ANCHOR_INNER_MAC = 0x13,
+ MLX5_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
+ MLX5_HEADER_ANCHOR_INNER_TCP_UDP = 0x1a,
+ MLX5_HEADER_ANCHOR_L4_PAYLOAD = 0x1b,
+ MLX5_HEADER_ANCHOR_INNER_L4_PAYLOAD = 0x1c
+};
+
+struct mlx5_ifc_stc_ste_param_remove_bits {
+ u8 action_type[0x4];
+ u8 decap[0x1];
+ u8 reserved_at_5[0x5];
+ u8 remove_start_anchor[0x6];
+ u8 reserved_at_10[0x2];
+ u8 remove_end_anchor[0x6];
+ u8 reserved_at_18[0x8];
+};
+
+struct mlx5_ifc_stc_ste_param_remove_words_bits {
+ u8 action_type[0x4];
+ u8 reserved_at_4[0x6];
+ u8 remove_start_anchor[0x6];
+ u8 reserved_at_10[0x1];
+ u8 remove_offset[0x7];
+ u8 reserved_at_18[0x2];
+ u8 remove_size[0x6];
+};
+
+struct mlx5_ifc_stc_ste_param_insert_bits {
+ u8 action_type[0x4];
+ u8 encap[0x1];
+ u8 inline_data[0x1];
+ u8 reserved_at_6[0x4];
+ u8 insert_anchor[0x6];
+ u8 reserved_at_10[0x1];
+ u8 insert_offset[0x7];
+ u8 reserved_at_18[0x1];
+ u8 insert_size[0x7];
+ u8 insert_argument[0x20];
+};
+
+struct mlx5_ifc_stc_ste_param_vport_bits {
+ u8 eswitch_owner_vhca_id[0x10];
+ u8 vport_number[0x10];
+ u8 eswitch_owner_vhca_id_valid[0x1];
+ u8 reserved_at_21[0x5f];
+};
+
+union mlx5_ifc_stc_param_bits {
+ struct mlx5_ifc_stc_ste_param_ste_table_bits ste_table;
+ struct mlx5_ifc_stc_ste_param_tir_bits tir;
+ struct mlx5_ifc_stc_ste_param_table_bits table;
+ struct mlx5_ifc_stc_ste_param_flow_counter_bits counter;
+ struct mlx5_ifc_stc_ste_param_header_modify_list_bits modify_header;
+ struct mlx5_ifc_stc_ste_param_execute_aso_bits aso;
+ struct mlx5_ifc_stc_ste_param_remove_bits remove_header;
+ struct mlx5_ifc_stc_ste_param_insert_bits insert_header;
+ struct mlx5_ifc_set_action_in_bits add;
+ struct mlx5_ifc_set_action_in_bits set;
+ struct mlx5_ifc_copy_action_in_bits copy;
+ struct mlx5_ifc_stc_ste_param_vport_bits vport;
+ struct mlx5_ifc_stc_ste_param_ipsec_encrypt_bits ipsec_encrypt;
+ struct mlx5_ifc_stc_ste_param_ipsec_decrypt_bits ipsec_decrypt;
+ struct mlx5_ifc_stc_ste_param_trailer_bits trailer;
+ u8 reserved_at_0[0x80];
+};
+
+enum {
+ MLX5_IFC_MODIFY_STC_FIELD_SELECT_NEW_STC = BIT(0),
+};
+
+struct mlx5_ifc_stc_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x46];
+ u8 reparse_mode[0x2];
+ u8 table_type[0x8];
+ u8 ste_action_offset[0x8];
+ u8 action_type[0x8];
+ u8 reserved_at_a0[0x60];
+ union mlx5_ifc_stc_param_bits stc_param;
+ u8 reserved_at_180[0x280];
+};
+
+struct mlx5_ifc_ste_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x48];
+ u8 table_type[0x8];
+ u8 reserved_at_90[0x370];
+};
+
+struct mlx5_ifc_definer_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x50];
+ u8 format_id[0x10];
+ u8 reserved_at_60[0x60];
+ u8 format_select_dw3[0x8];
+ u8 format_select_dw2[0x8];
+ u8 format_select_dw1[0x8];
+ u8 format_select_dw0[0x8];
+ u8 format_select_dw7[0x8];
+ u8 format_select_dw6[0x8];
+ u8 format_select_dw5[0x8];
+ u8 format_select_dw4[0x8];
+ u8 reserved_at_100[0x18];
+ u8 format_select_dw8[0x8];
+ u8 reserved_at_120[0x20];
+ u8 format_select_byte3[0x8];
+ u8 format_select_byte2[0x8];
+ u8 format_select_byte1[0x8];
+ u8 format_select_byte0[0x8];
+ u8 format_select_byte7[0x8];
+ u8 format_select_byte6[0x8];
+ u8 format_select_byte5[0x8];
+ u8 format_select_byte4[0x8];
+ u8 reserved_at_180[0x40];
+ u8 ctrl[0xa0];
+ u8 match_mask[0x160];
+};
+
+struct mlx5_ifc_arg_bits {
+ u8 rsvd0[0x88];
+ u8 access_pd[0x18];
+};
+
+struct mlx5_ifc_header_modify_pattern_in_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x40];
+
+ u8 pattern_length[0x8];
+ u8 reserved_at_88[0x18];
+
+ u8 reserved_at_a0[0x60];
+
+ u8 pattern_data[MLX5_MAX_ACTIONS_DATA_IN_HEADER_MODIFY * 8];
+};
+
+struct mlx5_ifc_create_rtc_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_rtc_bits rtc;
+};
+
+struct mlx5_ifc_create_stc_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_stc_bits stc;
+};
+
+struct mlx5_ifc_create_ste_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_ste_bits ste;
+};
+
+struct mlx5_ifc_create_definer_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_definer_bits definer;
+};
+
+struct mlx5_ifc_create_arg_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_arg_bits arg;
+};
+
+struct mlx5_ifc_create_header_modify_pattern_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_header_modify_pattern_in_bits pattern;
+};
+
+struct mlx5_ifc_generate_wqe_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mode[0x10];
+ u8 reserved_at_40[0x40];
+ u8 reserved_at_80[0x8];
+ u8 pdn[0x18];
+ u8 reserved_at_a0[0x160];
+ u8 wqe_ctrl[0x80];
+ u8 wqe_gta_ctrl[0x180];
+ u8 wqe_gta_data_0[0x200];
+ u8 wqe_gta_data_1[0x200];
+};
+
+struct mlx5_ifc_generate_wqe_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x1c0];
+ u8 cqe_data[0x200];
+};
+
+enum mlx5_access_aso_opc_mod {
+ ASO_OPC_MOD_IPSEC = 0x0,
+ ASO_OPC_MOD_CONNECTION_TRACKING = 0x1,
+ ASO_OPC_MOD_POLICER = 0x2,
+ ASO_OPC_MOD_RACE_AVOIDANCE = 0x3,
+ ASO_OPC_MOD_FLOW_HIT = 0x4,
+};
+
+enum {
+ MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION = BIT(0),
+ MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID = BIT(1),
+};
+
+enum {
+ MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_DEFAULT = 0,
+ MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL = 1,
+};
+
+struct mlx5_ifc_alloc_packet_reformat_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_dealloc_packet_reformat_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_dealloc_packet_reformat_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+#endif /* MLX5_PRM_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c
new file mode 100644
index 000000000000..8a011b958b43
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.c
@@ -0,0 +1,780 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+static void hws_rule_skip(struct mlx5hws_matcher *matcher,
+ struct mlx5hws_match_template *mt,
+ u32 flow_source,
+ bool *skip_rx, bool *skip_tx)
+{
+ /* By default FDB rules are added to both RX and TX */
+ *skip_rx = false;
+ *skip_tx = false;
+
+ if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT) {
+ *skip_rx = true;
+ } else if (flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK) {
+ *skip_tx = true;
+ } else {
+ /* If no flow source was set for the current rule,
+ * check for a flow source in the matcher attributes.
+ */
+ if (matcher->attr.optimize_flow_src) {
+ *skip_tx =
+ matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_WIRE;
+ *skip_rx =
+ matcher->attr.optimize_flow_src == MLX5HWS_MATCHER_FLOW_SRC_VPORT;
+ return;
+ }
+ }
+}
+
+static void
+hws_rule_update_copy_tag(struct mlx5hws_rule *rule,
+ struct mlx5hws_wqe_gta_data_seg_ste *wqe_data,
+ bool is_jumbo)
+{
+ struct mlx5hws_rule_match_tag *tag;
+
+ if (!mlx5hws_matcher_is_resizable(rule->matcher)) {
+ tag = &rule->tag;
+ } else {
+ struct mlx5hws_wqe_gta_data_seg_ste *data_seg =
+ (struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg;
+ tag = (struct mlx5hws_rule_match_tag *)(void *)data_seg->action;
+ }
+
+ if (is_jumbo)
+ memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ);
+ else
+ memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ);
+}
+
+static void hws_rule_init_dep_wqe(struct mlx5hws_send_ring_dep_wqe *dep_wqe,
+ struct mlx5hws_rule *rule,
+ struct mlx5hws_match_template *mt,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ bool skip_rx, skip_tx;
+
+ dep_wqe->rule = rule;
+ dep_wqe->user_data = attr->user_data;
+ dep_wqe->direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ?
+ attr->rule_idx : 0;
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ hws_rule_skip(matcher, mt, attr->flow_source, &skip_rx, &skip_tx);
+
+ if (!skip_rx) {
+ dep_wqe->rtc_0 = matcher->match_ste.rtc_0_id;
+ dep_wqe->retry_rtc_0 = matcher->col_matcher ?
+ matcher->col_matcher->match_ste.rtc_0_id : 0;
+ } else {
+ dep_wqe->rtc_0 = 0;
+ dep_wqe->retry_rtc_0 = 0;
+ }
+
+ if (!skip_tx) {
+ dep_wqe->rtc_1 = matcher->match_ste.rtc_1_id;
+ dep_wqe->retry_rtc_1 = matcher->col_matcher ?
+ matcher->col_matcher->match_ste.rtc_1_id : 0;
+ } else {
+ dep_wqe->rtc_1 = 0;
+ dep_wqe->retry_rtc_1 = 0;
+ }
+ } else {
+ pr_warn("HWS: invalid tbl->type: %d\n", tbl->type);
+ }
+}
+
+static void hws_rule_move_get_rtc(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ struct mlx5hws_matcher *dst_matcher = rule->matcher->resize_dst;
+
+ if (rule->resize_info->rtc_0) {
+ ste_attr->rtc_0 = dst_matcher->match_ste.rtc_0_id;
+ ste_attr->retry_rtc_0 = dst_matcher->col_matcher ?
+ dst_matcher->col_matcher->match_ste.rtc_0_id : 0;
+ }
+ if (rule->resize_info->rtc_1) {
+ ste_attr->rtc_1 = dst_matcher->match_ste.rtc_1_id;
+ ste_attr->retry_rtc_1 = dst_matcher->col_matcher ?
+ dst_matcher->col_matcher->match_ste.rtc_1_id : 0;
+ }
+}
+
+static void hws_rule_gen_comp(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_rule *rule,
+ bool err,
+ void *user_data,
+ enum mlx5hws_rule_status rule_status_on_succ)
+{
+ enum mlx5hws_flow_op_status comp_status;
+
+ if (!err) {
+ comp_status = MLX5HWS_FLOW_OP_SUCCESS;
+ rule->status = rule_status_on_succ;
+ } else {
+ comp_status = MLX5HWS_FLOW_OP_ERROR;
+ rule->status = MLX5HWS_RULE_STATUS_FAILED;
+ }
+
+ mlx5hws_send_engine_inc_rule(queue);
+ mlx5hws_send_engine_gen_comp(queue, user_data, comp_status);
+}
+
+static void
+hws_rule_save_resize_info(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr,
+ bool is_update)
+{
+ if (!mlx5hws_matcher_is_resizable(rule->matcher))
+ return;
+
+ if (likely(!is_update)) {
+ rule->resize_info = kzalloc(sizeof(*rule->resize_info), GFP_KERNEL);
+ if (unlikely(!rule->resize_info)) {
+ pr_warn("HWS: resize info isn't allocated for rule\n");
+ return;
+ }
+
+ rule->resize_info->max_stes =
+ rule->matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes;
+ rule->resize_info->action_ste_pool[0] = rule->matcher->action_ste[0].max_stes ?
+ rule->matcher->action_ste[0].pool :
+ NULL;
+ rule->resize_info->action_ste_pool[1] = rule->matcher->action_ste[1].max_stes ?
+ rule->matcher->action_ste[1].pool :
+ NULL;
+ }
+
+ memcpy(rule->resize_info->ctrl_seg, ste_attr->wqe_ctrl,
+ sizeof(rule->resize_info->ctrl_seg));
+ memcpy(rule->resize_info->data_seg, ste_attr->wqe_data,
+ sizeof(rule->resize_info->data_seg));
+}
+
+void mlx5hws_rule_clear_resize_info(struct mlx5hws_rule *rule)
+{
+ if (mlx5hws_matcher_is_resizable(rule->matcher) &&
+ rule->resize_info) {
+ kfree(rule->resize_info);
+ rule->resize_info = NULL;
+ }
+}
+
+static void
+hws_rule_save_delete_info(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ struct mlx5hws_match_template *mt = rule->matcher->mt;
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+
+ if (mlx5hws_matcher_is_resizable(rule->matcher))
+ return;
+
+ if (is_jumbo)
+ memcpy(&rule->tag.jumbo, ste_attr->wqe_data->jumbo, MLX5HWS_JUMBO_TAG_SZ);
+ else
+ memcpy(&rule->tag.match, ste_attr->wqe_data->tag, MLX5HWS_MATCH_TAG_SZ);
+}
+
+static void
+hws_rule_clear_delete_info(struct mlx5hws_rule *rule)
+{
+ /* nothing to do here */
+}
+
+static void
+hws_rule_load_delete_info(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher))) {
+ ste_attr->wqe_tag = &rule->tag;
+ } else {
+ struct mlx5hws_wqe_gta_data_seg_ste *data_seg =
+ (struct mlx5hws_wqe_gta_data_seg_ste *)(void *)rule->resize_info->data_seg;
+ struct mlx5hws_rule_match_tag *tag =
+ (struct mlx5hws_rule_match_tag *)(void *)data_seg->action;
+ ste_attr->wqe_tag = tag;
+ }
+}
+
+static int hws_rule_alloc_action_ste_idx(struct mlx5hws_rule *rule,
+ u8 action_ste_selector)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_matcher_action_ste *action_ste;
+ struct mlx5hws_pool_chunk ste = {0};
+ int ret;
+
+ action_ste = &matcher->action_ste[action_ste_selector];
+ ste.order = ilog2(roundup_pow_of_two(action_ste->max_stes));
+ ret = mlx5hws_pool_chunk_alloc(action_ste->pool, &ste);
+ if (unlikely(ret)) {
+ mlx5hws_err(matcher->tbl->ctx,
+ "Failed to allocate STE for rule actions\n");
+ return ret;
+ }
+ rule->action_ste_idx = ste.offset;
+
+ return 0;
+}
+
+static void hws_rule_free_action_ste_idx(struct mlx5hws_rule *rule,
+ u8 action_ste_selector)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_pool_chunk ste = {0};
+ struct mlx5hws_pool *pool;
+ u8 max_stes;
+
+ if (mlx5hws_matcher_is_resizable(matcher)) {
+ /* Free the original action pool if rule was resized */
+ max_stes = rule->resize_info->max_stes;
+ pool = rule->resize_info->action_ste_pool[action_ste_selector];
+ } else {
+ max_stes = matcher->action_ste[action_ste_selector].max_stes;
+ pool = matcher->action_ste[action_ste_selector].pool;
+ }
+
+ /* This release is safe only when the rule match part was deleted */
+ ste.order = ilog2(roundup_pow_of_two(max_stes));
+ ste.offset = rule->action_ste_idx;
+
+ mlx5hws_pool_chunk_free(pool, &ste);
+}
+
+static int hws_rule_alloc_action_ste(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ int action_ste_idx;
+ int ret;
+
+ ret = hws_rule_alloc_action_ste_idx(rule, 0);
+ if (unlikely(ret))
+ return ret;
+
+ action_ste_idx = rule->action_ste_idx;
+
+ ret = hws_rule_alloc_action_ste_idx(rule, 1);
+ if (unlikely(ret)) {
+ hws_rule_free_action_ste_idx(rule, 0);
+ return ret;
+ }
+
+ /* Both pools have to return the same index */
+ if (unlikely(rule->action_ste_idx != action_ste_idx)) {
+ pr_warn("HWS: allocation of action STE failed - pool indexes mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule)
+{
+ if (rule->action_ste_idx > -1) {
+ hws_rule_free_action_ste_idx(rule, 1);
+ hws_rule_free_action_ste_idx(rule, 0);
+ }
+}
+
+static void hws_rule_create_init(struct mlx5hws_rule *rule,
+ struct mlx5hws_send_ste_attr *ste_attr,
+ struct mlx5hws_actions_apply_data *apply,
+ bool is_update)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_table *tbl = matcher->tbl;
+ struct mlx5hws_context *ctx = tbl->ctx;
+
+ /* Init rule before reuse */
+ if (!is_update) {
+ /* In update flow the existing RTCs are reused, so don't reset them */
+ rule->rtc_0 = 0;
+ rule->rtc_1 = 0;
+ rule->action_ste_selector = 0;
+ } else {
+ rule->action_ste_selector = !rule->action_ste_selector;
+ }
+
+ rule->pending_wqes = 0;
+ rule->action_ste_idx = -1;
+ rule->status = MLX5HWS_RULE_STATUS_CREATING;
+
+ /* Init default send STE attributes */
+ ste_attr->gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+ ste_attr->send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr->send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr->send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+
+ /* Init default action apply */
+ apply->tbl_type = tbl->type;
+ apply->common_res = &ctx->common_res[tbl->type];
+ apply->jump_to_action_stc = matcher->action_ste[0].stc.offset;
+ apply->require_dep = 0;
+}
+
+static void hws_rule_move_init(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ /* Save the old RTC IDs to be later used in match STE delete */
+ rule->resize_info->rtc_0 = rule->rtc_0;
+ rule->resize_info->rtc_1 = rule->rtc_1;
+ rule->resize_info->rule_idx = attr->rule_idx;
+
+ rule->rtc_0 = 0;
+ rule->rtc_1 = 0;
+
+ rule->pending_wqes = 0;
+ rule->action_ste_idx = -1;
+ rule->action_ste_selector = 0;
+ rule->status = MLX5HWS_RULE_STATUS_CREATING;
+ rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_WRITING;
+}
+
+bool mlx5hws_rule_move_in_progress(struct mlx5hws_rule *rule)
+{
+ return mlx5hws_matcher_is_in_resize(rule->matcher) &&
+ rule->resize_info &&
+ rule->resize_info->state != MLX5HWS_RULE_RESIZE_STATE_IDLE;
+}
+
+static int hws_rule_create_hws(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr,
+ u8 mt_idx,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[])
+{
+ struct mlx5hws_action_template *at = &rule->matcher->at[at_idx];
+ struct mlx5hws_match_template *mt = &rule->matcher->mt[mt_idx];
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(mt);
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_context *ctx = matcher->tbl->ctx;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_ring_dep_wqe *dep_wqe;
+ struct mlx5hws_actions_wqe_setter *setter;
+ struct mlx5hws_actions_apply_data apply;
+ struct mlx5hws_send_engine *queue;
+ u8 total_stes, action_stes;
+ bool is_update;
+ int i, ret;
+
+ is_update = !match_param;
+
+ setter = &at->setters[at->num_of_action_stes];
+ total_stes = at->num_of_action_stes + (is_jumbo && !at->only_term);
+ action_stes = total_stes - 1;
+
+ queue = &ctx->send_queue[attr->queue_id];
+ if (unlikely(mlx5hws_send_engine_err(queue)))
+ return -EIO;
+
+ hws_rule_create_init(rule, &ste_attr, &apply, is_update);
+
+ /* Allocate dependent match WQE since rule might have dependent writes.
+ * The queued dependent WQE can be later aborted or kept as a dependency.
+ * dep_wqe buffers (ctrl, data) are also reused for all STE writes.
+ */
+ dep_wqe = mlx5hws_send_add_new_dep_wqe(queue);
+ hws_rule_init_dep_wqe(dep_wqe, rule, mt, attr);
+
+ ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
+ ste_attr.wqe_data = &dep_wqe->wqe_data;
+ apply.wqe_ctrl = &dep_wqe->wqe_ctrl;
+ apply.wqe_data = (__force __be32 *)&dep_wqe->wqe_data;
+ apply.rule_action = rule_actions;
+ apply.queue = queue;
+
+ if (action_stes) {
+ /* Allocate action STEs for rules that need more than match STE */
+ if (!is_update) {
+ ret = hws_rule_alloc_action_ste(rule, attr);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to allocate action memory %d", ret);
+ mlx5hws_send_abort_new_dep_wqe(queue);
+ return ret;
+ }
+ }
+ /* Skip the RX/TX direction if it was not enabled during dep_wqe init */
+ ste_attr.rtc_0 = dep_wqe->rtc_0 ?
+ matcher->action_ste[rule->action_ste_selector].rtc_0_id : 0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1 ?
+ matcher->action_ste[rule->action_ste_selector].rtc_1_id : 0;
+ /* Action STEs are written to a specific index last to first */
+ ste_attr.direct_index = rule->action_ste_idx + action_stes;
+ apply.next_direct_idx = ste_attr.direct_index;
+ } else {
+ apply.next_direct_idx = 0;
+ }
+
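+ /* Apply the setters and post the WQEs: action STEs first (from the
+ * highest index down), then the match STE on the last iteration (i == 0).
+ */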
+ for (i = total_stes; i-- > 0;) {
+ mlx5hws_action_apply_setter(&apply, setter--, !i && is_jumbo);
+
+ if (i == 0) {
+ /* Handle last match STE.
+ * For hash split / linear lookup RTCs, packets reaching any STE
+ * will always match and perform the specified actions, which
+ * makes the tag irrelevant.
+ */
+ if (likely(!mlx5hws_matcher_is_insert_by_idx(matcher) && !is_update))
+ mlx5hws_definer_create_tag(match_param, mt->fc, mt->fc_sz,
+ (u8 *)dep_wqe->wqe_data.action);
+ else if (is_update)
+ hws_rule_update_copy_tag(rule, &dep_wqe->wqe_data, is_jumbo);
+
+ /* Rule has dependent WQEs, match dep_wqe is queued */
+ if (action_stes || apply.require_dep)
+ break;
+
+ /* Rule has no dependencies, abort dep_wqe and send WQE now */
+ mlx5hws_send_abort_new_dep_wqe(queue);
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+ ste_attr.send_attr.notify_hw = !attr->burst;
+ ste_attr.send_attr.user_data = dep_wqe->user_data;
+ ste_attr.send_attr.rule = dep_wqe->rule;
+ ste_attr.rtc_0 = dep_wqe->rtc_0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1;
+ ste_attr.used_id_rtc_0 = &rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->rtc_1;
+ ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
+ ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
+ ste_attr.direct_index = dep_wqe->direct_index;
+ } else {
+ apply.next_direct_idx = --ste_attr.direct_index;
+ }
+
+ mlx5hws_send_ste(queue, &ste_attr);
+ }
+
+ /* Only after insertion, back up the TAG on the rule for later deletion
+ * and the resize info for moving the rule to a new matcher.
+ */
+ if (!is_update)
+ hws_rule_save_delete_info(rule, &ste_attr);
+
+ hws_rule_save_resize_info(rule, &ste_attr, is_update);
+ mlx5hws_send_engine_inc_rule(queue);
+
+ if (!attr->burst)
+ mlx5hws_send_all_dep_wqe(queue);
+
+ return 0;
+}
+
+static void hws_rule_destroy_failed_hws(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+ struct mlx5hws_send_engine *queue;
+
+ queue = &ctx->send_queue[attr->queue_id];
+
+ hws_rule_gen_comp(queue, rule, false,
+ attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
+
+ /* Rule failed now we can safely release action STEs */
+ mlx5hws_rule_free_action_ste(rule);
+
+ /* Clear complex tag */
+ hws_rule_clear_delete_info(rule);
+
+ /* Clear info that was saved for resizing */
+ mlx5hws_rule_clear_resize_info(rule);
+
+ /* If the failed rule was not posted as burst (i.e. HW needs to be
+ * triggered now), nothing was written to the WQ and the HW was not rung.
+ * In that case send the dependent WQEs and ring the HW on the last one.
+ */
+ if (attr->burst)
+ return;
+
+ mlx5hws_send_all_dep_wqe(queue);
+ mlx5hws_send_engine_flush_queue(queue);
+}
+
+static int hws_rule_destroy_hws(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
+ struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl = {0};
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_engine *queue;
+
+ queue = &ctx->send_queue[attr->queue_id];
+
+ if (unlikely(mlx5hws_send_engine_err(queue))) {
+ hws_rule_destroy_failed_hws(rule, attr);
+ return 0;
+ }
+
+ /* Rule is not completed yet */
+ if (rule->status == MLX5HWS_RULE_STATUS_CREATING)
+ return -EBUSY;
+
+ /* Rule failed and doesn't require cleanup */
+ if (rule->status == MLX5HWS_RULE_STATUS_FAILED) {
+ hws_rule_destroy_failed_hws(rule, attr);
+ return 0;
+ }
+
+ if (rule->skip_delete) {
+ /* Rule shouldn't be deleted in HW.
+ * Generate completion as if write succeeded, and we can
+ * safely release action STEs and clear resize info.
+ */
+ hws_rule_gen_comp(queue, rule, false,
+ attr->user_data, MLX5HWS_RULE_STATUS_DELETED);
+
+ mlx5hws_rule_free_action_ste(rule);
+ mlx5hws_rule_clear_resize_info(rule);
+ return 0;
+ }
+
+ mlx5hws_send_engine_inc_rule(queue);
+
+ /* Send dependent WQE */
+ if (!attr->burst)
+ mlx5hws_send_all_dep_wqe(queue);
+
+ rule->status = MLX5HWS_RULE_STATUS_DELETING;
+
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+
+ ste_attr.send_attr.rule = rule;
+ ste_attr.send_attr.notify_hw = !attr->burst;
+ ste_attr.send_attr.user_data = attr->user_data;
+
+ ste_attr.rtc_0 = rule->rtc_0;
+ ste_attr.rtc_1 = rule->rtc_1;
+ ste_attr.used_id_rtc_0 = &rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->rtc_1;
+ ste_attr.wqe_ctrl = &wqe_ctrl;
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE;
+ if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher)))
+ ste_attr.direct_index = attr->rule_idx;
+
+ hws_rule_load_delete_info(rule, &ste_attr);
+ mlx5hws_send_ste(queue, &ste_attr);
+ hws_rule_clear_delete_info(rule);
+
+ return 0;
+}
+
+static int hws_rule_enqueue_precheck(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+
+ if (unlikely(!attr->user_data))
+ return -EINVAL;
+
+ /* Check if there is room in queue */
+ if (unlikely(mlx5hws_send_engine_full(&ctx->send_queue[attr->queue_id])))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int hws_rule_enqueue_precheck_move(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED))
+ return -EINVAL;
+
+ return hws_rule_enqueue_precheck(rule, attr);
+}
+
+static int hws_rule_enqueue_precheck_create(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ if (unlikely(mlx5hws_matcher_is_in_resize(rule->matcher)))
+ /* Matcher in resize - new rules are not allowed */
+ return -EAGAIN;
+
+ return hws_rule_enqueue_precheck(rule, attr);
+}
+
+static int hws_rule_enqueue_precheck_update(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ struct mlx5hws_matcher *matcher = rule->matcher;
+
+ if (unlikely(!mlx5hws_matcher_is_resizable(rule->matcher) &&
+ !matcher->attr.optimize_using_rule_idx &&
+ !mlx5hws_matcher_is_insert_by_idx(matcher))) {
+ return -EOPNOTSUPP;
+ }
+
+ if (unlikely(rule->status != MLX5HWS_RULE_STATUS_CREATED))
+ return -EBUSY;
+
+ return hws_rule_enqueue_precheck_create(rule, attr);
+}
+
+int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
+ void *queue_ptr,
+ void *user_data)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
+ struct mlx5hws_wqe_gta_ctrl_seg empty_wqe_ctrl = {0};
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_send_engine *queue = queue_ptr;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+
+ mlx5hws_send_all_dep_wqe(queue);
+
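+ /* Deactivate the match STE in the source matcher RTCs saved in resize_info */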
+ rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_DELETING;
+
+ ste_attr.send_attr.fence = 0;
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.send_attr.rule = rule;
+ ste_attr.send_attr.notify_hw = 1;
+ ste_attr.send_attr.user_data = user_data;
+ ste_attr.rtc_0 = rule->resize_info->rtc_0;
+ ste_attr.rtc_1 = rule->resize_info->rtc_1;
+ ste_attr.used_id_rtc_0 = &rule->resize_info->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->resize_info->rtc_1;
+ ste_attr.wqe_ctrl = &empty_wqe_ctrl;
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_DEACTIVATE;
+
+ if (unlikely(mlx5hws_matcher_is_insert_by_idx(matcher)))
+ ste_attr.direct_index = rule->resize_info->rule_idx;
+
+ hws_rule_load_delete_info(rule, &ste_attr);
+ mlx5hws_send_ste(queue, &ste_attr);
+
+ return 0;
+}
+
+int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ bool is_jumbo = mlx5hws_matcher_mt_is_jumbo(rule->matcher->mt);
+ struct mlx5hws_context *ctx = rule->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = rule->matcher;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_engine *queue;
+ int ret;
+
+ ret = hws_rule_enqueue_precheck_move(rule, attr);
+ if (unlikely(ret))
+ return ret;
+
+ queue = &ctx->send_queue[attr->queue_id];
+
+ ret = mlx5hws_send_engine_err(queue);
+ if (ret)
+ return ret;
+
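+ /* Re-insert the rule into the destination matcher using the saved ctrl/data segments */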
+ hws_rule_move_init(rule, attr);
+ hws_rule_move_get_rtc(rule, &ste_attr);
+
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+
+ ste_attr.send_attr.rule = rule;
+ ste_attr.send_attr.fence = 0;
+ ste_attr.send_attr.notify_hw = !attr->burst;
+ ste_attr.send_attr.user_data = attr->user_data;
+
+ ste_attr.used_id_rtc_0 = &rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->rtc_1;
+ ste_attr.wqe_ctrl = (struct mlx5hws_wqe_gta_ctrl_seg *)rule->resize_info->ctrl_seg;
+ ste_attr.wqe_data = (struct mlx5hws_wqe_gta_data_seg_ste *)rule->resize_info->data_seg;
+ ste_attr.direct_index = mlx5hws_matcher_is_insert_by_idx(matcher) ?
+ attr->rule_idx : 0;
+
+ mlx5hws_send_ste(queue, &ste_attr);
+ mlx5hws_send_engine_inc_rule(queue);
+
+ if (!attr->burst)
+ mlx5hws_send_all_dep_wqe(queue);
+
+ return 0;
+}
+
+int mlx5hws_rule_create(struct mlx5hws_matcher *matcher,
+ u8 mt_idx,
+ u32 *match_param,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *attr,
+ struct mlx5hws_rule *rule_handle)
+{
+ int ret;
+
+ rule_handle->matcher = matcher;
+
+ ret = hws_rule_enqueue_precheck_create(rule_handle, attr);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(!(matcher->num_of_mt >= mt_idx) ||
+ !(matcher->num_of_at >= at_idx) ||
+ !match_param)) {
+ pr_warn("HWS: Invalid rule creation parameters (MTs, ATs or match params)\n");
+ return -EINVAL;
+ }
+
+ ret = hws_rule_create_hws(rule_handle,
+ attr,
+ mt_idx,
+ match_param,
+ at_idx,
+ rule_actions);
+
+ return ret;
+}
+
+int mlx5hws_rule_destroy(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr)
+{
+ int ret;
+
+ ret = hws_rule_enqueue_precheck(rule, attr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_rule_destroy_hws(rule, attr);
+
+ return ret;
+}
+
+int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
+ u8 at_idx,
+ struct mlx5hws_rule_action rule_actions[],
+ struct mlx5hws_rule_attr *attr)
+{
+ int ret;
+
+ ret = hws_rule_enqueue_precheck_update(rule, attr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = hws_rule_create_hws(rule,
+ attr,
+ 0,
+ NULL,
+ at_idx,
+ rule_actions);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h
new file mode 100644
index 000000000000..495cdd17e9f3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_rule.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_RULE_H_
+#define MLX5HWS_RULE_H_
+
+enum {
+ MLX5HWS_STE_CTRL_SZ = 20,
+ MLX5HWS_ACTIONS_SZ = 12,
+ MLX5HWS_MATCH_TAG_SZ = 32,
+ MLX5HWS_JUMBO_TAG_SZ = 44,
+};
+
+enum mlx5hws_rule_status {
+ MLX5HWS_RULE_STATUS_UNKNOWN,
+ MLX5HWS_RULE_STATUS_CREATING,
+ MLX5HWS_RULE_STATUS_CREATED,
+ MLX5HWS_RULE_STATUS_DELETING,
+ MLX5HWS_RULE_STATUS_DELETED,
+ MLX5HWS_RULE_STATUS_FAILING,
+ MLX5HWS_RULE_STATUS_FAILED,
+};
+
+enum mlx5hws_rule_move_state {
+ MLX5HWS_RULE_RESIZE_STATE_IDLE,
+ MLX5HWS_RULE_RESIZE_STATE_WRITING,
+ MLX5HWS_RULE_RESIZE_STATE_DELETING,
+};
+
+enum mlx5hws_rule_jumbo_match_tag_offset {
+ MLX5HWS_RULE_JUMBO_MATCH_TAG_OFFSET_DW0 = 8,
+};
+
+struct mlx5hws_rule_match_tag {
+ union {
+ u8 jumbo[MLX5HWS_JUMBO_TAG_SZ];
+ struct {
+ u8 reserved[MLX5HWS_ACTIONS_SZ];
+ u8 match[MLX5HWS_MATCH_TAG_SZ];
+ };
+ };
+};
+
+struct mlx5hws_rule_resize_info {
+ struct mlx5hws_pool *action_ste_pool[2];
+ u32 rtc_0;
+ u32 rtc_1;
+ u32 rule_idx;
+ u8 state;
+ u8 max_stes;
+ u8 ctrl_seg[MLX5HWS_WQE_SZ_GTA_CTRL]; /* Ctrl segment of STE: 48 bytes */
+ u8 data_seg[MLX5HWS_WQE_SZ_GTA_DATA]; /* Data segment of STE: 64 bytes */
+};
+
+struct mlx5hws_rule {
+ struct mlx5hws_matcher *matcher;
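+ /* For non-resizable matchers the match tag is saved for deletion;
+ * resizable matchers save resize info instead.
+ */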
+ union {
+ struct mlx5hws_rule_match_tag tag;
+ struct mlx5hws_rule_resize_info *resize_info;
+ };
+ u32 rtc_0; /* The RTC into which the STE was inserted */
+ u32 rtc_1; /* The RTC into which the STE was inserted */
+ int action_ste_idx; /* STE array index */
+ u8 status; /* enum mlx5hws_rule_status */
+ u8 action_ste_selector; /* For rule update - which action STE is in use */
+ u8 pending_wqes;
+ bool skip_delete; /* For complex rules - another rule with same tag
+ * still exists, so don't actually delete this rule.
+ */
+};
+
+void mlx5hws_rule_free_action_ste(struct mlx5hws_rule *rule);
+
+int mlx5hws_rule_move_hws_remove(struct mlx5hws_rule *rule,
+ void *queue, void *user_data);
+
+int mlx5hws_rule_move_hws_add(struct mlx5hws_rule *rule,
+ struct mlx5hws_rule_attr *attr);
+
+bool mlx5hws_rule_move_in_progress(struct mlx5hws_rule *rule);
+
+void mlx5hws_rule_clear_resize_info(struct mlx5hws_rule *rule);
+
+#endif /* MLX5HWS_RULE_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
new file mode 100644
index 000000000000..fb97a15c041a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.c
@@ -0,0 +1,1209 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+#include "lib/clock.h"
+
+enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };
+
+struct mlx5hws_send_ring_dep_wqe *
+mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
+ unsigned int idx = send_sq->head_dep_idx++ & (queue->num_entries - 1);
+
+ memset(&send_sq->dep_wqe[idx].wqe_data.tag, 0, MLX5HWS_MATCH_TAG_SZ);
+
+ return &send_sq->dep_wqe[idx];
+}
+
+void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue)
+{
+ queue->send_ring.send_sq.head_dep_idx--;
+}
+
+void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
+ struct mlx5hws_send_ste_attr ste_attr = {0};
+ struct mlx5hws_send_ring_dep_wqe *dep_wqe;
+
+ ste_attr.send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5HWS_WQE_SZ_GTA_CTRL + MLX5HWS_WQE_SZ_GTA_DATA;
+ ste_attr.gta_opcode = MLX5HWS_WQE_GTA_OP_ACTIVATE;
+
+ /* Fence the first WQE so it waits for previously posted WQEs */
+ ste_attr.send_attr.fence = 1;
+
+ while (send_sq->head_dep_idx != send_sq->tail_dep_idx) {
+ dep_wqe = &send_sq->dep_wqe[send_sq->tail_dep_idx++ & (queue->num_entries - 1)];
+
+ /* Notify HW on the last WQE */
+ ste_attr.send_attr.notify_hw = (send_sq->tail_dep_idx == send_sq->head_dep_idx);
+ ste_attr.send_attr.user_data = dep_wqe->user_data;
+ ste_attr.send_attr.rule = dep_wqe->rule;
+
+ ste_attr.rtc_0 = dep_wqe->rtc_0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1;
+ ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
+ ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
+ ste_attr.used_id_rtc_0 = &dep_wqe->rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &dep_wqe->rule->rtc_1;
+ ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
+ ste_attr.wqe_data = &dep_wqe->wqe_data;
+ ste_attr.direct_index = dep_wqe->direct_index;
+
+ mlx5hws_send_ste(queue, &ste_attr);
+
+ /* Fencing is done only on the first WQE */
+ ste_attr.send_attr.fence = 0;
+ }
+}
+
+struct mlx5hws_send_engine_post_ctrl
+mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+
+ ctrl.queue = queue;
+ /* Currently only one send ring is supported */
+ ctrl.send_ring = &queue->send_ring;
+ ctrl.num_wqebbs = 0;
+
+ return ctrl;
+}
+
+void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl,
+ char **buf, size_t *len)
+{
+ struct mlx5hws_send_ring_sq *send_sq = &ctrl->send_ring->send_sq;
+ unsigned int idx;
+
+ idx = (send_sq->cur_post + ctrl->num_wqebbs) & send_sq->buf_mask;
+
+ /* Note that *buf is a single MLX5_SEND_WQE_BB. It cannot be used as a
+ * buffer spanning more than one WQE_BB, since two consecutive
+ * MLX5_SEND_WQE_BBs may reside on different kernel memory pages.
+ */
+ *buf = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
+ *len = MLX5_SEND_WQE_BB;
+
+ if (!ctrl->num_wqebbs) {
+ *buf += sizeof(struct mlx5hws_wqe_ctrl_seg);
+ *len -= sizeof(struct mlx5hws_wqe_ctrl_seg);
+ }
+
+ ctrl->num_wqebbs++;
+}
+
+static void hws_send_engine_post_ring(struct mlx5hws_send_ring_sq *sq,
+ struct mlx5hws_wqe_ctrl_seg *doorbell_cseg)
+{
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+
+ *sq->wq.db = cpu_to_be32(sq->cur_post);
+
+ /* ensure doorbell record is visible to device before ringing the
+ * doorbell
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)doorbell_cseg, sq->uar_map);
+
+ /* Ensure doorbell is written on uar_page before poll_cq */
+ WRITE_ONCE(doorbell_cseg, NULL);
+}
+
+static void
+hws_send_wqe_set_tag(struct mlx5hws_wqe_gta_data_seg_ste *wqe_data,
+ struct mlx5hws_rule_match_tag *tag,
+ bool is_jumbo)
+{
+ if (is_jumbo) {
+ /* Clear previous possibly dirty control */
+ memset(wqe_data, 0, MLX5HWS_STE_CTRL_SZ);
+ memcpy(wqe_data->jumbo, tag->jumbo, MLX5HWS_JUMBO_TAG_SZ);
+ } else {
+ /* Clear previous possibly dirty control and actions */
+ memset(wqe_data, 0, MLX5HWS_STE_CTRL_SZ + MLX5HWS_ACTIONS_SZ);
+ memcpy(wqe_data->tag, tag->match, MLX5HWS_MATCH_TAG_SZ);
+ }
+}
+
+void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl,
+ struct mlx5hws_send_engine_post_attr *attr)
+{
+ struct mlx5hws_wqe_ctrl_seg *wqe_ctrl;
+ struct mlx5hws_send_ring_sq *sq;
+ unsigned int idx;
+ u32 flags = 0;
+
+ sq = &ctrl->send_ring->send_sq;
+ idx = sq->cur_post & sq->buf_mask;
+ sq->last_idx = idx;
+
+ wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, idx);
+
+ wqe_ctrl->opmod_idx_opcode =
+ cpu_to_be32((attr->opmod << 24) |
+ ((sq->cur_post & 0xffff) << 8) |
+ attr->opcode);
+ wqe_ctrl->qpn_ds =
+ cpu_to_be32((attr->len + sizeof(struct mlx5hws_wqe_ctrl_seg)) / 16 |
+ sq->sqn << 8);
+ wqe_ctrl->imm = cpu_to_be32(attr->id);
+
+ flags |= attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
+ flags |= attr->fence ? MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE : 0;
+ wqe_ctrl->flags = cpu_to_be32(flags);
+
+ sq->wr_priv[idx].id = attr->id;
+ sq->wr_priv[idx].retry_id = attr->retry_id;
+
+ sq->wr_priv[idx].rule = attr->rule;
+ sq->wr_priv[idx].user_data = attr->user_data;
+ sq->wr_priv[idx].num_wqebbs = ctrl->num_wqebbs;
+
+ if (attr->rule) {
+ sq->wr_priv[idx].rule->pending_wqes++;
+ sq->wr_priv[idx].used_id = attr->used_id;
+ }
+
+ sq->cur_post += ctrl->num_wqebbs;
+
+ if (attr->notify_hw)
+ hws_send_engine_post_ring(sq, wqe_ctrl);
+}
+
+static void hws_send_wqe(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_engine_post_attr *send_attr,
+ struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl,
+ void *send_wqe_data,
+ void *send_wqe_tag,
+ bool is_jumbo,
+ u8 gta_opcode,
+ u32 direct_index)
+{
+ struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+ size_t wqe_len;
+
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len);
+
+ wqe_ctrl->op_dirix = cpu_to_be32(gta_opcode << 28 | direct_index);
+ memcpy(wqe_ctrl->stc_ix, send_wqe_ctrl->stc_ix,
+ sizeof(send_wqe_ctrl->stc_ix));
+
+ if (send_wqe_data)
+ memcpy(wqe_data, send_wqe_data, sizeof(*wqe_data));
+ else
+ hws_send_wqe_set_tag(wqe_data, send_wqe_tag, is_jumbo);
+
+ mlx5hws_send_engine_post_end(&ctrl, send_attr);
+}
+
+void mlx5hws_send_ste(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ struct mlx5hws_send_engine_post_attr *send_attr = &ste_attr->send_attr;
+ u8 notify_hw = send_attr->notify_hw;
+ u8 fence = send_attr->fence;
+
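+ /* Post to RTC 1 and/or RTC 0. Across the pair, fencing is applied only
+ * on the first posted WQE and the HW is notified only on the last.
+ */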
+ if (ste_attr->rtc_1) {
+ send_attr->id = ste_attr->rtc_1;
+ send_attr->used_id = ste_attr->used_id_rtc_1;
+ send_attr->retry_id = ste_attr->retry_rtc_1;
+ send_attr->fence = fence;
+ send_attr->notify_hw = notify_hw && !ste_attr->rtc_0;
+ hws_send_wqe(queue, send_attr,
+ ste_attr->wqe_ctrl,
+ ste_attr->wqe_data,
+ ste_attr->wqe_tag,
+ ste_attr->wqe_tag_is_jumbo,
+ ste_attr->gta_opcode,
+ ste_attr->direct_index);
+ }
+
+ if (ste_attr->rtc_0) {
+ send_attr->id = ste_attr->rtc_0;
+ send_attr->used_id = ste_attr->used_id_rtc_0;
+ send_attr->retry_id = ste_attr->retry_rtc_0;
+ send_attr->fence = fence && !ste_attr->rtc_1;
+ send_attr->notify_hw = notify_hw;
+ hws_send_wqe(queue, send_attr,
+ ste_attr->wqe_ctrl,
+ ste_attr->wqe_data,
+ ste_attr->wqe_tag,
+ ste_attr->wqe_tag_is_jumbo,
+ ste_attr->gta_opcode,
+ ste_attr->direct_index);
+ }
+
+ /* Restore to original requested values */
+ send_attr->notify_hw = notify_hw;
+ send_attr->fence = fence;
+}
+
+static void hws_send_engine_retry_post_send(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_priv *priv,
+ u16 wqe_cnt)
+{
+ struct mlx5hws_send_engine_post_attr send_attr = {0};
+ struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ struct mlx5hws_send_engine_post_ctrl ctrl;
+ struct mlx5hws_send_ring_sq *send_sq;
+ unsigned int idx;
+ size_t wqe_len;
+ char *p;
+
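+ /* Re-post the failed WQE to the retry RTC, reusing the original GTA
+ * ctrl/data segments copied back from the SQ buffer.
+ */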
+ send_attr.rule = priv->rule;
+ send_attr.opcode = MLX5HWS_WQE_OPCODE_TBL_ACCESS;
+ send_attr.opmod = MLX5HWS_WQE_GTA_OPMOD_STE;
+ send_attr.len = MLX5_SEND_WQE_BB * 2 - sizeof(struct mlx5hws_wqe_ctrl_seg);
+ send_attr.notify_hw = 1;
+ send_attr.fence = 0;
+ send_attr.user_data = priv->user_data;
+ send_attr.id = priv->retry_id;
+ send_attr.used_id = priv->used_id;
+
+ ctrl = mlx5hws_send_engine_post_start(queue);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_ctrl, &wqe_len);
+ mlx5hws_send_engine_post_req_wqe(&ctrl, (void *)&wqe_data, &wqe_len);
+
+ send_sq = &ctrl.send_ring->send_sq;
+ idx = wqe_cnt & send_sq->buf_mask;
+ p = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
+
+ /* Copy old gta ctrl */
+ memcpy(wqe_ctrl, p + sizeof(struct mlx5hws_wqe_ctrl_seg),
+ MLX5_SEND_WQE_BB - sizeof(struct mlx5hws_wqe_ctrl_seg));
+
+ idx = (wqe_cnt + 1) & send_sq->buf_mask;
+ p = mlx5_wq_cyc_get_wqe(&send_sq->wq, idx);
+
+ /* Copy old gta data */
+ memcpy(wqe_data, p, MLX5_SEND_WQE_BB);
+
+ mlx5hws_send_engine_post_end(&ctrl, &send_attr);
+}
+
+void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_ring_sq *sq = &queue->send_ring.send_sq;
+ struct mlx5hws_wqe_ctrl_seg *wqe_ctrl;
+
+ wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, sq->last_idx);
+ wqe_ctrl->flags |= cpu_to_be32(MLX5_WQE_CTRL_CQ_UPDATE);
+
+ hws_send_engine_post_ring(sq, wqe_ctrl);
+}
+
+static void
+hws_send_engine_update_rule_resize(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_priv *priv,
+ enum mlx5hws_flow_op_status *status)
+{
+ switch (priv->rule->resize_info->state) {
+ case MLX5HWS_RULE_RESIZE_STATE_WRITING:
+ if (priv->rule->status == MLX5HWS_RULE_STATUS_FAILING) {
+ /* Backup original RTCs */
+ u32 orig_rtc_0 = priv->rule->resize_info->rtc_0;
+ u32 orig_rtc_1 = priv->rule->resize_info->rtc_1;
+
+ /* Delete partially failed move rule using resize_info */
+ priv->rule->resize_info->rtc_0 = priv->rule->rtc_0;
+ priv->rule->resize_info->rtc_1 = priv->rule->rtc_1;
+
+ /* Move rule to original RTC for future delete */
+ priv->rule->rtc_0 = orig_rtc_0;
+ priv->rule->rtc_1 = orig_rtc_1;
+ }
+ /* Clean leftovers */
+ mlx5hws_rule_move_hws_remove(priv->rule, queue, priv->user_data);
+ break;
+
+ case MLX5HWS_RULE_RESIZE_STATE_DELETING:
+ if (priv->rule->status == MLX5HWS_RULE_STATUS_FAILING) {
+ *status = MLX5HWS_FLOW_OP_ERROR;
+ } else {
+ *status = MLX5HWS_FLOW_OP_SUCCESS;
+ priv->rule->matcher = priv->rule->matcher->resize_dst;
+ }
+ priv->rule->resize_info->state = MLX5HWS_RULE_RESIZE_STATE_IDLE;
+ priv->rule->status = MLX5HWS_RULE_STATUS_CREATED;
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void hws_send_engine_update_rule(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_priv *priv,
+ u16 wqe_cnt,
+ enum mlx5hws_flow_op_status *status)
+{
+ priv->rule->pending_wqes--;
+
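+ /* On error, first retry the WQE on the retry RTC if one was provided */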
+ if (*status == MLX5HWS_FLOW_OP_ERROR) {
+ if (priv->retry_id) {
+ hws_send_engine_retry_post_send(queue, priv, wqe_cnt);
+ return;
+ }
+ /* Some part of the rule failed */
+ priv->rule->status = MLX5HWS_RULE_STATUS_FAILING;
+ *priv->used_id = 0;
+ } else {
+ *priv->used_id = priv->id;
+ }
+
+ /* Update rule status for the last completion */
+ if (!priv->rule->pending_wqes) {
+ if (unlikely(mlx5hws_rule_move_in_progress(priv->rule))) {
+ hws_send_engine_update_rule_resize(queue, priv, status);
+ return;
+ }
+
+ if (unlikely(priv->rule->status == MLX5HWS_RULE_STATUS_FAILING)) {
+ /* Rule completely failed and doesn't require cleanup */
+ if (!priv->rule->rtc_0 && !priv->rule->rtc_1)
+ priv->rule->status = MLX5HWS_RULE_STATUS_FAILED;
+
+ *status = MLX5HWS_FLOW_OP_ERROR;
+ } else {
+ /* Advance the rule status. This only works on the good flow, as the
+ * enum is ordered: creating -> created -> deleting -> deleted.
+ */
+ priv->rule->status++;
+ *status = MLX5HWS_FLOW_OP_SUCCESS;
+ /* Rule was deleted now we can safely release action STEs
+ * and clear resize info
+ */
+ if (priv->rule->status == MLX5HWS_RULE_STATUS_DELETED) {
+ mlx5hws_rule_free_action_ste(priv->rule);
+ mlx5hws_rule_clear_resize_info(priv->rule);
+ }
+ }
+ }
+}
+
+static void hws_send_engine_update(struct mlx5hws_send_engine *queue,
+ struct mlx5_cqe64 *cqe,
+ struct mlx5hws_send_ring_priv *priv,
+ struct mlx5hws_flow_op_result res[],
+ s64 *i,
+ u32 res_nb,
+ u16 wqe_cnt)
+{
+ enum mlx5hws_flow_op_status status;
+
+ if (!cqe || (likely(be32_to_cpu(cqe->byte_cnt) >> 31 == 0) &&
+ likely(get_cqe_opcode(cqe) == MLX5_CQE_REQ))) {
+ status = MLX5HWS_FLOW_OP_SUCCESS;
+ } else {
+ status = MLX5HWS_FLOW_OP_ERROR;
+ }
+
+ if (priv->user_data) {
+ if (priv->rule) {
+ hws_send_engine_update_rule(queue, priv, wqe_cnt, &status);
+ /* Completion is provided on the last rule WQE */
+ if (priv->rule->pending_wqes)
+ return;
+ }
+
+ if (*i < res_nb) {
+ res[*i].user_data = priv->user_data;
+ res[*i].status = status;
+ (*i)++;
+ mlx5hws_send_engine_dec_rule(queue);
+ } else {
+ mlx5hws_send_engine_gen_comp(queue, priv->user_data, status);
+ }
+ }
+}
+
+static int mlx5hws_parse_cqe(struct mlx5hws_send_ring_cq *cq,
+ struct mlx5_cqe64 *cqe64)
+{
+ if (unlikely(get_cqe_opcode(cqe64) != MLX5_CQE_REQ)) {
+ struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe64;
+
+ mlx5_core_err(cq->mdev, "Bad OP in HWS SQ CQE: 0x%x\n", get_cqe_opcode(cqe64));
+ mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n", err_cqe->vendor_err_synd);
+ mlx5_core_err(cq->mdev, "syndrome=%x\n", err_cqe->syndrome);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
+ 16, 1, err_cqe,
+ sizeof(*err_cqe), false);
+ return CQ_POLL_ERR;
+ }
+
+ return CQ_OK;
+}
+
+static int mlx5hws_cq_poll_one(struct mlx5hws_send_ring_cq *cq)
+{
+ struct mlx5_cqe64 *cqe64;
+ int err;
+
+ cqe64 = mlx5_cqwq_get_cqe(&cq->wq);
+ if (!cqe64) {
+ if (unlikely(cq->mdev->state ==
+ MLX5_DEVICE_STATE_INTERNAL_ERROR)) {
+ mlx5_core_dbg_once(cq->mdev,
+ "Polling CQ while device is shutting down\n");
+ return CQ_POLL_ERR;
+ }
+ return CQ_EMPTY;
+ }
+
+ mlx5_cqwq_pop(&cq->wq);
+ err = mlx5hws_parse_cqe(cq, cqe64);
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ return err;
+}
+
+static void hws_send_engine_poll_cq(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_flow_op_result res[],
+ s64 *polled,
+ u32 res_nb)
+{
+ struct mlx5hws_send_ring *send_ring = &queue->send_ring;
+ struct mlx5hws_send_ring_cq *cq = &send_ring->send_cq;
+ struct mlx5hws_send_ring_sq *sq = &send_ring->send_sq;
+ struct mlx5hws_send_ring_priv *priv;
+ struct mlx5_cqe64 *cqe;
+ u8 cqe_opcode;
+ u16 wqe_cnt;
+
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ if (!cqe)
+ return;
+
+ cqe_opcode = get_cqe_opcode(cqe);
+ if (cqe_opcode == MLX5_CQE_INVALID)
+ return;
+
+ if (unlikely(cqe_opcode != MLX5_CQE_REQ))
+ queue->err = true;
+
+ wqe_cnt = be16_to_cpu(cqe->wqe_counter) & sq->buf_mask;
+
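+ /* Completions may be coalesced: first complete all WQEs preceding the
+ * one reported by the CQE, then the reported WQE itself.
+ */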
+ while (cq->poll_wqe != wqe_cnt) {
+ priv = &sq->wr_priv[cq->poll_wqe];
+ hws_send_engine_update(queue, NULL, priv, res, polled, res_nb, 0);
+ cq->poll_wqe = (cq->poll_wqe + priv->num_wqebbs) & sq->buf_mask;
+ }
+
+ priv = &sq->wr_priv[wqe_cnt];
+ cq->poll_wqe = (wqe_cnt + priv->num_wqebbs) & sq->buf_mask;
+ hws_send_engine_update(queue, cqe, priv, res, polled, res_nb, wqe_cnt);
+ mlx5hws_cq_poll_one(cq);
+}
+
+static void hws_send_engine_poll_list(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_flow_op_result res[],
+ s64 *polled,
+ u32 res_nb)
+{
+ struct mlx5hws_completed_poll *comp = &queue->completed;
+
+ while (comp->ci != comp->pi) {
+ if (*polled < res_nb) {
+ res[*polled].status =
+ comp->entries[comp->ci].status;
+ res[*polled].user_data =
+ comp->entries[comp->ci].user_data;
+ (*polled)++;
+ comp->ci = (comp->ci + 1) & comp->mask;
+ mlx5hws_send_engine_dec_rule(queue);
+ } else {
+ return;
+ }
+ }
+}
+
+static int hws_send_engine_poll(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_flow_op_result res[],
+ u32 res_nb)
+{
+ s64 polled = 0;
+
+ hws_send_engine_poll_list(queue, res, &polled, res_nb);
+
+ if (polled >= res_nb)
+ return polled;
+
+ hws_send_engine_poll_cq(queue, res, &polled, res_nb);
+
+ return polled;
+}
+
+int mlx5hws_send_queue_poll(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ struct mlx5hws_flow_op_result res[],
+ u32 res_nb)
+{
+ return hws_send_engine_poll(&ctx->send_queue[queue_id], res, res_nb);
+}
+
+static int hws_send_ring_alloc_sq(struct mlx5_core_dev *mdev,
+ int numa_node,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_sq *sq,
+ void *sqc_data)
+{
+ void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5_wq_param param;
+ size_t buf_sz;
+ int err;
+
+ sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->mdev = mdev;
+
+ param.db_numa_node = numa_node;
+ param.buf_numa_node = numa_node;
+ err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
+ if (err)
+ return err;
+ wq->db = &wq->db[MLX5_SND_DBR];
+
+ buf_sz = queue->num_entries * MAX_WQES_PER_RULE;
+ sq->dep_wqe = kcalloc(queue->num_entries, sizeof(*sq->dep_wqe), GFP_KERNEL);
+ if (!sq->dep_wqe) {
+ err = -ENOMEM;
+ goto destroy_wq_cyc;
+ }
+
+ sq->wr_priv = kzalloc(sizeof(*sq->wr_priv) * buf_sz, GFP_KERNEL);
+ if (!sq->wr_priv) {
+ err = -ENOMEM;
+ goto free_dep_wqe;
+ }
+
+ sq->buf_mask = (queue->num_entries * MAX_WQES_PER_RULE) - 1;
+
+ return 0;
+
+free_dep_wqe:
+ kfree(sq->dep_wqe);
+destroy_wq_cyc:
+ mlx5_wq_destroy(&sq->wq_ctrl);
+ return err;
+}
+
+static void hws_send_ring_free_sq(struct mlx5hws_send_ring_sq *sq)
+{
+ if (!sq)
+ return;
+ kfree(sq->wr_priv);
+ kfree(sq->dep_wqe);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+}
+
+static int hws_send_ring_create_sq(struct mlx5_core_dev *mdev, u32 pdn,
+ void *sqc_data,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_sq *sq,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ void *in, *sqc, *wq;
+ int inlen, err;
+ u8 ts_format;
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) * sq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
+ MLX5_SET(sqc, sqc, cqn, cq->mcq.cqn);
+
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+ ts_format = mlx5_is_real_time_sq(mdev) ? MLX5_TIMESTAMP_FORMAT_REAL_TIME :
+ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
+ MLX5_SET(sqc, sqc, ts_format, ts_format);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int hws_send_ring_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
+{
+ void *in, *sqc;
+ int inlen, err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+ err = mlx5_core_modify_sq(mdev, sqn, in);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void hws_send_ring_close_sq(struct mlx5hws_send_ring_sq *sq)
+{
+ mlx5_core_destroy_sq(sq->mdev, sq->sqn);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+ kfree(sq->wr_priv);
+ kfree(sq->dep_wqe);
+}
+
+static int hws_send_ring_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
+ void *sqc_data,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_sq *sq,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ int err;
+
+ err = hws_send_ring_create_sq(mdev, pdn, sqc_data, queue, sq, cq);
+ if (err)
+ return err;
+
+ err = hws_send_ring_set_sq_rdy(mdev, sq->sqn);
+ if (err)
+ hws_send_ring_close_sq(sq);
+
+ return err;
+}
+
+static int hws_send_ring_open_sq(struct mlx5hws_context *ctx,
+ int numa_node,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ring_sq *sq,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ size_t buf_sz, sq_log_buf_sz;
+ void *sqc_data, *wq;
+ int err;
+
+ sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
+ if (!sqc_data)
+ return -ENOMEM;
+
+ buf_sz = queue->num_entries * MAX_WQES_PER_RULE;
+ sq_log_buf_sz = ilog2(roundup_pow_of_two(buf_sz));
+
+ wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, ctx->pd_num);
+ MLX5_SET(wq, wq, log_wq_sz, sq_log_buf_sz);
+
+ err = hws_send_ring_alloc_sq(ctx->mdev, numa_node, queue, sq, sqc_data);
+ if (err)
+ goto err_free_sqc;
+
+ err = hws_send_ring_create_sq_rdy(ctx->mdev, ctx->pd_num, sqc_data,
+ queue, sq, cq);
+ if (err)
+ goto err_free_sq;
+
+ kvfree(sqc_data);
+
+ return 0;
+err_free_sq:
+ hws_send_ring_free_sq(sq);
+err_free_sqc:
+ kvfree(sqc_data);
+ return err;
+}
+
+static void hws_cq_complete(struct mlx5_core_cq *mcq,
+ struct mlx5_eqe *eqe)
+{
+ pr_err("CQ completion CQ: #%u\n", mcq->cqn);
+}
+
+static int hws_send_ring_alloc_cq(struct mlx5_core_dev *mdev,
+ int numa_node,
+ struct mlx5hws_send_engine *queue,
+ void *cqc_data,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ struct mlx5_wq_param param;
+ struct mlx5_cqe64 *cqe;
+ int err;
+ u32 i;
+
+ param.buf_numa_node = numa_node;
+ param.db_numa_node = numa_node;
+
+ err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+ mcq->comp = hws_cq_complete;
+
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+ cqe->op_own = 0xf1;
+ }
+
+ cq->mdev = mdev;
+
+ return 0;
+}
+
+static int hws_send_ring_create_cq(struct mlx5_core_dev *mdev,
+ struct mlx5hws_send_engine *queue,
+ void *cqc_data,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ void *in, *cqc;
+ int inlen, eqn;
+ int err;
+
+ err = mlx5_comp_eqn_get(mdev, 0, &eqn);
+ if (err)
+ return err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
+ mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
+
+ kvfree(in);
+
+ return err;
+}
+
+static int hws_send_ring_open_cq(struct mlx5_core_dev *mdev,
+ struct mlx5hws_send_engine *queue,
+ int numa_node,
+ struct mlx5hws_send_ring_cq *cq)
+{
+ void *cqc_data;
+ int err;
+
+ cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
+ if (!cqc_data)
+ return -ENOMEM;
+
+ MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc_data, cqe_sz, queue->num_entries);
+ MLX5_SET(cqc, cqc_data, log_cq_size, ilog2(queue->num_entries));
+
+ err = hws_send_ring_alloc_cq(mdev, numa_node, queue, cqc_data, cq);
+ if (err)
+ goto err_out;
+
+ err = hws_send_ring_create_cq(mdev, queue, cqc_data, cq);
+ if (err)
+ goto err_free_cq;
+
+ kvfree(cqc_data);
+
+ return 0;
+
+err_free_cq:
+ mlx5_wq_destroy(&cq->wq_ctrl);
+err_out:
+ kvfree(cqc_data);
+ return err;
+}
+
+static void hws_send_ring_close_cq(struct mlx5hws_send_ring_cq *cq)
+{
+ mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static void hws_send_ring_close(struct mlx5hws_send_engine *queue)
+{
+ hws_send_ring_close_sq(&queue->send_ring.send_sq);
+ hws_send_ring_close_cq(&queue->send_ring.send_cq);
+}
+
+static int mlx5hws_send_ring_open(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue)
+{
+ int numa_node = dev_to_node(mlx5_core_dma_dev(ctx->mdev));
+ struct mlx5hws_send_ring *ring = &queue->send_ring;
+ int err;
+
+ err = hws_send_ring_open_cq(ctx->mdev, queue, numa_node, &ring->send_cq);
+ if (err)
+ return err;
+
+ err = hws_send_ring_open_sq(ctx, numa_node, queue, &ring->send_sq,
+ &ring->send_cq);
+ if (err)
+ goto close_cq;
+
+ return err;
+
+close_cq:
+ hws_send_ring_close_cq(&ring->send_cq);
+ return err;
+}
+
+void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue)
+{
+ hws_send_ring_close(queue);
+ kfree(queue->completed.entries);
+}
+
+int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ u16 queue_size)
+{
+ int err;
+
+ mutex_init(&queue->lock);
+
+ queue->num_entries = roundup_pow_of_two(queue_size);
+ queue->used_entries = 0;
+
+ queue->completed.entries = kcalloc(queue->num_entries,
+ sizeof(queue->completed.entries[0]),
+ GFP_KERNEL);
+ if (!queue->completed.entries)
+ return -ENOMEM;
+
+ queue->completed.pi = 0;
+ queue->completed.ci = 0;
+ queue->completed.mask = queue->num_entries - 1;
+ err = mlx5hws_send_ring_open(ctx, queue);
+ if (err)
+ goto free_completed_entries;
+
+ return 0;
+
+free_completed_entries:
+ kfree(queue->completed.entries);
+ return err;
+}
+
+static void __hws_send_queues_close(struct mlx5hws_context *ctx, u16 queues)
+{
+ while (queues--)
+ mlx5hws_send_queue_close(&ctx->send_queue[queues]);
+}
+
+static void hws_send_queues_bwc_locks_destroy(struct mlx5hws_context *ctx)
+{
+ int bwc_queues = ctx->queues - 1;
+ int i;
+
+ if (!mlx5hws_context_bwc_supported(ctx))
+ return;
+
+ for (i = 0; i < bwc_queues; i++)
+ mutex_destroy(&ctx->bwc_send_queue_locks[i]);
+ kfree(ctx->bwc_send_queue_locks);
+}
+
+void mlx5hws_send_queues_close(struct mlx5hws_context *ctx)
+{
+ hws_send_queues_bwc_locks_destroy(ctx);
+ __hws_send_queues_close(ctx, ctx->queues);
+ kfree(ctx->send_queue);
+}
+
+static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx)
+{
+ /* The number of BWC queues equals the number of regular HWS queues */
+ int bwc_queues = ctx->queues - 1;
+ int i;
+
+ if (!mlx5hws_context_bwc_supported(ctx))
+ return 0;
+
+ ctx->queues += bwc_queues;
+
+ ctx->bwc_send_queue_locks = kcalloc(bwc_queues,
+ sizeof(*ctx->bwc_send_queue_locks),
+ GFP_KERNEL);
+
+ if (!ctx->bwc_send_queue_locks)
+ return -ENOMEM;
+
+ for (i = 0; i < bwc_queues; i++)
+ mutex_init(&ctx->bwc_send_queue_locks[i]);
+
+ return 0;
+}
+
+int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
+ u16 queues,
+ u16 queue_size)
+{
+ int err = 0;
+ u32 i;
+
+ /* Open one extra queue for control path */
+ ctx->queues = queues + 1;
+
+ /* open a separate set of queues and locks for bwc API */
+ err = hws_bwc_send_queues_init(ctx);
+ if (err)
+ return err;
+
+ ctx->send_queue = kcalloc(ctx->queues, sizeof(*ctx->send_queue), GFP_KERNEL);
+ if (!ctx->send_queue) {
+ err = -ENOMEM;
+ goto free_bwc_locks;
+ }
+
+ for (i = 0; i < ctx->queues; i++) {
+ err = mlx5hws_send_queue_open(ctx, &ctx->send_queue[i], queue_size);
+ if (err)
+ goto close_send_queues;
+ }
+
+ return 0;
+
+close_send_queues:
+ __hws_send_queues_close(ctx, i);
+
+ kfree(ctx->send_queue);
+
+free_bwc_locks:
+ hws_send_queues_bwc_locks_destroy(ctx);
+
+ return err;
+}
+
+int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 actions)
+{
+ struct mlx5hws_send_ring_sq *send_sq;
+ struct mlx5hws_send_engine *queue;
+ bool wait_comp = false;
+ s64 polled = 0;
+
+ queue = &ctx->send_queue[queue_id];
+ send_sq = &queue->send_ring.send_sq;
+
+ switch (actions) {
+ case MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC:
+ wait_comp = true;
+ fallthrough;
+ case MLX5HWS_SEND_QUEUE_ACTION_DRAIN_ASYNC:
+ if (send_sq->head_dep_idx != send_sq->tail_dep_idx)
+ /* Send dependent WQEs to drain the queue */
+ mlx5hws_send_all_dep_wqe(queue);
+ else
+ /* Signal on the last posted WQE */
+ mlx5hws_send_engine_flush_queue(queue);
+
+ /* Poll queue until empty */
+ while (wait_comp && !mlx5hws_send_engine_empty(queue))
+ hws_send_engine_poll_cq(queue, NULL, &polled, 0);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+hws_send_wqe_fw(struct mlx5_core_dev *mdev,
+ u32 pd_num,
+ struct mlx5hws_send_engine_post_attr *send_attr,
+ struct mlx5hws_wqe_gta_ctrl_seg *send_wqe_ctrl,
+ void *send_wqe_match_data,
+ void *send_wqe_match_tag,
+ void *send_wqe_range_data,
+ void *send_wqe_range_tag,
+ bool is_jumbo,
+ u8 gta_opcode)
+{
+ bool has_range = send_wqe_range_data || send_wqe_range_tag;
+ bool has_match = send_wqe_match_data || send_wqe_match_tag;
+ struct mlx5hws_wqe_gta_data_seg_ste gta_wqe_data0 = {0};
+ struct mlx5hws_wqe_gta_data_seg_ste gta_wqe_data1 = {0};
+ struct mlx5hws_wqe_gta_ctrl_seg gta_wqe_ctrl = {0};
+ struct mlx5hws_cmd_generate_wqe_attr attr = {0};
+ struct mlx5hws_wqe_ctrl_seg wqe_ctrl = {0};
+ struct mlx5_cqe64 cqe;
+ u32 flags = 0;
+ int ret;
+
+ /* Set WQE control */
+ wqe_ctrl.opmod_idx_opcode = cpu_to_be32((send_attr->opmod << 24) | send_attr->opcode);
+ wqe_ctrl.qpn_ds = cpu_to_be32((send_attr->len + sizeof(struct mlx5hws_wqe_ctrl_seg)) / 16);
+ flags |= send_attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
+ wqe_ctrl.flags = cpu_to_be32(flags);
+ wqe_ctrl.imm = cpu_to_be32(send_attr->id);
+
+ /* Set GTA WQE CTRL */
+ memcpy(gta_wqe_ctrl.stc_ix, send_wqe_ctrl->stc_ix, sizeof(send_wqe_ctrl->stc_ix));
+ gta_wqe_ctrl.op_dirix = cpu_to_be32(gta_opcode << 28);
+
+ /* Set GTA match WQE DATA */
+ if (has_match) {
+ if (send_wqe_match_data)
+ memcpy(&gta_wqe_data0, send_wqe_match_data, sizeof(gta_wqe_data0));
+ else
+ hws_send_wqe_set_tag(&gta_wqe_data0, send_wqe_match_tag, is_jumbo);
+
+ gta_wqe_data0.rsvd1_definer = cpu_to_be32(send_attr->match_definer_id << 8);
+ attr.gta_data_0 = (u8 *)&gta_wqe_data0;
+ }
+
+ /* Set GTA range WQE DATA */
+ if (has_range) {
+ if (send_wqe_range_data)
+ memcpy(&gta_wqe_data1, send_wqe_range_data, sizeof(gta_wqe_data1));
+ else
+ hws_send_wqe_set_tag(&gta_wqe_data1, send_wqe_range_tag, false);
+
+ gta_wqe_data1.rsvd1_definer = cpu_to_be32(send_attr->range_definer_id << 8);
+ attr.gta_data_1 = (u8 *)&gta_wqe_data1;
+ }
+
+ attr.pdn = pd_num;
+ attr.wqe_ctrl = (u8 *)&wqe_ctrl;
+ attr.gta_ctrl = (u8 *)&gta_wqe_ctrl;
+
+send_wqe:
+ ret = mlx5hws_cmd_generate_wqe(mdev, &attr, &cqe);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to write WQE using command");
+ return ret;
+ }
+
+ if ((get_cqe_opcode(&cqe) == MLX5_CQE_REQ) &&
+ (be32_to_cpu(cqe.byte_cnt) >> 31 == 0)) {
+ *send_attr->used_id = send_attr->id;
+ return 0;
+ }
+
+ /* Retry if rule failed */
+ if (send_attr->retry_id) {
+ wqe_ctrl.imm = cpu_to_be32(send_attr->retry_id);
+ send_attr->id = send_attr->retry_id;
+ send_attr->retry_id = 0;
+ goto send_wqe;
+ }
+
+ return -1;
+}
+
+void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ste_attr *ste_attr)
+{
+ struct mlx5hws_send_engine_post_attr *send_attr = &ste_attr->send_attr;
+ struct mlx5hws_rule *rule = send_attr->rule;
+ struct mlx5_core_dev *mdev;
+ u16 queue_id;
+ u32 pdn;
+ int ret;
+
+ queue_id = queue - ctx->send_queue;
+ mdev = ctx->mdev;
+ pdn = ctx->pd_num;
+
+ /* Writes through FW cannot be HW-fenced, therefore drain the queue first */
+ if (send_attr->fence)
+ mlx5hws_send_queue_action(ctx,
+ queue_id,
+ MLX5HWS_SEND_QUEUE_ACTION_DRAIN_SYNC);
+
+ if (ste_attr->rtc_1) {
+ send_attr->id = ste_attr->rtc_1;
+ send_attr->used_id = ste_attr->used_id_rtc_1;
+ send_attr->retry_id = ste_attr->retry_rtc_1;
+ ret = hws_send_wqe_fw(mdev, pdn, send_attr,
+ ste_attr->wqe_ctrl,
+ ste_attr->wqe_data,
+ ste_attr->wqe_tag,
+ ste_attr->range_wqe_data,
+ ste_attr->range_wqe_tag,
+ ste_attr->wqe_tag_is_jumbo,
+ ste_attr->gta_opcode);
+ if (ret)
+ goto fail_rule;
+ }
+
+ if (ste_attr->rtc_0) {
+ send_attr->id = ste_attr->rtc_0;
+ send_attr->used_id = ste_attr->used_id_rtc_0;
+ send_attr->retry_id = ste_attr->retry_rtc_0;
+ ret = hws_send_wqe_fw(mdev, pdn, send_attr,
+ ste_attr->wqe_ctrl,
+ ste_attr->wqe_data,
+ ste_attr->wqe_tag,
+ ste_attr->range_wqe_data,
+ ste_attr->range_wqe_tag,
+ ste_attr->wqe_tag_is_jumbo,
+ ste_attr->gta_opcode);
+ if (ret)
+ goto fail_rule;
+ }
+
+ /* Advance the rule status. This only works on the good flow, as the
+ * enum is ordered: creating -> created -> deleting -> deleted.
+ */
+ if (likely(rule))
+ rule->status++;
+
+ mlx5hws_send_engine_gen_comp(queue, send_attr->user_data, MLX5HWS_FLOW_OP_SUCCESS);
+
+ return;
+
+fail_rule:
+ if (likely(rule))
+ rule->status = !rule->rtc_0 && !rule->rtc_1 ?
+ MLX5HWS_RULE_STATUS_FAILED : MLX5HWS_RULE_STATUS_FAILING;
+
+ mlx5hws_send_engine_gen_comp(queue, send_attr->user_data, MLX5HWS_FLOW_OP_ERROR);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h
new file mode 100644
index 000000000000..b50825d6dc53
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_send.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_SEND_H_
+#define MLX5HWS_SEND_H_
+
+/* A single operation requires at least two WQEBBs,
+ * which means a maximum of 16 such operations per rule.
+ */
+#define MAX_WQES_PER_RULE 32
+
+enum mlx5hws_wqe_opcode {
+ MLX5HWS_WQE_OPCODE_TBL_ACCESS = 0x2c,
+};
+
+enum mlx5hws_wqe_opmod {
+ MLX5HWS_WQE_OPMOD_GTA_STE = 0,
+ MLX5HWS_WQE_OPMOD_GTA_MOD_ARG = 1,
+};
+
+enum mlx5hws_wqe_gta_opcode {
+ MLX5HWS_WQE_GTA_OP_ACTIVATE = 0,
+ MLX5HWS_WQE_GTA_OP_DEACTIVATE = 1,
+};
+
+enum mlx5hws_wqe_gta_opmod {
+ MLX5HWS_WQE_GTA_OPMOD_STE = 0,
+ MLX5HWS_WQE_GTA_OPMOD_MOD_ARG = 1,
+};
+
+enum mlx5hws_wqe_gta_sz {
+ MLX5HWS_WQE_SZ_GTA_CTRL = 48,
+ MLX5HWS_WQE_SZ_GTA_DATA = 64,
+};
+
+/* WQE Control segment. */
+struct mlx5hws_wqe_ctrl_seg {
+ __be32 opmod_idx_opcode;
+ __be32 qpn_ds;
+ __be32 flags;
+ __be32 imm;
+};
+
+struct mlx5hws_wqe_gta_ctrl_seg {
+ __be32 op_dirix;
+ __be32 stc_ix[5];
+ __be32 rsvd0[6];
+};
+
+struct mlx5hws_wqe_gta_data_seg_ste {
+ __be32 rsvd0_ctr_id;
+ __be32 rsvd1_definer;
+ __be32 rsvd2[3];
+ union {
+ struct {
+ __be32 action[3];
+ __be32 tag[8];
+ };
+ __be32 jumbo[11];
+ };
+};
+
+struct mlx5hws_wqe_gta_data_seg_arg {
+ __be32 action_args[8];
+};
+
+struct mlx5hws_wqe_gta {
+ struct mlx5hws_wqe_gta_ctrl_seg gta_ctrl;
+ union {
+ struct mlx5hws_wqe_gta_data_seg_ste seg_ste;
+ struct mlx5hws_wqe_gta_data_seg_arg seg_arg;
+ };
+};
+
+struct mlx5hws_send_ring_cq {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_cqwq wq;
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5_core_cq mcq;
+ u16 poll_wqe;
+};
+
+struct mlx5hws_send_ring_priv {
+ struct mlx5hws_rule *rule;
+ void *user_data;
+ u32 num_wqebbs;
+ u32 id;
+ u32 retry_id;
+ u32 *used_id;
+};
+
+struct mlx5hws_send_ring_dep_wqe {
+ struct mlx5hws_wqe_gta_ctrl_seg wqe_ctrl;
+ struct mlx5hws_wqe_gta_data_seg_ste wqe_data;
+ struct mlx5hws_rule *rule;
+ u32 rtc_0;
+ u32 rtc_1;
+ u32 retry_rtc_0;
+ u32 retry_rtc_1;
+ u32 direct_index;
+ void *user_data;
+};
+
+struct mlx5hws_send_ring_sq {
+ struct mlx5_core_dev *mdev;
+ u16 cur_post;
+ u16 buf_mask;
+ struct mlx5hws_send_ring_priv *wr_priv;
+ unsigned int last_idx;
+ struct mlx5hws_send_ring_dep_wqe *dep_wqe;
+ unsigned int head_dep_idx;
+ unsigned int tail_dep_idx;
+ u32 sqn;
+ struct mlx5_wq_cyc wq;
+ struct mlx5_wq_ctrl wq_ctrl;
+ void __iomem *uar_map;
+};
+
+struct mlx5hws_send_ring {
+ struct mlx5hws_send_ring_cq send_cq;
+ struct mlx5hws_send_ring_sq send_sq;
+};
+
+struct mlx5hws_completed_poll_entry {
+ void *user_data;
+ enum mlx5hws_flow_op_status status;
+};
+
+struct mlx5hws_completed_poll {
+ struct mlx5hws_completed_poll_entry *entries;
+ u16 ci;
+ u16 pi;
+ u16 mask;
+};
+
+struct mlx5hws_send_engine {
+ struct mlx5hws_send_ring send_ring;
+ struct mlx5_uars_page *uar; /* Uar is shared between rings of a queue */
+ struct mlx5hws_completed_poll completed;
+ u16 used_entries;
+ u16 num_entries;
+ bool err;
+ struct mutex lock; /* Protects the send engine */
+};
+
+struct mlx5hws_send_engine_post_ctrl {
+ struct mlx5hws_send_engine *queue;
+ struct mlx5hws_send_ring *send_ring;
+ size_t num_wqebbs;
+};
+
+struct mlx5hws_send_engine_post_attr {
+ u8 opcode;
+ u8 opmod;
+ u8 notify_hw;
+ u8 fence;
+ u8 match_definer_id;
+ u8 range_definer_id;
+ size_t len;
+ struct mlx5hws_rule *rule;
+ u32 id;
+ u32 retry_id;
+ u32 *used_id;
+ void *user_data;
+};
+
+struct mlx5hws_send_ste_attr {
+ u32 rtc_0;
+ u32 rtc_1;
+ u32 retry_rtc_0;
+ u32 retry_rtc_1;
+ u32 *used_id_rtc_0;
+ u32 *used_id_rtc_1;
+ bool wqe_tag_is_jumbo;
+ u8 gta_opcode;
+ u32 direct_index;
+ struct mlx5hws_send_engine_post_attr send_attr;
+ struct mlx5hws_rule_match_tag *wqe_tag;
+ struct mlx5hws_rule_match_tag *range_wqe_tag;
+ struct mlx5hws_wqe_gta_ctrl_seg *wqe_ctrl;
+ struct mlx5hws_wqe_gta_data_seg_ste *wqe_data;
+ struct mlx5hws_wqe_gta_data_seg_ste *range_wqe_data;
+};
+
+struct mlx5hws_send_ring_dep_wqe *
+mlx5hws_send_add_new_dep_wqe(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_abort_new_dep_wqe(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_all_dep_wqe(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_queue_close(struct mlx5hws_send_engine *queue);
+
+int mlx5hws_send_queue_open(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ u16 queue_size);
+
+void mlx5hws_send_queues_close(struct mlx5hws_context *ctx);
+
+int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
+ u16 queues,
+ u16 queue_size);
+
+int mlx5hws_send_queue_action(struct mlx5hws_context *ctx,
+ u16 queue_id,
+ u32 actions);
+
+int mlx5hws_send_test(struct mlx5hws_context *ctx,
+ u16 queues,
+ u16 queue_size);
+
+struct mlx5hws_send_engine_post_ctrl
+mlx5hws_send_engine_post_start(struct mlx5hws_send_engine *queue);
+
+void mlx5hws_send_engine_post_req_wqe(struct mlx5hws_send_engine_post_ctrl *ctrl,
+ char **buf, size_t *len);
+
+void mlx5hws_send_engine_post_end(struct mlx5hws_send_engine_post_ctrl *ctrl,
+ struct mlx5hws_send_engine_post_attr *attr);
+
+void mlx5hws_send_ste(struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ste_attr *ste_attr);
+
+void mlx5hws_send_stes_fw(struct mlx5hws_context *ctx,
+ struct mlx5hws_send_engine *queue,
+ struct mlx5hws_send_ste_attr *ste_attr);
+
+void mlx5hws_send_engine_flush_queue(struct mlx5hws_send_engine *queue);
+
+static inline bool mlx5hws_send_engine_empty(struct mlx5hws_send_engine *queue)
+{
+ struct mlx5hws_send_ring_sq *send_sq = &queue->send_ring.send_sq;
+ struct mlx5hws_send_ring_cq *send_cq = &queue->send_ring.send_cq;
+
+ return ((send_sq->cur_post & send_sq->buf_mask) == send_cq->poll_wqe);
+}
+
+static inline bool mlx5hws_send_engine_full(struct mlx5hws_send_engine *queue)
+{
+ return queue->used_entries >= queue->num_entries;
+}
+
+static inline void mlx5hws_send_engine_inc_rule(struct mlx5hws_send_engine *queue)
+{
+ queue->used_entries++;
+}
+
+static inline void mlx5hws_send_engine_dec_rule(struct mlx5hws_send_engine *queue)
+{
+ queue->used_entries--;
+}
+
+static inline void mlx5hws_send_engine_gen_comp(struct mlx5hws_send_engine *queue,
+ void *user_data,
+ int comp_status)
+{
+ struct mlx5hws_completed_poll *comp = &queue->completed;
+
+ comp->entries[comp->pi].status = comp_status;
+ comp->entries[comp->pi].user_data = user_data;
+
+ comp->pi = (comp->pi + 1) & comp->mask;
+}
+
+static inline bool mlx5hws_send_engine_err(struct mlx5hws_send_engine *queue)
+{
+ return queue->err;
+}
+
+#endif /* MLX5HWS_SEND_H_ */
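
The completed-poll ring declared above is a simple power-of-two ring: mlx5hws_send_engine_gen_comp() produces entries at 'pi' and a consumer drains them from 'ci'. A minimal consumer-side sketch, using a hypothetical helper name (the driver's real poll path is not part of this hunk):

	static inline bool hws_example_poll_one(struct mlx5hws_completed_poll *comp,
						void **user_data,
						enum mlx5hws_flow_op_status *status)
	{
		if (comp->ci == comp->pi)
			return false;	/* ring is empty */

		*user_data = comp->entries[comp->ci].user_data;
		*status = comp->entries[comp->ci].status;
		/* mask is queue_size - 1, so the increment wraps inside the ring */
		comp->ci = (comp->ci + 1) & comp->mask;
		return true;
	}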
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c
new file mode 100644
index 000000000000..8c063a8d87d7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl)
+{
+ return tbl->ft_id;
+}
+
+static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr)
+{
+ ft_attr->type = tbl->fw_ft_type;
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+ ft_attr->level = tbl->ctx->caps->fdb_ft.max_level - 1;
+ else
+ ft_attr->level = tbl->ctx->caps->nic_ft.max_level - 1;
+ ft_attr->rtc_valid = true;
+}
+
+static void hws_table_set_cap_attr(struct mlx5hws_table *tbl,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr)
+{
+ /* Enabling reformat_en or decap_en for the first flow table
+ * must be done when all VFs are down.
+ * However, HWS doesn't know when it is required to create the first FT.
+ * On the other hand, HWS doesn't use all these FT capabilities at all
+ * (the API doesn't even provide a way to specify these flags), so we'll
+ * just set these caps on all the flow tables.
+ * If HCA_CAP.fdb_dynamic_tunnel is set, this constraint is N/A.
+ */
+ if (!MLX5_CAP_ESW_FLOWTABLE(tbl->ctx->mdev, fdb_dynamic_tunnel)) {
+ ft_attr->reformat_en = true;
+ ft_attr->decap_en = true;
+ }
+}
+
+static int hws_table_up_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5hws_cmd_forward_tbl *default_miss;
+ struct mlx5hws_cmd_set_fte_dest dest = {0};
+ struct mlx5hws_context *ctx = tbl->ctx;
+ u8 tbl_type = tbl->type;
+
+ if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
+ return 0;
+
+ if (ctx->common_res[tbl_type].default_miss) {
+ ctx->common_res[tbl_type].default_miss->refcount++;
+ return 0;
+ }
+
+ ft_attr.type = tbl->fw_ft_type;
+ ft_attr.level = tbl->ctx->caps->fdb_ft.max_level; /* The last level */
+ ft_attr.rtc_valid = false;
+
+ dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.destination_id = ctx->caps->eswitch_manager_vport_number;
+
+ fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.dests_num = 1;
+ fte_attr.dests = &dest;
+
+ default_miss = mlx5hws_cmd_forward_tbl_create(ctx->mdev, &ft_attr, &fte_attr);
+ if (!default_miss) {
+ mlx5hws_err(ctx, "Failed to default miss table type: 0x%x\n", tbl_type);
+ return -EINVAL;
+ }
+
+ /* ctx->ctrl_lock must be held here */
+ ctx->common_res[tbl_type].default_miss = default_miss;
+ ctx->common_res[tbl_type].default_miss->refcount++;
+
+ return 0;
+}
+
+/* Called under ctx->ctrl_lock */
+static void hws_table_down_default_fdb_miss_tbl(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_cmd_forward_tbl *default_miss;
+ struct mlx5hws_context *ctx = tbl->ctx;
+ u8 tbl_type = tbl->type;
+
+ if (tbl->type != MLX5HWS_TABLE_TYPE_FDB)
+ return;
+
+ default_miss = ctx->common_res[tbl_type].default_miss;
+ if (--default_miss->refcount)
+ return;
+
+ mlx5hws_cmd_forward_tbl_destroy(ctx->mdev, default_miss);
+ ctx->common_res[tbl_type].default_miss = NULL;
+}
+
+static int hws_table_connect_to_default_miss_tbl(struct mlx5hws_table *tbl, u32 ft_id)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+ int ret;
+
+ if (unlikely(tbl->type != MLX5HWS_TABLE_TYPE_FDB))
+ pr_warn("HWS: invalid table type %d\n", tbl->type);
+
+ mlx5hws_cmd_set_attr_connect_miss_tbl(tbl->ctx,
+ tbl->fw_ft_type,
+ tbl->type,
+ &ft_attr);
+
+ ret = mlx5hws_cmd_flow_table_modify(tbl->ctx->mdev, &ft_attr, ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to connect FT to default FDB FT\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
+ struct mlx5hws_table *tbl,
+ u32 *ft_id)
+{
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ int ret;
+
+ hws_table_init_next_ft_attr(tbl, &ft_attr);
+ hws_table_set_cap_attr(tbl, &ft_attr);
+
+ ret = mlx5hws_cmd_flow_table_create(mdev, &ft_attr, ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed creating default ft\n");
+ return ret;
+ }
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB) {
+ /* Take/create ref over the default miss */
+ ret = hws_table_up_default_fdb_miss_tbl(tbl);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to get default fdb miss\n");
+ goto free_ft_obj;
+ }
+ ret = hws_table_connect_to_default_miss_tbl(tbl, *ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed connecting to default miss tbl\n");
+ goto down_miss_tbl;
+ }
+ }
+
+ return 0;
+
+down_miss_tbl:
+ hws_table_down_default_fdb_miss_tbl(tbl);
+free_ft_obj:
+ mlx5hws_cmd_flow_table_destroy(mdev, ft_attr.type, *ft_id);
+ return ret;
+}
+
+void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl,
+ u32 ft_id)
+{
+ mlx5hws_cmd_flow_table_destroy(tbl->ctx->mdev, tbl->fw_ft_type, ft_id);
+ hws_table_down_default_fdb_miss_tbl(tbl);
+}
+
+static int hws_table_init_check_hws_support(struct mlx5hws_context *ctx,
+ struct mlx5hws_table *tbl)
+{
+ if (!(ctx->flags & MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT)) {
+ mlx5hws_err(ctx, "HWS not supported, cannot create mlx5hws_table\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int hws_table_init(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_context *ctx = tbl->ctx;
+ int ret;
+
+ ret = hws_table_init_check_hws_support(ctx, tbl);
+ if (ret)
+ return ret;
+
+ if (mlx5hws_table_get_fw_ft_type(tbl->type, (u8 *)&tbl->fw_ft_type)) {
+ pr_warn("HWS: invalid table type %d\n", tbl->type);
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&ctx->ctrl_lock);
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &tbl->ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to create flow table object\n");
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+ }
+
+ ret = mlx5hws_action_get_default_stc(ctx, tbl->type);
+ if (ret)
+ goto tbl_destroy;
+
+ INIT_LIST_HEAD(&tbl->matchers_list);
+ INIT_LIST_HEAD(&tbl->default_miss.head);
+
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return 0;
+
+tbl_destroy:
+ mlx5hws_table_destroy_default_ft(tbl, tbl->ft_id);
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static void hws_table_uninit(struct mlx5hws_table *tbl)
+{
+ mutex_lock(&tbl->ctx->ctrl_lock);
+ mlx5hws_action_put_default_stc(tbl->ctx, tbl->type);
+ mlx5hws_table_destroy_default_ft(tbl, tbl->ft_id);
+ mutex_unlock(&tbl->ctx->ctrl_lock);
+}
+
+struct mlx5hws_table *mlx5hws_table_create(struct mlx5hws_context *ctx,
+ struct mlx5hws_table_attr *attr)
+{
+ struct mlx5hws_table *tbl;
+ int ret;
+
+ if (attr->type > MLX5HWS_TABLE_TYPE_FDB) {
+ mlx5hws_err(ctx, "Invalid table type %d\n", attr->type);
+ return NULL;
+ }
+
+ tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+ if (!tbl)
+ return NULL;
+
+ tbl->ctx = ctx;
+ tbl->type = attr->type;
+ tbl->level = attr->level;
+
+ ret = hws_table_init(tbl);
+ if (ret) {
+ mlx5hws_err(ctx, "Failed to initialise table\n");
+ goto free_tbl;
+ }
+
+ mutex_lock(&ctx->ctrl_lock);
+ list_add(&tbl->tbl_list_node, &ctx->tbl_list);
+ mutex_unlock(&ctx->ctrl_lock);
+
+ return tbl;
+
+free_tbl:
+ kfree(tbl);
+ return NULL;
+}
+
+int mlx5hws_table_destroy(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_context *ctx = tbl->ctx;
+ int ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+ if (!list_empty(&tbl->matchers_list)) {
+ mlx5hws_err(tbl->ctx, "Cannot destroy table containing matchers\n");
+ ret = -EBUSY;
+ goto unlock_err;
+ }
+
+ if (!list_empty(&tbl->default_miss.head)) {
+ mlx5hws_err(tbl->ctx, "Cannot destroy table pointed by default miss\n");
+ ret = -EBUSY;
+ goto unlock_err;
+ }
+
+ list_del_init(&tbl->tbl_list_node);
+ mutex_unlock(&ctx->ctrl_lock);
+
+ hws_table_uninit(tbl);
+ kfree(tbl);
+
+ return 0;
+
+unlock_err:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
+
+static u32 hws_table_get_last_ft(struct mlx5hws_table *tbl)
+{
+ struct mlx5hws_matcher *matcher;
+
+ if (list_empty(&tbl->matchers_list))
+ return tbl->ft_id;
+
+ matcher = list_last_entry(&tbl->matchers_list, struct mlx5hws_matcher, list_node);
+ return matcher->end_ft_id;
+}
+
+int mlx5hws_table_ft_set_default_next_ft(struct mlx5hws_table *tbl, u32 ft_id)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+ int ret;
+
+ /* Due to FW limitation, resetting the flow table to default action will
+ * disconnect RTC when ignore_flow_level_rtc_valid is not supported.
+ */
+ if (!tbl->ctx->caps->nic_ft.ignore_flow_level_rtc_valid)
+ return 0;
+
+ if (tbl->type == MLX5HWS_TABLE_TYPE_FDB)
+ return hws_table_connect_to_default_miss_tbl(tbl, ft_id);
+
+ ft_attr.type = tbl->fw_ft_type;
+ ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
+ ft_attr.table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_DEFAULT;
+
+ ret = mlx5hws_cmd_flow_table_modify(tbl->ctx->mdev, &ft_attr, ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to set FT default miss action\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 rtc_0_id,
+ u32 rtc_1_id)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+
+ ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_RTC_ID;
+ ft_attr.type = fw_ft_type;
+ ft_attr.rtc_id_0 = rtc_0_id;
+ ft_attr.rtc_id_1 = rtc_1_id;
+
+ return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id);
+}
+
+static int hws_table_ft_set_next_ft(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 next_ft_id)
+{
+ struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
+
+ ft_attr.modify_fs = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION;
+ ft_attr.table_miss_action = MLX5_IFC_MODIFY_FLOW_TABLE_MISS_ACTION_GOTO_TBL;
+ ft_attr.type = fw_ft_type;
+ ft_attr.table_miss_id = next_ft_id;
+
+ return mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, ft_id);
+}
+
+int mlx5hws_table_update_connected_miss_tables(struct mlx5hws_table *dst_tbl)
+{
+ struct mlx5hws_table *src_tbl;
+ int ret;
+
+ if (list_empty(&dst_tbl->default_miss.head))
+ return 0;
+
+ list_for_each_entry(src_tbl, &dst_tbl->default_miss.head, default_miss.next) {
+ ret = mlx5hws_table_connect_to_miss_table(src_tbl, dst_tbl);
+ if (ret) {
+ mlx5hws_err(dst_tbl->ctx,
+ "Failed to update source miss table, unexpected behavior\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl,
+ struct mlx5hws_table *dst_tbl)
+{
+ struct mlx5hws_matcher *matcher;
+ u32 last_ft_id;
+ int ret;
+
+ last_ft_id = hws_table_get_last_ft(src_tbl);
+
+ if (dst_tbl) {
+ if (list_empty(&dst_tbl->matchers_list)) {
+ /* Connect src_tbl last_ft to dst_tbl start anchor */
+ ret = hws_table_ft_set_next_ft(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ dst_tbl->ft_id);
+ if (ret)
+ return ret;
+
+ /* Reset last_ft RTC to default RTC */
+ ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ 0, 0);
+ if (ret)
+ return ret;
+ } else {
+ /* Connect src_tbl last_ft to first matcher RTC */
+ matcher = list_first_entry(&dst_tbl->matchers_list,
+ struct mlx5hws_matcher,
+ list_node);
+ ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ matcher->match_ste.rtc_0_id,
+ matcher->match_ste.rtc_1_id);
+ if (ret)
+ return ret;
+
+ /* Reset next miss FT to default */
+ ret = mlx5hws_table_ft_set_default_next_ft(src_tbl, last_ft_id);
+ if (ret)
+ return ret;
+ }
+ } else {
+ /* Reset next miss FT to default */
+ ret = mlx5hws_table_ft_set_default_next_ft(src_tbl, last_ft_id);
+ if (ret)
+ return ret;
+
+ /* Reset last_ft RTC to default RTC */
+ ret = mlx5hws_table_ft_set_next_rtc(src_tbl->ctx,
+ last_ft_id,
+ src_tbl->fw_ft_type,
+ 0, 0);
+ if (ret)
+ return ret;
+ }
+
+ src_tbl->default_miss.miss_tbl = dst_tbl;
+
+ return 0;
+}
+
+static int hws_table_set_default_miss_not_valid(struct mlx5hws_table *tbl,
+ struct mlx5hws_table *miss_tbl)
+{
+ if (!tbl->ctx->caps->nic_ft.ignore_flow_level_rtc_valid) {
+ mlx5hws_err(tbl->ctx, "Default miss table is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (miss_tbl && miss_tbl->type != tbl->type) {
+ mlx5hws_err(tbl->ctx, "Invalid arguments\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
+ struct mlx5hws_table *miss_tbl)
+{
+ struct mlx5hws_context *ctx = tbl->ctx;
+ struct mlx5hws_table *old_miss_tbl;
+ int ret;
+
+ ret = hws_table_set_default_miss_not_valid(tbl, miss_tbl);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ctx->ctrl_lock);
+
+ old_miss_tbl = tbl->default_miss.miss_tbl;
+ ret = mlx5hws_table_connect_to_miss_table(tbl, miss_tbl);
+ if (ret)
+ goto out;
+
+ if (old_miss_tbl)
+ list_del_init(&tbl->default_miss.next);
+
+ old_miss_tbl = tbl->default_miss.miss_tbl;
+ if (old_miss_tbl)
+ list_del_init(&old_miss_tbl->default_miss.head);
+
+ if (miss_tbl)
+ list_add(&tbl->default_miss.next, &miss_tbl->default_miss.head);
+
+ mutex_unlock(&ctx->ctrl_lock);
+ return 0;
+out:
+ mutex_unlock(&ctx->ctrl_lock);
+ return ret;
+}
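
A minimal usage sketch of the table API defined in this file, not taken from the driver: create two FDB tables, point the first table's default miss at the second, then tear everything down. This assumes the device reports the ignore_flow_level_rtc_valid capability (otherwise mlx5hws_table_set_default_miss() returns -EOPNOTSUPP); error handling is trimmed for brevity:

	static int hws_example_table_chain(struct mlx5hws_context *ctx)
	{
		struct mlx5hws_table_attr attr = {
			.type = MLX5HWS_TABLE_TYPE_FDB,
			.level = 0,
		};
		struct mlx5hws_table *tbl1, *tbl2;
		int ret;

		tbl1 = mlx5hws_table_create(ctx, &attr);
		if (!tbl1)
			return -EINVAL;

		attr.level = 1;
		tbl2 = mlx5hws_table_create(ctx, &attr);
		if (!tbl2) {
			mlx5hws_table_destroy(tbl1);
			return -EINVAL;
		}

		/* Packets that miss in tbl1 are forwarded to tbl2 */
		ret = mlx5hws_table_set_default_miss(tbl1, tbl2);

		/* Unlink before destroying tbl2, or its destroy fails with -EBUSY */
		mlx5hws_table_set_default_miss(tbl1, NULL);
		mlx5hws_table_destroy(tbl2);
		mlx5hws_table_destroy(tbl1);

		return ret;
	}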
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h
new file mode 100644
index 000000000000..dd50420eec9e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_table.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_TABLE_H_
+#define MLX5HWS_TABLE_H_
+
+struct mlx5hws_default_miss {
+ /* My miss table */
+ struct mlx5hws_table *miss_tbl;
+ struct list_head next;
+ /* Tables missing to my table */
+ struct list_head head;
+};
+
+struct mlx5hws_table {
+ struct mlx5hws_context *ctx;
+ u32 ft_id;
+ enum mlx5hws_table_type type;
+ u32 fw_ft_type;
+ u32 level;
+ struct list_head matchers_list;
+ struct list_head tbl_list_node;
+ struct mlx5hws_default_miss default_miss;
+};
+
+static inline
+int mlx5hws_table_get_fw_ft_type(enum mlx5hws_table_type type,
+ u8 *ret_type)
+{
+ if (type != MLX5HWS_TABLE_TYPE_FDB)
+ return -EOPNOTSUPP;
+
+ *ret_type = FS_FT_FDB;
+
+ return 0;
+}
+
+static inline
+u32 mlx5hws_table_get_res_fw_ft_type(enum mlx5hws_table_type tbl_type,
+ bool is_mirror)
+{
+ if (tbl_type == MLX5HWS_TABLE_TYPE_FDB)
+ return is_mirror ? FS_FT_FDB_TX : FS_FT_FDB_RX;
+
+ return 0;
+}
+
+int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
+ struct mlx5hws_table *tbl,
+ u32 *ft_id);
+
+void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl,
+ u32 ft_id);
+
+int mlx5hws_table_connect_to_miss_table(struct mlx5hws_table *src_tbl,
+ struct mlx5hws_table *dst_tbl);
+
+int mlx5hws_table_update_connected_miss_tables(struct mlx5hws_table *dst_tbl);
+
+int mlx5hws_table_ft_set_default_next_ft(struct mlx5hws_table *tbl, u32 ft_id);
+
+int mlx5hws_table_ft_set_next_rtc(struct mlx5hws_context *ctx,
+ u32 ft_id,
+ u32 fw_ft_type,
+ u32 rtc_0_id,
+ u32 rtc_1_id);
+
+#endif /* MLX5HWS_TABLE_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c
new file mode 100644
index 000000000000..faf42421c43f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#include "mlx5hws_internal.h"
+
+int mlx5hws_vport_init_vports(struct mlx5hws_context *ctx)
+{
+ int ret;
+
+ if (!ctx->caps->eswitch_manager)
+ return 0;
+
+ xa_init(&ctx->vports.vport_gvmi_xa);
+
+ /* Set gvmi for eswitch manager and uplink vports only. Rest of the vports
+ * (vport 0 of other function, VFs and SFs) will be queried dynamically.
+ */
+
+ ret = mlx5hws_cmd_query_gvmi(ctx->mdev, false, 0, &ctx->vports.esw_manager_gvmi);
+ if (ret)
+ return ret;
+
+ ctx->vports.uplink_gvmi = 0;
+ return 0;
+}
+
+void mlx5hws_vport_uninit_vports(struct mlx5hws_context *ctx)
+{
+ if (ctx->caps->eswitch_manager)
+ xa_destroy(&ctx->vports.vport_gvmi_xa);
+}
+
+static int hws_vport_add_gvmi(struct mlx5hws_context *ctx, u16 vport)
+{
+ u16 vport_gvmi;
+ int ret;
+
+ ret = mlx5hws_cmd_query_gvmi(ctx->mdev, true, vport, &vport_gvmi);
+ if (ret)
+ return -EINVAL;
+
+ ret = xa_insert(&ctx->vports.vport_gvmi_xa, vport,
+ xa_mk_value(vport_gvmi), GFP_KERNEL);
+ if (ret)
+ mlx5hws_dbg(ctx, "Couldn't insert new vport gvmi into xarray (%d)\n", ret);
+
+ return ret;
+}
+
+static bool hws_vport_is_esw_mgr_vport(struct mlx5hws_context *ctx, u16 vport)
+{
+ return ctx->caps->is_ecpf ? vport == MLX5_VPORT_ECPF :
+ vport == MLX5_VPORT_PF;
+}
+
+int mlx5hws_vport_get_gvmi(struct mlx5hws_context *ctx, u16 vport, u16 *vport_gvmi)
+{
+ void *entry;
+ int ret;
+
+ if (!ctx->caps->eswitch_manager)
+ return -EINVAL;
+
+ if (hws_vport_is_esw_mgr_vport(ctx, vport)) {
+ *vport_gvmi = ctx->vports.esw_manager_gvmi;
+ return 0;
+ }
+
+ if (vport == MLX5_VPORT_UPLINK) {
+ *vport_gvmi = ctx->vports.uplink_gvmi;
+ return 0;
+ }
+
+load_entry:
+ entry = xa_load(&ctx->vports.vport_gvmi_xa, vport);
+
+ if (!xa_is_value(entry)) {
+ ret = hws_vport_add_gvmi(ctx, vport);
+ if (ret && ret != -EBUSY)
+ return ret;
+ goto load_entry;
+ }
+
+ *vport_gvmi = (u16)xa_to_value(entry);
+ return 0;
+}
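
The gvmi lookup above caches results as xarray value entries: a miss triggers a firmware query via hws_vport_add_gvmi(), and an xa_insert() returning -EBUSY simply means another thread won the race, so the code loops back to load_entry and reuses the freshly inserted value. A minimal caller-side sketch, with a hypothetical function name:

	static int hws_example_resolve_gvmi(struct mlx5hws_context *ctx, u16 vport)
	{
		u16 gvmi;
		int ret;

		ret = mlx5hws_vport_get_gvmi(ctx, vport, &gvmi);
		if (ret)
			return ret;

		mlx5hws_dbg(ctx, "vport %u maps to gvmi 0x%x\n", vport, gvmi);
		return 0;
	}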
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h
new file mode 100644
index 000000000000..0912fc166b3a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws_vport.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+
+#ifndef MLX5HWS_VPORT_H_
+#define MLX5HWS_VPORT_H_
+
+int mlx5hws_vport_init_vports(struct mlx5hws_context *ctx);
+
+void mlx5hws_vport_uninit_vports(struct mlx5hws_context *ctx);
+
+int mlx5hws_vport_get_gvmi(struct mlx5hws_context *ctx, u16 vport, u16 *vport_gvmi);
+
+#endif /* MLX5HWS_VPORT_H_ */
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index ce2435987fb6..ee046468652c 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -46,16 +46,18 @@ config LAN743X
tristate "LAN743x support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
- select PHYLIB
select FIXED_PHY
select CRC16
select CRC32
+ select PHYLINK
help
- Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip
+ Support for the Microchip LAN743x and PCI11x1x families of PCI
+ Express Ethernet devices.
To compile this driver as a module, choose M here. The module will be
called lan743x.
+source "drivers/net/ethernet/microchip/lan865x/Kconfig"
source "drivers/net/ethernet/microchip/lan966x/Kconfig"
source "drivers/net/ethernet/microchip/sparx5/Kconfig"
source "drivers/net/ethernet/microchip/vcap/Kconfig"
diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile
index 94045537b643..3c65baed9fd8 100644
--- a/drivers/net/ethernet/microchip/Makefile
+++ b/drivers/net/ethernet/microchip/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_LAN743X) += lan743x.o
lan743x-objs := lan743x_main.o lan743x_ethtool.o lan743x_ptp.o
+obj-$(CONFIG_LAN865X) += lan865x/
obj-$(CONFIG_LAN966X_SWITCH) += lan966x/
obj-$(CONFIG_SPARX5_SWITCH) += sparx5/
obj-$(CONFIG_VCAP) += vcap/
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 0f1c0edec460..1a1cbd034eda 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -1054,61 +1054,55 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev,
struct ethtool_keee *eee)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- struct phy_device *phydev = netdev->phydev;
- u32 buf;
- int ret;
-
- if (!phydev)
- return -EIO;
- if (!phydev->drv) {
- netif_err(adapter, drv, adapter->netdev,
- "Missing PHY Driver\n");
- return -EIO;
- }
- ret = phy_ethtool_get_eee(phydev, eee);
- if (ret < 0)
- return ret;
+ eee->tx_lpi_timer = lan743x_csr_read(adapter,
+ MAC_EEE_TX_LPI_REQ_DLY_CNT);
- buf = lan743x_csr_read(adapter, MAC_CR);
- if (buf & MAC_CR_EEE_EN_) {
- /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
- buf = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
- eee->tx_lpi_timer = buf;
- } else {
- eee->tx_lpi_timer = 0;
- }
-
- return 0;
+ return phylink_ethtool_get_eee(adapter->phylink, eee);
}
static int lan743x_ethtool_set_eee(struct net_device *netdev,
struct ethtool_keee *eee)
{
- struct lan743x_adapter *adapter;
- struct phy_device *phydev;
- u32 buf = 0;
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u32 tx_lpi_timer;
- if (!netdev)
- return -EINVAL;
- adapter = netdev_priv(netdev);
- if (!adapter)
- return -EINVAL;
- phydev = netdev->phydev;
- if (!phydev)
- return -EIO;
- if (!phydev->drv) {
- netif_err(adapter, drv, adapter->netdev,
- "Missing PHY Driver\n");
- return -EIO;
- }
+ tx_lpi_timer = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
+ if (tx_lpi_timer != eee->tx_lpi_timer) {
+ u32 mac_cr = lan743x_csr_read(adapter, MAC_CR);
+
+ /* Software should only change this field when Energy Efficient
+ * Ethernet Enable (EEEEN) is cleared.
+ * This function will trigger an autonegotiation restart and
+ * eee will be reenabled during link up if eee was negotiated.
+ */
+ lan743x_mac_eee_enable(adapter, false);
+ lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT,
+ eee->tx_lpi_timer);
- if (eee->eee_enabled) {
- buf = (u32)eee->tx_lpi_timer;
- lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, buf);
+ if (mac_cr & MAC_CR_EEE_EN_)
+ lan743x_mac_eee_enable(adapter, true);
}
- return phy_ethtool_set_eee(phydev, eee);
+ return phylink_ethtool_set_eee(adapter->phylink, eee);
+}
+
+static int
+lan743x_ethtool_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ return phylink_ethtool_ksettings_set(adapter->phylink, cmd);
+}
+
+static int
+lan743x_ethtool_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ return phylink_ethtool_ksettings_get(adapter->phylink, cmd);
}
#ifdef CONFIG_PM
@@ -1120,8 +1114,7 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
wol->supported = 0;
wol->wolopts = 0;
- if (netdev->phydev)
- phy_ethtool_get_wol(netdev->phydev, wol);
+ phylink_ethtool_get_wol(adapter->phylink, wol);
if (wol->supported != adapter->phy_wol_supported)
netif_warn(adapter, drv, adapter->netdev,
@@ -1162,7 +1155,7 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
!(adapter->phy_wol_supported & WAKE_MAGICSECURE))
phy_wol.wolopts &= ~WAKE_MAGIC;
- ret = phy_ethtool_set_wol(netdev->phydev, &phy_wol);
+ ret = phylink_ethtool_set_wol(adapter->phylink, wol);
if (ret && (ret != -EOPNOTSUPP))
return ret;
@@ -1351,44 +1344,16 @@ static void lan743x_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct lan743x_adapter *adapter = netdev_priv(dev);
- struct lan743x_phy *phy = &adapter->phy;
- if (phy->fc_request_control & FLOW_CTRL_TX)
- pause->tx_pause = 1;
- if (phy->fc_request_control & FLOW_CTRL_RX)
- pause->rx_pause = 1;
- pause->autoneg = phy->fc_autoneg;
+ phylink_ethtool_get_pauseparam(adapter->phylink, pause);
}
static int lan743x_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct lan743x_adapter *adapter = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- struct lan743x_phy *phy = &adapter->phy;
-
- if (!phydev)
- return -ENODEV;
-
- if (!phy_validate_pause(phydev, pause))
- return -EINVAL;
-
- phy->fc_request_control = 0;
- if (pause->rx_pause)
- phy->fc_request_control |= FLOW_CTRL_RX;
- if (pause->tx_pause)
- phy->fc_request_control |= FLOW_CTRL_TX;
-
- phy->fc_autoneg = pause->autoneg;
-
- if (pause->autoneg == AUTONEG_DISABLE)
- lan743x_mac_flow_ctrl_set_enables(adapter, pause->tx_pause,
- pause->rx_pause);
- else
- phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);
-
- return 0;
+ return phylink_ethtool_set_pauseparam(adapter->phylink, pause);
}
const struct ethtool_ops lan743x_ethtool_ops = {
@@ -1413,8 +1378,8 @@ const struct ethtool_ops lan743x_ethtool_ops = {
.get_ts_info = lan743x_ethtool_get_ts_info,
.get_eee = lan743x_ethtool_get_eee,
.set_eee = lan743x_ethtool_set_eee,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = lan743x_ethtool_get_link_ksettings,
+ .set_link_ksettings = lan743x_ethtool_set_link_ksettings,
.get_regs_len = lan743x_get_regs_len,
.get_regs = lan743x_get_regs,
.get_pauseparam = lan743x_get_pauseparam,
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index e418539565b1..4dc5adcda6a3 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -15,6 +15,7 @@
#include <linux/rtnetlink.h>
#include <linux/iopoll.h>
#include <linux/crc16.h>
+#include <linux/phylink.h>
#include "lan743x_main.h"
#include "lan743x_ethtool.h"
@@ -992,6 +993,42 @@ static int lan743x_sgmii_write(struct lan743x_adapter *adapter,
return ret;
}
+static int lan743x_get_lsd(int speed, int duplex, u8 mss)
+{
+ int lsd;
+
+ switch (speed) {
+ case SPEED_2500:
+ if (mss == MASTER_SLAVE_STATE_SLAVE)
+ lsd = LINK_2500_SLAVE;
+ else
+ lsd = LINK_2500_MASTER;
+ break;
+ case SPEED_1000:
+ if (mss == MASTER_SLAVE_STATE_SLAVE)
+ lsd = LINK_1000_SLAVE;
+ else
+ lsd = LINK_1000_MASTER;
+ break;
+ case SPEED_100:
+ if (duplex == DUPLEX_FULL)
+ lsd = LINK_100FD;
+ else
+ lsd = LINK_100HD;
+ break;
+ case SPEED_10:
+ if (duplex == DUPLEX_FULL)
+ lsd = LINK_10FD;
+ else
+ lsd = LINK_10HD;
+ break;
+ default:
+ lsd = -EINVAL;
+ }
+
+ return lsd;
+}
+
static int lan743x_sgmii_mpll_set(struct lan743x_adapter *adapter,
u16 baud)
{
@@ -1041,26 +1078,7 @@ static int lan743x_sgmii_2_5G_mode_set(struct lan743x_adapter *adapter,
VR_MII_BAUD_RATE_1P25GBPS);
}
-static int lan743x_is_sgmii_2_5G_mode(struct lan743x_adapter *adapter,
- bool *status)
-{
- int ret;
-
- ret = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2,
- VR_MII_GEN2_4_MPLL_CTRL1);
- if (ret < 0)
- return ret;
-
- if (ret == VR_MII_MPLL_MULTIPLIER_125 ||
- ret == VR_MII_MPLL_MULTIPLIER_50)
- *status = true;
- else
- *status = false;
-
- return 0;
-}
-
-static int lan743x_sgmii_aneg_update(struct lan743x_adapter *adapter)
+static int lan743x_serdes_clock_and_aneg_update(struct lan743x_adapter *adapter)
{
enum lan743x_sgmii_lsd lsd = adapter->sgmii_lsd;
int mii_ctrl;
@@ -1147,68 +1165,11 @@ static int lan743x_pcs_seq_state(struct lan743x_adapter *adapter, u8 state)
return 0;
}
-static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
+static int lan743x_pcs_power_reset(struct lan743x_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
- struct phy_device *phydev = netdev->phydev;
- enum lan743x_sgmii_lsd lsd = POWER_DOWN;
int mii_ctl;
- bool status;
int ret;
- switch (phydev->speed) {
- case SPEED_2500:
- if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
- lsd = LINK_2500_MASTER;
- else
- lsd = LINK_2500_SLAVE;
- break;
- case SPEED_1000:
- if (phydev->master_slave_state == MASTER_SLAVE_STATE_MASTER)
- lsd = LINK_1000_MASTER;
- else
- lsd = LINK_1000_SLAVE;
- break;
- case SPEED_100:
- if (phydev->duplex)
- lsd = LINK_100FD;
- else
- lsd = LINK_100HD;
- break;
- case SPEED_10:
- if (phydev->duplex)
- lsd = LINK_10FD;
- else
- lsd = LINK_10HD;
- break;
- default:
- netif_err(adapter, drv, adapter->netdev,
- "Invalid speed %d\n", phydev->speed);
- return -EINVAL;
- }
-
- adapter->sgmii_lsd = lsd;
- ret = lan743x_sgmii_aneg_update(adapter);
- if (ret < 0) {
- netif_err(adapter, drv, adapter->netdev,
- "error %d SGMII cfg failed\n", ret);
- return ret;
- }
-
- ret = lan743x_is_sgmii_2_5G_mode(adapter, &status);
- if (ret < 0) {
- netif_err(adapter, drv, adapter->netdev,
- "error %d SGMII get mode failed\n", ret);
- return ret;
- }
-
- if (status)
- netif_dbg(adapter, drv, adapter->netdev,
- "SGMII 2.5G mode enable\n");
- else
- netif_dbg(adapter, drv, adapter->netdev,
- "SGMII 1G mode enable\n");
-
/* SGMII/1000/2500BASE-X PCS power down */
mii_ctl = lan743x_sgmii_read(adapter, MDIO_MMD_VEND2, MII_BMCR);
if (mii_ctl < 0)
@@ -1229,11 +1190,7 @@ static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
if (ret < 0)
return ret;
- ret = lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP);
- if (ret < 0)
- return ret;
-
- return 0;
+ return lan743x_pcs_seq_state(adapter, PCS_POWER_STATE_UP);
}
static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
@@ -1389,103 +1346,11 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter)
50000, 1000000);
}
-static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
- u16 local_adv, u16 remote_adv)
-{
- struct lan743x_phy *phy = &adapter->phy;
- u8 cap;
-
- if (phy->fc_autoneg)
- cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
- else
- cap = phy->fc_request_control;
-
- lan743x_mac_flow_ctrl_set_enables(adapter,
- cap & FLOW_CTRL_TX,
- cap & FLOW_CTRL_RX);
-}
-
static int lan743x_phy_init(struct lan743x_adapter *adapter)
{
return lan743x_phy_reset(adapter);
}
-static void lan743x_phy_link_status_change(struct net_device *netdev)
-{
- struct lan743x_adapter *adapter = netdev_priv(netdev);
- struct phy_device *phydev = netdev->phydev;
- u32 data;
-
- phy_print_status(phydev);
- if (phydev->state == PHY_RUNNING) {
- int remote_advertisement = 0;
- int local_advertisement = 0;
-
- data = lan743x_csr_read(adapter, MAC_CR);
-
- /* set duplex mode */
- if (phydev->duplex)
- data |= MAC_CR_DPX_;
- else
- data &= ~MAC_CR_DPX_;
-
- /* set bus speed */
- switch (phydev->speed) {
- case SPEED_10:
- data &= ~MAC_CR_CFG_H_;
- data &= ~MAC_CR_CFG_L_;
- break;
- case SPEED_100:
- data &= ~MAC_CR_CFG_H_;
- data |= MAC_CR_CFG_L_;
- break;
- case SPEED_1000:
- data |= MAC_CR_CFG_H_;
- data &= ~MAC_CR_CFG_L_;
- break;
- case SPEED_2500:
- data |= MAC_CR_CFG_H_;
- data |= MAC_CR_CFG_L_;
- break;
- }
- lan743x_csr_write(adapter, MAC_CR, data);
-
- local_advertisement =
- linkmode_adv_to_mii_adv_t(phydev->advertising);
- remote_advertisement =
- linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
-
- lan743x_phy_update_flowcontrol(adapter, local_advertisement,
- remote_advertisement);
- lan743x_ptp_update_latency(adapter, phydev->speed);
- if (phydev->interface == PHY_INTERFACE_MODE_SGMII ||
- phydev->interface == PHY_INTERFACE_MODE_1000BASEX ||
- phydev->interface == PHY_INTERFACE_MODE_2500BASEX)
- lan743x_sgmii_config(adapter);
-
- data = lan743x_csr_read(adapter, MAC_CR);
- if (phydev->enable_tx_lpi)
- data |= MAC_CR_EEE_EN_;
- else
- data &= ~MAC_CR_EEE_EN_;
- lan743x_csr_write(adapter, MAC_CR, data);
- }
-}
-
-static void lan743x_phy_close(struct lan743x_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct phy_device *phydev = netdev->phydev;
-
- phy_stop(netdev->phydev);
- phy_disconnect(netdev->phydev);
-
- /* using phydev here as phy_disconnect NULLs netdev->phydev */
- if (phy_is_pseudo_fixed_link(phydev))
- fixed_phy_unregister(phydev);
-
-}
-
static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
{
u32 id_rev;
@@ -1502,65 +1367,9 @@ static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
adapter->phy_interface = PHY_INTERFACE_MODE_MII;
else
adapter->phy_interface = PHY_INTERFACE_MODE_RGMII;
-}
-
-static int lan743x_phy_open(struct lan743x_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct lan743x_phy *phy = &adapter->phy;
- struct fixed_phy_status fphy_status = {
- .link = 1,
- .speed = SPEED_1000,
- .duplex = DUPLEX_FULL,
- };
- struct phy_device *phydev;
- int ret = -EIO;
-
- /* try devicetree phy, or fixed link */
- phydev = of_phy_get_and_connect(netdev, adapter->pdev->dev.of_node,
- lan743x_phy_link_status_change);
-
- if (!phydev) {
- /* try internal phy */
- phydev = phy_find_first(adapter->mdiobus);
- if (!phydev) {
- if ((adapter->csr.id_rev & ID_REV_ID_MASK_) ==
- ID_REV_ID_LAN7431_) {
- phydev = fixed_phy_register(PHY_POLL,
- &fphy_status, NULL);
- if (IS_ERR(phydev)) {
- netdev_err(netdev, "No PHY/fixed_PHY found\n");
- return PTR_ERR(phydev);
- }
- } else {
- goto return_error;
- }
- }
-
- lan743x_phy_interface_select(adapter);
-
- ret = phy_connect_direct(netdev, phydev,
- lan743x_phy_link_status_change,
- adapter->phy_interface);
- if (ret)
- goto return_error;
- }
-
- /* MAC doesn't support 1000T Half */
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
-
- /* support both flow controls */
- phy_support_asym_pause(phydev);
- phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
- phy->fc_autoneg = phydev->autoneg;
-
- phy_start(phydev);
- phy_start_aneg(phydev);
- phy_attached_info(phydev);
- return 0;
-return_error:
- return ret;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "selected phy interface: 0x%X\n", adapter->phy_interface);
}
static void lan743x_rfe_open(struct lan743x_adapter *adapter)
@@ -3061,6 +2870,336 @@ return_error:
return ret;
}
+static int lan743x_phylink_sgmii_config(struct lan743x_adapter *adapter)
+{
+ u32 sgmii_ctl;
+ int ret;
+
+ ret = lan743x_get_lsd(SPEED_1000, DUPLEX_FULL,
+ MASTER_SLAVE_STATE_MASTER);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d link-speed-duplex(LSD) invalid\n", ret);
+ return ret;
+ }
+
+ adapter->sgmii_lsd = ret;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd);
+
+ /* LINK_STATUS_SOURCE from the External PHY via SGMII */
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl &= ~SGMII_CTL_LINK_STATUS_SOURCE_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+
+ ret = lan743x_serdes_clock_and_aneg_update(adapter);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d sgmii aneg update failed\n", ret);
+ return ret;
+ }
+
+ return lan743x_pcs_power_reset(adapter);
+}
+
+static int lan743x_phylink_1000basex_config(struct lan743x_adapter *adapter)
+{
+ u32 sgmii_ctl;
+ int ret;
+
+ ret = lan743x_get_lsd(SPEED_1000, DUPLEX_FULL,
+ MASTER_SLAVE_STATE_MASTER);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d link-speed-duplex(LSD) invalid\n", ret);
+ return ret;
+ }
+
+ adapter->sgmii_lsd = ret;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd);
+
+ /* LINK_STATUS_SOURCE from 1000BASE-X PCS link status */
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl |= SGMII_CTL_LINK_STATUS_SOURCE_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+
+ ret = lan743x_serdes_clock_and_aneg_update(adapter);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d 1000basex aneg update failed\n", ret);
+ return ret;
+ }
+
+ return lan743x_pcs_power_reset(adapter);
+}
+
+static int lan743x_phylink_2500basex_config(struct lan743x_adapter *adapter)
+{
+ u32 sgmii_ctl;
+ int ret;
+
+ ret = lan743x_get_lsd(SPEED_2500, DUPLEX_FULL,
+ MASTER_SLAVE_STATE_MASTER);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d link-speed-duplex(LSD) invalid\n", ret);
+ return ret;
+ }
+
+ adapter->sgmii_lsd = ret;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "Link Speed Duplex (lsd) : 0x%X\n", adapter->sgmii_lsd);
+
+ /* LINK_STATUS_SOURCE from 2500BASE-X PCS link status */
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl |= SGMII_CTL_LINK_STATUS_SOURCE_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+
+ ret = lan743x_serdes_clock_and_aneg_update(adapter);
+ if (ret < 0) {
+ netif_err(adapter, drv, adapter->netdev,
+ "error %d 2500basex aneg update failed\n", ret);
+ return ret;
+ }
+
+ return lan743x_pcs_power_reset(adapter);
+}
+
+void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable)
+{
+ u32 mac_cr;
+
+ mac_cr = lan743x_csr_read(adapter, MAC_CR);
+ if (enable)
+ mac_cr |= MAC_CR_EEE_EN_;
+ else
+ mac_cr &= ~MAC_CR_EEE_EN_;
+ lan743x_csr_write(adapter, MAC_CR, mac_cr);
+}
+
+static void lan743x_phylink_mac_config(struct phylink_config *config,
+ unsigned int link_an_mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int ret;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_2500BASEX:
+ ret = lan743x_phylink_2500basex_config(adapter);
+ if (ret < 0)
+ netif_err(adapter, drv, adapter->netdev,
+ "2500BASEX config failed. Error %d\n", ret);
+ else
+ netif_dbg(adapter, drv, adapter->netdev,
+ "2500BASEX mode selected and configured\n");
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ ret = lan743x_phylink_1000basex_config(adapter);
+ if (ret < 0)
+ netif_err(adapter, drv, adapter->netdev,
+ "1000BASEX config failed. Error %d\n", ret);
+ else
+ netif_dbg(adapter, drv, adapter->netdev,
+ "1000BASEX mode selected and configured\n");
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ ret = lan743x_phylink_sgmii_config(adapter);
+ if (ret < 0)
+ netif_err(adapter, drv, adapter->netdev,
+ "SGMII config failed. Error %d\n", ret);
+ else
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII mode selected and configured\n");
+ break;
+ default:
+ netif_dbg(adapter, drv, adapter->netdev,
+ "RGMII/GMII/MII(0x%X) mode enable\n",
+ state->interface);
+ break;
+ }
+}
+
+static void lan743x_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int link_an_mode,
+ phy_interface_t interface)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
+ netif_tx_stop_all_queues(to_net_dev(config->dev));
+ lan743x_mac_eee_enable(adapter, false);
+}
+
+static void lan743x_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
+ unsigned int link_an_mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+ int mac_cr;
+ u8 cap;
+
+ mac_cr = lan743x_csr_read(adapter, MAC_CR);
+ /* Pre-initialize register bits.
+ * Resulting value corresponds to SPEED_10
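+ * (CFG_H_ = 0, CFG_L_ = 0). The other speeds map to:
+ * 100 Mbps = CFG_L_, 1000 Mbps = CFG_H_, 2500 Mbps = CFG_H_ | CFG_L_.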
+ */
+ mac_cr &= ~(MAC_CR_CFG_H_ | MAC_CR_CFG_L_);
+ if (speed == SPEED_2500)
+ mac_cr |= MAC_CR_CFG_H_ | MAC_CR_CFG_L_;
+ else if (speed == SPEED_1000)
+ mac_cr |= MAC_CR_CFG_H_;
+ else if (speed == SPEED_100)
+ mac_cr |= MAC_CR_CFG_L_;
+
+ lan743x_csr_write(adapter, MAC_CR, mac_cr);
+
+ lan743x_ptp_update_latency(adapter, speed);
+
+ /* Flow Control operation */
+ cap = 0;
+ if (tx_pause)
+ cap |= FLOW_CTRL_TX;
+ if (rx_pause)
+ cap |= FLOW_CTRL_RX;
+
+ lan743x_mac_flow_ctrl_set_enables(adapter,
+ cap & FLOW_CTRL_TX,
+ cap & FLOW_CTRL_RX);
+
+ if (phydev)
+ lan743x_mac_eee_enable(adapter, phydev->enable_tx_lpi);
+
+ netif_tx_wake_all_queues(netdev);
+}
+
+static const struct phylink_mac_ops lan743x_phylink_mac_ops = {
+ .mac_config = lan743x_phylink_mac_config,
+ .mac_link_down = lan743x_phylink_mac_link_down,
+ .mac_link_up = lan743x_phylink_mac_link_up,
+};
+
+static int lan743x_phylink_create(struct lan743x_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct phylink *pl;
+
+ adapter->phylink_config.dev = &netdev->dev;
+ adapter->phylink_config.type = PHYLINK_NETDEV;
+ adapter->phylink_config.mac_managed_pm = false;
+
+ adapter->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+
+ lan743x_phy_interface_select(adapter);
+
+ switch (adapter->phy_interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ adapter->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ adapter->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ adapter->phylink_config.supported_interfaces);
+ adapter->phylink_config.mac_capabilities |= MAC_2500FD;
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ adapter->phylink_config.supported_interfaces);
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ adapter->phylink_config.supported_interfaces);
+ break;
+ default:
+ phy_interface_set_rgmii(adapter->phylink_config.supported_interfaces);
+ }
+
+ pl = phylink_create(&adapter->phylink_config, NULL,
+ adapter->phy_interface, &lan743x_phylink_mac_ops);
+
+ if (IS_ERR(pl)) {
+ netdev_err(netdev, "Could not create phylink (%pe)\n", pl);
+ return PTR_ERR(pl);
+ }
+
+ adapter->phylink = pl;
+ netdev_dbg(netdev, "lan743x phylink created");
+
+ return 0;
+}
+
+static bool lan743x_phy_handle_exists(struct device_node *dn)
+{
+ dn = of_parse_phandle(dn, "phy-handle", 0);
+ of_node_put(dn);
+ return dn != NULL;
+}
+
+static int lan743x_phylink_connect(struct lan743x_adapter *adapter)
+{
+ struct device_node *dn = adapter->pdev->dev.of_node;
+ struct net_device *dev = adapter->netdev;
+ struct phy_device *phydev;
+ int ret;
+
+ if (dn)
+ ret = phylink_of_phy_connect(adapter->phylink, dn, 0);
+
+ if (!dn || (ret && !lan743x_phy_handle_exists(dn))) {
+ phydev = phy_find_first(adapter->mdiobus);
+ if (phydev) {
+ /* attach the mac to the phy */
+ ret = phylink_connect_phy(adapter->phylink, phydev);
+ } else if (((adapter->csr.id_rev & ID_REV_ID_MASK_) ==
+ ID_REV_ID_LAN7431_) || adapter->is_pci11x1x) {
+ struct phylink_link_state state;
+ unsigned long caps;
+
+ caps = adapter->phylink_config.mac_capabilities;
+ if (caps & MAC_2500FD) {
+ state.speed = SPEED_2500;
+ state.duplex = DUPLEX_FULL;
+ } else if (caps & MAC_1000FD) {
+ state.speed = SPEED_1000;
+ state.duplex = DUPLEX_FULL;
+ } else {
+ state.speed = SPEED_UNKNOWN;
+ state.duplex = DUPLEX_UNKNOWN;
+ }
+
+ ret = phylink_set_fixed_link(adapter->phylink, &state);
+ if (ret) {
+ netdev_err(dev, "Could not set fixed link\n");
+ return ret;
+ }
+ } else {
+ netdev_err(dev, "no PHY found\n");
+ return -ENXIO;
+ }
+ }
+
+ if (ret) {
+ netdev_err(dev, "Could not attach PHY (%d)\n", ret);
+ return ret;
+ }
+
+ phylink_start(adapter->phylink);
+
+ return 0;
+}
+
+static void lan743x_phylink_disconnect(struct lan743x_adapter *adapter)
+{
+ phylink_stop(adapter->phylink);
+ phylink_disconnect_phy(adapter->phylink);
+}
+
static int lan743x_netdev_close(struct net_device *netdev)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
@@ -3074,7 +3213,7 @@ static int lan743x_netdev_close(struct net_device *netdev)
lan743x_ptp_close(adapter);
- lan743x_phy_close(adapter);
+ lan743x_phylink_disconnect(adapter);
lan743x_mac_close(adapter);
@@ -3097,13 +3236,13 @@ static int lan743x_netdev_open(struct net_device *netdev)
if (ret)
goto close_intr;
- ret = lan743x_phy_open(adapter);
+ ret = lan743x_phylink_connect(adapter);
if (ret)
goto close_mac;
ret = lan743x_ptp_open(adapter);
if (ret)
- goto close_phy;
+ goto close_mac;
lan743x_rfe_open(adapter);
@@ -3119,6 +3258,9 @@ static int lan743x_netdev_open(struct net_device *netdev)
goto close_tx;
}
+ if (netdev->phydev)
+ phy_support_eee(netdev->phydev);
+
#ifdef CONFIG_PM
if (adapter->netdev->phydev) {
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
@@ -3143,9 +3285,8 @@ close_rx:
lan743x_rx_close(&adapter->rx[index]);
}
lan743x_ptp_close(adapter);
-
-close_phy:
- lan743x_phy_close(adapter);
+ if (adapter->phylink)
+ lan743x_phylink_disconnect(adapter);
close_mac:
lan743x_mac_close(adapter);
@@ -3174,11 +3315,14 @@ static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
static int lan743x_netdev_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
+ struct lan743x_adapter *adapter = netdev_priv(netdev);
+
if (!netif_running(netdev))
return -EINVAL;
if (cmd == SIOCSHWTSTAMP)
return lan743x_ptp_ioctl(netdev, ifr, cmd);
- return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+
+ return phylink_mii_ioctl(adapter->phylink, ifr, cmd);
}
static void lan743x_netdev_set_multicast(struct net_device *netdev)
@@ -3283,10 +3427,17 @@ static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter)
mdiobus_unregister(adapter->mdiobus);
}
+static void lan743x_destroy_phylink(struct lan743x_adapter *adapter)
+{
+ phylink_destroy(adapter->phylink);
+ adapter->phylink = NULL;
+}
+
static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
{
unregister_netdev(adapter->netdev);
+ lan743x_destroy_phylink(adapter);
lan743x_mdiobus_cleanup(adapter);
lan743x_hardware_cleanup(adapter);
lan743x_pci_cleanup(adapter);
@@ -3500,14 +3651,21 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
adapter->netdev->hw_features = adapter->netdev->features;
- /* carrier off reporting is important to ethtool even BEFORE open */
- netif_carrier_off(netdev);
+ ret = lan743x_phylink_create(adapter);
+ if (ret < 0) {
+ netif_err(adapter, probe, netdev,
+ "failed to setup phylink (%d)\n", ret);
+ goto cleanup_mdiobus;
+ }
ret = register_netdev(adapter->netdev);
if (ret < 0)
- goto cleanup_mdiobus;
+ goto cleanup_phylink;
return 0;
+cleanup_phylink:
+ lan743x_destroy_phylink(adapter);
+
cleanup_mdiobus:
lan743x_mdiobus_cleanup(adapter);
@@ -3763,6 +3921,7 @@ static int lan743x_pm_resume(struct device *dev)
MAC_WK_SRC_WK_FR_SAVED_;
lan743x_csr_write(adapter, MAC_WK_SRC, data);
+ rtnl_lock();
/* open netdev when netdev is at running state while resume.
* For instance, it is true when system wakesup after pm-suspend
* However, it is false when system wakes up after suspend GUI menu
@@ -3771,6 +3930,7 @@ static int lan743x_pm_resume(struct device *dev)
lan743x_netdev_open(netdev);
netif_device_attach(netdev);
+ rtnl_unlock();
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 3b2585a384e2..8ef897c114d3 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -5,6 +5,7 @@
#define _LAN743X_H
#include <linux/phy.h>
+#include <linux/phylink.h>
#include "lan743x_ptp.h"
#define DRIVER_AUTHOR "Bryan Whitehead <[email protected]>"
@@ -1083,6 +1084,8 @@ struct lan743x_adapter {
u32 flags;
u32 hw_cfg;
phy_interface_t phy_interface;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
};
#define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel))
@@ -1203,5 +1206,6 @@ void lan743x_hs_syslock_release(struct lan743x_adapter *adapter);
void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
bool tx_enable, bool rx_enable);
int lan743x_sgmii_read(struct lan743x_adapter *adapter, u8 mmd, u16 addr);
+void lan743x_mac_eee_enable(struct lan743x_adapter *adapter, bool enable);
#endif /* _LAN743X_H */
diff --git a/drivers/net/ethernet/microchip/lan865x/Kconfig b/drivers/net/ethernet/microchip/lan865x/Kconfig
new file mode 100644
index 000000000000..7f2a4e7e1915
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan865x/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Microchip LAN865x Driver Support
+#
+
+if NET_VENDOR_MICROCHIP
+
+config LAN865X
+ tristate "LAN865x support"
+ depends on SPI
+ select OA_TC6
+ help
+ Support for the Microchip LAN8650/1 Rev.B0/B1 MACPHY Ethernet chip. It
+ uses the OPEN Alliance 10BASE-T1x serial interface specification.
+
+ To compile this driver as a module, choose M here. The module will be
+ called lan865x.
+
+endif # NET_VENDOR_MICROCHIP
diff --git a/drivers/net/ethernet/microchip/lan865x/Makefile b/drivers/net/ethernet/microchip/lan865x/Makefile
new file mode 100644
index 000000000000..9f5dd89c1eb8
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan865x/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Microchip LAN865x Driver
+#
+
+obj-$(CONFIG_LAN865X) += lan865x.o
diff --git a/drivers/net/ethernet/microchip/lan865x/lan865x.c b/drivers/net/ethernet/microchip/lan865x/lan865x.c
new file mode 100644
index 000000000000..dd436bdff0f8
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan865x/lan865x.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Microchip's LAN865x 10BASE-T1S MAC-PHY driver
+ *
+ * Author: Parthiban Veerasooran <[email protected]>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/oa_tc6.h>
+
+#define DRV_NAME "lan8650"
+
+/* MAC Network Control Register */
+#define LAN865X_REG_MAC_NET_CTL 0x00010000
+#define MAC_NET_CTL_TXEN BIT(3) /* Transmit Enable */
+#define MAC_NET_CTL_RXEN BIT(2) /* Receive Enable */
+
+/* MAC Network Configuration Reg */
+#define LAN865X_REG_MAC_NET_CFG 0x00010001
+#define MAC_NET_CFG_PROMISCUOUS_MODE BIT(4)
+#define MAC_NET_CFG_MULTICAST_MODE BIT(6)
+#define MAC_NET_CFG_UNICAST_MODE BIT(7)
+
+/* MAC Hash Register Bottom */
+#define LAN865X_REG_MAC_L_HASH 0x00010020
+/* MAC Hash Register Top */
+#define LAN865X_REG_MAC_H_HASH 0x00010021
+/* MAC Specific Addr 1 Bottom Reg */
+#define LAN865X_REG_MAC_L_SADDR1 0x00010022
+/* MAC Specific Addr 1 Top Reg */
+#define LAN865X_REG_MAC_H_SADDR1 0x00010023
+
+struct lan865x_priv {
+ struct work_struct multicast_work;
+ struct net_device *netdev;
+ struct spi_device *spi;
+ struct oa_tc6 *tc6;
+};
+
+static int lan865x_set_hw_macaddr_low_bytes(struct oa_tc6 *tc6, const u8 *mac)
+{
+ u32 regval;
+
+ regval = (mac[3] << 24) | (mac[2] << 16) | (mac[1] << 8) | mac[0];
+
+ return oa_tc6_write_register(tc6, LAN865X_REG_MAC_L_SADDR1, regval);
+}
+
+static int lan865x_set_hw_macaddr(struct lan865x_priv *priv, const u8 *mac)
+{
+ int restore_ret;
+ u32 regval;
+ int ret;
+
+ /* Configure MAC address low bytes */
+ ret = lan865x_set_hw_macaddr_low_bytes(priv->tc6, mac);
+ if (ret)
+ return ret;
+
+ /* Prepare and configure MAC address high bytes */
+ regval = (mac[5] << 8) | mac[4];
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_H_SADDR1,
+ regval);
+ if (!ret)
+ return 0;
+
+ /* Restore the old MAC address low bytes from netdev if the new MAC
+ * address high bytes setting failed.
+ */
+ restore_ret = lan865x_set_hw_macaddr_low_bytes(priv->tc6,
+ priv->netdev->dev_addr);
+ if (restore_ret)
+ return restore_ret;
+
+ return ret;
+}
+
+static const struct ethtool_ops lan865x_ethtool_ops = {
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static int lan865x_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct lan865x_priv *priv = netdev_priv(netdev);
+ struct sockaddr *address = addr;
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(netdev, addr);
+ if (ret < 0)
+ return ret;
+
+ if (ether_addr_equal(address->sa_data, netdev->dev_addr))
+ return 0;
+
+ ret = lan865x_set_hw_macaddr(priv, address->sa_data);
+ if (ret)
+ return ret;
+
+ eth_commit_mac_addr_change(netdev, addr);
+
+ return 0;
+}
+
+static u32 get_address_bit(u8 addr[ETH_ALEN], u32 bit)
+{
+ return ((addr[bit / 8]) >> (bit % 8)) & 1;
+}
+
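+/* Fold the destination address into a 6-bit hash index: bit i of the
+ * index is the XOR of address bits i, i + 6, ..., i + 42 (LSB-first
+ * numbering). The index then selects one bit in the MAC_L_HASH/MAC_H_HASH
+ * register pair programmed below.
+ */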
+static u32 lan865x_hash(u8 addr[ETH_ALEN])
+{
+ u32 hash_index = 0;
+
+ for (int i = 0; i < 6; i++) {
+ u32 hash = 0;
+
+ for (int j = 0; j < 8; j++)
+ hash ^= get_address_bit(addr, (j * 6) + i);
+
+ hash_index |= (hash << i);
+ }
+
+ return hash_index;
+}
+
+static int lan865x_set_specific_multicast_addr(struct lan865x_priv *priv)
+{
+ struct netdev_hw_addr *ha;
+ u32 hash_lo = 0;
+ u32 hash_hi = 0;
+ int ret;
+
+ netdev_for_each_mc_addr(ha, priv->netdev) {
+ u32 bit_num = lan865x_hash(ha->addr);
+
+ if (bit_num >= BIT(5))
+ hash_hi |= (1 << (bit_num - BIT(5)));
+ else
+ hash_lo |= (1 << bit_num);
+ }
+
+ /* Enabling specific multicast addresses */
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_H_HASH, hash_hi);
+ if (ret) {
+ netdev_err(priv->netdev, "Failed to write reg_hashh: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_L_HASH, hash_lo);
+ if (ret)
+ netdev_err(priv->netdev, "Failed to write reg_hashl: %d\n",
+ ret);
+
+ return ret;
+}
+
+static int lan865x_set_all_multicast_addr(struct lan865x_priv *priv)
+{
+ int ret;
+
+ /* Enabling all multicast addresses */
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_H_HASH,
+ 0xffffffff);
+ if (ret) {
+ netdev_err(priv->netdev, "Failed to write reg_hashh: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_L_HASH,
+ 0xffffffff);
+ if (ret)
+ netdev_err(priv->netdev, "Failed to write reg_hashl: %d\n",
+ ret);
+
+ return ret;
+}
+
+static int lan865x_clear_all_multicast_addr(struct lan865x_priv *priv)
+{
+ int ret;
+
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_H_HASH, 0);
+ if (ret) {
+ netdev_err(priv->netdev, "Failed to write reg_hashh: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_L_HASH, 0);
+ if (ret)
+ netdev_err(priv->netdev, "Failed to write reg_hashl: %d\n",
+ ret);
+
+ return ret;
+}
+
+static void lan865x_multicast_work_handler(struct work_struct *work)
+{
+ struct lan865x_priv *priv = container_of(work, struct lan865x_priv,
+ multicast_work);
+ u32 regval = 0;
+ int ret;
+
+ if (priv->netdev->flags & IFF_PROMISC) {
+ /* Enabling promiscuous mode */
+ regval |= MAC_NET_CFG_PROMISCUOUS_MODE;
+ regval &= (~MAC_NET_CFG_MULTICAST_MODE);
+ regval &= (~MAC_NET_CFG_UNICAST_MODE);
+ } else if (priv->netdev->flags & IFF_ALLMULTI) {
+ /* Enabling all multicast mode */
+ if (lan865x_set_all_multicast_addr(priv))
+ return;
+
+ regval &= (~MAC_NET_CFG_PROMISCUOUS_MODE);
+ regval |= MAC_NET_CFG_MULTICAST_MODE;
+ regval &= (~MAC_NET_CFG_UNICAST_MODE);
+ } else if (!netdev_mc_empty(priv->netdev)) {
+ /* Enabling specific multicast mode */
+ if (lan865x_set_specific_multicast_addr(priv))
+ return;
+
+ regval &= (~MAC_NET_CFG_PROMISCUOUS_MODE);
+ regval |= MAC_NET_CFG_MULTICAST_MODE;
+ regval &= (~MAC_NET_CFG_UNICAST_MODE);
+ } else {
+ /* Enabling local mac address only */
+ if (lan865x_clear_all_multicast_addr(priv))
+ return;
+ }
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_NET_CFG, regval);
+ if (ret)
+ netdev_err(priv->netdev, "Failed to enable promiscuous/multicast/normal mode: %d\n",
+ ret);
+}
+
+static void lan865x_set_multicast_list(struct net_device *netdev)
+{
+ struct lan865x_priv *priv = netdev_priv(netdev);
+
+ schedule_work(&priv->multicast_work);
+}
+
+static netdev_tx_t lan865x_send_packet(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct lan865x_priv *priv = netdev_priv(netdev);
+
+ return oa_tc6_start_xmit(priv->tc6, skb);
+}
+
+static int lan865x_hw_disable(struct lan865x_priv *priv)
+{
+ u32 regval;
+
+ if (oa_tc6_read_register(priv->tc6, LAN865X_REG_MAC_NET_CTL, &regval))
+ return -ENODEV;
+
+ regval &= ~(MAC_NET_CTL_TXEN | MAC_NET_CTL_RXEN);
+
+ if (oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_NET_CTL, regval))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int lan865x_net_close(struct net_device *netdev)
+{
+ struct lan865x_priv *priv = netdev_priv(netdev);
+ int ret;
+
+ netif_stop_queue(netdev);
+ phy_stop(netdev->phydev);
+ ret = lan865x_hw_disable(priv);
+ if (ret) {
+ netdev_err(netdev, "Failed to disable the hardware: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lan865x_hw_enable(struct lan865x_priv *priv)
+{
+ u32 regval;
+
+ if (oa_tc6_read_register(priv->tc6, LAN865X_REG_MAC_NET_CTL, &regval))
+ return -ENODEV;
+
+ regval |= MAC_NET_CTL_TXEN | MAC_NET_CTL_RXEN;
+
+ if (oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_NET_CTL, regval))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int lan865x_net_open(struct net_device *netdev)
+{
+ struct lan865x_priv *priv = netdev_priv(netdev);
+ int ret;
+
+ ret = lan865x_hw_enable(priv);
+ if (ret) {
+ netdev_err(netdev, "Failed to enable hardware: %d\n", ret);
+ return ret;
+ }
+
+ phy_start(netdev->phydev);
+
+ return 0;
+}
+
+static const struct net_device_ops lan865x_netdev_ops = {
+ .ndo_open = lan865x_net_open,
+ .ndo_stop = lan865x_net_close,
+ .ndo_start_xmit = lan865x_send_packet,
+ .ndo_set_rx_mode = lan865x_set_multicast_list,
+ .ndo_set_mac_address = lan865x_set_mac_address,
+};
+
+static int lan865x_probe(struct spi_device *spi)
+{
+ struct net_device *netdev;
+ struct lan865x_priv *priv;
+ int ret;
+
+ netdev = alloc_etherdev(sizeof(struct lan865x_priv));
+ if (!netdev)
+ return -ENOMEM;
+
+ priv = netdev_priv(netdev);
+ priv->netdev = netdev;
+ priv->spi = spi;
+ spi_set_drvdata(spi, priv);
+ INIT_WORK(&priv->multicast_work, lan865x_multicast_work_handler);
+
+ priv->tc6 = oa_tc6_init(spi, netdev);
+ if (!priv->tc6) {
+ ret = -ENODEV;
+ goto free_netdev;
+ }
+
+ /* As per point s3 in the errata below, SPI receive Ethernet frame
+ * transfer may halt when the next frame starts in the same data block
+ * (chunk) as the end of a previous frame. The RFA field should be
+ * configured to 01b or 10b for proper operation. In these modes, only
+ * one receive Ethernet frame will be placed in a single data block.
+ * When the RFA field is written to 01b, received frames are forced to
+ * start only in the first word of the data block payload (SWO=0). As
+ * recommended, enable the zero align receive frame feature for proper
+ * operation.
+ *
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/AIS/ProductDocuments/Errata/LAN8650-1-Errata-80001075.pdf
+ */
+ ret = oa_tc6_zero_align_receive_frame_enable(priv->tc6);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to set ZARFE: %d\n", ret);
+ goto oa_tc6_exit;
+ }
+
+ /* Get the MAC address from the SPI device tree node */
+ if (device_get_ethdev_address(&spi->dev, netdev))
+ eth_hw_addr_random(netdev);
+
+ ret = lan865x_set_hw_macaddr(priv, netdev->dev_addr);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to configure MAC: %d\n", ret);
+ goto oa_tc6_exit;
+ }
+
+ netdev->if_port = IF_PORT_10BASET;
+ netdev->irq = spi->irq;
+ netdev->netdev_ops = &lan865x_netdev_ops;
+ netdev->ethtool_ops = &lan865x_ethtool_ops;
+
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(&spi->dev, "Register netdev failed (ret = %d)", ret);
+ goto oa_tc6_exit;
+ }
+
+ return 0;
+
+oa_tc6_exit:
+ oa_tc6_exit(priv->tc6);
+free_netdev:
+ free_netdev(priv->netdev);
+ return ret;
+}
+
+static void lan865x_remove(struct spi_device *spi)
+{
+ struct lan865x_priv *priv = spi_get_drvdata(spi);
+
+ cancel_work_sync(&priv->multicast_work);
+ unregister_netdev(priv->netdev);
+ oa_tc6_exit(priv->tc6);
+ free_netdev(priv->netdev);
+}
+
+static const struct spi_device_id spidev_spi_ids[] = {
+ { .name = "lan8650" },
+ {},
+};
+
+static const struct of_device_id lan865x_dt_ids[] = {
+ { .compatible = "microchip,lan8650" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, lan865x_dt_ids);
+
+static struct spi_driver lan865x_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = lan865x_dt_ids,
+ },
+ .probe = lan865x_probe,
+ .remove = lan865x_remove,
+ .id_table = spidev_spi_ids,
+};
+module_spi_driver(lan865x_driver);
+
+MODULE_DESCRIPTION(DRV_NAME " 10BASE-T1S MACPHY Ethernet Driver");
+MODULE_AUTHOR("Parthiban Veerasooran <[email protected]>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 182ba0a8b095..6e0929af0f72 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -821,14 +821,13 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
snprintf(r_vec->name, sizeof(r_vec->name),
"%s-rxtx-%d", nfp_net_name(nn), idx);
- err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
- r_vec);
+ err = request_irq(r_vec->irq_vector, r_vec->handler, IRQF_NO_AUTOEN,
+ r_vec->name, r_vec);
if (err) {
nfp_net_napi_del(&nn->dp, r_vec);
nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
return err;
}
- disable_irq(r_vec->irq_vector);
irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
diff --git a/drivers/net/ethernet/oa_tc6.c b/drivers/net/ethernet/oa_tc6.c
new file mode 100644
index 000000000000..f9c0dcd965c2
--- /dev/null
+++ b/drivers/net/ethernet/oa_tc6.c
@@ -0,0 +1,1361 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * OPEN Alliance 10BASE-T1x MAC-PHY Serial Interface framework
+ *
+ * Author: Parthiban Veerasooran <[email protected]>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/oa_tc6.h>
+
+/* OPEN Alliance TC6 registers */
+/* Standard Capabilities Register */
+#define OA_TC6_REG_STDCAP 0x0002
+#define STDCAP_DIRECT_PHY_REG_ACCESS BIT(8)
+
+/* Reset Control and Status Register */
+#define OA_TC6_REG_RESET 0x0003
+#define RESET_SWRESET BIT(0) /* Software Reset */
+
+/* Configuration Register #0 */
+#define OA_TC6_REG_CONFIG0 0x0004
+#define CONFIG0_SYNC BIT(15)
+#define CONFIG0_ZARFE_ENABLE BIT(12)
+
+/* Status Register #0 */
+#define OA_TC6_REG_STATUS0 0x0008
+#define STATUS0_RESETC BIT(6) /* Reset Complete */
+#define STATUS0_HEADER_ERROR BIT(5)
+#define STATUS0_LOSS_OF_FRAME_ERROR BIT(4)
+#define STATUS0_RX_BUFFER_OVERFLOW_ERROR BIT(3)
+#define STATUS0_TX_PROTOCOL_ERROR BIT(0)
+
+/* Buffer Status Register */
+#define OA_TC6_REG_BUFFER_STATUS 0x000B
+#define BUFFER_STATUS_TX_CREDITS_AVAILABLE GENMASK(15, 8)
+#define BUFFER_STATUS_RX_CHUNKS_AVAILABLE GENMASK(7, 0)
+
+/* Interrupt Mask Register #0 */
+#define OA_TC6_REG_INT_MASK0 0x000C
+#define INT_MASK0_HEADER_ERR_MASK BIT(5)
+#define INT_MASK0_LOSS_OF_FRAME_ERR_MASK BIT(4)
+#define INT_MASK0_RX_BUFFER_OVERFLOW_ERR_MASK BIT(3)
+#define INT_MASK0_TX_PROTOCOL_ERR_MASK BIT(0)
+
+/* PHY Clause 22 registers base address and mask */
+#define OA_TC6_PHY_STD_REG_ADDR_BASE 0xFF00
+#define OA_TC6_PHY_STD_REG_ADDR_MASK 0x1F
+
+/* Control command header */
+#define OA_TC6_CTRL_HEADER_DATA_NOT_CTRL BIT(31)
+#define OA_TC6_CTRL_HEADER_WRITE_NOT_READ BIT(29)
+#define OA_TC6_CTRL_HEADER_MEM_MAP_SELECTOR GENMASK(27, 24)
+#define OA_TC6_CTRL_HEADER_ADDR GENMASK(23, 8)
+#define OA_TC6_CTRL_HEADER_LENGTH GENMASK(7, 1)
+#define OA_TC6_CTRL_HEADER_PARITY BIT(0)
+
+/* Data header */
+#define OA_TC6_DATA_HEADER_DATA_NOT_CTRL BIT(31)
+#define OA_TC6_DATA_HEADER_DATA_VALID BIT(21)
+#define OA_TC6_DATA_HEADER_START_VALID BIT(20)
+#define OA_TC6_DATA_HEADER_START_WORD_OFFSET GENMASK(19, 16)
+#define OA_TC6_DATA_HEADER_END_VALID BIT(14)
+#define OA_TC6_DATA_HEADER_END_BYTE_OFFSET GENMASK(13, 8)
+#define OA_TC6_DATA_HEADER_PARITY BIT(0)
+
+/* Data footer */
+#define OA_TC6_DATA_FOOTER_EXTENDED_STS BIT(31)
+#define OA_TC6_DATA_FOOTER_RXD_HEADER_BAD BIT(30)
+#define OA_TC6_DATA_FOOTER_CONFIG_SYNC BIT(29)
+#define OA_TC6_DATA_FOOTER_RX_CHUNKS GENMASK(28, 24)
+#define OA_TC6_DATA_FOOTER_DATA_VALID BIT(21)
+#define OA_TC6_DATA_FOOTER_START_VALID BIT(20)
+#define OA_TC6_DATA_FOOTER_START_WORD_OFFSET GENMASK(19, 16)
+#define OA_TC6_DATA_FOOTER_END_VALID BIT(14)
+#define OA_TC6_DATA_FOOTER_END_BYTE_OFFSET GENMASK(13, 8)
+#define OA_TC6_DATA_FOOTER_TX_CREDITS GENMASK(5, 1)
+
+/* PHY - Clause 45 registers memory map selector (MMS) as per table 6 in the
+ * OPEN Alliance specification.
+ */
+#define OA_TC6_PHY_C45_PCS_MMS2 2 /* MMD 3 */
+#define OA_TC6_PHY_C45_PMA_PMD_MMS3 3 /* MMD 1 */
+#define OA_TC6_PHY_C45_VS_PLCA_MMS4 4 /* MMD 31 */
+#define OA_TC6_PHY_C45_AUTO_NEG_MMS5 5 /* MMD 7 */
+#define OA_TC6_PHY_C45_POWER_UNIT_MMS6 6 /* MMD 13 */
+
+#define OA_TC6_CTRL_HEADER_SIZE 4
+#define OA_TC6_CTRL_REG_VALUE_SIZE 4
+#define OA_TC6_CTRL_IGNORED_SIZE 4
+#define OA_TC6_CTRL_MAX_REGISTERS 128
+#define OA_TC6_CTRL_SPI_BUF_SIZE (OA_TC6_CTRL_HEADER_SIZE +\
+ (OA_TC6_CTRL_MAX_REGISTERS *\
+ OA_TC6_CTRL_REG_VALUE_SIZE) +\
+ OA_TC6_CTRL_IGNORED_SIZE)
+#define OA_TC6_CHUNK_PAYLOAD_SIZE 64
+#define OA_TC6_DATA_HEADER_SIZE 4
+#define OA_TC6_CHUNK_SIZE (OA_TC6_DATA_HEADER_SIZE +\
+ OA_TC6_CHUNK_PAYLOAD_SIZE)
+#define OA_TC6_MAX_TX_CHUNKS 48
+#define OA_TC6_SPI_DATA_BUF_SIZE (OA_TC6_MAX_TX_CHUNKS *\
+ OA_TC6_CHUNK_SIZE)
+#define STATUS0_RESETC_POLL_DELAY 1000
+#define STATUS0_RESETC_POLL_TIMEOUT 1000000
+
+/* Internal structure for MAC-PHY drivers */
+struct oa_tc6 {
+ struct device *dev;
+ struct net_device *netdev;
+ struct phy_device *phydev;
+ struct mii_bus *mdiobus;
+ struct spi_device *spi;
+ struct mutex spi_ctrl_lock; /* Protects spi control transfer */
+ void *spi_ctrl_tx_buf;
+ void *spi_ctrl_rx_buf;
+ void *spi_data_tx_buf;
+ void *spi_data_rx_buf;
+ struct sk_buff *ongoing_tx_skb;
+ struct sk_buff *waiting_tx_skb;
+ struct sk_buff *rx_skb;
+ struct task_struct *spi_thread;
+ wait_queue_head_t spi_wq;
+ u16 tx_skb_offset;
+ u16 spi_data_tx_buf_offset;
+ u16 tx_credits;
+ u8 rx_chunks_available;
+ bool rx_buf_overflow;
+ bool int_flag;
+};
+
+enum oa_tc6_header_type {
+ OA_TC6_CTRL_HEADER,
+ OA_TC6_DATA_HEADER,
+};
+
+enum oa_tc6_register_op {
+ OA_TC6_CTRL_REG_READ = 0,
+ OA_TC6_CTRL_REG_WRITE = 1,
+};
+
+enum oa_tc6_data_valid_info {
+ OA_TC6_DATA_INVALID,
+ OA_TC6_DATA_VALID,
+};
+
+enum oa_tc6_data_start_valid_info {
+ OA_TC6_DATA_START_INVALID,
+ OA_TC6_DATA_START_VALID,
+};
+
+enum oa_tc6_data_end_valid_info {
+ OA_TC6_DATA_END_INVALID,
+ OA_TC6_DATA_END_VALID,
+};
+
+static int oa_tc6_spi_transfer(struct oa_tc6 *tc6,
+ enum oa_tc6_header_type header_type, u16 length)
+{
+ struct spi_transfer xfer = { 0 };
+ struct spi_message msg;
+
+ if (header_type == OA_TC6_DATA_HEADER) {
+ xfer.tx_buf = tc6->spi_data_tx_buf;
+ xfer.rx_buf = tc6->spi_data_rx_buf;
+ } else {
+ xfer.tx_buf = tc6->spi_ctrl_tx_buf;
+ xfer.rx_buf = tc6->spi_ctrl_rx_buf;
+ }
+ xfer.len = length;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ return spi_sync(tc6->spi, &msg);
+}
+
+static int oa_tc6_get_parity(u32 p)
+{
+ /* Public domain code snippet, lifted from
+ * http://www-graphics.stanford.edu/~seander/bithacks.html
+ */
+ p ^= p >> 1;
+ p ^= p >> 2;
+ p = (p & 0x11111111U) * 0x11111111U;
+
+ /* Odd parity is used here */
+ return !((p >> 28) & 1);
+}
+
+static __be32 oa_tc6_prepare_ctrl_header(u32 addr, u8 length,
+ enum oa_tc6_register_op reg_op)
+{
+ u32 header;
+
+ header = FIELD_PREP(OA_TC6_CTRL_HEADER_DATA_NOT_CTRL,
+ OA_TC6_CTRL_HEADER) |
+ FIELD_PREP(OA_TC6_CTRL_HEADER_WRITE_NOT_READ, reg_op) |
+ FIELD_PREP(OA_TC6_CTRL_HEADER_MEM_MAP_SELECTOR, addr >> 16) |
+ FIELD_PREP(OA_TC6_CTRL_HEADER_ADDR, addr) |
+ FIELD_PREP(OA_TC6_CTRL_HEADER_LENGTH, length - 1);
+ header |= FIELD_PREP(OA_TC6_CTRL_HEADER_PARITY,
+ oa_tc6_get_parity(header));
+
+ return cpu_to_be32(header);
+}
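A standalone user-space sketch (not part of the driver, macro names local to this sketch) that builds the same control header as oa_tc6_prepare_ctrl_header() for a hypothetical single-register write to MMS 1, register 0x0022, and checks that the parity bit leaves the 32-bit word with an odd number of ones:

#include <stdint.h>
#include <stdio.h>

#define WNR		(1u << 29)		/* write, not read */
#define MMS(x)		((uint32_t)(x) << 24)	/* memory map selector */
#define ADDR(x)		(((uint32_t)(x) & 0xffff) << 8)
#define LEN(n)		((uint32_t)((n) - 1) << 1)

int main(void)
{
	uint32_t addr = 0x00010022;	/* hypothetical: MMS 1, register 0x0022 */
	uint32_t hdr;

	hdr = WNR | MMS(addr >> 16) | ADDR(addr) | LEN(1);
	/* Odd parity over the whole word, as oa_tc6_get_parity() guarantees */
	hdr |= !(__builtin_popcount(hdr) & 1);
	printf("header 0x%08x, ones %d\n", (unsigned int)hdr,
	       __builtin_popcount(hdr));
	return 0;
}

With these values the header works out to 0x21002201, which carries five set bits, so the overall parity is odd as required by the control header format.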
+
+static void oa_tc6_update_ctrl_write_data(struct oa_tc6 *tc6, u32 value[],
+ u8 length)
+{
+ __be32 *tx_buf = tc6->spi_ctrl_tx_buf + OA_TC6_CTRL_HEADER_SIZE;
+
+ for (int i = 0; i < length; i++)
+ *tx_buf++ = cpu_to_be32(value[i]);
+}
+
+static u16 oa_tc6_calculate_ctrl_buf_size(u8 length)
+{
+ /* A control command consists of a 4-byte header, a 4-byte register value
+ * for each register and a trailing 4-byte ignored field.
+ */
+ return OA_TC6_CTRL_HEADER_SIZE + OA_TC6_CTRL_REG_VALUE_SIZE * length +
+ OA_TC6_CTRL_IGNORED_SIZE;
+}
+
+static void oa_tc6_prepare_ctrl_spi_buf(struct oa_tc6 *tc6, u32 address,
+ u32 value[], u8 length,
+ enum oa_tc6_register_op reg_op)
+{
+ __be32 *tx_buf = tc6->spi_ctrl_tx_buf;
+
+ *tx_buf = oa_tc6_prepare_ctrl_header(address, length, reg_op);
+
+ if (reg_op == OA_TC6_CTRL_REG_WRITE)
+ oa_tc6_update_ctrl_write_data(tc6, value, length);
+}
+
+static int oa_tc6_check_ctrl_write_reply(struct oa_tc6 *tc6, u8 size)
+{
+ u8 *tx_buf = tc6->spi_ctrl_tx_buf;
+ u8 *rx_buf = tc6->spi_ctrl_rx_buf;
+
+ rx_buf += OA_TC6_CTRL_IGNORED_SIZE;
+
+ /* The echoed control write must match the one that was
+ * transmitted.
+ */
+ if (memcmp(tx_buf, rx_buf, size - OA_TC6_CTRL_IGNORED_SIZE))
+ return -EPROTO;
+
+ return 0;
+}
+
+static int oa_tc6_check_ctrl_read_reply(struct oa_tc6 *tc6, u8 size)
+{
+ u32 *rx_buf = tc6->spi_ctrl_rx_buf + OA_TC6_CTRL_IGNORED_SIZE;
+ u32 *tx_buf = tc6->spi_ctrl_tx_buf;
+
+ /* The echoed control read header must match the one that was
+ * transmitted.
+ */
+ if (*tx_buf != *rx_buf)
+ return -EPROTO;
+
+ return 0;
+}
+
+static void oa_tc6_copy_ctrl_read_data(struct oa_tc6 *tc6, u32 value[],
+ u8 length)
+{
+ __be32 *rx_buf = tc6->spi_ctrl_rx_buf + OA_TC6_CTRL_IGNORED_SIZE +
+ OA_TC6_CTRL_HEADER_SIZE;
+
+ for (int i = 0; i < length; i++)
+ value[i] = be32_to_cpu(*rx_buf++);
+}
+
+static int oa_tc6_perform_ctrl(struct oa_tc6 *tc6, u32 address, u32 value[],
+ u8 length, enum oa_tc6_register_op reg_op)
+{
+ u16 size;
+ int ret;
+
+ /* Prepare control command and copy to SPI control buffer */
+ oa_tc6_prepare_ctrl_spi_buf(tc6, address, value, length, reg_op);
+
+ size = oa_tc6_calculate_ctrl_buf_size(length);
+
+ /* Perform SPI transfer */
+ ret = oa_tc6_spi_transfer(tc6, OA_TC6_CTRL_HEADER, size);
+ if (ret) {
+ dev_err(&tc6->spi->dev, "SPI transfer failed for control: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Check echoed/received control write command reply for errors */
+ if (reg_op == OA_TC6_CTRL_REG_WRITE)
+ return oa_tc6_check_ctrl_write_reply(tc6, size);
+
+ /* Check echoed/received control read command reply for errors */
+ ret = oa_tc6_check_ctrl_read_reply(tc6, size);
+ if (ret)
+ return ret;
+
+ oa_tc6_copy_ctrl_read_data(tc6, value, length);
+
+ return 0;
+}
+
+/**
+ * oa_tc6_read_registers - function for reading multiple consecutive registers.
+ * @tc6: oa_tc6 struct.
+ * @address: address of the first register to be read in the MAC-PHY.
+ * @value: values to be read from the starting register address @address.
+ * @length: number of consecutive registers to be read from @address.
+ *
+ * Maximum of 128 consecutive registers can be read starting at @address.
+ *
+ * Return: 0 on success, otherwise a negative error code.
+ */
+int oa_tc6_read_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
+ u8 length)
+{
+ int ret;
+
+ if (!length || length > OA_TC6_CTRL_MAX_REGISTERS) {
+ dev_err(&tc6->spi->dev, "Invalid register length parameter\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&tc6->spi_ctrl_lock);
+ ret = oa_tc6_perform_ctrl(tc6, address, value, length,
+ OA_TC6_CTRL_REG_READ);
+ mutex_unlock(&tc6->spi_ctrl_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(oa_tc6_read_registers);
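As a hypothetical usage sketch (not part of this patch), a MAC driver holding an oa_tc6 handle could read a burst of consecutive registers in one control transaction; the base address below is purely illustrative, not a documented LAN865x register.

static int example_read_block(struct oa_tc6 *tc6, u32 vals[4])
{
	/* One control transaction covering four consecutive registers */
	return oa_tc6_read_registers(tc6, 0x00010040, vals, 4);
}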
+
+/**
+ * oa_tc6_read_register - function for reading a MAC-PHY register.
+ * @tc6: oa_tc6 struct.
+ * @address: register address of the MAC-PHY to be read.
+ * @value: value read from the @address register address of the MAC-PHY.
+ *
+ * Return: 0 on success, otherwise a negative error code.
+ */
+int oa_tc6_read_register(struct oa_tc6 *tc6, u32 address, u32 *value)
+{
+ return oa_tc6_read_registers(tc6, address, value, 1);
+}
+EXPORT_SYMBOL_GPL(oa_tc6_read_register);
+
+/**
+ * oa_tc6_write_registers - function for writing multiple consecutive registers.
+ * @tc6: oa_tc6 struct.
+ * @address: address of the first register to be written in the MAC-PHY.
+ * @value: values to be written from the starting register address @address.
+ * @length: number of consecutive registers to be written from @address.
+ *
+ * Maximum of 128 consecutive registers can be written starting at @address.
+ *
+ * Return: 0 on success, otherwise a negative error code.
+ */
+int oa_tc6_write_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
+ u8 length)
+{
+ int ret;
+
+ if (!length || length > OA_TC6_CTRL_MAX_REGISTERS) {
+ dev_err(&tc6->spi->dev, "Invalid register length parameter\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&tc6->spi_ctrl_lock);
+ ret = oa_tc6_perform_ctrl(tc6, address, value, length,
+ OA_TC6_CTRL_REG_WRITE);
+ mutex_unlock(&tc6->spi_ctrl_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(oa_tc6_write_registers);
+
+/**
+ * oa_tc6_write_register - function for writing a MAC-PHY register.
+ * @tc6: oa_tc6 struct.
+ * @address: register address of the MAC-PHY to be written.
+ * @value: value to be written in the @address register address of the MAC-PHY.
+ *
+ * Return: 0 on success, otherwise a negative error code.
+ */
+int oa_tc6_write_register(struct oa_tc6 *tc6, u32 address, u32 value)
+{
+ return oa_tc6_write_registers(tc6, address, &value, 1);
+}
+EXPORT_SYMBOL_GPL(oa_tc6_write_register);
+
+static int oa_tc6_check_phy_reg_direct_access_capability(struct oa_tc6 *tc6)
+{
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_STDCAP, &regval);
+ if (ret)
+ return ret;
+
+ if (!(regval & STDCAP_DIRECT_PHY_REG_ACCESS))
+ return -ENODEV;
+
+ return 0;
+}
+
+static void oa_tc6_handle_link_change(struct net_device *netdev)
+{
+ phy_print_status(netdev->phydev);
+}
+
+static int oa_tc6_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct oa_tc6 *tc6 = bus->priv;
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_PHY_STD_REG_ADDR_BASE |
+ (regnum & OA_TC6_PHY_STD_REG_ADDR_MASK),
+ &regval);
+ if (ret)
+ return ret;
+
+ return regval;
+}
+
+static int oa_tc6_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct oa_tc6 *tc6 = bus->priv;
+
+ return oa_tc6_write_register(tc6, OA_TC6_PHY_STD_REG_ADDR_BASE |
+ (regnum & OA_TC6_PHY_STD_REG_ADDR_MASK),
+ val);
+}
+
+static int oa_tc6_get_phy_c45_mms(int devnum)
+{
+ switch (devnum) {
+ case MDIO_MMD_PCS:
+ return OA_TC6_PHY_C45_PCS_MMS2;
+ case MDIO_MMD_PMAPMD:
+ return OA_TC6_PHY_C45_PMA_PMD_MMS3;
+ case MDIO_MMD_VEND2:
+ return OA_TC6_PHY_C45_VS_PLCA_MMS4;
+ case MDIO_MMD_AN:
+ return OA_TC6_PHY_C45_AUTO_NEG_MMS5;
+ case MDIO_MMD_POWER_UNIT:
+ return OA_TC6_PHY_C45_POWER_UNIT_MMS6;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int oa_tc6_mdiobus_read_c45(struct mii_bus *bus, int addr, int devnum,
+ int regnum)
+{
+ struct oa_tc6 *tc6 = bus->priv;
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_get_phy_c45_mms(devnum);
+ if (ret < 0)
+ return ret;
+
+ ret = oa_tc6_read_register(tc6, (ret << 16) | regnum, &regval);
+ if (ret)
+ return ret;
+
+ return regval;
+}
+
+static int oa_tc6_mdiobus_write_c45(struct mii_bus *bus, int addr, int devnum,
+ int regnum, u16 val)
+{
+ struct oa_tc6 *tc6 = bus->priv;
+ int ret;
+
+ ret = oa_tc6_get_phy_c45_mms(devnum);
+ if (ret < 0)
+ return ret;
+
+ return oa_tc6_write_register(tc6, (ret << 16) | regnum, val);
+}
+
+static int oa_tc6_mdiobus_register(struct oa_tc6 *tc6)
+{
+ int ret;
+
+ tc6->mdiobus = mdiobus_alloc();
+ if (!tc6->mdiobus) {
+ netdev_err(tc6->netdev, "MDIO bus alloc failed\n");
+ return -ENOMEM;
+ }
+
+ tc6->mdiobus->priv = tc6;
+ tc6->mdiobus->read = oa_tc6_mdiobus_read;
+ tc6->mdiobus->write = oa_tc6_mdiobus_write;
+ /* OPEN Alliance 10BASE-T1x compliant MAC-PHYs provide both the C22 and
+ * C45 register spaces. If the PHY is discovered via the C22 bus protocol,
+ * it is assumed to use C22 and the C45 registers are always reached
+ * through C22 indirect access. This is because there is no clean
+ * separation between the C22/C45 register spaces and the C22/C45 MDIO bus
+ * protocols, so direct C45 register access, which would save multiple SPI
+ * bus accesses, cannot be used by default. To support it, PHY drivers can
+ * set .read_mmd/.write_mmd to call .read_c45/.write_c45 (a hypothetical
+ * sketch follows this function). Ex: drivers/net/phy/microchip_t1s.c
+ */
+ tc6->mdiobus->read_c45 = oa_tc6_mdiobus_read_c45;
+ tc6->mdiobus->write_c45 = oa_tc6_mdiobus_write_c45;
+ tc6->mdiobus->name = "oa-tc6-mdiobus";
+ tc6->mdiobus->parent = tc6->dev;
+
+ snprintf(tc6->mdiobus->id, ARRAY_SIZE(tc6->mdiobus->id), "%s",
+ dev_name(&tc6->spi->dev));
+
+ ret = mdiobus_register(tc6->mdiobus);
+ if (ret) {
+ netdev_err(tc6->netdev, "Could not register MDIO bus\n");
+ mdiobus_free(tc6->mdiobus);
+ return ret;
+ }
+
+ return 0;
+}
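As mentioned in the comment above, a PHY driver can route its MMD accesses through the MDIO bus C45 ops so that the MAC-PHY's direct C45 register access is used instead of C22 indirect access. A minimal hypothetical sketch (not taken from microchip_t1s.c) could look like this:

#include <linux/mdio.h>
#include <linux/phy.h>

static int example_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
{
	/* Forward to the bus .read_c45 op registered by this framework */
	return mdiobus_c45_read(phydev->mdio.bus, phydev->mdio.addr,
				devnum, regnum);
}

static int example_write_mmd(struct phy_device *phydev, int devnum,
			     u16 regnum, u16 val)
{
	return mdiobus_c45_write(phydev->mdio.bus, phydev->mdio.addr,
				 devnum, regnum, val);
}

The PHY driver would then set .read_mmd = example_read_mmd and .write_mmd = example_write_mmd in its struct phy_driver.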
+
+static void oa_tc6_mdiobus_unregister(struct oa_tc6 *tc6)
+{
+ mdiobus_unregister(tc6->mdiobus);
+ mdiobus_free(tc6->mdiobus);
+}
+
+static int oa_tc6_phy_init(struct oa_tc6 *tc6)
+{
+ int ret;
+
+ ret = oa_tc6_check_phy_reg_direct_access_capability(tc6);
+ if (ret) {
+ netdev_err(tc6->netdev,
+ "Direct PHY register access is not supported by the MAC-PHY\n");
+ return ret;
+ }
+
+ ret = oa_tc6_mdiobus_register(tc6);
+ if (ret)
+ return ret;
+
+ tc6->phydev = phy_find_first(tc6->mdiobus);
+ if (!tc6->phydev) {
+ netdev_err(tc6->netdev, "No PHY found\n");
+ oa_tc6_mdiobus_unregister(tc6);
+ return -ENODEV;
+ }
+
+ tc6->phydev->is_internal = true;
+ ret = phy_connect_direct(tc6->netdev, tc6->phydev,
+ &oa_tc6_handle_link_change,
+ PHY_INTERFACE_MODE_INTERNAL);
+ if (ret) {
+ netdev_err(tc6->netdev, "Can't attach PHY to %s\n",
+ tc6->mdiobus->id);
+ oa_tc6_mdiobus_unregister(tc6);
+ return ret;
+ }
+
+ phy_attached_info(tc6->netdev->phydev);
+
+ return 0;
+}
+
+static void oa_tc6_phy_exit(struct oa_tc6 *tc6)
+{
+ phy_disconnect(tc6->phydev);
+ oa_tc6_mdiobus_unregister(tc6);
+}
+
+static int oa_tc6_read_status0(struct oa_tc6 *tc6)
+{
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_STATUS0, &regval);
+ if (ret) {
+ dev_err(&tc6->spi->dev, "STATUS0 register read failed: %d\n",
+ ret);
+ return 0;
+ }
+
+ return regval;
+}
+
+static int oa_tc6_sw_reset_macphy(struct oa_tc6 *tc6)
+{
+ u32 regval = RESET_SWRESET;
+ int ret;
+
+ ret = oa_tc6_write_register(tc6, OA_TC6_REG_RESET, regval);
+ if (ret)
+ return ret;
+
+ /* Poll every 1ms for the soft reset to complete, up to a 1s timeout */
+ ret = readx_poll_timeout(oa_tc6_read_status0, tc6, regval,
+ regval & STATUS0_RESETC,
+ STATUS0_RESETC_POLL_DELAY,
+ STATUS0_RESETC_POLL_TIMEOUT);
+ if (ret)
+ return -ENODEV;
+
+ /* Clear the reset complete status */
+ return oa_tc6_write_register(tc6, OA_TC6_REG_STATUS0, regval);
+}
+
+static int oa_tc6_unmask_macphy_error_interrupts(struct oa_tc6 *tc6)
+{
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_INT_MASK0, &regval);
+ if (ret)
+ return ret;
+
+ regval &= ~(INT_MASK0_TX_PROTOCOL_ERR_MASK |
+ INT_MASK0_RX_BUFFER_OVERFLOW_ERR_MASK |
+ INT_MASK0_LOSS_OF_FRAME_ERR_MASK |
+ INT_MASK0_HEADER_ERR_MASK);
+
+ return oa_tc6_write_register(tc6, OA_TC6_REG_INT_MASK0, regval);
+}
+
+static int oa_tc6_enable_data_transfer(struct oa_tc6 *tc6)
+{
+ u32 value;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_CONFIG0, &value);
+ if (ret)
+ return ret;
+
+ /* Enable configuration synchronization for data transfer */
+ value |= CONFIG0_SYNC;
+
+ return oa_tc6_write_register(tc6, OA_TC6_REG_CONFIG0, value);
+}
+
+static void oa_tc6_cleanup_ongoing_rx_skb(struct oa_tc6 *tc6)
+{
+ if (tc6->rx_skb) {
+ tc6->netdev->stats.rx_dropped++;
+ kfree_skb(tc6->rx_skb);
+ tc6->rx_skb = NULL;
+ }
+}
+
+static void oa_tc6_cleanup_ongoing_tx_skb(struct oa_tc6 *tc6)
+{
+ if (tc6->ongoing_tx_skb) {
+ tc6->netdev->stats.tx_dropped++;
+ kfree_skb(tc6->ongoing_tx_skb);
+ tc6->ongoing_tx_skb = NULL;
+ }
+}
+
+static int oa_tc6_process_extended_status(struct oa_tc6 *tc6)
+{
+ u32 value;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_STATUS0, &value);
+ if (ret) {
+ netdev_err(tc6->netdev, "STATUS0 register read failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Clear the error interrupts status */
+ ret = oa_tc6_write_register(tc6, OA_TC6_REG_STATUS0, value);
+ if (ret) {
+ netdev_err(tc6->netdev, "STATUS0 register write failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (FIELD_GET(STATUS0_RX_BUFFER_OVERFLOW_ERROR, value)) {
+ tc6->rx_buf_overflow = true;
+ oa_tc6_cleanup_ongoing_rx_skb(tc6);
+ net_err_ratelimited("%s: Receive buffer overflow error\n",
+ tc6->netdev->name);
+ return -EAGAIN;
+ }
+ if (FIELD_GET(STATUS0_TX_PROTOCOL_ERROR, value)) {
+ netdev_err(tc6->netdev, "Transmit protocol error\n");
+ return -ENODEV;
+ }
+ /* TODO: Currently loss of frame and header errors are treated as
+ * non-recoverable errors. They will be handled in the next version.
+ */
+ if (FIELD_GET(STATUS0_LOSS_OF_FRAME_ERROR, value)) {
+ netdev_err(tc6->netdev, "Loss of frame error\n");
+ return -ENODEV;
+ }
+ if (FIELD_GET(STATUS0_HEADER_ERROR, value)) {
+ netdev_err(tc6->netdev, "Header error\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int oa_tc6_process_rx_chunk_footer(struct oa_tc6 *tc6, u32 footer)
+{
+ /* Process rx chunk footer for the following:
+ * 1. tx credits
+ * 2. errors if any from MAC-PHY
+ * 3. receive chunks available
+ */
+ tc6->tx_credits = FIELD_GET(OA_TC6_DATA_FOOTER_TX_CREDITS, footer);
+ tc6->rx_chunks_available = FIELD_GET(OA_TC6_DATA_FOOTER_RX_CHUNKS,
+ footer);
+
+ if (FIELD_GET(OA_TC6_DATA_FOOTER_EXTENDED_STS, footer)) {
+ int ret = oa_tc6_process_extended_status(tc6);
+
+ if (ret)
+ return ret;
+ }
+
+ /* TODO: Currently received header bad and configuration unsync errors
+ * are treated as non-recoverable errors. They will be handled in the
+ * next version.
+ */
+ if (FIELD_GET(OA_TC6_DATA_FOOTER_RXD_HEADER_BAD, footer)) {
+ netdev_err(tc6->netdev, "Rxd header bad error\n");
+ return -ENODEV;
+ }
+
+ if (!FIELD_GET(OA_TC6_DATA_FOOTER_CONFIG_SYNC, footer)) {
+ netdev_err(tc6->netdev, "Config unsync error\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void oa_tc6_submit_rx_skb(struct oa_tc6 *tc6)
+{
+ tc6->rx_skb->protocol = eth_type_trans(tc6->rx_skb, tc6->netdev);
+ tc6->netdev->stats.rx_packets++;
+ tc6->netdev->stats.rx_bytes += tc6->rx_skb->len;
+
+ netif_rx(tc6->rx_skb);
+
+ tc6->rx_skb = NULL;
+}
+
+static void oa_tc6_update_rx_skb(struct oa_tc6 *tc6, u8 *payload, u8 length)
+{
+ memcpy(skb_put(tc6->rx_skb, length), payload, length);
+}
+
+static int oa_tc6_allocate_rx_skb(struct oa_tc6 *tc6)
+{
+ tc6->rx_skb = netdev_alloc_skb_ip_align(tc6->netdev, tc6->netdev->mtu +
+ ETH_HLEN + ETH_FCS_LEN);
+ if (!tc6->rx_skb) {
+ tc6->netdev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int oa_tc6_prcs_complete_rx_frame(struct oa_tc6 *tc6, u8 *payload,
+ u16 size)
+{
+ int ret;
+
+ ret = oa_tc6_allocate_rx_skb(tc6);
+ if (ret)
+ return ret;
+
+ oa_tc6_update_rx_skb(tc6, payload, size);
+
+ oa_tc6_submit_rx_skb(tc6);
+
+ return 0;
+}
+
+static int oa_tc6_prcs_rx_frame_start(struct oa_tc6 *tc6, u8 *payload, u16 size)
+{
+ int ret;
+
+ ret = oa_tc6_allocate_rx_skb(tc6);
+ if (ret)
+ return ret;
+
+ oa_tc6_update_rx_skb(tc6, payload, size);
+
+ return 0;
+}
+
+static void oa_tc6_prcs_rx_frame_end(struct oa_tc6 *tc6, u8 *payload, u16 size)
+{
+ oa_tc6_update_rx_skb(tc6, payload, size);
+
+ oa_tc6_submit_rx_skb(tc6);
+}
+
+static void oa_tc6_prcs_ongoing_rx_frame(struct oa_tc6 *tc6, u8 *payload,
+ u32 footer)
+{
+ oa_tc6_update_rx_skb(tc6, payload, OA_TC6_CHUNK_PAYLOAD_SIZE);
+}
+
+static int oa_tc6_prcs_rx_chunk_payload(struct oa_tc6 *tc6, u8 *data,
+ u32 footer)
+{
+ u8 start_byte_offset = FIELD_GET(OA_TC6_DATA_FOOTER_START_WORD_OFFSET,
+ footer) * sizeof(u32);
+ u8 end_byte_offset = FIELD_GET(OA_TC6_DATA_FOOTER_END_BYTE_OFFSET,
+ footer);
+ bool start_valid = FIELD_GET(OA_TC6_DATA_FOOTER_START_VALID, footer);
+ bool end_valid = FIELD_GET(OA_TC6_DATA_FOOTER_END_VALID, footer);
+ u16 size;
+
+ /* Restart a new rx frame after an rx buffer overflow error is received */
+ if (start_valid && tc6->rx_buf_overflow)
+ tc6->rx_buf_overflow = false;
+
+ if (tc6->rx_buf_overflow)
+ return 0;
+
+ /* Process the chunk with complete rx frame */
+ if (start_valid && end_valid && start_byte_offset < end_byte_offset) {
+ size = end_byte_offset + 1 - start_byte_offset;
+ return oa_tc6_prcs_complete_rx_frame(tc6,
+ &data[start_byte_offset],
+ size);
+ }
+
+ /* Process the chunk with only rx frame start */
+ if (start_valid && !end_valid) {
+ size = OA_TC6_CHUNK_PAYLOAD_SIZE - start_byte_offset;
+ return oa_tc6_prcs_rx_frame_start(tc6,
+ &data[start_byte_offset],
+ size);
+ }
+
+ /* Process the chunk with only rx frame end */
+ if (end_valid && !start_valid) {
+ size = end_byte_offset + 1;
+ oa_tc6_prcs_rx_frame_end(tc6, data, size);
+ return 0;
+ }
+
+ /* Process the chunk with previous rx frame end and next rx frame
+ * start.
+ */
+ if (start_valid && end_valid && start_byte_offset > end_byte_offset) {
+ /* After an rx buffer overflow error is received, the end valid of
+ * a previously incomplete rx frame may arrive along with the start
+ * valid of the new rx frame.
+ */
+ if (tc6->rx_skb) {
+ size = end_byte_offset + 1;
+ oa_tc6_prcs_rx_frame_end(tc6, data, size);
+ }
+ size = OA_TC6_CHUNK_PAYLOAD_SIZE - start_byte_offset;
+ return oa_tc6_prcs_rx_frame_start(tc6,
+ &data[start_byte_offset],
+ size);
+ }
+
+ /* Process the chunk with ongoing rx frame data */
+ oa_tc6_prcs_ongoing_rx_frame(tc6, data, footer);
+
+ return 0;
+}
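A standalone user-space sketch (the footer value is illustrative, not captured from hardware) showing how the start/end valid flags and offsets in a footer word locate a complete frame, mirroring the first case handled above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical footer: SYNC, DV, SV, EV set; SWO = 0, EBO = 59, TXC = 5 */
	uint32_t footer = 0x20307b0au;
	unsigned int start_valid = (footer >> 20) & 0x1;
	unsigned int end_valid = (footer >> 14) & 0x1;
	unsigned int swo = (footer >> 16) & 0xf;	/* start word offset */
	unsigned int ebo = (footer >> 8) & 0x3f;	/* end byte offset */

	if (start_valid && end_valid && swo * 4 < ebo)
		printf("complete frame: %u bytes at payload offset %u\n",
		       ebo + 1 - swo * 4, swo * 4);
	return 0;
}

This prints "complete frame: 60 bytes at payload offset 0", i.e. a 60-byte frame occupying the first 60 bytes of the 64-byte chunk payload.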
+
+static u32 oa_tc6_get_rx_chunk_footer(struct oa_tc6 *tc6, u16 footer_offset)
+{
+ u8 *rx_buf = tc6->spi_data_rx_buf;
+ __be32 footer;
+
+ footer = *((__be32 *)&rx_buf[footer_offset]);
+
+ return be32_to_cpu(footer);
+}
+
+static int oa_tc6_process_spi_data_rx_buf(struct oa_tc6 *tc6, u16 length)
+{
+ u16 no_of_rx_chunks = length / OA_TC6_CHUNK_SIZE;
+ u32 footer;
+ int ret;
+
+ /* All the rx chunks in the receive SPI data buffer are examined here */
+ for (int i = 0; i < no_of_rx_chunks; i++) {
+ /* The last 4 bytes of each received chunk contain the footer info */
+ footer = oa_tc6_get_rx_chunk_footer(tc6, i * OA_TC6_CHUNK_SIZE +
+ OA_TC6_CHUNK_PAYLOAD_SIZE);
+
+ ret = oa_tc6_process_rx_chunk_footer(tc6, footer);
+ if (ret)
+ return ret;
+
+ /* If the chunk contains valid data, process it to extract the
+ * information needed to determine the validity and the location
+ * of the receive frame data.
+ */
+ if (FIELD_GET(OA_TC6_DATA_FOOTER_DATA_VALID, footer)) {
+ u8 *payload = tc6->spi_data_rx_buf + i *
+ OA_TC6_CHUNK_SIZE;
+
+ ret = oa_tc6_prcs_rx_chunk_payload(tc6, payload,
+ footer);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static __be32 oa_tc6_prepare_data_header(bool data_valid, bool start_valid,
+ bool end_valid, u8 end_byte_offset)
+{
+ u32 header = FIELD_PREP(OA_TC6_DATA_HEADER_DATA_NOT_CTRL,
+ OA_TC6_DATA_HEADER) |
+ FIELD_PREP(OA_TC6_DATA_HEADER_DATA_VALID, data_valid) |
+ FIELD_PREP(OA_TC6_DATA_HEADER_START_VALID, start_valid) |
+ FIELD_PREP(OA_TC6_DATA_HEADER_END_VALID, end_valid) |
+ FIELD_PREP(OA_TC6_DATA_HEADER_END_BYTE_OFFSET,
+ end_byte_offset);
+
+ header |= FIELD_PREP(OA_TC6_DATA_HEADER_PARITY,
+ oa_tc6_get_parity(header));
+
+ return cpu_to_be32(header);
+}
+
+static void oa_tc6_add_tx_skb_to_spi_buf(struct oa_tc6 *tc6)
+{
+ enum oa_tc6_data_end_valid_info end_valid = OA_TC6_DATA_END_INVALID;
+ __be32 *tx_buf = tc6->spi_data_tx_buf + tc6->spi_data_tx_buf_offset;
+ u16 remaining_len = tc6->ongoing_tx_skb->len - tc6->tx_skb_offset;
+ u8 *tx_skb_data = tc6->ongoing_tx_skb->data + tc6->tx_skb_offset;
+ enum oa_tc6_data_start_valid_info start_valid;
+ u8 end_byte_offset = 0;
+ u16 length_to_copy;
+
+ /* The initial value is assigned here to keep the declaration line
+ * within 80 characters.
+ */
+ start_valid = OA_TC6_DATA_START_INVALID;
+
+ /* Set start valid if the current tx chunk contains the start of the tx
+ * ethernet frame.
+ */
+ if (!tc6->tx_skb_offset)
+ start_valid = OA_TC6_DATA_START_VALID;
+
+ /* If the remaining tx skb length is more than the chunk payload size of
+ * 64 bytes, copy only 64 bytes and leave the rest of the ongoing tx skb
+ * for the next tx chunk.
+ */
+ length_to_copy = min_t(u16, remaining_len, OA_TC6_CHUNK_PAYLOAD_SIZE);
+
+ /* Copy the tx skb data to the tx chunk payload buffer */
+ memcpy(tx_buf + 1, tx_skb_data, length_to_copy);
+ tc6->tx_skb_offset += length_to_copy;
+
+ /* Set end valid if the current tx chunk contains the end of the tx
+ * ethernet frame.
+ */
+ if (tc6->ongoing_tx_skb->len == tc6->tx_skb_offset) {
+ end_valid = OA_TC6_DATA_END_VALID;
+ end_byte_offset = length_to_copy - 1;
+ tc6->tx_skb_offset = 0;
+ tc6->netdev->stats.tx_bytes += tc6->ongoing_tx_skb->len;
+ tc6->netdev->stats.tx_packets++;
+ kfree_skb(tc6->ongoing_tx_skb);
+ tc6->ongoing_tx_skb = NULL;
+ }
+
+ *tx_buf = oa_tc6_prepare_data_header(OA_TC6_DATA_VALID, start_valid,
+ end_valid, end_byte_offset);
+ tc6->spi_data_tx_buf_offset += OA_TC6_CHUNK_SIZE;
+}
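A small worked example (standalone user-space C, frame length is a hypothetical value) of how the function above slices a 150-byte frame into 64-byte chunk payloads across successive calls: three chunks are needed and the last one carries 22 bytes, so its end byte offset is 21.

#include <stdio.h>

#define CHUNK_PAYLOAD	64

int main(void)
{
	unsigned int frame_len = 150;	/* hypothetical frame length */
	unsigned int offset = 0, chunks = 0, copy = 0;

	while (offset < frame_len) {
		copy = frame_len - offset;
		if (copy > CHUNK_PAYLOAD)
			copy = CHUNK_PAYLOAD;
		offset += copy;
		chunks++;
	}
	printf("chunks %u, last end byte offset %u\n", chunks, copy - 1);
	return 0;
}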
+
+static u16 oa_tc6_prepare_spi_tx_buf_for_tx_skbs(struct oa_tc6 *tc6)
+{
+ u16 used_tx_credits;
+
+ /* Get tx skbs and convert them into tx chunks based on the tx credits
+ * available.
+ */
+ for (used_tx_credits = 0; used_tx_credits < tc6->tx_credits;
+ used_tx_credits++) {
+ if (!tc6->ongoing_tx_skb) {
+ tc6->ongoing_tx_skb = tc6->waiting_tx_skb;
+ tc6->waiting_tx_skb = NULL;
+ }
+ if (!tc6->ongoing_tx_skb)
+ break;
+ oa_tc6_add_tx_skb_to_spi_buf(tc6);
+ }
+
+ return used_tx_credits * OA_TC6_CHUNK_SIZE;
+}
+
+static void oa_tc6_add_empty_chunks_to_spi_buf(struct oa_tc6 *tc6,
+ u16 needed_empty_chunks)
+{
+ __be32 header;
+
+ header = oa_tc6_prepare_data_header(OA_TC6_DATA_INVALID,
+ OA_TC6_DATA_START_INVALID,
+ OA_TC6_DATA_END_INVALID, 0);
+
+ while (needed_empty_chunks--) {
+ __be32 *tx_buf = tc6->spi_data_tx_buf +
+ tc6->spi_data_tx_buf_offset;
+
+ *tx_buf = header;
+ tc6->spi_data_tx_buf_offset += OA_TC6_CHUNK_SIZE;
+ }
+}
+
+static u16 oa_tc6_prepare_spi_tx_buf_for_rx_chunks(struct oa_tc6 *tc6, u16 len)
+{
+ u16 tx_chunks = len / OA_TC6_CHUNK_SIZE;
+ u16 needed_empty_chunks;
+
+ /* If there are more chunks to receive than to transmit, we need to add
+ * enough empty tx chunks to allow the reception of the excess rx
+ * chunks.
+ */
+ if (tx_chunks >= tc6->rx_chunks_available)
+ return len;
+
+ needed_empty_chunks = tc6->rx_chunks_available - tx_chunks;
+
+ oa_tc6_add_empty_chunks_to_spi_buf(tc6, needed_empty_chunks);
+
+ return needed_empty_chunks * OA_TC6_CHUNK_SIZE + len;
+}
+
+static int oa_tc6_try_spi_transfer(struct oa_tc6 *tc6)
+{
+ int ret;
+
+ while (true) {
+ u16 spi_len = 0;
+
+ tc6->spi_data_tx_buf_offset = 0;
+
+ if (tc6->ongoing_tx_skb || tc6->waiting_tx_skb)
+ spi_len = oa_tc6_prepare_spi_tx_buf_for_tx_skbs(tc6);
+
+ spi_len = oa_tc6_prepare_spi_tx_buf_for_rx_chunks(tc6, spi_len);
+
+ if (tc6->int_flag) {
+ tc6->int_flag = false;
+ if (spi_len == 0) {
+ oa_tc6_add_empty_chunks_to_spi_buf(tc6, 1);
+ spi_len = OA_TC6_CHUNK_SIZE;
+ }
+ }
+
+ if (spi_len == 0)
+ break;
+
+ ret = oa_tc6_spi_transfer(tc6, OA_TC6_DATA_HEADER, spi_len);
+ if (ret) {
+ netdev_err(tc6->netdev, "SPI data transfer failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = oa_tc6_process_spi_data_rx_buf(tc6, spi_len);
+ if (ret) {
+ if (ret == -EAGAIN)
+ continue;
+
+ oa_tc6_cleanup_ongoing_tx_skb(tc6);
+ oa_tc6_cleanup_ongoing_rx_skb(tc6);
+ netdev_err(tc6->netdev, "Device error: %d\n", ret);
+ return ret;
+ }
+
+ if (!tc6->waiting_tx_skb && netif_queue_stopped(tc6->netdev))
+ netif_wake_queue(tc6->netdev);
+ }
+
+ return 0;
+}
+
+static int oa_tc6_spi_thread_handler(void *data)
+{
+ struct oa_tc6 *tc6 = data;
+ int ret;
+
+ while (likely(!kthread_should_stop())) {
+ /* This kthread is woken up when there is a tx skb or a MAC-PHY
+ * interrupt, to perform the SPI transfer of tx chunks.
+ */
+ wait_event_interruptible(tc6->spi_wq, tc6->waiting_tx_skb ||
+ tc6->int_flag ||
+ kthread_should_stop());
+
+ if (kthread_should_stop())
+ break;
+
+ ret = oa_tc6_try_spi_transfer(tc6);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int oa_tc6_update_buffer_status_from_register(struct oa_tc6 *tc6)
+{
+ u32 value;
+ int ret;
+
+ /* Initially the tx credits and available rx chunks are read from the
+ * register, as no data transfer has been performed yet. Later they are
+ * updated from the rx footer.
+ */
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_BUFFER_STATUS, &value);
+ if (ret)
+ return ret;
+
+ tc6->tx_credits = FIELD_GET(BUFFER_STATUS_TX_CREDITS_AVAILABLE, value);
+ tc6->rx_chunks_available = FIELD_GET(BUFFER_STATUS_RX_CHUNKS_AVAILABLE,
+ value);
+
+ return 0;
+}
+
+static irqreturn_t oa_tc6_macphy_isr(int irq, void *data)
+{
+ struct oa_tc6 *tc6 = data;
+
+ /* The MAC-PHY interrupt can occur for the following reasons:
+ * - availability of tx credits if it was 0 before and not reported in
+ * the previous rx footer.
+ * - availability of rx chunks if it was 0 before and not reported in
+ * the previous rx footer.
+ * - extended status event not reported in the previous rx footer.
+ */
+ tc6->int_flag = true;
+ /* Wake spi kthread to perform spi transfer */
+ wake_up_interruptible(&tc6->spi_wq);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * oa_tc6_zero_align_receive_frame_enable - function to enable zero align
+ * receive frame feature.
+ * @tc6: oa_tc6 struct.
+ *
+ * Return: 0 on success, otherwise a negative error code.
+ */
+int oa_tc6_zero_align_receive_frame_enable(struct oa_tc6 *tc6)
+{
+ u32 regval;
+ int ret;
+
+ ret = oa_tc6_read_register(tc6, OA_TC6_REG_CONFIG0, &regval);
+ if (ret)
+ return ret;
+
+ /* Set Zero-Align Receive Frame Enable */
+ regval |= CONFIG0_ZARFE_ENABLE;
+
+ return oa_tc6_write_register(tc6, OA_TC6_REG_CONFIG0, regval);
+}
+EXPORT_SYMBOL_GPL(oa_tc6_zero_align_receive_frame_enable);
+
+/**
+ * oa_tc6_start_xmit - function for sending a tx skb containing an Ethernet
+ * frame.
+ * @tc6: oa_tc6 struct.
+ * @skb: socket buffer in which the ethernet frame is stored.
+ *
+ * Return: NETDEV_TX_OK if the Ethernet frame skb is accepted for transmission,
+ * otherwise NETDEV_TX_BUSY.
+ */
+netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, struct sk_buff *skb)
+{
+ if (tc6->waiting_tx_skb) {
+ netif_stop_queue(tc6->netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ tc6->netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ tc6->waiting_tx_skb = skb;
+
+ /* Wake spi kthread to perform spi transfer */
+ wake_up_interruptible(&tc6->spi_wq);
+
+ return NETDEV_TX_OK;
+}
+EXPORT_SYMBOL_GPL(oa_tc6_start_xmit);
+
+/**
+ * oa_tc6_init - allocates and initializes oa_tc6 structure.
+ * @spi: device with which data will be exchanged.
+ * @netdev: network device interface structure.
+ *
+ * Return: pointer to the oa_tc6 structure if the MAC-PHY initialization is
+ * successful, otherwise NULL.
+ */
+struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev)
+{
+ struct oa_tc6 *tc6;
+ int ret;
+
+ tc6 = devm_kzalloc(&spi->dev, sizeof(*tc6), GFP_KERNEL);
+ if (!tc6)
+ return NULL;
+
+ tc6->spi = spi;
+ tc6->netdev = netdev;
+ SET_NETDEV_DEV(netdev, &spi->dev);
+ mutex_init(&tc6->spi_ctrl_lock);
+
+ /* Set the SPI controller to pump at realtime priority */
+ tc6->spi->rt = true;
+ spi_setup(tc6->spi);
+
+ tc6->spi_ctrl_tx_buf = devm_kzalloc(&tc6->spi->dev,
+ OA_TC6_CTRL_SPI_BUF_SIZE,
+ GFP_KERNEL);
+ if (!tc6->spi_ctrl_tx_buf)
+ return NULL;
+
+ tc6->spi_ctrl_rx_buf = devm_kzalloc(&tc6->spi->dev,
+ OA_TC6_CTRL_SPI_BUF_SIZE,
+ GFP_KERNEL);
+ if (!tc6->spi_ctrl_rx_buf)
+ return NULL;
+
+ tc6->spi_data_tx_buf = devm_kzalloc(&tc6->spi->dev,
+ OA_TC6_SPI_DATA_BUF_SIZE,
+ GFP_KERNEL);
+ if (!tc6->spi_data_tx_buf)
+ return NULL;
+
+ tc6->spi_data_rx_buf = devm_kzalloc(&tc6->spi->dev,
+ OA_TC6_SPI_DATA_BUF_SIZE,
+ GFP_KERNEL);
+ if (!tc6->spi_data_rx_buf)
+ return NULL;
+
+ ret = oa_tc6_sw_reset_macphy(tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev,
+ "MAC-PHY software reset failed: %d\n", ret);
+ return NULL;
+ }
+
+ ret = oa_tc6_unmask_macphy_error_interrupts(tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev,
+ "MAC-PHY error interrupts unmask failed: %d\n", ret);
+ return NULL;
+ }
+
+ ret = oa_tc6_phy_init(tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev,
+ "MAC internal PHY initialization failed: %d\n", ret);
+ return NULL;
+ }
+
+ ret = oa_tc6_enable_data_transfer(tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev, "Failed to enable data transfer: %d\n",
+ ret);
+ goto phy_exit;
+ }
+
+ ret = oa_tc6_update_buffer_status_from_register(tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev,
+ "Failed to update buffer status: %d\n", ret);
+ goto phy_exit;
+ }
+
+ init_waitqueue_head(&tc6->spi_wq);
+
+ tc6->spi_thread = kthread_run(oa_tc6_spi_thread_handler, tc6,
+ "oa-tc6-spi-thread");
+ if (IS_ERR(tc6->spi_thread)) {
+ dev_err(&tc6->spi->dev, "Failed to create SPI thread\n");
+ goto phy_exit;
+ }
+
+ sched_set_fifo(tc6->spi_thread);
+
+ ret = devm_request_irq(&tc6->spi->dev, tc6->spi->irq, oa_tc6_macphy_isr,
+ IRQF_TRIGGER_FALLING, dev_name(&tc6->spi->dev),
+ tc6);
+ if (ret) {
+ dev_err(&tc6->spi->dev, "Failed to request macphy isr %d\n",
+ ret);
+ goto kthread_stop;
+ }
+
+ /* oa_tc6_sw_reset_macphy() resets the MAC-PHY and clears the reset
+ * complete status. The IRQ is also asserted on reset completion and
+ * remains asserted until the MAC-PHY receives a data chunk, so performing
+ * an empty data chunk transmission deasserts the IRQ. Refer to sections
+ * 7.7 and 9.2.8.8 in the OPEN Alliance specification for more details.
+ */
+ tc6->int_flag = true;
+ wake_up_interruptible(&tc6->spi_wq);
+
+ return tc6;
+
+kthread_stop:
+ kthread_stop(tc6->spi_thread);
+phy_exit:
+ oa_tc6_phy_exit(tc6);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(oa_tc6_init);
+
+/**
+ * oa_tc6_exit - exit function.
+ * @tc6: oa_tc6 struct.
+ */
+void oa_tc6_exit(struct oa_tc6 *tc6)
+{
+ oa_tc6_phy_exit(tc6);
+ kthread_stop(tc6->spi_thread);
+ dev_kfree_skb_any(tc6->ongoing_tx_skb);
+ dev_kfree_skb_any(tc6->waiting_tx_skb);
+ dev_kfree_skb_any(tc6->rx_skb);
+}
+EXPORT_SYMBOL_GPL(oa_tc6_exit);
+
+MODULE_DESCRIPTION("OPEN Alliance 10BASE‑T1x MAC‑PHY Serial Interface Lib");
+MODULE_AUTHOR("Parthiban Veerasooran <[email protected]>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index bcef8ab715bf..d7cdea8f604d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2042,12 +2042,14 @@ int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *adapter, int mode)
int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable)
{
- int err;
- u32 word;
struct qlcnic_cmd_args cmd;
- const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
- 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
- 0x255b0ec26d5a56daULL };
+ static const u64 key[] = {
+ 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL
+ };
+ u32 word;
+ int err;
err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS);
if (err)
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 3cb1c4f5c91a..45ac8befba29 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -578,7 +578,7 @@ struct rtl8169_counters {
__le64 rx_broadcast;
__le32 rx_multicast;
__le16 tx_aborted;
- __le16 tx_underun;
+ __le16 tx_underrun;
};
struct rtl8169_tc_offsets {
@@ -1843,7 +1843,7 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev,
data[9] = le64_to_cpu(counters->rx_broadcast);
data[10] = le32_to_cpu(counters->rx_multicast);
data[11] = le16_to_cpu(counters->tx_aborted);
- data[12] = le16_to_cpu(counters->tx_underun);
+ data[12] = le16_to_cpu(counters->tx_underrun);
}
static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
index 7882f2c0e1a4..869183e1565e 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -98,7 +98,7 @@ struct rtase_counters {
__le64 rx_broadcast;
__le32 rx_multicast;
__le16 tx_aborted;
- __le16 tx_underun;
+ __le16 tx_underrun;
} __packed;
static void rtase_w8(const struct rtase_private *tp, u16 reg, u8 val8)
@@ -1619,8 +1619,8 @@ static void rtase_dump_state(const struct net_device *dev)
le32_to_cpu(counters->rx_multicast));
netdev_err(dev, "tx_aborted %d\n",
le16_to_cpu(counters->tx_aborted));
- netdev_err(dev, "tx_underun %d\n",
- le16_to_cpu(counters->tx_underun));
+ netdev_err(dev, "tx_underrun %d\n",
+ le16_to_cpu(counters->tx_underrun));
}
static void rtase_tx_timeout(struct net_device *dev, unsigned int txqueue)
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 7d69302ffa0a..de131fc5fa0b 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4302,3 +4302,130 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.sensor_event = efx_mcdi_sensor_event,
.rx_recycle_ring_size = efx_ef10_recycle_ring_size,
};
+
+const struct efx_nic_type efx_x4_nic_type = {
+ .is_vf = false,
+ .mem_bar = efx_ef10_pf_mem_bar,
+ .mem_map_size = efx_ef10_mem_map_size,
+ .probe = efx_ef10_probe_pf,
+ .remove = efx_ef10_remove,
+ .dimension_resources = efx_ef10_dimension_resources,
+ .init = efx_ef10_init_nic,
+ .fini = efx_ef10_fini_nic,
+ .map_reset_reason = efx_ef10_map_reset_reason,
+ .map_reset_flags = efx_ef10_map_reset_flags,
+ .reset = efx_ef10_reset,
+ .probe_port = efx_mcdi_port_probe,
+ .remove_port = efx_mcdi_port_remove,
+ .fini_dmaq = efx_fini_dmaq,
+ .prepare_flr = efx_ef10_prepare_flr,
+ .finish_flr = efx_port_dummy_op_void,
+ .describe_stats = efx_ef10_describe_stats,
+ .update_stats = efx_ef10_update_stats_pf,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+ .push_irq_moderation = efx_ef10_push_irq_moderation,
+ .reconfigure_mac = efx_ef10_mac_reconfigure,
+ .check_mac_fault = efx_mcdi_mac_check_fault,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
+ .get_wol = efx_ef10_get_wol,
+ .set_wol = efx_ef10_set_wol,
+ .resume_wol = efx_port_dummy_op_void,
+ .get_fec_stats = efx_ef10_get_fec_stats,
+ .test_chip = efx_ef10_test_chip,
+ .test_nvram = efx_mcdi_nvram_test_all,
+ .mcdi_request = efx_ef10_mcdi_request,
+ .mcdi_poll_response = efx_ef10_mcdi_poll_response,
+ .mcdi_read_response = efx_ef10_mcdi_read_response,
+ .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+ .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef10_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .irq_handle_msi = efx_ef10_msi_interrupt,
+ .tx_probe = efx_ef10_tx_probe,
+ .tx_init = efx_ef10_tx_init,
+ .tx_write = efx_ef10_tx_write,
+ .tx_limit_len = efx_ef10_tx_limit_len,
+ .tx_enqueue = __efx_enqueue_skb,
+ .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
+ .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
+ .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
+ .rx_write = efx_ef10_rx_write,
+ .rx_defer_refill = efx_ef10_rx_defer_refill,
+ .rx_packet = __efx_rx_packet,
+ .ev_probe = efx_mcdi_ev_probe,
+ .ev_init = efx_ef10_ev_init,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
+ .ev_process = efx_ef10_ev_process,
+ .ev_read_ack = efx_ef10_ev_read_ack,
+ .ev_test_generate = efx_ef10_ev_test_generate,
+ .filter_table_probe = efx_ef10_filter_table_probe,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = efx_ef10_filter_table_remove,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = efx_ef10_mtd_probe,
+ .mtd_rename = efx_mcdi_mtd_rename,
+ .mtd_read = efx_mcdi_mtd_read,
+ .mtd_erase = efx_mcdi_mtd_erase,
+ .mtd_write = efx_mcdi_mtd_write,
+ .mtd_sync = efx_mcdi_mtd_sync,
+#endif
+ .ptp_write_host_time = efx_ef10_ptp_write_host_time,
+ .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
+ .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
+ .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
+ .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
+ .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports,
+ .udp_tnl_has_port = efx_ef10_udp_tnl_has_port,
+#ifdef CONFIG_SFC_SRIOV
+ /* currently set to the VF versions of these functions
+ * because SRIOV will be reimplemented later.
+ */
+ .vswitching_probe = efx_ef10_vswitching_probe_vf,
+ .vswitching_restore = efx_ef10_vswitching_restore_vf,
+ .vswitching_remove = efx_ef10_vswitching_remove_vf,
+#endif
+ .get_mac_address = efx_ef10_get_mac_address_pf,
+ .set_mac_address = efx_ef10_set_mac_address,
+ .tso_versions = efx_ef10_tso_versions,
+
+ .get_phys_port_id = efx_ef10_get_phys_port_id,
+ .revision = EFX_REV_X4,
+ .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+ .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
+ .can_rx_scatter = true,
+ .always_rx_scatter = true,
+ .option_descriptors = true,
+ .min_interrupt_mode = EFX_INT_MODE_MSIX,
+ .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
+ .offload_features = EF10_OFFLOAD_FEATURES,
+ .mcdi_max_ver = 2,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
+ .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_ALL,
+ .check_caps = ef10_check_caps,
+ .print_additional_fwver = efx_ef10_print_additional_fwver,
+ .sensor_event = efx_mcdi_sensor_event,
+ .rx_recycle_ring_size = efx_ef10_recycle_ring_size,
+};
+
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 6f1a01ded7d4..36b3b57e2055 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -821,6 +821,10 @@ static const struct pci_device_id efx_pci_table[] = {
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03), /* SFC9250 VF */
.driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0c03), /* X4 PF (FF/LL) */
+ .driver_data = (unsigned long)&efx_x4_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x2c03), /* X4 PF (FF only) */
+ .driver_data = (unsigned long)&efx_x4_nic_type},
{0} /* end of list */
};
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 1db64fc6e909..9fa5c4c713ab 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -211,4 +211,6 @@ int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
extern const struct efx_nic_type efx_hunt_a0_nic_type;
extern const struct efx_nic_type efx_hunt_a0_vf_nic_type;
+extern const struct efx_nic_type efx_x4_nic_type;
+
#endif /* EFX_NIC_H */
diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h
index 466df5348b29..7ec4ac7b7ff5 100644
--- a/drivers/net/ethernet/sfc/nic_common.h
+++ b/drivers/net/ethernet/sfc/nic_common.h
@@ -21,6 +21,7 @@ enum {
*/
EFX_REV_HUNT_A0 = 4,
EFX_REV_EF100 = 5,
+ EFX_REV_X4 = 6,
};
static inline int efx_nic_rev(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 31c387cc5f26..a1858f083eef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -58,10 +58,6 @@ static void dwmac4_core_init(struct mac_device_info *hw,
if (hw->pcs)
value |= GMAC_PCS_IRQ_DEFAULT;
- /* Enable FPE interrupt */
- if ((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26)
- value |= GMAC_INT_FPE_EN;
-
writel(value, ioaddr + GMAC_INT_EN);
if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
@@ -1268,6 +1264,9 @@ const struct stmmac_ops dwmac410_ops = {
.fpe_configure = dwmac5_fpe_configure,
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
.fpe_irq_status = dwmac5_fpe_irq_status,
+ .fpe_get_add_frag_size = dwmac5_fpe_get_add_frag_size,
+ .fpe_set_add_frag_size = dwmac5_fpe_set_add_frag_size,
+ .fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
@@ -1320,6 +1319,9 @@ const struct stmmac_ops dwmac510_ops = {
.fpe_configure = dwmac5_fpe_configure,
.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
.fpe_irq_status = dwmac5_fpe_irq_status,
+ .fpe_get_add_frag_size = dwmac5_fpe_get_add_frag_size,
+ .fpe_set_add_frag_size = dwmac5_fpe_set_add_frag_size,
+ .fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index e02cebc3f1b7..08add508db84 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -575,11 +575,11 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq, u32 num_rxq,
- bool enable)
+ bool tx_enable, bool pmac_enable)
{
u32 value;
- if (enable) {
+ if (tx_enable) {
cfg->fpe_csr = EFPE;
value = readl(ioaddr + GMAC_RXQ_CTRL1);
value &= ~GMAC_RXQCTRL_FPRQ;
@@ -589,6 +589,21 @@ void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
cfg->fpe_csr = 0;
}
writel(cfg->fpe_csr, ioaddr + MAC_FPE_CTRL_STS);
+
+ value = readl(ioaddr + GMAC_INT_EN);
+
+ if (pmac_enable) {
+ if (!(value & GMAC_INT_FPE_EN)) {
+ /* Dummy read to clear any pending masked interrupts */
+ readl(ioaddr + MAC_FPE_CTRL_STS);
+
+ value |= GMAC_INT_FPE_EN;
+ }
+ } else {
+ value &= ~GMAC_INT_FPE_EN;
+ }
+
+ writel(value, ioaddr + GMAC_INT_EN);
}
int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
@@ -605,22 +620,22 @@ int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
if (value & TRSP) {
status |= FPE_EVENT_TRSP;
- netdev_info(dev, "FPE: Respond mPacket is transmitted\n");
+ netdev_dbg(dev, "FPE: Respond mPacket is transmitted\n");
}
if (value & TVER) {
status |= FPE_EVENT_TVER;
- netdev_info(dev, "FPE: Verify mPacket is transmitted\n");
+ netdev_dbg(dev, "FPE: Verify mPacket is transmitted\n");
}
if (value & RRSP) {
status |= FPE_EVENT_RRSP;
- netdev_info(dev, "FPE: Respond mPacket is received\n");
+ netdev_dbg(dev, "FPE: Respond mPacket is received\n");
}
if (value & RVER) {
status |= FPE_EVENT_RVER;
- netdev_info(dev, "FPE: Verify mPacket is received\n");
+ netdev_dbg(dev, "FPE: Verify mPacket is received\n");
}
return status;
@@ -638,3 +653,72 @@ void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
writel(value, ioaddr + MAC_FPE_CTRL_STS);
}
+
+int dwmac5_fpe_get_add_frag_size(const void __iomem *ioaddr)
+{
+ return FIELD_GET(DWMAC5_ADD_FRAG_SZ, readl(ioaddr + MTL_FPE_CTRL_STS));
+}
+
+void dwmac5_fpe_set_add_frag_size(void __iomem *ioaddr, u32 add_frag_size)
+{
+ u32 value;
+
+ value = readl(ioaddr + MTL_FPE_CTRL_STS);
+ writel(u32_replace_bits(value, add_frag_size, DWMAC5_ADD_FRAG_SZ),
+ ioaddr + MTL_FPE_CTRL_STS);
+}
+
+#define ALG_ERR_MSG "TX algorithm SP is not suitable for one-to-many mapping"
+#define WEIGHT_ERR_MSG "TXQ weight %u differs across other TXQs in TC: [%u]"
+
+int dwmac5_fpe_map_preemption_class(struct net_device *ndev,
+ struct netlink_ext_ack *extack, u32 pclass)
+{
+ u32 val, offset, count, queue_weight, preemptible_txqs = 0;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ u32 num_tc = ndev->num_tc;
+
+ if (!pclass)
+ goto update_mapping;
+
+ /* DWMAC CORE4+ can not program TC:TXQ mapping to hardware.
+ *
+ * Synopsys Databook:
+ * "The number of Tx DMA channels is equal to the number of Tx queues,
+ * and is direct one-to-one mapping."
+ */
+ for (u32 tc = 0; tc < num_tc; tc++) {
+ count = ndev->tc_to_txq[tc].count;
+ offset = ndev->tc_to_txq[tc].offset;
+
+ if (pclass & BIT(tc))
+ preemptible_txqs |= GENMASK(offset + count - 1, offset);
+
+ /* This is 1:1 mapping, go to next TC */
+ if (count == 1)
+ continue;
+
+ if (priv->plat->tx_sched_algorithm == MTL_TX_ALGORITHM_SP) {
+ NL_SET_ERR_MSG_MOD(extack, ALG_ERR_MSG);
+ return -EINVAL;
+ }
+
+ queue_weight = priv->plat->tx_queues_cfg[offset].weight;
+
+ for (u32 i = 1; i < count; i++) {
+ if (priv->plat->tx_queues_cfg[offset + i].weight !=
+ queue_weight) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, WEIGHT_ERR_MSG,
+ queue_weight, tc);
+ return -EINVAL;
+ }
+ }
+ }
+
+update_mapping:
+ val = readl(priv->ioaddr + MTL_FPE_CTRL_STS);
+ writel(u32_replace_bits(val, preemptible_txqs, DWMAC5_PREEMPTION_CLASS),
+ priv->ioaddr + MTL_FPE_CTRL_STS);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index bf33a51d229e..6c6eb6790e83 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -39,6 +39,12 @@
#define MAC_PPSx_INTERVAL(x) (0x00000b88 + ((x) * 0x10))
#define MAC_PPSx_WIDTH(x) (0x00000b8c + ((x) * 0x10))
+#define MTL_FPE_CTRL_STS 0x00000c90
+/* Preemption Classification */
+#define DWMAC5_PREEMPTION_CLASS GENMASK(15, 8)
+/* Additional Fragment Size of preempted frames */
+#define DWMAC5_ADD_FRAG_SZ GENMASK(1, 0)
+
#define MTL_RXP_CONTROL_STATUS 0x00000ca0
#define RXPI BIT(31)
#define NPE GENMASK(23, 16)
@@ -104,10 +110,14 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
u32 sub_second_inc, u32 systime_flags);
void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq, u32 num_rxq,
- bool enable);
+ bool tx_enable, bool pmac_enable);
void dwmac5_fpe_send_mpacket(void __iomem *ioaddr,
struct stmmac_fpe_cfg *cfg,
enum stmmac_mpacket_type type);
int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev);
+int dwmac5_fpe_get_add_frag_size(const void __iomem *ioaddr);
+void dwmac5_fpe_set_add_frag_size(void __iomem *ioaddr, u32 add_frag_size);
+int dwmac5_fpe_map_preemption_class(struct net_device *ndev,
+ struct netlink_ext_ack *extack, u32 pclass);
#endif /* __DWMAC5_H__ */
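The two MTL_FPE_CTRL_STS fields added above are accessed with FIELD_GET()/u32_replace_bits() style read-modify-write sequences. A small userspace model of that pattern, with the mask values copied from the defines; replace_bits() is a stand-in, not the kernel helper.

#include <assert.h>
#include <stdint.h>

#define ADD_FRAG_SZ_MASK   0x00000003u /* bits 1:0  */
#define PREEMPT_CLASS_MASK 0x0000ff00u /* bits 15:8 */

static uint32_t replace_bits(uint32_t old, uint32_t val, uint32_t mask)
{
	unsigned int shift = __builtin_ctz(mask);

	/* clear the field, then insert the new value */
	return (old & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0;

	reg = replace_bits(reg, 2, ADD_FRAG_SZ_MASK);	    /* addFragSize = 2 */
	reg = replace_bits(reg, 0x0c, PREEMPT_CLASS_MASK);  /* TXQ2/3 preemptible */
	assert(reg == 0x00000c02);
	return 0;
}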
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index cbf2dd976ab1..f519d43738b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -1504,13 +1504,14 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
writel(value, ioaddr + XGMAC_RX_CONFIG);
}
-static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
- u32 num_txq,
- u32 num_rxq, bool enable)
+static void dwxgmac3_fpe_configure(void __iomem *ioaddr,
+ struct stmmac_fpe_cfg *cfg,
+ u32 num_txq, u32 num_rxq,
+ bool tx_enable, bool pmac_enable)
{
u32 value;
- if (!enable) {
+ if (!tx_enable) {
value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
value &= ~XGMAC_EFPE;
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 29367105df54..88cce28b2f98 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -171,7 +171,7 @@ static const struct stmmac_hwif_entry {
.mac = &dwmac4_ops,
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
- .tc = &dwmac510_tc_ops,
+ .tc = &dwmac4_tc_ops,
.mmc = &dwmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwmac4_setup,
@@ -252,7 +252,7 @@ static const struct stmmac_hwif_entry {
.mac = &dwxgmac210_ops,
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
- .tc = &dwmac510_tc_ops,
+ .tc = &dwxgmac_tc_ops,
.mmc = &dwxgmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwxgmac2_setup,
@@ -273,7 +273,7 @@ static const struct stmmac_hwif_entry {
.mac = &dwxlgmac2_ops,
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
- .tc = &dwmac510_tc_ops,
+ .tc = &dwxgmac_tc_ops,
.mmc = &dwxgmac_mmc_ops,
.est = &dwmac510_est_ops,
.setup = dwxlgmac2_setup,
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 7e90f34b8c88..d5a9f01ecac5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -7,6 +7,7 @@
#include <linux/netdevice.h>
#include <linux/stmmac.h>
+#include <net/pkt_cls.h>
#define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \
({ \
@@ -28,6 +29,8 @@
struct stmmac_extra_stats;
struct stmmac_priv;
struct stmmac_safety_stats;
+struct stmmac_fpe_cfg;
+enum stmmac_mpacket_type;
struct dma_desc;
struct dma_extended_desc;
struct dma_edesc;
@@ -419,11 +422,16 @@ struct stmmac_ops {
void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr);
void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
u32 num_txq, u32 num_rxq,
- bool enable);
+ bool tx_enable, bool pmac_enable);
void (*fpe_send_mpacket)(void __iomem *ioaddr,
struct stmmac_fpe_cfg *cfg,
enum stmmac_mpacket_type type);
int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev);
+ int (*fpe_get_add_frag_size)(const void __iomem *ioaddr);
+ void (*fpe_set_add_frag_size)(void __iomem *ioaddr, u32 add_frag_size);
+ int (*fpe_map_preemption_class)(struct net_device *ndev,
+ struct netlink_ext_ack *extack,
+ u32 pclass);
};
#define stmmac_core_init(__priv, __args...) \
@@ -528,6 +536,12 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, fpe_send_mpacket, __args)
#define stmmac_fpe_irq_status(__priv, __args...) \
stmmac_do_callback(__priv, mac, fpe_irq_status, __args)
+#define stmmac_fpe_get_add_frag_size(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, fpe_get_add_frag_size, __args)
+#define stmmac_fpe_set_add_frag_size(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, fpe_set_add_frag_size, __args)
+#define stmmac_fpe_map_preemption_class(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, fpe_map_preemption_class, __args)
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
@@ -615,6 +629,8 @@ struct stmmac_tc_ops {
struct tc_etf_qopt_offload *qopt);
int (*query_caps)(struct stmmac_priv *priv,
struct tc_query_caps_base *base);
+ int (*setup_mqprio)(struct stmmac_priv *priv,
+ struct tc_mqprio_qopt_offload *qopt);
};
#define stmmac_tc_init(__priv, __args...) \
@@ -631,6 +647,8 @@ struct stmmac_tc_ops {
stmmac_do_callback(__priv, tc, setup_etf, __args)
#define stmmac_tc_query_caps(__priv, __args...) \
stmmac_do_callback(__priv, tc, query_caps, __args)
+#define stmmac_tc_setup_mqprio(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_mqprio, __args)
struct stmmac_counters;
@@ -674,7 +692,9 @@ extern const struct stmmac_dma_ops dwmac4_dma_ops;
extern const struct stmmac_ops dwmac410_ops;
extern const struct stmmac_dma_ops dwmac410_dma_ops;
extern const struct stmmac_ops dwmac510_ops;
+extern const struct stmmac_tc_ops dwmac4_tc_ops;
extern const struct stmmac_tc_ops dwmac510_tc_ops;
+extern const struct stmmac_tc_ops dwxgmac_tc_ops;
extern const struct stmmac_ops dwxgmac210_ops;
extern const struct stmmac_ops dwxlgmac2_ops;
extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
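The new fpe_get_add_frag_size/fpe_set_add_frag_size/fpe_map_preemption_class hooks are reached through the same do-callback dispatch as the other ops: call the hook if the MAC provides it, otherwise fall back to an error. A rough userspace stand-in for that pattern; struct ops and do_callback() are illustrative, not the driver's macros.

#include <errno.h>
#include <stdio.h>

struct ops {
	int (*fpe_get_add_frag_size)(const void *ioaddr);
};

#define do_callback(ops, cname, ...) \
	((ops)->cname ? (ops)->cname(__VA_ARGS__) : -EINVAL)

static int fake_get_add_frag_size(const void *ioaddr)
{
	(void)ioaddr;
	return 2;	/* pretend the register reports addFragSize = 2 */
}

int main(void)
{
	struct ops with = { .fpe_get_add_frag_size = fake_get_add_frag_size };
	struct ops without = { 0 };

	printf("%d %d\n",
	       do_callback(&with, fpe_get_add_frag_size, NULL),	/* 2 */
	       do_callback(&without, fpe_get_add_frag_size, NULL));	/* -EINVAL */
	return 0;
}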
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index b23b920eedb1..ea135203ff2e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -146,6 +146,32 @@ struct stmmac_channel {
u32 index;
};
+/* FPE link-partner hand-shaking mPacket type */
+enum stmmac_mpacket_type {
+ MPACKET_VERIFY = 0,
+ MPACKET_RESPONSE = 1,
+};
+
+#define STMMAC_FPE_MM_MAX_VERIFY_RETRIES 3
+#define STMMAC_FPE_MM_MAX_VERIFY_TIME_MS 128
+
+struct stmmac_fpe_cfg {
+ /* Serialize access to MAC Merge state between ethtool requests
+ * and link state updates.
+ */
+ spinlock_t lock;
+
+ u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
+
+ enum ethtool_mm_verify_status status;
+ struct timer_list verify_timer;
+ bool verify_enabled;
+ int verify_retries;
+ bool pmac_enabled;
+ u32 verify_time;
+ bool tx_enabled;
+};
+
struct stmmac_tc_entry {
bool in_use;
bool in_hw;
@@ -339,11 +365,8 @@ struct stmmac_priv {
struct workqueue_struct *wq;
struct work_struct service_task;
- /* Workqueue for handling FPE hand-shaking */
- unsigned long fpe_task_state;
- struct workqueue_struct *fpe_wq;
- struct work_struct fpe_task;
- char wq_name[IFNAMSIZ + 4];
+ /* Frame Preemption feature (FPE) */
+ struct stmmac_fpe_cfg fpe_cfg;
/* TC Handling */
unsigned int tc_entries_max;
@@ -397,7 +420,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv);
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled);
-void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable);
+void stmmac_fpe_apply(struct stmmac_priv *priv);
static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index a45af300bad8..2a37592a6281 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -19,6 +19,7 @@
#include "stmmac.h"
#include "dwmac_dma.h"
#include "dwxgmac2.h"
+#include "dwmac5.h"
#define REG_SPACE_SIZE 0x1060
#define GMAC4_REG_SPACE_SIZE 0x116C
@@ -1263,6 +1264,98 @@ static int stmmac_set_tunable(struct net_device *dev,
return ret;
}
+static int stmmac_get_mm(struct net_device *ndev,
+ struct ethtool_mm_state *state)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned long flags;
+ u32 frag_size;
+
+ if (!priv->dma_cap.fpesel)
+ return -EOPNOTSUPP;
+
+ spin_lock_irqsave(&priv->fpe_cfg.lock, flags);
+
+ state->max_verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
+ state->verify_enabled = priv->fpe_cfg.verify_enabled;
+ state->pmac_enabled = priv->fpe_cfg.pmac_enabled;
+ state->verify_time = priv->fpe_cfg.verify_time;
+ state->tx_enabled = priv->fpe_cfg.tx_enabled;
+ state->verify_status = priv->fpe_cfg.status;
+ state->rx_min_frag_size = ETH_ZLEN;
+
+ /* FPE active if common tx_enabled and
+ * (verification success or disabled(forced))
+ */
+ if (state->tx_enabled &&
+ (state->verify_status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED ||
+ state->verify_status == ETHTOOL_MM_VERIFY_STATUS_DISABLED))
+ state->tx_active = true;
+ else
+ state->tx_active = false;
+
+ frag_size = stmmac_fpe_get_add_frag_size(priv, priv->ioaddr);
+ state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(frag_size);
+
+ spin_unlock_irqrestore(&priv->fpe_cfg.lock, flags);
+
+ return 0;
+}
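The 2-bit additional-fragment-size value read above encodes the minimum length of non-final preempted fragments in 64-byte steps, which is what ethtool's frag_size_add_to_min conversion exposes as tx-min-frag-size. A tiny model of that mapping, assuming the usual (add + 1) * 64 encoding; frag_add_to_min() is a stand-in.

#include <stdio.h>

static unsigned int frag_add_to_min(unsigned int add)
{
	return (add + 1) * 64;
}

int main(void)
{
	for (unsigned int add = 0; add < 4; add++)
		printf("addFragSize=%u -> tx-min-frag-size=%u bytes\n",
		       add, frag_add_to_min(add));	/* 64, 128, 192, 256 */
	return 0;
}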
+
+static int stmmac_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+ unsigned long flags;
+ u32 frag_size;
+ int err;
+
+ err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size,
+ &frag_size, extack);
+ if (err)
+ return err;
+
+ /* Wait for the verification that's currently in progress to finish */
+ timer_shutdown_sync(&fpe_cfg->verify_timer);
+
+ spin_lock_irqsave(&fpe_cfg->lock, flags);
+
+ fpe_cfg->verify_enabled = cfg->verify_enabled;
+ fpe_cfg->pmac_enabled = cfg->pmac_enabled;
+ fpe_cfg->verify_time = cfg->verify_time;
+ fpe_cfg->tx_enabled = cfg->tx_enabled;
+
+ if (!cfg->verify_enabled)
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
+
+ stmmac_fpe_set_add_frag_size(priv, priv->ioaddr, frag_size);
+ stmmac_fpe_apply(priv);
+
+ spin_unlock_irqrestore(&fpe_cfg->lock, flags);
+
+ return 0;
+}
+
+static void stmmac_get_mm_stats(struct net_device *ndev,
+ struct ethtool_mm_stats *s)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stmmac_counters *mmc = &priv->mmc;
+
+ if (!priv->dma_cap.rmon)
+ return;
+
+ stmmac_mmc_read(priv, priv->mmcaddr, mmc);
+
+ s->MACMergeFrameAssErrorCount = mmc->mmc_rx_packet_assembly_err_cntr;
+ s->MACMergeFrameAssOkCount = mmc->mmc_rx_packet_assembly_ok_cntr;
+ s->MACMergeFrameSmdErrorCount = mmc->mmc_rx_packet_smd_err_cntr;
+ s->MACMergeFragCountRx = mmc->mmc_rx_fpe_fragment_cntr;
+ s->MACMergeFragCountTx = mmc->mmc_tx_fpe_fragment_cntr;
+ s->MACMergeHoldCount = mmc->mmc_tx_hold_req_cntr;
+}
+
static const struct ethtool_ops stmmac_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -1301,6 +1394,9 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.set_tunable = stmmac_set_tunable,
.get_link_ksettings = stmmac_ethtool_get_link_ksettings,
.set_link_ksettings = stmmac_ethtool_set_link_ksettings,
+ .get_mm = stmmac_get_mm,
+ .set_mm = stmmac_set_mm,
+ .get_mm_stats = stmmac_get_mm_stats,
};
void stmmac_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d9fca8d1227c..d3895d7eecfc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -968,18 +968,31 @@ static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
{
- struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
- enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
- enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
- bool *hs_enable = &fpe_cfg->hs_enable;
+ struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+ unsigned long flags;
- if (is_up && *hs_enable) {
- stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
- MPACKET_VERIFY);
+ timer_shutdown_sync(&fpe_cfg->verify_timer);
+
+ spin_lock_irqsave(&fpe_cfg->lock, flags);
+
+ if (is_up && fpe_cfg->pmac_enabled) {
+ /* VERIFY process requires pmac enabled when NIC comes up */
+ stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ false, true);
+
+ /* New link => maybe new partner => new verification process */
+ stmmac_fpe_apply(priv);
} else {
- *lo_state = FPE_STATE_OFF;
- *lp_state = FPE_STATE_OFF;
+ /* No link => turn off EFPE */
+ stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ false, false);
}
+
+ spin_unlock_irqrestore(&fpe_cfg->lock, flags);
}
static void stmmac_mac_link_down(struct phylink_config *config,
@@ -3358,27 +3371,6 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
}
}
-static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
-{
- char *name;
-
- clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
- clear_bit(__FPE_REMOVING, &priv->fpe_task_state);
-
- name = priv->wq_name;
- sprintf(name, "%s-fpe", priv->dev->name);
-
- priv->fpe_wq = create_singlethread_workqueue(name);
- if (!priv->fpe_wq) {
- netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
-
- return -ENOMEM;
- }
- netdev_info(priv->dev, "FPE workqueue start");
-
- return 0;
-}
-
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
@@ -3533,13 +3525,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
stmmac_set_hw_vlan_mode(priv, priv->hw);
- if (priv->dma_cap.fpesel) {
- stmmac_fpe_start_wq(priv);
-
- if (priv->plat->fpe_cfg->enable)
- stmmac_fpe_handshake(priv, true);
- }
-
return 0;
}
@@ -4036,18 +4021,6 @@ static int stmmac_open(struct net_device *dev)
return ret;
}
-static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
-{
- set_bit(__FPE_REMOVING, &priv->fpe_task_state);
-
- if (priv->fpe_wq) {
- destroy_workqueue(priv->fpe_wq);
- priv->fpe_wq = NULL;
- }
-
- netdev_info(priv->dev, "FPE workqueue stop");
-}
-
/**
* stmmac_release - close entry point of the driver
* @dev : device pointer.
@@ -4095,10 +4068,10 @@ static int stmmac_release(struct net_device *dev)
stmmac_release_ptp(priv);
- pm_runtime_put(priv->device);
-
if (priv->dma_cap.fpesel)
- stmmac_fpe_stop_wq(priv);
+ timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
+
+ pm_runtime_put(priv->device);
return 0;
}
@@ -5982,45 +5955,31 @@ static int stmmac_set_features(struct net_device *netdev,
static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
{
- struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
- enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
- enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
- bool *hs_enable = &fpe_cfg->hs_enable;
-
- if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
- return;
+ struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
- /* If LP has sent verify mPacket, LP is FPE capable */
- if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
- if (*lp_state < FPE_STATE_CAPABLE)
- *lp_state = FPE_STATE_CAPABLE;
+ /* This is interrupt context, just spin_lock() */
+ spin_lock(&fpe_cfg->lock);
- /* If user has requested FPE enable, quickly response */
- if (*hs_enable)
- stmmac_fpe_send_mpacket(priv, priv->ioaddr,
- fpe_cfg,
- MPACKET_RESPONSE);
- }
+ if (!fpe_cfg->pmac_enabled || status == FPE_EVENT_UNKNOWN)
+ goto unlock_out;
- /* If Local has sent verify mPacket, Local is FPE capable */
- if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
- if (*lo_state < FPE_STATE_CAPABLE)
- *lo_state = FPE_STATE_CAPABLE;
- }
+ /* LP has sent verify mPacket */
+ if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER)
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
+ MPACKET_RESPONSE);
- /* If LP has sent response mPacket, LP is entering FPE ON */
- if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
- *lp_state = FPE_STATE_ENTERING_ON;
+ /* Local has sent verify mPacket */
+ if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER &&
+ fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED)
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
- /* If Local has sent response mPacket, Local is entering FPE ON */
- if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
- *lo_state = FPE_STATE_ENTERING_ON;
+ /* LP has sent response mPacket */
+ if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP &&
+ fpe_cfg->status == ETHTOOL_MM_VERIFY_STATUS_VERIFYING)
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
- if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
- !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
- priv->fpe_wq) {
- queue_work(priv->fpe_wq, &priv->fpe_task);
- }
+unlock_out:
+ spin_unlock(&fpe_cfg->lock);
}
static void stmmac_common_interrupt(struct stmmac_priv *priv)
@@ -6257,6 +6216,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
switch (type) {
case TC_QUERY_CAPS:
return stmmac_tc_query_caps(priv, priv, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return stmmac_tc_setup_mqprio(priv, priv, type_data);
case TC_SETUP_BLOCK:
return flow_block_cb_setup_simple(type_data,
&stmmac_block_cb_list,
@@ -7376,68 +7337,87 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
return ret;
}
-#define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
-static void stmmac_fpe_lp_task(struct work_struct *work)
+/**
+ * stmmac_fpe_verify_timer - Timer for MAC Merge verification
+ * @t: timer_list struct containing private info
+ *
+ * Verify the MAC Merge capability in the local TX direction, by
+ * transmitting Verify mPackets up to 3 times. Wait until link
+ * partner responds with a Response mPacket, otherwise fail.
+ */
+static void stmmac_fpe_verify_timer(struct timer_list *t)
{
- struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
- fpe_task);
- struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
- enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
- enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
- bool *hs_enable = &fpe_cfg->hs_enable;
- bool *enable = &fpe_cfg->enable;
- int retries = 20;
-
- while (retries-- > 0) {
- /* Bail out immediately if FPE handshake is OFF */
- if (*lo_state == FPE_STATE_OFF || !*hs_enable)
- break;
-
- if (*lo_state == FPE_STATE_ENTERING_ON &&
- *lp_state == FPE_STATE_ENTERING_ON) {
- stmmac_fpe_configure(priv, priv->ioaddr,
- fpe_cfg,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use,
- *enable);
-
- netdev_info(priv->dev, "configured FPE\n");
+ struct stmmac_fpe_cfg *fpe_cfg = from_timer(fpe_cfg, t, verify_timer);
+ struct stmmac_priv *priv = container_of(fpe_cfg, struct stmmac_priv,
+ fpe_cfg);
+ unsigned long flags;
+ bool rearm = false;
- *lo_state = FPE_STATE_ON;
- *lp_state = FPE_STATE_ON;
- netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
- break;
- }
+ spin_lock_irqsave(&fpe_cfg->lock, flags);
- if ((*lo_state == FPE_STATE_CAPABLE ||
- *lo_state == FPE_STATE_ENTERING_ON) &&
- *lp_state != FPE_STATE_ON) {
- netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
- *lo_state, *lp_state);
+ switch (fpe_cfg->status) {
+ case ETHTOOL_MM_VERIFY_STATUS_INITIAL:
+ case ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
+ if (fpe_cfg->verify_retries != 0) {
stmmac_fpe_send_mpacket(priv, priv->ioaddr,
- fpe_cfg,
- MPACKET_VERIFY);
+ fpe_cfg, MPACKET_VERIFY);
+ rearm = true;
+ } else {
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
}
- /* Sleep then retry */
- msleep(500);
+
+ fpe_cfg->verify_retries--;
+ break;
+
+ case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
+ stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ true, true);
+ break;
+
+ default:
+ break;
}
- clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
+ if (rearm) {
+ mod_timer(&fpe_cfg->verify_timer,
+ jiffies + msecs_to_jiffies(fpe_cfg->verify_time));
+ }
+
+ spin_unlock_irqrestore(&fpe_cfg->lock, flags);
}
-void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
+static void stmmac_fpe_verify_timer_arm(struct stmmac_fpe_cfg *fpe_cfg)
{
- if (priv->plat->fpe_cfg->hs_enable != enable) {
- if (enable) {
- stmmac_fpe_send_mpacket(priv, priv->ioaddr,
- priv->plat->fpe_cfg,
- MPACKET_VERIFY);
- } else {
- priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
- priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
- }
+ if (fpe_cfg->pmac_enabled && fpe_cfg->tx_enabled &&
+ fpe_cfg->verify_enabled &&
+ fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_FAILED &&
+ fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) {
+ timer_setup(&fpe_cfg->verify_timer, stmmac_fpe_verify_timer, 0);
+ mod_timer(&fpe_cfg->verify_timer, jiffies);
+ }
+}
+
+void stmmac_fpe_apply(struct stmmac_priv *priv)
+{
+ struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
+
+ /* If verification is disabled, configure FPE right away.
+ * Otherwise let the timer code do it.
+ */
+ if (!fpe_cfg->verify_enabled) {
+ stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ fpe_cfg->tx_enabled,
+ fpe_cfg->pmac_enabled);
+ } else {
+ fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL;
+ fpe_cfg->verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
- priv->plat->fpe_cfg->hs_enable = enable;
+ if (netif_running(priv->dev))
+ stmmac_fpe_verify_timer_arm(fpe_cfg);
}
}
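stmmac_fpe_apply() either programs FPE directly or kicks off the verification handshake driven by the timer above: up to the retry limit Verify mPackets are sent, and the state only reaches SUCCEEDED if the partner's Response arrives. A toy userspace model of that handshake; run_verify() and the enum are stand-ins, with the retry count mirroring STMMAC_FPE_MM_MAX_VERIFY_RETRIES.

#include <stdbool.h>
#include <stdio.h>

enum mm_status { MM_INITIAL, MM_VERIFYING, MM_SUCCEEDED, MM_FAILED };

static enum mm_status run_verify(bool partner_responds, int max_retries)
{
	enum mm_status st = MM_INITIAL;

	for (int retries = max_retries; ; retries--) {
		if (st == MM_SUCCEEDED || st == MM_FAILED)
			break;
		if (retries == 0) {
			st = MM_FAILED;		/* retries exhausted */
			break;
		}
		st = MM_VERIFYING;		/* "send" a Verify mPacket */
		if (partner_responds)		/* Response mPacket received */
			st = MM_SUCCEEDED;
	}
	return st;
}

int main(void)
{
	printf("%d %d\n", run_verify(true, 3), run_verify(false, 3)); /* 2 3 */
	return 0;
}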
@@ -7555,9 +7535,6 @@ int stmmac_dvr_probe(struct device *device,
INIT_WORK(&priv->service_task, stmmac_service_task);
- /* Initialize Link Partner FPE workqueue */
- INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
-
/* Override with kernel parameters if supplied XXX CRS XXX
* this needs to have multiple instances
*/
@@ -7722,6 +7699,12 @@ int stmmac_dvr_probe(struct device *device,
mutex_init(&priv->lock);
+ priv->fpe_cfg.verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
+ priv->fpe_cfg.verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
+ priv->fpe_cfg.status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
+ timer_setup(&priv->fpe_cfg.verify_timer, stmmac_fpe_verify_timer, 0);
+ spin_lock_init(&priv->fpe_cfg.lock);
+
/* If a specific clk_csr value is passed from the platform
* this means that the CSR Clock Range selection cannot be
* changed at run-time and it is fixed. Viceversa the driver'll try to
@@ -7895,16 +7878,8 @@ int stmmac_suspend(struct device *dev)
}
rtnl_unlock();
- if (priv->dma_cap.fpesel) {
- /* Disable FPE */
- stmmac_fpe_configure(priv, priv->ioaddr,
- priv->plat->fpe_cfg,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use, false);
-
- stmmac_fpe_handshake(priv, false);
- stmmac_fpe_stop_wq(priv);
- }
+ if (priv->dma_cap.fpesel)
+ timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
priv->speed = SPEED_UNKNOWN;
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 996f2bcd07a2..832998bc020b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -282,16 +282,6 @@ static int tc_init(struct stmmac_priv *priv)
if (ret)
return -ENOMEM;
- if (!priv->plat->fpe_cfg) {
- priv->plat->fpe_cfg = devm_kzalloc(priv->device,
- sizeof(*priv->plat->fpe_cfg),
- GFP_KERNEL);
- if (!priv->plat->fpe_cfg)
- return -ENOMEM;
- } else {
- memset(priv->plat->fpe_cfg, 0, sizeof(*priv->plat->fpe_cfg));
- }
-
/* Fail silently as we can still use remaining features, e.g. CBS */
if (!dma_cap->frpsel)
return 0;
@@ -941,9 +931,9 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
struct tc_taprio_qopt_offload *qopt)
{
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
+ struct netlink_ext_ack *extack = qopt->mqprio.extack;
struct timespec64 time, current_time, qopt_time;
ktime_t current_time_ns;
- bool fpe = false;
int i, ret = 0;
u64 ctr;
@@ -1028,16 +1018,12 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
switch (qopt->entries[i].command) {
case TC_TAPRIO_CMD_SET_GATES:
- if (fpe)
- return -EINVAL;
break;
case TC_TAPRIO_CMD_SET_AND_HOLD:
gates |= BIT(0);
- fpe = true;
break;
case TC_TAPRIO_CMD_SET_AND_RELEASE:
gates &= ~BIT(0);
- fpe = true;
break;
default:
return -EOPNOTSUPP;
@@ -1068,16 +1054,6 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
tc_taprio_map_maxsdu_txq(priv, qopt);
- if (fpe && !priv->dma_cap.fpesel) {
- mutex_unlock(&priv->est_lock);
- return -EOPNOTSUPP;
- }
-
- /* Actual FPE register configuration will be done after FPE handshake
- * is success.
- */
- priv->plat->fpe_cfg->enable = fpe;
-
ret = stmmac_est_configure(priv, priv, priv->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->est_lock);
@@ -1086,12 +1062,10 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
goto disable;
}
- netdev_info(priv->dev, "configured EST\n");
-
- if (fpe) {
- stmmac_fpe_handshake(priv, true);
- netdev_info(priv->dev, "start FPE handshake\n");
- }
+ ret = stmmac_fpe_map_preemption_class(priv, priv->dev, extack,
+ qopt->mqprio.preemptible_tcs);
+ if (ret)
+ goto disable;
return 0;
@@ -1109,16 +1083,7 @@ disable:
mutex_unlock(&priv->est_lock);
}
- priv->plat->fpe_cfg->enable = false;
- stmmac_fpe_configure(priv, priv->ioaddr,
- priv->plat->fpe_cfg,
- priv->plat->tx_queues_to_use,
- priv->plat->rx_queues_to_use,
- false);
- netdev_info(priv->dev, "disabled FPE\n");
-
- stmmac_fpe_handshake(priv, false);
- netdev_info(priv->dev, "stop FPE handshake\n");
+ stmmac_fpe_map_preemption_class(priv, priv->dev, extack, 0);
return ret;
}
@@ -1174,6 +1139,18 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
return err;
}
+static int tc_setup_taprio_without_fpe(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ if (!qopt->mqprio.preemptible_tcs)
+ return tc_setup_taprio(priv, qopt);
+
+ NL_SET_ERR_MSG_MOD(qopt->mqprio.extack,
+ "taprio with FPE is not implemented for this MAC");
+
+ return -EOPNOTSUPP;
+}
+
static int tc_setup_etf(struct stmmac_priv *priv,
struct tc_etf_qopt_offload *qopt)
{
@@ -1198,6 +1175,13 @@ static int tc_query_caps(struct stmmac_priv *priv,
struct tc_query_caps_base *base)
{
switch (base->type) {
+ case TC_SETUP_QDISC_MQPRIO: {
+ struct tc_mqprio_caps *caps = base->caps;
+
+ caps->validate_queue_counts = true;
+
+ return 0;
+ }
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_caps *caps = base->caps;
@@ -1214,6 +1198,81 @@ static int tc_query_caps(struct stmmac_priv *priv,
}
}
+static void stmmac_reset_tc_mqprio(struct net_device *ndev,
+ struct netlink_ext_ack *extack)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ netdev_reset_tc(ndev);
+ netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
+ stmmac_fpe_map_preemption_class(priv, ndev, extack, 0);
+}
+
+static int tc_setup_dwmac510_mqprio(struct stmmac_priv *priv,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ struct netlink_ext_ack *extack = mqprio->extack;
+ struct tc_mqprio_qopt *qopt = &mqprio->qopt;
+ u32 offset, count, num_stack_tx_queues = 0;
+ struct net_device *ndev = priv->dev;
+ u32 num_tc = qopt->num_tc;
+ int err;
+
+ if (!num_tc) {
+ stmmac_reset_tc_mqprio(ndev, extack);
+ return 0;
+ }
+
+ err = netdev_set_num_tc(ndev, num_tc);
+ if (err)
+ return err;
+
+ for (u32 tc = 0; tc < num_tc; tc++) {
+ offset = qopt->offset[tc];
+ count = qopt->count[tc];
+ num_stack_tx_queues += count;
+
+ err = netdev_set_tc_queue(ndev, tc, count, offset);
+ if (err)
+ goto err_reset_tc;
+ }
+
+ err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+ if (err)
+ goto err_reset_tc;
+
+ err = stmmac_fpe_map_preemption_class(priv, ndev, extack,
+ mqprio->preemptible_tcs);
+ if (err)
+ goto err_reset_tc;
+
+ return 0;
+
+err_reset_tc:
+ stmmac_reset_tc_mqprio(ndev, extack);
+
+ return err;
+}
+
+static int tc_setup_mqprio_unimplemented(struct stmmac_priv *priv,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ NL_SET_ERR_MSG_MOD(mqprio->extack,
+ "mqprio HW offload is not implemented for this MAC");
+ return -EOPNOTSUPP;
+}
+
+const struct stmmac_tc_ops dwmac4_tc_ops = {
+ .init = tc_init,
+ .setup_cls_u32 = tc_setup_cls_u32,
+ .setup_cbs = tc_setup_cbs,
+ .setup_cls = tc_setup_cls,
+ .setup_taprio = tc_setup_taprio_without_fpe,
+ .setup_etf = tc_setup_etf,
+ .query_caps = tc_query_caps,
+ .setup_mqprio = tc_setup_mqprio_unimplemented,
+};
+
const struct stmmac_tc_ops dwmac510_tc_ops = {
.init = tc_init,
.setup_cls_u32 = tc_setup_cls_u32,
@@ -1222,4 +1281,16 @@ const struct stmmac_tc_ops dwmac510_tc_ops = {
.setup_taprio = tc_setup_taprio,
.setup_etf = tc_setup_etf,
.query_caps = tc_query_caps,
+ .setup_mqprio = tc_setup_dwmac510_mqprio,
+};
+
+const struct stmmac_tc_ops dwxgmac_tc_ops = {
+ .init = tc_init,
+ .setup_cls_u32 = tc_setup_cls_u32,
+ .setup_cbs = tc_setup_cbs,
+ .setup_cls = tc_setup_cls,
+ .setup_taprio = tc_setup_taprio_without_fpe,
+ .setup_etf = tc_setup_etf,
+ .query_caps = tc_query_caps,
+ .setup_mqprio = tc_setup_mqprio_unimplemented,
};
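Because tc_query_caps() now advertises validate_queue_counts, the stack is expected to sanity-check the mqprio offset/count table before it reaches tc_setup_dwmac510_mqprio(). A simplified userspace version of that range check; mqprio_ranges_ok() is illustrative only, not the core's implementation.

#include <stdbool.h>
#include <stdio.h>

static bool mqprio_ranges_ok(const unsigned int *offset,
			     const unsigned int *count,
			     unsigned int num_tc, unsigned int num_txq)
{
	for (unsigned int tc = 0; tc < num_tc; tc++) {
		if (!count[tc])
			return false;			/* empty TC */
		if (offset[tc] + count[tc] > num_txq)
			return false;			/* range past last TXQ */
	}
	return true;
}

int main(void)
{
	unsigned int offset[] = { 0, 2 }, count[] = { 2, 2 };

	printf("%d\n", mqprio_ranges_ok(offset, count, 2, 4)); /* 1 */
	printf("%d\n", mqprio_ranges_ok(offset, count, 2, 3)); /* 0 */
	return 0;
}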
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index 539d5ca82f52..9032444435e9 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -427,9 +427,9 @@ static void am65_cpsw_get_channels(struct net_device *ndev,
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- ch->max_rx = AM65_CPSW_MAX_RX_QUEUES;
- ch->max_tx = AM65_CPSW_MAX_TX_QUEUES;
- ch->rx_count = AM65_CPSW_MAX_RX_QUEUES;
+ ch->max_rx = AM65_CPSW_MAX_QUEUES;
+ ch->max_tx = AM65_CPSW_MAX_QUEUES;
+ ch->rx_count = common->rx_ch_num_flows;
ch->tx_count = common->tx_ch_num;
}
@@ -447,9 +447,8 @@ static int am65_cpsw_set_channels(struct net_device *ndev,
if (common->usage_count)
return -EBUSY;
- am65_cpsw_nuss_remove_tx_chns(common);
-
- return am65_cpsw_nuss_update_tx_chns(common, chs->tx_count);
+ return am65_cpsw_nuss_update_tx_rx_chns(common, chs->tx_count,
+ chs->rx_count);
}
static void
@@ -913,80 +912,64 @@ static void am65_cpsw_get_mm_stats(struct net_device *ndev,
s->MACMergeHoldCount = readl(base + AM65_CPSW_STATN_IET_TX_HOLD);
}
-static int am65_cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
- struct kernel_ethtool_coalesce *kernel_coal,
- struct netlink_ext_ack *extack)
-{
- struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- struct am65_cpsw_tx_chn *tx_chn;
-
- tx_chn = &common->tx_chns[0];
-
- coal->rx_coalesce_usecs = common->rx_pace_timeout / 1000;
- coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout / 1000;
-
- return 0;
-}
-
static int am65_cpsw_get_per_queue_coalesce(struct net_device *ndev, u32 queue,
struct ethtool_coalesce *coal)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_rx_flow *rx_flow;
struct am65_cpsw_tx_chn *tx_chn;
- if (queue >= AM65_CPSW_MAX_TX_QUEUES)
+ if (queue >= AM65_CPSW_MAX_QUEUES)
return -EINVAL;
tx_chn = &common->tx_chns[queue];
-
coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout / 1000;
+ rx_flow = &common->rx_chns.flows[queue];
+ coal->rx_coalesce_usecs = rx_flow->rx_pace_timeout / 1000;
+
return 0;
}
-static int am65_cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
+static int am65_cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- struct am65_cpsw_tx_chn *tx_chn;
-
- tx_chn = &common->tx_chns[0];
-
- if (coal->rx_coalesce_usecs && coal->rx_coalesce_usecs < 20)
- return -EINVAL;
-
- if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20)
- return -EINVAL;
-
- common->rx_pace_timeout = coal->rx_coalesce_usecs * 1000;
- tx_chn->tx_pace_timeout = coal->tx_coalesce_usecs * 1000;
-
- return 0;
+ return am65_cpsw_get_per_queue_coalesce(ndev, 0, coal);
}
static int am65_cpsw_set_per_queue_coalesce(struct net_device *ndev, u32 queue,
struct ethtool_coalesce *coal)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_rx_flow *rx_flow;
struct am65_cpsw_tx_chn *tx_chn;
- if (queue >= AM65_CPSW_MAX_TX_QUEUES)
+ if (queue >= AM65_CPSW_MAX_QUEUES)
return -EINVAL;
tx_chn = &common->tx_chns[queue];
-
- if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20) {
- dev_info(common->dev, "defaulting to min value of 20us for tx-usecs for tx-%u\n",
- queue);
- coal->tx_coalesce_usecs = 20;
- }
+ if (coal->tx_coalesce_usecs && coal->tx_coalesce_usecs < 20)
+ return -EINVAL;
tx_chn->tx_pace_timeout = coal->tx_coalesce_usecs * 1000;
+ rx_flow = &common->rx_chns.flows[queue];
+ if (coal->rx_coalesce_usecs && coal->rx_coalesce_usecs < 20)
+ return -EINVAL;
+
+ rx_flow->rx_pace_timeout = coal->rx_coalesce_usecs * 1000;
+
return 0;
}
+static int am65_cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ return am65_cpsw_set_per_queue_coalesce(ndev, 0, coal);
+}
+
const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
.begin = am65_cpsw_ethtool_op_begin,
.complete = am65_cpsw_ethtool_op_complete,
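With the rework above, the per-queue handlers become the single source of truth and a non-zero coalesce value below 20 us is now rejected instead of silently clamped. A small model of that validation and the microsecond-to-nanosecond pacing conversion; coalesce_to_ns() is a stand-in for the driver logic.

#include <errno.h>
#include <stdio.h>

static int coalesce_to_ns(unsigned int usecs, unsigned long *out_ns)
{
	if (usecs && usecs < 20)
		return -EINVAL;			/* below the supported floor */
	*out_ns = (unsigned long)usecs * 1000;	/* store as ns for the hrtimer */
	return 0;
}

int main(void)
{
	unsigned long ns;

	printf("%d\n", coalesce_to_ns(50, &ns));	/* 0, ns = 50000 */
	printf("%d\n", coalesce_to_ns(10, &ns));	/* -EINVAL */
	return 0;
}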
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index b7e5d0fb5d19..cbe99017cbfa 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -138,7 +138,7 @@
AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN)
#define AM65_CPSW_ALE_AGEOUT_DEFAULT 30
-/* Number of TX/RX descriptors */
+/* Number of TX/RX descriptors per channel/flow */
#define AM65_CPSW_MAX_TX_DESC 500
#define AM65_CPSW_MAX_RX_DESC 500
@@ -150,6 +150,7 @@
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
#define AM65_CPSW_DEFAULT_TX_CHNS 8
+#define AM65_CPSW_DEFAULT_RX_CHN_FLOWS 1
/* CPPI streaming packet interface */
#define AM65_CPSW_CPPI_TX_FLOW_ID 0x3FFF
@@ -331,7 +332,7 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
}
static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
- struct page *page)
+ struct page *page, u32 flow_idx)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
struct cppi5_host_desc_t *desc_rx;
@@ -364,7 +365,8 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
swdata = cppi5_hdesc_get_swdata(desc_rx);
*((void **)swdata) = page_address(page);
- return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma);
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, flow_idx,
+ desc_rx, desc_dma);
}
void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
@@ -399,22 +401,27 @@ static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct am65_cpsw_rx_flow *flow;
struct xdp_rxq_info *rxq;
- int i;
+ int id, port;
- for (i = 0; i < common->port_num; i++) {
- if (!common->ports[i].ndev)
- continue;
+ for (id = 0; id < common->rx_ch_num_flows; id++) {
+ flow = &rx_chn->flows[id];
- rxq = &common->ports[i].xdp_rxq;
+ for (port = 0; port < common->port_num; port++) {
+ if (!common->ports[port].ndev)
+ continue;
- if (xdp_rxq_info_is_reg(rxq))
- xdp_rxq_info_unreg(rxq);
- }
+ rxq = &common->ports[port].xdp_rxq[id];
+
+ if (xdp_rxq_info_is_reg(rxq))
+ xdp_rxq_info_unreg(rxq);
+ }
- if (rx_chn->page_pool) {
- page_pool_destroy(rx_chn->page_pool);
- rx_chn->page_pool = NULL;
+ if (flow->page_pool) {
+ page_pool_destroy(flow->page_pool);
+ flow->page_pool = NULL;
+ }
}
}
@@ -428,31 +435,44 @@ static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
.nid = dev_to_node(common->dev),
.dev = common->dev,
.dma_dir = DMA_BIDIRECTIONAL,
- .napi = &common->napi_rx,
+ /* .napi set dynamically */
};
+ struct am65_cpsw_rx_flow *flow;
struct xdp_rxq_info *rxq;
struct page_pool *pool;
- int i, ret;
-
- pool = page_pool_create(&pp_params);
- if (IS_ERR(pool))
- return PTR_ERR(pool);
+ int id, port, ret;
+
+ for (id = 0; id < common->rx_ch_num_flows; id++) {
+ flow = &rx_chn->flows[id];
+ pp_params.napi = &flow->napi_rx;
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool)) {
+ ret = PTR_ERR(pool);
+ goto err;
+ }
- rx_chn->page_pool = pool;
+ flow->page_pool = pool;
- for (i = 0; i < common->port_num; i++) {
- if (!common->ports[i].ndev)
- continue;
+ /* using same page pool is allowed as no running rx handlers
+ * simultaneously for both ndevs
+ */
+ for (port = 0; port < common->port_num; port++) {
+ if (!common->ports[port].ndev)
+ continue;
- rxq = &common->ports[i].xdp_rxq;
+ rxq = &common->ports[port].xdp_rxq[id];
- ret = xdp_rxq_info_reg(rxq, common->ports[i].ndev, i, 0);
- if (ret)
- goto err;
+ ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
+ id, flow->napi_rx.napi_id);
+ if (ret)
+ goto err;
- ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
- if (ret)
- goto err;
+ ret = xdp_rxq_info_reg_mem_model(rxq,
+ MEM_TYPE_PAGE_POOL,
+ pool);
+ if (ret)
+ goto err;
+ }
}
return 0;
@@ -497,25 +517,27 @@ static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_ch
desc_idx);
}
-static inline void am65_cpsw_put_page(struct am65_cpsw_rx_chn *rx_chn,
+static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
struct page *page,
bool allow_direct,
int desc_idx)
{
- page_pool_put_full_page(rx_chn->page_pool, page, allow_direct);
- rx_chn->pages[desc_idx] = NULL;
+ page_pool_put_full_page(flow->page_pool, page, allow_direct);
+ flow->pages[desc_idx] = NULL;
}
static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
{
- struct am65_cpsw_rx_chn *rx_chn = data;
+ struct am65_cpsw_rx_flow *flow = data;
struct cppi5_host_desc_t *desc_rx;
+ struct am65_cpsw_rx_chn *rx_chn;
dma_addr_t buf_dma;
u32 buf_dma_len;
void *page_addr;
void **swdata;
int desc_idx;
+ rx_chn = &flow->common->rx_chns;
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
page_addr = *swdata;
@@ -526,7 +548,7 @@ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
rx_chn->dsize_log2);
- am65_cpsw_put_page(rx_chn, virt_to_page(page_addr), false, desc_idx);
+ am65_cpsw_put_page(flow, virt_to_page(page_addr), false, desc_idx);
}
static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
@@ -602,7 +624,8 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
struct am65_cpsw_host *host_p = am65_common_get_host(common);
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
- int port_idx, i, ret, tx;
+ int port_idx, i, ret, tx, flow_idx;
+ struct am65_cpsw_rx_flow *flow;
u32 val, port_mask;
struct page *page;
@@ -670,27 +693,26 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
return ret;
}
- for (i = 0; i < rx_chn->descs_num; i++) {
- page = page_pool_dev_alloc_pages(rx_chn->page_pool);
- if (!page) {
- ret = -ENOMEM;
- if (i)
+ for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
+ flow = &rx_chn->flows[flow_idx];
+ for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
+ page = page_pool_dev_alloc_pages(flow->page_pool);
+ if (!page) {
+ dev_err(common->dev, "cannot allocate page in flow %d\n",
+ flow_idx);
+ ret = -ENOMEM;
goto fail_rx;
+ }
+ flow->pages[i] = page;
- return ret;
- }
- rx_chn->pages[i] = page;
-
- ret = am65_cpsw_nuss_rx_push(common, page);
- if (ret < 0) {
- dev_err(common->dev,
- "cannot submit page to channel rx: %d\n",
- ret);
- am65_cpsw_put_page(rx_chn, page, false, i);
- if (i)
+ ret = am65_cpsw_nuss_rx_push(common, page, flow_idx);
+ if (ret < 0) {
+ dev_err(common->dev,
+ "cannot submit page to rx channel flow %d, error %d\n",
+ flow_idx, ret);
+ am65_cpsw_put_page(flow, page, false, i);
goto fail_rx;
-
- return ret;
+ }
}
}
@@ -700,6 +722,14 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
goto fail_rx;
}
+ for (i = 0; i < common->rx_ch_num_flows ; i++) {
+ napi_enable(&rx_chn->flows[i].napi_rx);
+ if (rx_chn->flows[i].irq_disabled) {
+ rx_chn->flows[i].irq_disabled = false;
+ enable_irq(rx_chn->flows[i].irq);
+ }
+ }
+
for (tx = 0; tx < common->tx_ch_num; tx++) {
ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
if (ret) {
@@ -711,12 +741,6 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
napi_enable(&tx_chn[tx].napi_tx);
}
- napi_enable(&common->napi_rx);
- if (common->rx_irq_disabled) {
- common->rx_irq_disabled = false;
- enable_irq(rx_chn->irq);
- }
-
dev_dbg(common->dev, "cpsw_nuss started\n");
return 0;
@@ -727,11 +751,24 @@ fail_tx:
tx--;
}
+ for (flow_idx = 0; i < common->rx_ch_num_flows; flow_idx++) {
+ flow = &rx_chn->flows[flow_idx];
+ if (!flow->irq_disabled) {
+ disable_irq(flow->irq);
+ flow->irq_disabled = true;
+ }
+ napi_disable(&flow->napi_rx);
+ }
+
k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
fail_rx:
- k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, 0, rx_chn,
- am65_cpsw_nuss_rx_cleanup, 0);
+ for (i = 0; i < common->rx_ch_num_flows; i--)
+ k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
+ am65_cpsw_nuss_rx_cleanup, 0);
+
+ am65_cpsw_destroy_xdp_rxqs(common);
+
return ret;
}
@@ -780,12 +817,12 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
dev_err(common->dev, "rx teardown timeout\n");
}
- napi_disable(&common->napi_rx);
- hrtimer_cancel(&common->rx_hrtimer);
-
- for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
- k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
- am65_cpsw_nuss_rx_cleanup, !!i);
+ for (i = 0; i < common->rx_ch_num_flows; i++) {
+ napi_disable(&rx_chn->flows[i].napi_rx);
+ hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
+ k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, &rx_chn->flows[i],
+ am65_cpsw_nuss_rx_cleanup, 0);
+ }
k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
@@ -794,10 +831,6 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
- for (i = 0; i < rx_chn->descs_num; i++) {
- if (rx_chn->pages[i])
- am65_cpsw_put_page(rx_chn, rx_chn->pages[i], false, i);
- }
am65_cpsw_destroy_xdp_rxqs(common);
dev_dbg(common->dev, "cpsw_nuss stopped\n");
@@ -868,7 +901,7 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
goto runtime_put;
}
- ret = netif_set_real_num_rx_queues(ndev, AM65_CPSW_MAX_RX_QUEUES);
+ ret = netif_set_real_num_rx_queues(ndev, common->rx_ch_num_flows);
if (ret) {
dev_err(common->dev, "cannot set real number of rx queues\n");
goto runtime_put;
@@ -992,12 +1025,12 @@ pool_free:
return ret;
}
-static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
+static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
struct am65_cpsw_port *port,
struct xdp_buff *xdp,
int desc_idx, int cpu, int *len)
{
- struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct am65_cpsw_common *common = flow->common;
struct am65_cpsw_ndev_priv *ndev_priv;
struct net_device *ndev = port->ndev;
struct am65_cpsw_ndev_stats *stats;
@@ -1026,7 +1059,7 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
ret = AM65_CPSW_XDP_PASS;
goto out;
case XDP_TX:
- tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES];
+ tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_QUEUES];
netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
xdpf = xdp_convert_buff_to_frame(xdp);
@@ -1068,7 +1101,8 @@ drop:
}
page = virt_to_head_page(xdp->data);
- am65_cpsw_put_page(rx_chn, page, true, desc_idx);
+ am65_cpsw_put_page(flow, page, true, desc_idx);
+
out:
return ret;
}
@@ -1106,11 +1140,12 @@ static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
}
}
-static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
- u32 flow_idx, int cpu, int *xdp_state)
+static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
+ int cpu, int *xdp_state)
{
- struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct am65_cpsw_rx_chn *rx_chn = &flow->common->rx_chns;
u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
+ struct am65_cpsw_common *common = flow->common;
struct am65_cpsw_ndev_priv *ndev_priv;
struct am65_cpsw_ndev_stats *stats;
struct cppi5_host_desc_t *desc_rx;
@@ -1120,6 +1155,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
struct am65_cpsw_port *port;
int headroom, desc_idx, ret;
struct net_device *ndev;
+ u32 flow_idx = flow->id;
struct sk_buff *skb;
struct xdp_buff xdp;
void *page_addr;
@@ -1174,10 +1210,10 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
}
if (port->xdp_prog) {
- xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq);
+ xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
pkt_len, false);
- *xdp_state = am65_cpsw_run_xdp(common, port, &xdp, desc_idx,
+ *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, desc_idx,
cpu, &pkt_len);
if (*xdp_state != AM65_CPSW_XDP_PASS)
goto allocate;
@@ -1195,7 +1231,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
skb_mark_for_recycle(skb);
skb->protocol = eth_type_trans(skb, ndev);
am65_cpsw_nuss_rx_csum(skb, csum_info);
- napi_gro_receive(&common->napi_rx, skb);
+ napi_gro_receive(&flow->napi_rx, skb);
stats = this_cpu_ptr(ndev_priv->stats);
@@ -1205,24 +1241,24 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
u64_stats_update_end(&stats->syncp);
allocate:
- new_page = page_pool_dev_alloc_pages(rx_chn->page_pool);
+ new_page = page_pool_dev_alloc_pages(flow->page_pool);
if (unlikely(!new_page)) {
dev_err(dev, "page alloc failed\n");
return -ENOMEM;
}
- rx_chn->pages[desc_idx] = new_page;
+ flow->pages[desc_idx] = new_page;
if (netif_dormant(ndev)) {
- am65_cpsw_put_page(rx_chn, new_page, true, desc_idx);
+ am65_cpsw_put_page(flow, new_page, true, desc_idx);
ndev->stats.rx_dropped++;
return 0;
}
requeue:
- ret = am65_cpsw_nuss_rx_push(common, new_page);
+ ret = am65_cpsw_nuss_rx_push(common, new_page, flow_idx);
if (WARN_ON(ret < 0)) {
- am65_cpsw_put_page(rx_chn, new_page, true, desc_idx);
+ am65_cpsw_put_page(flow, new_page, true, desc_idx);
ndev->stats.rx_errors++;
ndev->stats.rx_dropped++;
}
@@ -1232,38 +1268,32 @@ requeue:
static enum hrtimer_restart am65_cpsw_nuss_rx_timer_callback(struct hrtimer *timer)
{
- struct am65_cpsw_common *common =
- container_of(timer, struct am65_cpsw_common, rx_hrtimer);
+ struct am65_cpsw_rx_flow *flow = container_of(timer,
+ struct am65_cpsw_rx_flow,
+ rx_hrtimer);
- enable_irq(common->rx_chns.irq);
+ enable_irq(flow->irq);
return HRTIMER_NORESTART;
}
static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
{
- struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
- int flow = AM65_CPSW_MAX_RX_FLOWS;
+ struct am65_cpsw_rx_flow *flow = am65_cpsw_napi_to_rx_flow(napi_rx);
+ struct am65_cpsw_common *common = flow->common;
int cpu = smp_processor_id();
int xdp_state_or = 0;
int cur_budget, ret;
int xdp_state;
int num_rx = 0;
- /* process every flow */
- while (flow--) {
- cur_budget = budget - num_rx;
-
- while (cur_budget--) {
- ret = am65_cpsw_nuss_rx_packets(common, flow, cpu,
- &xdp_state);
- xdp_state_or |= xdp_state;
- if (ret)
- break;
- num_rx++;
- }
-
- if (num_rx >= budget)
+ /* process only this flow */
+ cur_budget = budget;
+ while (cur_budget--) {
+ ret = am65_cpsw_nuss_rx_packets(flow, cpu, &xdp_state);
+ xdp_state_or |= xdp_state;
+ if (ret)
break;
+ num_rx++;
}
if (xdp_state_or & AM65_CPSW_XDP_REDIRECT)
@@ -1272,14 +1302,14 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
- if (common->rx_irq_disabled) {
- common->rx_irq_disabled = false;
- if (unlikely(common->rx_pace_timeout)) {
- hrtimer_start(&common->rx_hrtimer,
- ns_to_ktime(common->rx_pace_timeout),
+ if (flow->irq_disabled) {
+ flow->irq_disabled = false;
+ if (unlikely(flow->rx_pace_timeout)) {
+ hrtimer_start(&flow->rx_hrtimer,
+ ns_to_ktime(flow->rx_pace_timeout),
HRTIMER_MODE_REL_PINNED);
} else {
- enable_irq(common->rx_chns.irq);
+ enable_irq(flow->irq);
}
}
}
@@ -1527,11 +1557,11 @@ static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
{
- struct am65_cpsw_common *common = dev_id;
+ struct am65_cpsw_rx_flow *flow = dev_id;
- common->rx_irq_disabled = true;
+ flow->irq_disabled = true;
disable_irq_nosync(irq);
- napi_schedule(&common->napi_rx);
+ napi_schedule(&flow->napi_rx);
return IRQ_HANDLED;
}
@@ -2176,7 +2206,7 @@ static void am65_cpsw_nuss_free_tx_chns(void *data)
}
}
-void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
+static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
int i;
@@ -2191,15 +2221,9 @@ void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
devm_free_irq(dev, tx_chn->irq, tx_chn);
netif_napi_del(&tx_chn->napi_tx);
-
- if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
- k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
-
- if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
- k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
-
- memset(tx_chn, 0, sizeof(*tx_chn));
}
+
+ am65_cpsw_nuss_free_tx_chns(common);
}
static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
@@ -2331,19 +2355,22 @@ static void am65_cpsw_nuss_free_rx_chns(void *data)
k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
}
-static void am65_cpsw_nuss_remove_rx_chns(void *data)
+static void am65_cpsw_nuss_remove_rx_chns(struct am65_cpsw_common *common)
{
- struct am65_cpsw_common *common = data;
struct device *dev = common->dev;
struct am65_cpsw_rx_chn *rx_chn;
+ struct am65_cpsw_rx_flow *flows;
+ int i;
rx_chn = &common->rx_chns;
+ flows = rx_chn->flows;
devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common);
- if (!(rx_chn->irq < 0))
- devm_free_irq(dev, rx_chn->irq, common);
-
- netif_napi_del(&common->napi_rx);
+ for (i = 0; i < common->rx_ch_num_flows; i++) {
+ if (!(flows[i].irq < 0))
+ devm_free_irq(dev, flows[i].irq, &flows[i]);
+ netif_napi_del(&flows[i].napi_rx);
+ }
am65_cpsw_nuss_free_rx_chns(common);
@@ -2356,6 +2383,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
u32 max_desc_num = AM65_CPSW_MAX_RX_DESC;
struct device *dev = common->dev;
+ struct am65_cpsw_rx_flow *flow;
u32 hdesc_size, hdesc_size_out;
u32 fdqring_id;
int i, ret = 0;
@@ -2364,12 +2392,21 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
AM65_CPSW_NAV_SW_DATA_SIZE);
rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
- rx_cfg.flow_id_num = AM65_CPSW_MAX_RX_FLOWS;
+ rx_cfg.flow_id_num = common->rx_ch_num_flows;
rx_cfg.flow_id_base = common->rx_flow_id_base;
/* init all flows */
rx_chn->dev = dev;
- rx_chn->descs_num = max_desc_num;
+ rx_chn->descs_num = max_desc_num * rx_cfg.flow_id_num;
+
+ for (i = 0; i < common->rx_ch_num_flows; i++) {
+ flow = &rx_chn->flows[i];
+ flow->page_pool = NULL;
+ flow->pages = devm_kcalloc(dev, AM65_CPSW_MAX_RX_DESC,
+ sizeof(*flow->pages), GFP_KERNEL);
+ if (!flow->pages)
+ return -ENOMEM;
+ }
rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
if (IS_ERR(rx_chn->rx_chn)) {
@@ -2392,13 +2429,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
rx_chn->dsize_log2 = __fls(hdesc_size_out);
WARN_ON(hdesc_size_out != (1 << rx_chn->dsize_log2));
- rx_chn->page_pool = NULL;
-
- rx_chn->pages = devm_kcalloc(dev, rx_chn->descs_num,
- sizeof(*rx_chn->pages), GFP_KERNEL);
- if (!rx_chn->pages)
- return -ENOMEM;
-
common->rx_flow_id_base =
k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);
@@ -2422,6 +2452,10 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
};
+ flow = &rx_chn->flows[i];
+ flow->id = i;
+ flow->common = common;
+
rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
rx_flow_cfg.rx_cfg.size = max_desc_num;
rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
@@ -2438,30 +2472,37 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
i);
- rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
-
- if (rx_chn->irq < 0) {
+ flow->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
+ if (flow->irq <= 0) {
dev_err(dev, "Failed to get rx dma irq %d\n",
- rx_chn->irq);
- ret = rx_chn->irq;
+ flow->irq);
+ ret = flow->irq;
goto err;
}
- }
-
- netif_napi_add(common->dma_ndev, &common->napi_rx,
- am65_cpsw_nuss_rx_poll);
- hrtimer_init(&common->rx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- common->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
- ret = devm_request_irq(dev, rx_chn->irq,
- am65_cpsw_nuss_rx_irq,
- IRQF_TRIGGER_HIGH, dev_name(dev), common);
- if (ret) {
- dev_err(dev, "failure requesting rx irq %u, %d\n",
- rx_chn->irq, ret);
- goto err;
+ snprintf(flow->name,
+ sizeof(flow->name), "%s-rx%d",
+ dev_name(dev), i);
+ netif_napi_add(common->dma_ndev, &flow->napi_rx,
+ am65_cpsw_nuss_rx_poll);
+ hrtimer_init(&flow->rx_hrtimer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
+
+ ret = devm_request_irq(dev, flow->irq,
+ am65_cpsw_nuss_rx_irq,
+ IRQF_TRIGGER_HIGH,
+ flow->name, flow);
+ if (ret) {
+ dev_err(dev, "failure requesting rx %d irq %u, %d\n",
+ i, flow->irq, ret);
+ goto err;
+ }
}
+ /* setup classifier to route priorities to flows */
+ cpsw_ale_classifier_setup_default(common->ale, common->rx_ch_num_flows);
+
err:
i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
if (i) {
@@ -2705,8 +2746,8 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
/* alloc netdev */
port->ndev = devm_alloc_etherdev_mqs(common->dev,
sizeof(struct am65_cpsw_ndev_priv),
- AM65_CPSW_MAX_TX_QUEUES,
- AM65_CPSW_MAX_RX_QUEUES);
+ AM65_CPSW_MAX_QUEUES,
+ AM65_CPSW_MAX_QUEUES);
if (!port->ndev) {
dev_err(dev, "error allocating slave net_device %u\n",
port->port_id);
@@ -3303,9 +3344,10 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn);
}
- for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
- k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, rx_chan,
- am65_cpsw_nuss_rx_cleanup, !!i);
+ for (i = 0; i < common->rx_ch_num_flows; i++)
+ k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i,
+ &rx_chan->flows[i],
+ am65_cpsw_nuss_rx_cleanup, 0);
k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
@@ -3346,12 +3388,21 @@ err_cleanup_ndev:
return ret;
}
-int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx)
+int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common,
+ int num_tx, int num_rx)
{
int ret;
+ am65_cpsw_nuss_remove_tx_chns(common);
+ am65_cpsw_nuss_remove_rx_chns(common);
+
common->tx_ch_num = num_tx;
+ common->rx_ch_num_flows = num_rx;
ret = am65_cpsw_nuss_init_tx_chns(common);
+ if (ret)
+ return ret;
+
+ ret = am65_cpsw_nuss_init_rx_chns(common);
return ret;
}
@@ -3481,6 +3532,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
common->rx_flow_id_base = -1;
init_completion(&common->tdown_complete);
common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS;
+ common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS;
common->pf_p0_rx_ptype_rrobin = false;
common->default_vlan = 1;
@@ -3672,8 +3724,10 @@ static int am65_cpsw_nuss_resume(struct device *dev)
return ret;
/* If RX IRQ was disabled before suspend, keep it disabled */
- if (common->rx_irq_disabled)
- disable_irq(common->rx_chns.irq);
+ for (i = 0; i < common->rx_ch_num_flows; i++) {
+ if (common->rx_chns.flows[i].irq_disabled)
+ disable_irq(common->rx_chns.flows[i].irq);
+ }
am65_cpts_resume(common->cpts);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index e2ce2be320bd..dc8d544230dc 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -21,9 +21,7 @@ struct am65_cpts;
#define HOST_PORT_NUM 0
-#define AM65_CPSW_MAX_TX_QUEUES 8
-#define AM65_CPSW_MAX_RX_QUEUES 1
-#define AM65_CPSW_MAX_RX_FLOWS 1
+#define AM65_CPSW_MAX_QUEUES 8 /* both TX & RX */
#define AM65_CPSW_PORT_VLAN_REG_OFFSET 0x014
@@ -58,7 +56,7 @@ struct am65_cpsw_port {
struct am65_cpsw_qos qos;
struct devlink_port devlink_port;
struct bpf_prog *xdp_prog;
- struct xdp_rxq_info xdp_rxq;
+ struct xdp_rxq_info xdp_rxq[AM65_CPSW_MAX_QUEUES];
/* Only for suspend resume context */
u32 vid_context;
};
@@ -94,16 +92,27 @@ struct am65_cpsw_tx_chn {
u32 rate_mbps;
};
+struct am65_cpsw_rx_flow {
+ u32 id;
+ struct napi_struct napi_rx;
+ struct am65_cpsw_common *common;
+ int irq;
+ bool irq_disabled;
+ struct hrtimer rx_hrtimer;
+ unsigned long rx_pace_timeout;
+ struct page_pool *page_pool;
+ struct page **pages;
+ char name[32];
+};
+
struct am65_cpsw_rx_chn {
struct device *dev;
struct device *dma_dev;
struct k3_cppi_desc_pool *desc_pool;
struct k3_udma_glue_rx_channel *rx_chn;
- struct page_pool *page_pool;
- struct page **pages;
u32 descs_num;
unsigned char dsize_log2;
- int irq;
+ struct am65_cpsw_rx_flow flows[AM65_CPSW_MAX_QUEUES];
};
#define AM65_CPSW_QUIRK_I2027_NO_TX_CSUM BIT(0)
@@ -145,16 +154,12 @@ struct am65_cpsw_common {
u32 tx_ch_rate_msk;
u32 rx_flow_id_base;
- struct am65_cpsw_tx_chn tx_chns[AM65_CPSW_MAX_TX_QUEUES];
+ struct am65_cpsw_tx_chn tx_chns[AM65_CPSW_MAX_QUEUES];
struct completion tdown_complete;
atomic_t tdown_cnt;
+ int rx_ch_num_flows;
struct am65_cpsw_rx_chn rx_chns;
- struct napi_struct napi_rx;
-
- bool rx_irq_disabled;
- struct hrtimer rx_hrtimer;
- unsigned long rx_pace_timeout;
u32 nuss_ver;
u32 cpsw_ver;
@@ -203,8 +208,8 @@ struct am65_cpsw_ndev_priv {
#define am65_common_get_host(common) (&(common)->host)
#define am65_common_get_port(common, id) (&(common)->ports[(id) - 1])
-#define am65_cpsw_napi_to_common(pnapi) \
- container_of(pnapi, struct am65_cpsw_common, napi_rx)
+#define am65_cpsw_napi_to_rx_flow(pnapi) \
+ container_of(pnapi, struct am65_cpsw_rx_flow, napi_rx)
#define am65_cpsw_napi_to_tx_chn(pnapi) \
container_of(pnapi, struct am65_cpsw_tx_chn, napi_tx)
@@ -215,8 +220,8 @@ struct am65_cpsw_ndev_priv {
extern const struct ethtool_ops am65_cpsw_ethtool_ops_slave;
void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common);
-void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common);
-int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx);
+int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common *common,
+ int num_tx, int num_rx);
bool am65_cpsw_port_dev_check(const struct net_device *dev);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 64bf22cd860c..0d5d8917c70b 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/err.h>
@@ -45,6 +46,24 @@
#define ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS 0x9C
#define ALE_VLAN_MASK_MUX(reg) (0xc0 + (0x4 * (reg)))
+#define ALE_POLICER_PORT_OUI 0x100
+#define ALE_POLICER_DA_SA 0x104
+#define ALE_POLICER_VLAN 0x108
+#define ALE_POLICER_ETHERTYPE_IPSA 0x10c
+#define ALE_POLICER_IPDA 0x110
+#define ALE_POLICER_PIR 0x118
+#define ALE_POLICER_CIR 0x11c
+#define ALE_POLICER_TBL_CTL 0x120
+#define ALE_POLICER_CTL 0x124
+#define ALE_POLICER_TEST_CTL 0x128
+#define ALE_POLICER_HIT_STATUS 0x12c
+#define ALE_THREAD_DEF 0x134
+#define ALE_THREAD_CTL 0x138
+#define ALE_THREAD_VAL 0x13c
+
+#define ALE_POLICER_TBL_WRITE_ENABLE BIT(31)
+#define ALE_POLICER_TBL_INDEX_MASK GENMASK(4, 0)
+
#define AM65_CPSW_ALE_THREAD_DEF_REG 0x134
/* ALE_AGING_TIMER */
@@ -76,7 +95,7 @@ enum {
* @dev_id: ALE version/SoC id
* @features: features supported by ALE
* @tbl_entries: number of ALE entries
- * @major_ver_mask: mask of ALE Major Version Value in ALE_IDVER reg.
+ * @reg_fields: pointer to array of register field configuration
* @nu_switch_ale: NU Switch ALE
* @vlan_entry_tbl: ALE vlan entry fields description tbl
*/
@@ -84,7 +103,7 @@ struct cpsw_ale_dev_id {
const char *dev_id;
u32 features;
u32 tbl_entries;
- u32 major_ver_mask;
+ const struct reg_field *reg_fields;
bool nu_switch_ale;
const struct ale_entry_fld *vlan_entry_tbl;
};
@@ -102,7 +121,7 @@ struct cpsw_ale_dev_id {
#define ALE_UCAST_TOUCHED 3
#define ALE_TABLE_SIZE_MULTIPLIER 1024
-#define ALE_STATUS_SIZE_MASK 0x1f
+#define ALE_POLICER_SIZE_MULTIPLIER 8
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
@@ -1292,25 +1311,108 @@ void cpsw_ale_stop(struct cpsw_ale *ale)
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
}
+static const struct reg_field ale_fields_cpsw[] = {
+ /* CPSW_ALE_IDVER_REG */
+ [MINOR_VER] = REG_FIELD(ALE_IDVER, 0, 7),
+ [MAJOR_VER] = REG_FIELD(ALE_IDVER, 8, 15),
+};
+
+static const struct reg_field ale_fields_cpsw_nu[] = {
+ /* CPSW_ALE_IDVER_REG */
+ [MINOR_VER] = REG_FIELD(ALE_IDVER, 0, 7),
+ [MAJOR_VER] = REG_FIELD(ALE_IDVER, 8, 10),
+ /* CPSW_ALE_STATUS_REG */
+ [ALE_ENTRIES] = REG_FIELD(ALE_STATUS, 0, 7),
+ [ALE_POLICERS] = REG_FIELD(ALE_STATUS, 8, 15),
+ /* CPSW_ALE_POLICER_PORT_OUI_REG */
+ [POL_PORT_MEN] = REG_FIELD(ALE_POLICER_PORT_OUI, 31, 31),
+ [POL_TRUNK_ID] = REG_FIELD(ALE_POLICER_PORT_OUI, 30, 30),
+ [POL_PORT_NUM] = REG_FIELD(ALE_POLICER_PORT_OUI, 25, 25),
+ [POL_PRI_MEN] = REG_FIELD(ALE_POLICER_PORT_OUI, 19, 19),
+ [POL_PRI_VAL] = REG_FIELD(ALE_POLICER_PORT_OUI, 16, 18),
+ [POL_OUI_MEN] = REG_FIELD(ALE_POLICER_PORT_OUI, 15, 15),
+ [POL_OUI_INDEX] = REG_FIELD(ALE_POLICER_PORT_OUI, 0, 5),
+
+ /* CPSW_ALE_POLICER_DA_SA_REG */
+ [POL_DST_MEN] = REG_FIELD(ALE_POLICER_DA_SA, 31, 31),
+ [POL_DST_INDEX] = REG_FIELD(ALE_POLICER_DA_SA, 16, 21),
+ [POL_SRC_MEN] = REG_FIELD(ALE_POLICER_DA_SA, 15, 15),
+ [POL_SRC_INDEX] = REG_FIELD(ALE_POLICER_DA_SA, 0, 5),
+
+ /* CPSW_ALE_POLICER_VLAN_REG */
+ [POL_OVLAN_MEN] = REG_FIELD(ALE_POLICER_VLAN, 31, 31),
+ [POL_OVLAN_INDEX] = REG_FIELD(ALE_POLICER_VLAN, 16, 21),
+ [POL_IVLAN_MEN] = REG_FIELD(ALE_POLICER_VLAN, 15, 15),
+ [POL_IVLAN_INDEX] = REG_FIELD(ALE_POLICER_VLAN, 0, 5),
+
+ /* CPSW_ALE_POLICER_ETHERTYPE_IPSA_REG */
+ [POL_ETHERTYPE_MEN] = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 31, 31),
+ [POL_ETHERTYPE_INDEX] = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 16, 21),
+ [POL_IPSRC_MEN] = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 15, 15),
+ [POL_IPSRC_INDEX] = REG_FIELD(ALE_POLICER_ETHERTYPE_IPSA, 0, 5),
+
+ /* CPSW_ALE_POLICER_IPDA_REG */
+ [POL_IPDST_MEN] = REG_FIELD(ALE_POLICER_IPDA, 31, 31),
+ [POL_IPDST_INDEX] = REG_FIELD(ALE_POLICER_IPDA, 16, 21),
+
+ /* CPSW_ALE_POLICER_TBL_CTL_REG */
+	/* REG_FIELDs are not defined for this register because its fields
+	 * cannot be used independently.
+	 */
+
+ /* CPSW_ALE_POLICER_CTL_REG */
+ [POL_EN] = REG_FIELD(ALE_POLICER_CTL, 31, 31),
+ [POL_RED_DROP_EN] = REG_FIELD(ALE_POLICER_CTL, 29, 29),
+ [POL_YELLOW_DROP_EN] = REG_FIELD(ALE_POLICER_CTL, 28, 28),
+ [POL_YELLOW_THRESH] = REG_FIELD(ALE_POLICER_CTL, 24, 26),
+ [POL_POL_MATCH_MODE] = REG_FIELD(ALE_POLICER_CTL, 22, 23),
+ [POL_PRIORITY_THREAD_EN] = REG_FIELD(ALE_POLICER_CTL, 21, 21),
+ [POL_MAC_ONLY_DEF_DIS] = REG_FIELD(ALE_POLICER_CTL, 20, 20),
+
+ /* CPSW_ALE_POLICER_TEST_CTL_REG */
+ [POL_TEST_CLR] = REG_FIELD(ALE_POLICER_TEST_CTL, 31, 31),
+ [POL_TEST_CLR_RED] = REG_FIELD(ALE_POLICER_TEST_CTL, 30, 30),
+ [POL_TEST_CLR_YELLOW] = REG_FIELD(ALE_POLICER_TEST_CTL, 29, 29),
+ [POL_TEST_CLR_SELECTED] = REG_FIELD(ALE_POLICER_TEST_CTL, 28, 28),
+ [POL_TEST_ENTRY] = REG_FIELD(ALE_POLICER_TEST_CTL, 0, 4),
+
+ /* CPSW_ALE_POLICER_HIT_STATUS_REG */
+ [POL_STATUS_HIT] = REG_FIELD(ALE_POLICER_HIT_STATUS, 31, 31),
+ [POL_STATUS_HIT_RED] = REG_FIELD(ALE_POLICER_HIT_STATUS, 30, 30),
+ [POL_STATUS_HIT_YELLOW] = REG_FIELD(ALE_POLICER_HIT_STATUS, 29, 29),
+
+ /* CPSW_ALE_THREAD_DEF_REG */
+ [ALE_DEFAULT_THREAD_EN] = REG_FIELD(ALE_THREAD_DEF, 15, 15),
+ [ALE_DEFAULT_THREAD_VAL] = REG_FIELD(ALE_THREAD_DEF, 0, 5),
+
+ /* CPSW_ALE_THREAD_CTL_REG */
+ [ALE_THREAD_CLASS_INDEX] = REG_FIELD(ALE_THREAD_CTL, 0, 4),
+
+ /* CPSW_ALE_THREAD_VAL_REG */
+ [ALE_THREAD_ENABLE] = REG_FIELD(ALE_THREAD_VAL, 15, 15),
+ [ALE_THREAD_VALUE] = REG_FIELD(ALE_THREAD_VAL, 0, 5),
+};
+
static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
{
/* am3/4/5, dra7. dm814x, 66ak2hk-gbe */
.dev_id = "cpsw",
.tbl_entries = 1024,
- .major_ver_mask = 0xff,
+ .reg_fields = ale_fields_cpsw,
.vlan_entry_tbl = vlan_entry_cpsw,
},
{
/* 66ak2h_xgbe */
.dev_id = "66ak2h-xgbe",
.tbl_entries = 2048,
- .major_ver_mask = 0xff,
+ .reg_fields = ale_fields_cpsw,
.vlan_entry_tbl = vlan_entry_cpsw,
},
{
.dev_id = "66ak2el",
.features = CPSW_ALE_F_STATUS_REG,
- .major_ver_mask = 0x7,
+ .reg_fields = ale_fields_cpsw_nu,
.nu_switch_ale = true,
.vlan_entry_tbl = vlan_entry_nu,
},
@@ -1318,7 +1420,7 @@ static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
.dev_id = "66ak2g",
.features = CPSW_ALE_F_STATUS_REG,
.tbl_entries = 64,
- .major_ver_mask = 0x7,
+ .reg_fields = ale_fields_cpsw_nu,
.nu_switch_ale = true,
.vlan_entry_tbl = vlan_entry_nu,
},
@@ -1326,20 +1428,20 @@ static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
.dev_id = "am65x-cpsw2g",
.features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
.tbl_entries = 64,
- .major_ver_mask = 0x7,
+ .reg_fields = ale_fields_cpsw_nu,
.nu_switch_ale = true,
.vlan_entry_tbl = vlan_entry_nu,
},
{
.dev_id = "j721e-cpswxg",
.features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
- .major_ver_mask = 0x7,
+ .reg_fields = ale_fields_cpsw_nu,
.vlan_entry_tbl = vlan_entry_k3_cpswxg,
},
{
.dev_id = "am64-cpswxg",
.features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
- .major_ver_mask = 0x7,
+ .reg_fields = ale_fields_cpsw_nu,
.vlan_entry_tbl = vlan_entry_k3_cpswxg,
.tbl_entries = 512,
},
@@ -1361,47 +1463,80 @@ cpsw_ale_dev_id *cpsw_ale_match_id(const struct cpsw_ale_dev_id *id,
return NULL;
}
+static const struct regmap_config ale_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .name = "cpsw-ale",
+};
+
+static int cpsw_ale_regfield_init(struct cpsw_ale *ale)
+{
+ const struct reg_field *reg_fields = ale->params.reg_fields;
+ struct device *dev = ale->params.dev;
+ struct regmap *regmap = ale->regmap;
+ int i;
+
+ for (i = 0; i < ALE_FIELDS_MAX; i++) {
+ ale->fields[i] = devm_regmap_field_alloc(dev, regmap,
+ reg_fields[i]);
+ if (IS_ERR(ale->fields[i])) {
+ dev_err(dev, "Unable to allocate regmap field %d\n", i);
+ return PTR_ERR(ale->fields[i]);
+ }
+ }
+
+ return 0;
+}
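
For context, a minimal sketch (editor's illustration, not part of this patch) of what the regmap_field conversion buys: once cpsw_ale_regfield_init() has allocated the fields, a masked register read such as the old major-version lookup reduces to a single regmap_field_read() call, with no explicit mask/shift arithmetic.

/* Hedged sketch: read the ALE major version through the new reg_fields,
 * assuming the 'ale' handle returned by cpsw_ale_create(). This replaces
 * the former readl_relaxed() + major_ver_mask handling.
 */
static unsigned int example_ale_major_ver(struct cpsw_ale *ale)
{
	unsigned int major = 0;

	regmap_field_read(ale->fields[MAJOR_VER], &major);
	return major;
}
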
+
struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
{
+ u32 ale_entries, rev_major, rev_minor, policers;
const struct cpsw_ale_dev_id *ale_dev_id;
struct cpsw_ale *ale;
- u32 rev, ale_entries;
+ int ret;
ale_dev_id = cpsw_ale_match_id(cpsw_ale_id_match, params->dev_id);
if (!ale_dev_id)
return ERR_PTR(-EINVAL);
params->ale_entries = ale_dev_id->tbl_entries;
- params->major_ver_mask = ale_dev_id->major_ver_mask;
params->nu_switch_ale = ale_dev_id->nu_switch_ale;
+ params->reg_fields = ale_dev_id->reg_fields;
ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL);
if (!ale)
return ERR_PTR(-ENOMEM);
+ ale->regmap = devm_regmap_init_mmio(params->dev, params->ale_regs,
+ &ale_regmap_cfg);
+ if (IS_ERR(ale->regmap)) {
+ dev_err(params->dev, "Couldn't create CPSW ALE regmap\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ale->params = *params;
+ ret = cpsw_ale_regfield_init(ale);
+ if (ret)
+ return ERR_PTR(ret);
ale->p0_untag_vid_mask = devm_bitmap_zalloc(params->dev, VLAN_N_VID,
GFP_KERNEL);
if (!ale->p0_untag_vid_mask)
return ERR_PTR(-ENOMEM);
- ale->params = *params;
ale->ageout = ale->params.ale_ageout * HZ;
ale->features = ale_dev_id->features;
ale->vlan_entry_tbl = ale_dev_id->vlan_entry_tbl;
- rev = readl_relaxed(ale->params.ale_regs + ALE_IDVER);
- ale->version =
- (ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask) << 8) |
- ALE_VERSION_MINOR(rev);
+ regmap_field_read(ale->fields[MINOR_VER], &rev_minor);
+ regmap_field_read(ale->fields[MAJOR_VER], &rev_major);
+ ale->version = rev_major << 8 | rev_minor;
dev_info(ale->params.dev, "initialized cpsw ale version %d.%d\n",
- ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask),
- ALE_VERSION_MINOR(rev));
+ rev_major, rev_minor);
if (ale->features & CPSW_ALE_F_STATUS_REG &&
!ale->params.ale_entries) {
- ale_entries =
- readl_relaxed(ale->params.ale_regs + ALE_STATUS) &
- ALE_STATUS_SIZE_MASK;
+ regmap_field_read(ale->fields[ALE_ENTRIES], &ale_entries);
/* ALE available on newer NetCP switches has introduced
* a register, ALE_STATUS, to indicate the size of ALE
* table which shows the size as a multiple of 1024 entries.
@@ -1415,8 +1550,20 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
ale_entries *= ALE_TABLE_SIZE_MULTIPLIER;
ale->params.ale_entries = ale_entries;
}
+
+ if (ale->features & CPSW_ALE_F_STATUS_REG &&
+ !ale->params.num_policers) {
+ regmap_field_read(ale->fields[ALE_POLICERS], &policers);
+ if (!policers)
+ return ERR_PTR(-EINVAL);
+
+ policers *= ALE_POLICER_SIZE_MULTIPLIER;
+ ale->params.num_policers = policers;
+ }
+
dev_info(ale->params.dev,
- "ALE Table size %ld\n", ale->params.ale_entries);
+ "ALE Table size %ld, Policers %ld\n", ale->params.ale_entries,
+ ale->params.num_policers);
/* set default bits for existing h/w */
ale->port_mask_bits = ale->params.ale_ports;
@@ -1480,3 +1627,97 @@ u32 cpsw_ale_get_num_entries(struct cpsw_ale *ale)
{
return ale ? ale->params.ale_entries : 0;
}
+
+/* Reads the specified policer index into ALE POLICER registers */
+static void cpsw_ale_policer_read_idx(struct cpsw_ale *ale, u32 idx)
+{
+ idx &= ALE_POLICER_TBL_INDEX_MASK;
+ writel_relaxed(idx, ale->params.ale_regs + ALE_POLICER_TBL_CTL);
+}
+
+/* Writes the ALE POLICER registers into the specified policer index */
+static void cpsw_ale_policer_write_idx(struct cpsw_ale *ale, u32 idx)
+{
+ idx &= ALE_POLICER_TBL_INDEX_MASK;
+ idx |= ALE_POLICER_TBL_WRITE_ENABLE;
+ writel_relaxed(idx, ale->params.ale_regs + ALE_POLICER_TBL_CTL);
+}
+
+/* enables/disables the custom thread value for the specified policer index */
+static void cpsw_ale_policer_thread_idx_enable(struct cpsw_ale *ale, u32 idx,
+ u32 thread_id, bool enable)
+{
+ regmap_field_write(ale->fields[ALE_THREAD_CLASS_INDEX], idx);
+ regmap_field_write(ale->fields[ALE_THREAD_VALUE], thread_id);
+ regmap_field_write(ale->fields[ALE_THREAD_ENABLE], enable ? 1 : 0);
+}
+
+/* Disable all policer entries and thread mappings */
+static void cpsw_ale_policer_reset(struct cpsw_ale *ale)
+{
+ int i;
+
+ for (i = 0; i < ale->params.num_policers ; i++) {
+ cpsw_ale_policer_read_idx(ale, i);
+ regmap_field_write(ale->fields[POL_PORT_MEN], 0);
+ regmap_field_write(ale->fields[POL_PRI_MEN], 0);
+ regmap_field_write(ale->fields[POL_OUI_MEN], 0);
+ regmap_field_write(ale->fields[POL_DST_MEN], 0);
+ regmap_field_write(ale->fields[POL_SRC_MEN], 0);
+ regmap_field_write(ale->fields[POL_OVLAN_MEN], 0);
+ regmap_field_write(ale->fields[POL_IVLAN_MEN], 0);
+ regmap_field_write(ale->fields[POL_ETHERTYPE_MEN], 0);
+ regmap_field_write(ale->fields[POL_IPSRC_MEN], 0);
+ regmap_field_write(ale->fields[POL_IPDST_MEN], 0);
+ regmap_field_write(ale->fields[POL_EN], 0);
+ regmap_field_write(ale->fields[POL_RED_DROP_EN], 0);
+ regmap_field_write(ale->fields[POL_YELLOW_DROP_EN], 0);
+ regmap_field_write(ale->fields[POL_PRIORITY_THREAD_EN], 0);
+
+ cpsw_ale_policer_thread_idx_enable(ale, i, 0, 0);
+ }
+}
+
+/* Default classifier is to map 8 user priorities to N receive channels */
+void cpsw_ale_classifier_setup_default(struct cpsw_ale *ale, int num_rx_ch)
+{
+ int pri, idx;
+ /* IEEE802.1D-2004, Standard for Local and metropolitan area networks
+ * Table G-2 - Traffic type acronyms
+ * Table G-3 - Defining traffic types
+	 * User priority values 1 and 2 effectively communicate a lower
+	 * priority than 0. In the table below, 0 is assigned to a higher
+	 * priority thread than 1 and 2 wherever possible.
+	 * The table maps each user priority to the thread it should be
+	 * sent to for a given number of threads (RX channels). Upper
+	 * threads have higher priority.
+ * e.g. if number of threads is 8 then user priority 0 will map to
+ * pri_thread_map[8-1][0] i.e. thread 2
+ */
+ int pri_thread_map[8][8] = { { 0, 0, 0, 0, 0, 0, 0, 0, },
+ { 0, 0, 0, 0, 1, 1, 1, 1, },
+ { 0, 0, 0, 0, 1, 1, 2, 2, },
+ { 1, 0, 0, 1, 2, 2, 3, 3, },
+ { 1, 0, 0, 1, 2, 3, 4, 4, },
+ { 1, 0, 0, 2, 3, 4, 5, 5, },
+ { 1, 0, 0, 2, 3, 4, 5, 6, },
+ { 2, 0, 1, 3, 4, 5, 6, 7, } };
+
+ cpsw_ale_policer_reset(ale);
+
+ /* use first 8 classifiers to map 8 (DSCP/PCP) priorities to channels */
+ for (pri = 0; pri < 8; pri++) {
+ idx = pri;
+
+ /* Classifier 'idx' match on priority 'pri' */
+ cpsw_ale_policer_read_idx(ale, idx);
+ regmap_field_write(ale->fields[POL_PRI_VAL], pri);
+ regmap_field_write(ale->fields[POL_PRI_MEN], 1);
+ cpsw_ale_policer_write_idx(ale, idx);
+
+ /* Map Classifier 'idx' to thread provided by the map */
+ cpsw_ale_policer_thread_idx_enable(ale, idx,
+ pri_thread_map[num_rx_ch - 1][pri],
+ 1);
+ }
+}
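
To make the table above concrete, here is a minimal sketch (editor's illustration, not part of this patch) of the lookup the default classifier performs: the table is indexed by the number of RX channels minus one, then by the user priority, so with 8 channels priority 0 maps to thread 2 while priority 7 maps to thread 7.

/* Hedged sketch: resolve the RX thread for a given user priority with the
 * same pri_thread_map table used by cpsw_ale_classifier_setup_default().
 */
static int example_pri_to_thread(int num_rx_ch, int pri)
{
	static const int pri_thread_map[8][8] = {
		{ 0, 0, 0, 0, 0, 0, 0, 0 },
		{ 0, 0, 0, 0, 1, 1, 1, 1 },
		{ 0, 0, 0, 0, 1, 1, 2, 2 },
		{ 1, 0, 0, 1, 2, 2, 3, 3 },
		{ 1, 0, 0, 1, 2, 3, 4, 4 },
		{ 1, 0, 0, 2, 3, 4, 5, 5 },
		{ 1, 0, 0, 2, 3, 4, 5, 6 },
		{ 2, 0, 1, 3, 4, 5, 6, 7 },
	};

	/* num_rx_ch is 1..8 and pri is 0..7, as in the driver */
	return pri_thread_map[num_rx_ch - 1][pri];
}
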
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 6779ee111d57..1e4e9a3dd234 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -8,11 +8,14 @@
#ifndef __TI_CPSW_ALE_H__
#define __TI_CPSW_ALE_H__
+struct reg_field;
+
struct cpsw_ale_params {
struct device *dev;
void __iomem *ale_regs;
unsigned long ale_ageout; /* in secs */
unsigned long ale_entries;
+ unsigned long num_policers;
unsigned long ale_ports;
/* NU Switch has specific handling as number of bits in ALE entries
* are different than other versions of ALE. Also there are specific
@@ -20,19 +23,69 @@ struct cpsw_ale_params {
* to identify this hardware.
*/
bool nu_switch_ale;
- /* mask bit used in NU Switch ALE is 3 bits instead of 8 bits. So
- * pass it from caller.
- */
- u32 major_ver_mask;
+ const struct reg_field *reg_fields;
const char *dev_id;
unsigned long bus_freq;
};
struct ale_entry_fld;
+struct regmap;
+
+enum ale_fields {
+ MINOR_VER,
+ MAJOR_VER,
+ ALE_ENTRIES,
+ ALE_POLICERS,
+ POL_PORT_MEN,
+ POL_TRUNK_ID,
+ POL_PORT_NUM,
+ POL_PRI_MEN,
+ POL_PRI_VAL,
+ POL_OUI_MEN,
+ POL_OUI_INDEX,
+ POL_DST_MEN,
+ POL_DST_INDEX,
+ POL_SRC_MEN,
+ POL_SRC_INDEX,
+ POL_OVLAN_MEN,
+ POL_OVLAN_INDEX,
+ POL_IVLAN_MEN,
+ POL_IVLAN_INDEX,
+ POL_ETHERTYPE_MEN,
+ POL_ETHERTYPE_INDEX,
+ POL_IPSRC_MEN,
+ POL_IPSRC_INDEX,
+ POL_IPDST_MEN,
+ POL_IPDST_INDEX,
+ POL_EN,
+ POL_RED_DROP_EN,
+ POL_YELLOW_DROP_EN,
+ POL_YELLOW_THRESH,
+ POL_POL_MATCH_MODE,
+ POL_PRIORITY_THREAD_EN,
+ POL_MAC_ONLY_DEF_DIS,
+ POL_TEST_CLR,
+ POL_TEST_CLR_RED,
+ POL_TEST_CLR_YELLOW,
+ POL_TEST_CLR_SELECTED,
+ POL_TEST_ENTRY,
+ POL_STATUS_HIT,
+ POL_STATUS_HIT_RED,
+ POL_STATUS_HIT_YELLOW,
+ ALE_DEFAULT_THREAD_EN,
+ ALE_DEFAULT_THREAD_VAL,
+ ALE_THREAD_CLASS_INDEX,
+ ALE_THREAD_ENABLE,
+ ALE_THREAD_VALUE,
+ /* terminator */
+ ALE_FIELDS_MAX,
+};
struct cpsw_ale {
struct cpsw_ale_params params;
struct timer_list timer;
+ struct regmap *regmap;
+ struct regmap_field *fields[ALE_FIELDS_MAX];
unsigned long ageout;
u32 version;
u32 features;
@@ -140,5 +193,6 @@ int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask,
int cpsw_ale_vlan_del_modify(struct cpsw_ale *ale, u16 vid, int port_mask);
void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask,
bool add);
+void cpsw_ale_classifier_setup_default(struct cpsw_ale *ale, int num_rx_ch);
#endif
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 1d57b047817b..b54bffda027b 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -426,9 +426,9 @@ enum WX_MSCA_CMD_value {
#define WX_MIN_RXD 128
#define WX_MIN_TXD 128
-/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
-#define WX_REQ_RX_DESCRIPTOR_MULTIPLE 8
-#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 8
+/* Number of Transmit and Receive Descriptors must be a multiple of 128 */
+#define WX_REQ_RX_DESCRIPTOR_MULTIPLE 128
+#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 128
#define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
#define VMDQ_P(p) p
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 43d81b40f761..d64b8abcf018 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -529,8 +529,6 @@ struct skbuf_dma_descriptor {
* supported, the maximum frame size would be 9k. Else it is
* 1522 bytes (assuming support for basic VLAN)
* @rxmem: Stores rx memory size for jumbo frame handling.
- * @csum_offload_on_tx_path: Stores the checksum selection on TX side.
- * @csum_offload_on_rx_path: Stores the checksum selection on RX side.
* @coalesce_count_rx: Store the irq coalesce on RX side.
* @coalesce_usec_rx: IRQ coalesce delay for RX
* @coalesce_count_tx: Store the irq coalesce on TX side.
@@ -609,9 +607,6 @@ struct axienet_local {
u32 max_frm_size;
u32 rxmem;
- int csum_offload_on_tx_path;
- int csum_offload_on_rx_path;
-
u32 coalesce_count_rx;
u32 coalesce_usec_rx;
u32 coalesce_count_tx;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 374dff70ef0d..ea7d7c03f48e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1188,9 +1188,7 @@ static int axienet_rx_poll(struct napi_struct *napi, int budget)
csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
- } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
- skb->protocol == htons(ETH_P_IP) &&
- skb->len > 64) {
+ } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
skb->ip_summed = CHECKSUM_COMPLETE;
}
@@ -2639,38 +2637,28 @@ static int axienet_probe(struct platform_device *pdev)
if (!ret) {
switch (value) {
case 1:
- lp->csum_offload_on_tx_path =
- XAE_FEATURE_PARTIAL_TX_CSUM;
lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
- /* Can checksum TCP/UDP over IPv4. */
- ndev->features |= NETIF_F_IP_CSUM;
+ /* Can checksum any contiguous range */
+ ndev->features |= NETIF_F_HW_CSUM;
break;
case 2:
- lp->csum_offload_on_tx_path =
- XAE_FEATURE_FULL_TX_CSUM;
lp->features |= XAE_FEATURE_FULL_TX_CSUM;
/* Can checksum TCP/UDP over IPv4. */
ndev->features |= NETIF_F_IP_CSUM;
break;
- default:
- lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
}
}
ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
if (!ret) {
switch (value) {
case 1:
- lp->csum_offload_on_rx_path =
- XAE_FEATURE_PARTIAL_RX_CSUM;
lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
+ ndev->features |= NETIF_F_RXCSUM;
break;
case 2:
- lp->csum_offload_on_rx_path =
- XAE_FEATURE_FULL_RX_CSUM;
lp->features |= XAE_FEATURE_FULL_RX_CSUM;
+ ndev->features |= NETIF_F_RXCSUM;
break;
- default:
- lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
}
}
/* For supporting jumbo frames, the Axi Ethernet hardware must have
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index 79232f5cc088..d0036a856039 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -255,6 +255,7 @@ static void netkit_setup(struct net_device *dev)
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->priv_flags |= IFF_PHONY_HEADROOM;
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_DISABLE_NETPOLL;
dev->lltx = true;
dev->ethtool_ops = &netkit_ethtool_ops;
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index efeb643c1373..fc247f479257 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -271,8 +271,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_ENERGY_DET_INT_EN |
DP83822_LINK_QUAL_INT_EN);
- /* Private data pointer is NULL on DP83825 */
- if (!dp83822 || !dp83822->fx_enabled)
+ if (!dp83822->fx_enabled)
misr_status |= DP83822_ANEG_COMPLETE_INT_EN |
DP83822_DUP_MODE_CHANGE_INT_EN |
DP83822_SPEED_CHANGED_INT_EN;
@@ -292,8 +291,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_PAGE_RX_INT_EN |
DP83822_EEE_ERROR_CHANGE_INT_EN);
- /* Private data pointer is NULL on DP83825 */
- if (!dp83822 || !dp83822->fx_enabled)
+ if (!dp83822->fx_enabled)
misr_status |= DP83822_ANEG_ERR_INT_EN |
DP83822_WOL_PKT_INT_EN;
@@ -691,10 +689,9 @@ static int dp83822_read_straps(struct phy_device *phydev)
return 0;
}
-static int dp83822_probe(struct phy_device *phydev)
+static int dp8382x_probe(struct phy_device *phydev)
{
struct dp83822_private *dp83822;
- int ret;
dp83822 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83822),
GFP_KERNEL);
@@ -703,6 +700,20 @@ static int dp83822_probe(struct phy_device *phydev)
phydev->priv = dp83822;
+ return 0;
+}
+
+static int dp83822_probe(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822;
+ int ret;
+
+ ret = dp8382x_probe(phydev);
+ if (ret)
+ return ret;
+
+ dp83822 = phydev->priv;
+
ret = dp83822_read_straps(phydev);
if (ret)
return ret;
@@ -717,14 +728,11 @@ static int dp83822_probe(struct phy_device *phydev)
static int dp83826_probe(struct phy_device *phydev)
{
- struct dp83822_private *dp83822;
-
- dp83822 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83822),
- GFP_KERNEL);
- if (!dp83822)
- return -ENOMEM;
+ int ret;
- phydev->priv = dp83822;
+ ret = dp8382x_probe(phydev);
+ if (ret)
+ return ret;
dp83826_of_init(phydev);
@@ -795,6 +803,7 @@ static int dp83822_resume(struct phy_device *phydev)
PHY_ID_MATCH_MODEL(_id), \
.name = (_name), \
/* PHY_BASIC_FEATURES */ \
+ .probe = dp8382x_probe, \
.soft_reset = dp83822_phy_reset, \
.config_init = dp8382x_config_init, \
.get_wol = dp83822_get_wol, \
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index 5732ad65e7f9..a5ef8fe50704 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -175,6 +175,9 @@
#define LAN887X_LED_LINK_ACT_ANY_SPEED 0x0
/* MX chip top registers */
+#define LAN887X_CHIP_HARD_RST 0xf03e
+#define LAN887X_CHIP_HARD_RST_RESET BIT(0)
+
#define LAN887X_CHIP_SOFT_RST 0xf03f
#define LAN887X_CHIP_SOFT_RST_RESET BIT(0)
@@ -188,9 +191,60 @@
#define LAN887X_EFUSE_READ_DAT9_SGMII_DIS BIT(9)
#define LAN887X_EFUSE_READ_DAT9_MAC_MODE GENMASK(1, 0)
+#define LAN887X_CALIB_CONFIG_100 0x437
+#define LAN887X_CALIB_CONFIG_100_CBL_DIAG_USE_LOCAL_SMPL BIT(5)
+#define LAN887X_CALIB_CONFIG_100_CBL_DIAG_STB_SYNC_MODE BIT(4)
+#define LAN887X_CALIB_CONFIG_100_CBL_DIAG_CLK_ALGN_MODE BIT(3)
+#define LAN887X_CALIB_CONFIG_100_VAL \
+ (LAN887X_CALIB_CONFIG_100_CBL_DIAG_CLK_ALGN_MODE |\
+ LAN887X_CALIB_CONFIG_100_CBL_DIAG_STB_SYNC_MODE |\
+ LAN887X_CALIB_CONFIG_100_CBL_DIAG_USE_LOCAL_SMPL)
+
+#define LAN887X_MAX_PGA_GAIN_100 0x44f
+#define LAN887X_MIN_PGA_GAIN_100 0x450
+#define LAN887X_START_CBL_DIAG_100 0x45a
+#define LAN887X_CBL_DIAG_DONE BIT(1)
+#define LAN887X_CBL_DIAG_START BIT(0)
+#define LAN887X_CBL_DIAG_STOP 0x0
+
+#define LAN887X_CBL_DIAG_TDR_THRESH_100 0x45b
+#define LAN887X_CBL_DIAG_AGC_THRESH_100 0x45c
+#define LAN887X_CBL_DIAG_MIN_WAIT_CONFIG_100 0x45d
+#define LAN887X_CBL_DIAG_MAX_WAIT_CONFIG_100 0x45e
+#define LAN887X_CBL_DIAG_CYC_CONFIG_100 0x45f
+#define LAN887X_CBL_DIAG_TX_PULSE_CONFIG_100 0x460
+#define LAN887X_CBL_DIAG_MIN_PGA_GAIN_100 0x462
+#define LAN887X_CBL_DIAG_AGC_GAIN_100 0x497
+#define LAN887X_CBL_DIAG_POS_PEAK_VALUE_100 0x499
+#define LAN887X_CBL_DIAG_NEG_PEAK_VALUE_100 0x49a
+#define LAN887X_CBL_DIAG_POS_PEAK_TIME_100 0x49c
+#define LAN887X_CBL_DIAG_NEG_PEAK_TIME_100 0x49d
+
+#define MICROCHIP_CABLE_NOISE_MARGIN 20
+#define MICROCHIP_CABLE_TIME_MARGIN 89
+#define MICROCHIP_CABLE_MIN_TIME_DIFF 96
+#define MICROCHIP_CABLE_MAX_TIME_DIFF \
+ (MICROCHIP_CABLE_MIN_TIME_DIFF + MICROCHIP_CABLE_TIME_MARGIN)
+
#define DRIVER_AUTHOR "Nisar Sayed <[email protected]>"
#define DRIVER_DESC "Microchip LAN87XX/LAN937x/LAN887x T1 PHY driver"
+/* TEST_MODE_NORMAL: Non-hybrid results to calculate cable status (open/short/ok)
+ * TEST_MODE_HYBRID: Hybrid results to calculate distance to fault
+ */
+enum cable_diag_mode {
+ TEST_MODE_NORMAL,
+ TEST_MODE_HYBRID
+};
+
+/* CD_TEST_INIT: Cable test is initiated
+ * CD_TEST_DONE: Cable test is done
+ */
+enum cable_diag_state {
+ CD_TEST_INIT,
+ CD_TEST_DONE
+};
+
struct access_ereg_val {
u8 mode;
u8 bank;
@@ -1420,6 +1474,362 @@ static void lan887x_get_strings(struct phy_device *phydev, u8 *data)
ethtool_puts(&data, lan887x_hw_stats[i].string);
}
+static int lan887x_cd_reset(struct phy_device *phydev,
+ enum cable_diag_state cd_done)
+{
+ u16 val;
+ int rc;
+
+ /* Chip hard-reset */
+ rc = phy_write_mmd(phydev, MDIO_MMD_VEND1, LAN887X_CHIP_HARD_RST,
+ LAN887X_CHIP_HARD_RST_RESET);
+ if (rc < 0)
+ return rc;
+
+ /* Wait for reset to complete */
+ rc = phy_read_poll_timeout(phydev, MII_PHYSID2, val,
+ ((val & GENMASK(15, 4)) ==
+ (PHY_ID_LAN887X & GENMASK(15, 4))),
+ 5000, 50000, true);
+ if (rc < 0)
+ return rc;
+
+ if (cd_done == CD_TEST_DONE) {
+ /* Cable diagnostics complete. Restore PHY. */
+ rc = lan887x_phy_setup(phydev);
+ if (rc < 0)
+ return rc;
+
+ rc = lan887x_phy_init(phydev);
+ if (rc < 0)
+ return rc;
+
+ rc = lan887x_phy_reconfig(phydev);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int lan887x_cable_test_prep(struct phy_device *phydev,
+ enum cable_diag_mode mode)
+{
+ static const struct lan887x_regwr_map values[] = {
+ {MDIO_MMD_VEND1, LAN887X_MAX_PGA_GAIN_100, 0x1f},
+ {MDIO_MMD_VEND1, LAN887X_MIN_PGA_GAIN_100, 0x0},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_TDR_THRESH_100, 0x1},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_AGC_THRESH_100, 0x3c},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_MIN_WAIT_CONFIG_100, 0x0},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_MAX_WAIT_CONFIG_100, 0x46},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_CYC_CONFIG_100, 0x5a},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_TX_PULSE_CONFIG_100, 0x44d5},
+ {MDIO_MMD_VEND1, LAN887X_CBL_DIAG_MIN_PGA_GAIN_100, 0x0},
+
+ };
+ int rc;
+
+ rc = lan887x_cd_reset(phydev, CD_TEST_INIT);
+ if (rc < 0)
+ return rc;
+
+ /* Forcing DUT to master mode, as we don't care about
+ * mode during diagnostics
+ */
+ rc = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL,
+ MDIO_PMA_PMD_BT1_CTRL_CFG_MST);
+ if (rc < 0)
+ return rc;
+
+ rc = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x80b0, 0x0038);
+ if (rc < 0)
+ return rc;
+
+ rc = phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CALIB_CONFIG_100, 0,
+ LAN887X_CALIB_CONFIG_100_VAL);
+ if (rc < 0)
+ return rc;
+
+ for (int i = 0; i < ARRAY_SIZE(values); i++) {
+ rc = phy_write_mmd(phydev, values[i].mmd, values[i].reg,
+ values[i].val);
+ if (rc < 0)
+ return rc;
+
+ if (mode &&
+ values[i].reg == LAN887X_CBL_DIAG_MAX_WAIT_CONFIG_100) {
+ rc = phy_write_mmd(phydev, values[i].mmd,
+ values[i].reg, 0xa);
+ if (rc < 0)
+ return rc;
+ }
+ }
+
+ if (mode == TEST_MODE_HYBRID) {
+ rc = phy_modify_mmd(phydev, MDIO_MMD_PMAPMD,
+ LAN887X_AFE_PORT_TESTBUS_CTRL4,
+ BIT(0), BIT(0));
+ if (rc < 0)
+ return rc;
+ }
+
+ /* HW_INIT 100T1, Get DUT running in 100T1 mode */
+ rc = phy_modify_mmd(phydev, MDIO_MMD_VEND1, LAN887X_REG_REG26,
+ LAN887X_REG_REG26_HW_INIT_SEQ_EN,
+ LAN887X_REG_REG26_HW_INIT_SEQ_EN);
+ if (rc < 0)
+ return rc;
+
+	/* Cable diag requires a hard reset and is sensitive to delays.
+	 * A hard reset is expected on entry to and exit from cable diag.
+	 * Wait for 50ms.
+ */
+ msleep(50);
+
+ /* Start cable diag */
+ return phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_START_CBL_DIAG_100,
+ LAN887X_CBL_DIAG_START);
+}
+
+static int lan887x_cable_test_chk(struct phy_device *phydev,
+ enum cable_diag_mode mode)
+{
+ int val;
+ int rc;
+
+ if (mode == TEST_MODE_HYBRID) {
+		/* Cable diag requires a hard reset and is sensitive to delays.
+		 * A hard reset is expected on entry to and exit from cable diag.
+		 * Wait for cable diag to complete.
+		 * Minimum wait time is 50ms if the condition is not a match.
+ */
+ rc = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ LAN887X_START_CBL_DIAG_100, val,
+ ((val & LAN887X_CBL_DIAG_DONE) ==
+ LAN887X_CBL_DIAG_DONE),
+ 50000, 500000, false);
+ if (rc < 0)
+ return rc;
+ } else {
+ rc = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_START_CBL_DIAG_100);
+ if (rc < 0)
+ return rc;
+
+ if ((rc & LAN887X_CBL_DIAG_DONE) != LAN887X_CBL_DIAG_DONE)
+ return -EAGAIN;
+ }
+
+ /* Stop cable diag */
+ return phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_START_CBL_DIAG_100,
+ LAN887X_CBL_DIAG_STOP);
+}
+
+static int lan887x_cable_test_start(struct phy_device *phydev)
+{
+ int rc, ret;
+
+ rc = lan887x_cable_test_prep(phydev, TEST_MODE_NORMAL);
+ if (rc < 0) {
+ ret = lan887x_cd_reset(phydev, CD_TEST_DONE);
+ if (ret < 0)
+ return ret;
+
+ return rc;
+ }
+
+ return 0;
+}
+
+static int lan887x_cable_test_report(struct phy_device *phydev)
+{
+ int pos_peak_cycle, pos_peak_cycle_hybrid, pos_peak_in_phases;
+ int pos_peak_time, pos_peak_time_hybrid, neg_peak_time;
+ int neg_peak_cycle, neg_peak_in_phases;
+ int pos_peak_in_phases_hybrid;
+ int gain_idx, gain_idx_hybrid;
+ int pos_peak_phase_hybrid;
+ int pos_peak, neg_peak;
+ int distance;
+ int detect;
+ int length;
+ int ret;
+ int rc;
+
+ /* Read non-hybrid results */
+ gain_idx = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_AGC_GAIN_100);
+ if (gain_idx < 0) {
+ rc = gain_idx;
+ goto error;
+ }
+
+ pos_peak = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_POS_PEAK_VALUE_100);
+ if (pos_peak < 0) {
+ rc = pos_peak;
+ goto error;
+ }
+
+ neg_peak = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_NEG_PEAK_VALUE_100);
+ if (neg_peak < 0) {
+ rc = neg_peak;
+ goto error;
+ }
+
+ pos_peak_time = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_POS_PEAK_TIME_100);
+ if (pos_peak_time < 0) {
+ rc = pos_peak_time;
+ goto error;
+ }
+
+ neg_peak_time = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_NEG_PEAK_TIME_100);
+ if (neg_peak_time < 0) {
+ rc = neg_peak_time;
+ goto error;
+ }
+
+ /* Calculate non-hybrid values */
+ pos_peak_cycle = (pos_peak_time >> 7) & 0x7f;
+ pos_peak_in_phases = (pos_peak_cycle * 96) + (pos_peak_time & 0x7f);
+ neg_peak_cycle = (neg_peak_time >> 7) & 0x7f;
+ neg_peak_in_phases = (neg_peak_cycle * 96) + (neg_peak_time & 0x7f);
+
+ /* Deriving the status of cable */
+ if (pos_peak > MICROCHIP_CABLE_NOISE_MARGIN &&
+ neg_peak > MICROCHIP_CABLE_NOISE_MARGIN && gain_idx >= 0) {
+ if (pos_peak_in_phases > neg_peak_in_phases &&
+ ((pos_peak_in_phases - neg_peak_in_phases) >=
+ MICROCHIP_CABLE_MIN_TIME_DIFF) &&
+ ((pos_peak_in_phases - neg_peak_in_phases) <
+ MICROCHIP_CABLE_MAX_TIME_DIFF) &&
+ pos_peak_in_phases > 0) {
+ detect = LAN87XX_CABLE_TEST_SAME_SHORT;
+ } else if (neg_peak_in_phases > pos_peak_in_phases &&
+ ((neg_peak_in_phases - pos_peak_in_phases) >=
+ MICROCHIP_CABLE_MIN_TIME_DIFF) &&
+ ((neg_peak_in_phases - pos_peak_in_phases) <
+ MICROCHIP_CABLE_MAX_TIME_DIFF) &&
+ neg_peak_in_phases > 0) {
+ detect = LAN87XX_CABLE_TEST_OPEN;
+ } else {
+ detect = LAN87XX_CABLE_TEST_OK;
+ }
+ } else {
+ detect = LAN87XX_CABLE_TEST_OK;
+ }
+
+ if (detect == LAN87XX_CABLE_TEST_OK) {
+ distance = 0;
+ goto get_len;
+ }
+
+ /* Re-initialize PHY and start cable diag test */
+ rc = lan887x_cable_test_prep(phydev, TEST_MODE_HYBRID);
+ if (rc < 0)
+ goto cd_stop;
+
+ /* Wait for cable diag test completion */
+ rc = lan887x_cable_test_chk(phydev, TEST_MODE_HYBRID);
+ if (rc < 0)
+ goto cd_stop;
+
+ /* Read hybrid results */
+ gain_idx_hybrid = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_AGC_GAIN_100);
+ if (gain_idx_hybrid < 0) {
+ rc = gain_idx_hybrid;
+ goto error;
+ }
+
+ pos_peak_time_hybrid = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_CBL_DIAG_POS_PEAK_TIME_100);
+ if (pos_peak_time_hybrid < 0) {
+ rc = pos_peak_time_hybrid;
+ goto error;
+ }
+
+ /* Calculate hybrid values to derive cable length to fault */
+ pos_peak_cycle_hybrid = (pos_peak_time_hybrid >> 7) & 0x7f;
+ pos_peak_phase_hybrid = pos_peak_time_hybrid & 0x7f;
+ pos_peak_in_phases_hybrid = pos_peak_cycle_hybrid * 96 +
+ pos_peak_phase_hybrid;
+
+ /* Distance to fault calculation.
+ * distance = (peak_in_phases - peak_in_phases_hybrid) *
+ * propagationconstant.
+ * constant to convert number of phases to meters
+ * propagationconstant = 0.015953
+ * (0.6811 * 2.9979 * 156.2499 * 0.0001 * 0.5)
+	 * Applying constant 1.5953 as ethtool further divides by 100 to
+ * convert to meters.
+ */
+ if (detect == LAN87XX_CABLE_TEST_OPEN) {
+ distance = (((pos_peak_in_phases - pos_peak_in_phases_hybrid)
+ * 15953) / 10000);
+ } else if (detect == LAN87XX_CABLE_TEST_SAME_SHORT) {
+ distance = (((neg_peak_in_phases - pos_peak_in_phases_hybrid)
+ * 15953) / 10000);
+ } else {
+ distance = 0;
+ }
+
+get_len:
+ rc = lan887x_cd_reset(phydev, CD_TEST_DONE);
+ if (rc < 0)
+ return rc;
+
+ length = ((u32)distance & GENMASK(15, 0));
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ lan87xx_cable_test_report_trans(detect));
+ ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A, length);
+
+ return 0;
+
+cd_stop:
+ /* Stop cable diag */
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ LAN887X_START_CBL_DIAG_100,
+ LAN887X_CBL_DIAG_STOP);
+ if (ret < 0)
+ return ret;
+
+error:
+ /* Cable diag test failed */
+ ret = lan887x_cd_reset(phydev, CD_TEST_DONE);
+ if (ret < 0)
+ return ret;
+
+ /* Return error in failure case */
+ return rc;
+}
+
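
As a worked illustration of the arithmetic above (editor's sketch; the peak positions are made-up values, not from this patch): a phase difference of 100 between the non-hybrid and hybrid positive peaks gives 100 * 15953 / 10000 = 159, which ethtool reports as 1.59 m once it divides the fault length by 100.

/* Hedged sketch of the distance-to-fault scaling used in
 * lan887x_cable_test_report(): the integer multiply by 15953 / 10000
 * approximates the 0.015953 m/phase propagation constant, producing a
 * length in centimeters.
 */
static int example_fault_distance(int peak_in_phases, int peak_in_phases_hybrid)
{
	/* e.g. 900 - 800 = 100 phases -> 159 (reported as 1.59 m) */
	return (peak_in_phases - peak_in_phases_hybrid) * 15953 / 10000;
}
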
+static int lan887x_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int rc;
+
+ rc = lan887x_cable_test_chk(phydev, TEST_MODE_NORMAL);
+ if (rc < 0) {
+		/* Let the PHY state machine poll again */
+ if (rc == -EAGAIN)
+ return 0;
+ return rc;
+ }
+
+ /* Cable diag test complete */
+ *finished = true;
+
+ /* Retrieve test status and cable length to fault */
+ return lan887x_cable_test_report(phydev);
+}
+
static struct phy_driver microchip_t1_phy_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_LAN87XX),
@@ -1458,6 +1868,7 @@ static struct phy_driver microchip_t1_phy_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_LAN887X),
.name = "Microchip LAN887x T1 PHY",
+ .flags = PHY_POLL_CABLE_TEST,
.probe = lan887x_probe,
.get_features = lan887x_get_features,
.config_init = lan887x_phy_init,
@@ -1468,6 +1879,8 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_status = genphy_c45_read_status,
+ .cable_test_start = lan887x_cable_test_start,
+ .cable_test_get_status = lan887x_cable_test_get_status,
}
};
diff --git a/drivers/net/phy/microchip_t1s.c b/drivers/net/phy/microchip_t1s.c
index 534ca7d1b061..3614839a8e51 100644
--- a/drivers/net/phy/microchip_t1s.c
+++ b/drivers/net/phy/microchip_t1s.c
@@ -268,6 +268,34 @@ static int lan86xx_read_status(struct phy_device *phydev)
return 0;
}
+/* OPEN Alliance 10BASE-T1x compliant MAC-PHYs have both C22 and C45 register
+ * spaces. If the PHY is discovered via the C22 bus protocol, it is assumed to
+ * use C22, and C45 registers are always accessed indirectly through C22
+ * because there is no clean separation between the C22/C45 register spaces
+ * and the C22/C45 MDIO bus protocols. As a result, direct C45 register
+ * access, which could save multiple SPI bus accesses, cannot be used.
+ * To support this feature, set .read_mmd/.write_mmd in the PHY driver to call
+ * .read_c45/.write_c45 in the OPEN Alliance framework
+ * drivers/net/ethernet/oa_tc6.c
+ */
+static int lan865x_phy_read_mmd(struct phy_device *phydev, int devnum,
+ u16 regnum)
+{
+ struct mii_bus *bus = phydev->mdio.bus;
+ int addr = phydev->mdio.addr;
+
+ return __mdiobus_c45_read(bus, addr, devnum, regnum);
+}
+
+static int lan865x_phy_write_mmd(struct phy_device *phydev, int devnum,
+ u16 regnum, u16 val)
+{
+ struct mii_bus *bus = phydev->mdio.bus;
+ int addr = phydev->mdio.addr;
+
+ return __mdiobus_c45_write(bus, addr, devnum, regnum, val);
+}
+
static struct phy_driver microchip_t1s_driver[] = {
{
PHY_ID_MATCH_EXACT(PHY_ID_LAN867X_REVB1),
@@ -285,6 +313,8 @@ static struct phy_driver microchip_t1s_driver[] = {
.features = PHY_BASIC_T1S_P2MP_FEATURES,
.config_init = lan865x_revb0_config_init,
.read_status = lan86xx_read_status,
+ .read_mmd = lan865x_phy_read_mmd,
+ .write_mmd = lan865x_phy_write_mmd,
.get_plca_cfg = genphy_c45_plca_get_cfg,
.set_plca_cfg = genphy_c45_plca_set_cfg,
.get_plca_status = genphy_c45_plca_get_status,
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index ab4e9fc03017..4309317de3d1 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1636,6 +1636,48 @@ static int phylink_register_sfp(struct phylink *pl,
}
/**
+ * phylink_set_fixed_link() - set the fixed link
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * This function is used when the link parameters are known and do not change,
+ * for example a direct MAC-to-MAC or MAC-to-switch connection with no PHY to
+ * negotiate them.
+ *
+ * Returns: zero on success or negative error code.
+ */
+int phylink_set_fixed_link(struct phylink *pl,
+ const struct phylink_link_state *state)
+{
+ const struct phy_setting *s;
+ unsigned long *adv;
+
+ if (pl->cfg_link_an_mode != MLO_AN_PHY || !state ||
+ !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state))
+ return -EINVAL;
+
+ s = phy_lookup_setting(state->speed, state->duplex,
+ pl->supported, true);
+ if (!s)
+ return -EINVAL;
+
+ adv = pl->link_config.advertising;
+ linkmode_zero(adv);
+ linkmode_set_bit(s->bit, adv);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, adv);
+
+ pl->link_config.speed = state->speed;
+ pl->link_config.duplex = state->duplex;
+ pl->link_config.link = 1;
+ pl->link_config.an_complete = 1;
+
+ pl->cfg_link_an_mode = MLO_AN_FIXED;
+ pl->cur_link_an_mode = pl->cfg_link_an_mode;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(phylink_set_fixed_link);
+
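
A minimal usage sketch for the new helper (editor's illustration; the wrapper name and the 1G/full-duplex parameters are assumptions, not taken from this patch): a MAC driver that discovers its link is permanently fixed can convert a stopped MLO_AN_PHY phylink instance before restarting it.

/* Hedged sketch: mark an existing, stopped phylink instance as a fixed
 * 1G full-duplex link. Requires <linux/phylink.h>; 'pl' must have been
 * created with MLO_AN_PHY and phylink_stop() must already have been called.
 */
static int example_make_link_fixed(struct phylink *pl)
{
	struct phylink_link_state state = {
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};

	return phylink_set_fixed_link(pl, &state);
}
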
+/**
* phylink_create() - create a phylink instance
* @config: a pointer to the target &struct phylink_config
* @fwnode: a pointer to a &struct fwnode_handle describing the network
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 61ffa8851d7b..6f4781ec2b36 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -356,6 +356,9 @@ struct receive_queue {
struct xdp_rxq_info xsk_rxq_info;
struct xdp_buff **xsk_buffs;
+
+ /* Do dma by self */
+ bool do_dma;
};
/* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -885,7 +888,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
void *buf;
buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
- if (buf)
+ if (buf && rq->do_dma)
virtnet_rq_unmap(rq, buf, *len);
return buf;
@@ -898,6 +901,11 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
u32 offset;
void *head;
+ if (!rq->do_dma) {
+ sg_init_one(rq->sg, buf, len);
+ return;
+ }
+
head = page_address(rq->alloc_frag.page);
offset = buf - head;
@@ -923,42 +931,44 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
head = page_address(alloc_frag->page);
- dma = head;
+ if (rq->do_dma) {
+ dma = head;
+
+ /* new pages */
+ if (!alloc_frag->offset) {
+ if (rq->last_dma) {
+ /* Now, the new page is allocated, the last dma
+ * will not be used. So the dma can be unmapped
+ * if the ref is 0.
+ */
+ virtnet_rq_unmap(rq, rq->last_dma, 0);
+ rq->last_dma = NULL;
+ }
- /* new pages */
- if (!alloc_frag->offset) {
- if (rq->last_dma) {
- /* Now, the new page is allocated, the last dma
- * will not be used. So the dma can be unmapped
- * if the ref is 0.
- */
- virtnet_rq_unmap(rq, rq->last_dma, 0);
- rq->last_dma = NULL;
- }
+ dma->len = alloc_frag->size - sizeof(*dma);
- dma->len = alloc_frag->size - sizeof(*dma);
+ addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
+ dma->len, DMA_FROM_DEVICE, 0);
+ if (virtqueue_dma_mapping_error(rq->vq, addr))
+ return NULL;
- addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
- dma->len, DMA_FROM_DEVICE, 0);
- if (virtqueue_dma_mapping_error(rq->vq, addr))
- return NULL;
+ dma->addr = addr;
+ dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
- dma->addr = addr;
- dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
+ /* Add a reference to dma to prevent the entire dma from
+ * being released during error handling. This reference
+ * will be freed after the pages are no longer used.
+ */
+ get_page(alloc_frag->page);
+ dma->ref = 1;
+ alloc_frag->offset = sizeof(*dma);
- /* Add a reference to dma to prevent the entire dma from
- * being released during error handling. This reference
- * will be freed after the pages are no longer used.
- */
- get_page(alloc_frag->page);
- dma->ref = 1;
- alloc_frag->offset = sizeof(*dma);
+ rq->last_dma = dma;
+ }
- rq->last_dma = dma;
+ ++dma->ref;
}
- ++dma->ref;
-
buf = head + alloc_frag->offset;
get_page(alloc_frag->page);
@@ -967,19 +977,6 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
return buf;
}
-static void virtnet_rq_set_premapped(struct virtnet_info *vi)
-{
- int i;
-
- /* disable for big mode */
- if (!vi->mergeable_rx_bufs && vi->big_packets)
- return;
-
- for (i = 0; i < vi->max_queue_pairs; i++)
- /* error should never happen */
- BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
-}
-
static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
{
struct virtnet_info *vi = vq->vdev->priv;
@@ -993,7 +990,7 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
return;
}
- if (!vi->big_packets || vi->mergeable_rx_bufs)
+ if (rq->do_dma)
virtnet_rq_unmap(rq, buf, 0);
virtnet_rq_free_buf(vi, rq, buf);
@@ -2430,7 +2427,8 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0) {
- virtnet_rq_unmap(rq, buf, 0);
+ if (rq->do_dma)
+ virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
}
@@ -2544,7 +2542,8 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
ctx = mergeable_len_to_ctx(len + room, headroom);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0) {
- virtnet_rq_unmap(rq, buf, 0);
+ if (rq->do_dma)
+ virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
}
@@ -2701,7 +2700,7 @@ static int virtnet_receive_packets(struct virtnet_info *vi,
}
} else {
while (packets < budget &&
- (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
+ (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
receive_buf(vi, rq, buf, len, NULL, xdp_xmit, stats);
packets++;
}
@@ -5911,7 +5910,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
int i;
for (i = 0; i < vi->max_queue_pairs; i++)
if (vi->rq[i].alloc_frag.page) {
- if (vi->rq[i].last_dma)
+ if (vi->rq[i].do_dma && vi->rq[i].last_dma)
virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
put_page(vi->rq[i].alloc_frag.page);
}
@@ -6109,8 +6108,6 @@ static int init_vqs(struct virtnet_info *vi)
if (ret)
goto err_free;
- virtnet_rq_set_premapped(vi);
-
cpus_read_lock();
virtnet_set_affinity(vi);
cpus_read_unlock();
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index b93a64bf8190..35bfe7232e95 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -1774,7 +1774,7 @@ static ssize_t ath10k_write_simulate_radar(struct file *file,
if (!arvif->is_started)
return -EINVAL;
- ieee80211_radar_detected(ar->hw);
+ ieee80211_radar_detected(ar->hw, NULL);
return count;
}
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index a5da32e87106..646e1737d4c4 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -1437,7 +1437,7 @@ static void ath10k_recalc_radar_detection(struct ath10k *ar)
* by indicating that radar was detected.
*/
ath10k_warn(ar, "failed to start CAC: %d\n", ret);
- ieee80211_radar_detected(ar->hw);
+ ieee80211_radar_detected(ar->hw, NULL);
}
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index fe2344598364..4861179b2217 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -3990,7 +3990,7 @@ static void ath10k_radar_detected(struct ath10k *ar)
if (ar->dfs_block_radar_events)
ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
else
- ieee80211_radar_detected(ar->hw);
+ ieee80211_radar_detected(ar->hw, NULL);
}
static void ath10k_radar_confirmation_work(struct work_struct *work)
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 76faa55fd0f3..09c37e19a168 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -407,11 +407,17 @@ struct ath11k_vif {
bool wpaie_present;
bool bcca_zero_sent;
bool do_not_send_tmpl;
- struct ieee80211_chanctx_conf chanctx;
struct ath11k_arp_ns_offload arp_ns_offload;
struct ath11k_rekey_data rekey_data;
struct ath11k_reg_tpc_power_info reg_tpc_info;
+
+ /* Must be last - ends in a flexible-array member.
+ *
+ * FIXME: Driver should not copy struct ieee80211_chanctx_conf,
+ * especially because it has a flexible array. Find a better way.
+ */
+ struct ieee80211_chanctx_conf chanctx;
};
struct ath11k_vif_iter {
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index 2f6dd69d3be2..65d2bc0687c8 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -1305,18 +1305,6 @@ struct htt_ppdu_stats_user_rate {
#define HTT_TX_INFO_PEERID(_flags) \
FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M, _flags)
-struct htt_tx_ppdu_stats_info {
- struct htt_tlv tlv_hdr;
- u32 tx_success_bytes;
- u32 tx_retry_bytes;
- u32 tx_failed_bytes;
- u32 flags; /* %HTT_PPDU_STATS_TX_INFO_FLAGS_ */
- u16 tx_success_msdus;
- u16 tx_retry_msdus;
- u16 tx_failed_msdus;
- u16 tx_duration; /* united in us */
-} __packed;
-
enum htt_ppdu_stats_usr_compln_status {
HTT_PPDU_STATS_USER_STATUS_OK,
HTT_PPDU_STATS_USER_STATUS_FILTERED,
@@ -1364,17 +1352,6 @@ struct htt_ppdu_stats_usr_cmpltn_ack_ba_status {
u32 success_bytes;
} __packed;
-struct htt_ppdu_stats_usr_cmn_array {
- struct htt_tlv tlv_hdr;
- u32 num_ppdu_stats;
- /* tx_ppdu_stats_info is filled by multiple struct htt_tx_ppdu_stats_info
- * elements.
- * tx_ppdu_stats_info is variable length, with length =
- * number_of_ppdu_stats * sizeof (struct htt_tx_ppdu_stats_info)
- */
- struct htt_tx_ppdu_stats_info tx_ppdu_info[];
-} __packed;
-
struct htt_ppdu_user_stats {
u16 peer_id;
u32 tlv_flags;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 2662092ee00a..87abfa547529 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -8358,7 +8358,7 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
if (ar->dfs_block_radar_events)
ath11k_info(ab, "DFS Radar detected, but ignored as requested\n");
else
- ieee80211_radar_detected(ar->hw);
+ ieee80211_radar_detected(ar->hw, NULL);
exit:
rcu_read_unlock();
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index cdfd43a7321a..7f2e9a9b4097 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -287,7 +287,6 @@ struct ath12k_vif {
int txpower;
bool rsnie_present;
bool wpaie_present;
- struct ieee80211_chanctx_conf chanctx;
u32 key_cipher;
u8 tx_encap_type;
u8 vdev_stats_id;
@@ -295,6 +294,13 @@ struct ath12k_vif {
bool ps;
struct ath12k_vif_cache *cache;
struct ath12k_rekey_data rekey_data;
+
+ /* Must be last - ends in a flexible-array member.
+ *
+ * FIXME: Driver should not copy struct ieee80211_chanctx_conf,
+ * especially because it has a flexible array. Find a better way.
+ */
+ struct ieee80211_chanctx_conf chanctx;
};
struct ath12k_vif_iter {
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index b77497c14ac4..2fb18b83b3ee 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -1495,18 +1495,6 @@ struct htt_ppdu_stats_user_rate {
#define HTT_TX_INFO_PEERID(_flags) \
u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M)
-struct htt_tx_ppdu_stats_info {
- struct htt_tlv tlv_hdr;
- __le32 tx_success_bytes;
- __le32 tx_retry_bytes;
- __le32 tx_failed_bytes;
- __le32 flags; /* %HTT_PPDU_STATS_TX_INFO_FLAGS_ */
- __le16 tx_success_msdus;
- __le16 tx_retry_msdus;
- __le16 tx_failed_msdus;
- __le16 tx_duration; /* united in us */
-} __packed;
-
enum htt_ppdu_stats_usr_compln_status {
HTT_PPDU_STATS_USER_STATUS_OK,
HTT_PPDU_STATS_USER_STATUS_FILTERED,
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index a3248d977532..137394c36460 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -3663,7 +3663,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
struct ath12k *ar, *prev_ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct cfg80211_scan_request *req = &hw_req->req;
- struct ath12k_wmi_scan_req_arg arg = {};
+ struct ath12k_wmi_scan_req_arg *arg = NULL;
int ret;
int i;
bool create = true;
@@ -3745,42 +3745,47 @@ scan:
if (ret)
goto exit;
- ath12k_wmi_start_scan_init(ar, &arg);
- arg.vdev_id = arvif->vdev_id;
- arg.scan_id = ATH12K_SCAN_ID;
+ arg = kzalloc(sizeof(*arg), GFP_KERNEL);
+ if (!arg) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ath12k_wmi_start_scan_init(ar, arg);
+ arg->vdev_id = arvif->vdev_id;
+ arg->scan_id = ATH12K_SCAN_ID;
if (req->ie_len) {
- arg.extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
- if (!arg.extraie.ptr) {
+ arg->extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
+ if (!arg->extraie.ptr) {
ret = -ENOMEM;
goto exit;
}
- arg.extraie.len = req->ie_len;
+ arg->extraie.len = req->ie_len;
}
if (req->n_ssids) {
- arg.num_ssids = req->n_ssids;
- for (i = 0; i < arg.num_ssids; i++)
- arg.ssid[i] = req->ssids[i];
+ arg->num_ssids = req->n_ssids;
+ for (i = 0; i < arg->num_ssids; i++)
+ arg->ssid[i] = req->ssids[i];
} else {
- arg.scan_f_passive = 1;
+ arg->scan_f_passive = 1;
}
if (req->n_channels) {
- arg.num_chan = req->n_channels;
- arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
- GFP_KERNEL);
-
- if (!arg.chan_list) {
+ arg->num_chan = req->n_channels;
+ arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list),
+ GFP_KERNEL);
+ if (!arg->chan_list) {
ret = -ENOMEM;
goto exit;
}
- for (i = 0; i < arg.num_chan; i++)
- arg.chan_list[i] = req->channels[i]->center_freq;
+ for (i = 0; i < arg->num_chan; i++)
+ arg->chan_list[i] = req->channels[i]->center_freq;
}
- ret = ath12k_start_scan(ar, &arg);
+ ret = ath12k_start_scan(ar, arg);
if (ret) {
ath12k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
@@ -3790,14 +3795,15 @@ scan:
/* Add a margin to account for event/command processing */
ieee80211_queue_delayed_work(ath12k_ar_to_hw(ar), &ar->scan.timeout,
- msecs_to_jiffies(arg.max_scan_time +
+ msecs_to_jiffies(arg->max_scan_time +
ATH12K_MAC_SCAN_TIMEOUT_MSECS));
exit:
- kfree(arg.chan_list);
-
- if (req->ie_len)
- kfree(arg.extraie.ptr);
+ if (arg) {
+ kfree(arg->chan_list);
+ kfree(arg->extraie.ptr);
+ kfree(arg);
+ }
mutex_unlock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index a76413320dbf..2cd3ff9b0164 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -6789,7 +6789,7 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
if (ar->dfs_block_radar_events)
ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
else
- ieee80211_radar_detected(ath12k_ar_to_hw(ar));
+ ieee80211_radar_detected(ath12k_ar_to_hw(ar), NULL);
exit:
rcu_read_unlock();
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 11349218bc21..3689e12db9f7 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -280,7 +280,7 @@ ath9k_dfs_process_radar_pulse(struct ath_softc *sc, struct pulse_event *pe)
if (!pd->add_pulse(pd, pe, NULL))
return;
DFS_STAT_INC(sc, radar_detected);
- ieee80211_radar_detected(sc->hw);
+ ieee80211_radar_detected(sc->hw, NULL);
}
/*
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
index 8e18e9b4ef48..426caa057396 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -116,7 +116,7 @@ static ssize_t write_file_simulate_radar(struct file *file,
{
struct ath_softc *sc = file->private_data;
- ieee80211_radar_detected(sc->hw);
+ ieee80211_radar_detected(sc->hw, NULL);
return count;
}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 0c7841f95228..a3733c9b484e 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -716,8 +716,7 @@ static void ath9k_hif_usb_rx_cb(struct urb *urb)
}
resubmit:
- skb_reset_tail_pointer(skb);
- skb_trim(skb, 0);
+ __skb_set_length(skb, 0);
usb_anchor_urb(urb, &hif_dev->rx_submitted);
ret = usb_submit_urb(urb, GFP_ATOMIC);
@@ -754,8 +753,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
case -ESHUTDOWN:
goto free_skb;
default:
- skb_reset_tail_pointer(skb);
- skb_trim(skb, 0);
+ __skb_set_length(skb, 0);
goto resubmit;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 815f6b3c79fc..349aa3439502 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -1135,7 +1135,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
offset = offsetof(struct brcmf_scan_params_v2_le, channel_list) +
n_channels * sizeof(u16);
offset = roundup(offset, sizeof(u32));
- length += sizeof(ssid_le) * n_ssids,
+ length += sizeof(ssid_le) * n_ssids;
ptr = (char *)params_le + offset;
for (i = 0; i < n_ssids; i++) {
memset(&ssid_le, 0, sizeof(ssid_le));
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index 8873904f51ec..59751f123571 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -13,7 +13,7 @@ iwlmvm-y += ptp.o
iwlmvm-y += time-sync.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
-iwlmvm-$(CONFIG_PM) += d3.o
+iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
iwlmvm-$(CONFIG_IWLMEI) += vendor-cmd.o
subdir-ccflags-y += -I $(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 6f5b16eb4902..a327893c6dce 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1232,7 +1232,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
mvm->nvm_data = NULL;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/* fast_resume will be cleared by iwl_mvm_fast_resume */
fast_resume = mvm->fast_resume;
@@ -1254,7 +1254,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
}
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index c9b69c3a61fc..ef07cff203b0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -512,7 +512,7 @@ struct iwl_mvm_vif {
bool bf_enabled;
bool ba_enabled;
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/* WoWLAN GTK rekey data */
struct {
u8 kck[NL80211_KCK_EXT_LEN];
@@ -1178,7 +1178,7 @@ struct iwl_mvm {
struct ieee80211_vif *p2p_device_vif;
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
struct wiphy_wowlan_support wowlan;
int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
@@ -2306,7 +2306,7 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int idx);
extern const struct file_operations iwl_dbgfs_d3_test_ops;
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_fast_suspend(struct iwl_mvm *mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 2cd89ccaef9a..4dd4a9d5c71f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -2155,6 +2155,7 @@ static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode,
iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data);
}
+#ifdef CONFIG_PM_SLEEP
static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
@@ -2163,11 +2164,13 @@ static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode)
clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
iwl_mvm_stop_device(mvm);
-#ifdef CONFIG_PM
mvm->fast_resume = false;
-#endif
mutex_unlock(&mvm->mutex);
}
+#else
+static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode)
+{}
+#endif
#define IWL_MVM_COMMON_OPS \
/* these could be differentiated */ \
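The ops.c change above follows the usual pattern for configuration-gated callbacks: the real body is compiled only when the feature is enabled, and an empty stub with the same signature keeps the ops table valid otherwise. A small, hedged illustration of that shape; FEATURE_SLEEP and device_powered_off are made-up names, not the driver's symbols.

```c
#include <stdio.h>

/* #define FEATURE_SLEEP 1 */	/* enable to compile the real handler */

#ifdef FEATURE_SLEEP
static void device_powered_off(void)
{
	printf("flushing state before power-off\n");
}
#else
/* Stub keeps callers and ops tables unchanged when the feature is off. */
static void device_powered_off(void)
{
}
#endif

int main(void)
{
	device_powered_off();
	return 0;
}
```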
diff --git a/drivers/net/wireless/marvell/libertas/cmd.h b/drivers/net/wireless/marvell/libertas/cmd.h
index 3c193074662b..d7be232f5739 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.h
+++ b/drivers/net/wireless/marvell/libertas/cmd.h
@@ -116,11 +116,6 @@ int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
int8_t p2, int usesnr);
-int lbs_set_data_rate(struct lbs_private *priv, u8 rate);
-
-int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
- uint16_t cmd_action);
-
int lbs_set_tx_power(struct lbs_private *priv, s16 dbm);
int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep);
diff --git a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
index 631b5da09f86..a5d4c09fb918 100644
--- a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
+++ b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
@@ -484,12 +484,9 @@ void lbtf_complete_command(struct lbtf_private *priv, struct cmd_ctrl_node *cmd,
void lbtf_cmd_response_rx(struct lbtf_private *priv);
/* main.c */
-struct chan_freq_power *lbtf_get_region_cfp_table(u8 region,
- int *cfp_no);
struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev,
const struct lbtf_ops *ops);
int lbtf_remove_card(struct lbtf_private *priv);
-int lbtf_start_card(struct lbtf_private *priv);
int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb);
void lbtf_send_tx_feedback(struct lbtf_private *priv, u8 retrycnt, u8 fail);
void lbtf_bcn_sent(struct lbtf_private *priv);
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index b90f922f1cdc..032b93a41d99 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -117,12 +117,12 @@ void mwifiex_dfs_cac_work_queue(struct work_struct *work)
dfs_cac_work);
chandef = priv->dfs_chandef;
- if (priv->wdev.cac_started) {
+ if (priv->wdev.links[0].cac_started) {
mwifiex_dbg(priv->adapter, MSG,
"CAC timer finished; No radar detected\n");
cfg80211_cac_event(priv->netdev, &chandef,
NL80211_RADAR_CAC_FINISHED,
- GFP_KERNEL);
+ GFP_KERNEL, 0);
}
}
@@ -174,7 +174,7 @@ int mwifiex_stop_radar_detection(struct mwifiex_private *priv,
*/
void mwifiex_abort_cac(struct mwifiex_private *priv)
{
- if (priv->wdev.cac_started) {
+ if (priv->wdev.links[0].cac_started) {
if (mwifiex_stop_radar_detection(priv, &priv->dfs_chandef))
mwifiex_dbg(priv->adapter, ERROR,
"failed to stop CAC in FW\n");
@@ -182,7 +182,8 @@ void mwifiex_abort_cac(struct mwifiex_private *priv)
"Aborting delayed work for CAC.\n");
cancel_delayed_work_sync(&priv->dfs_cac_work);
cfg80211_cac_event(priv->netdev, &priv->dfs_chandef,
- NL80211_RADAR_CAC_ABORTED, GFP_KERNEL);
+ NL80211_RADAR_CAC_ABORTED, GFP_KERNEL,
+ 0);
}
}
@@ -221,7 +222,7 @@ int mwifiex_11h_handle_chanrpt_ready(struct mwifiex_private *priv,
cfg80211_cac_event(priv->netdev,
&priv->dfs_chandef,
NL80211_RADAR_DETECTED,
- GFP_KERNEL);
+ GFP_KERNEL, 0);
}
break;
default:
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 5209e1c7d2e7..fca3eea7ee84 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1906,7 +1906,7 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_sta_node *sta_node;
u8 deauth_mac[ETH_ALEN];
- if (!priv->bss_started && priv->wdev.cac_started) {
+ if (!priv->bss_started && priv->wdev.links[0].cac_started) {
mwifiex_dbg(priv->adapter, INFO, "%s: abort CAC!\n", __func__);
mwifiex_abort_cac(priv);
}
@@ -4038,7 +4038,7 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
return -EBUSY;
}
- if (priv->wdev.cac_started)
+ if (priv->wdev.links[0].cac_started)
return -EBUSY;
if (cfg80211_chandef_identical(&params->chandef,
@@ -4205,7 +4205,7 @@ static int
mwifiex_cfg80211_start_radar_detection(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms)
+ u32 cac_time_ms, int link_id)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
struct mwifiex_radar_params radar_params;
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index e91def0afa14..d03129d5d24e 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -1627,7 +1627,7 @@ struct host_cmd_ds_802_11_scan_rsp {
struct host_cmd_ds_802_11_scan_ext {
u32 reserved;
- u8 tlv_buffer[1];
+ u8 tlv_buffer[];
} __packed;
struct mwifiex_ie_types_bss_mode {
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 010e6ff91457..cab889af4c4a 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -2534,8 +2534,7 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
ext_scan_resp = &resp->params.ext_scan;
tlv = (void *)ext_scan_resp->tlv_buffer;
- buf_left = le16_to_cpu(resp->size) - (sizeof(*ext_scan_resp) + S_DS_GEN
- - 1);
+ buf_left = le16_to_cpu(resp->size) - (sizeof(*ext_scan_resp) + S_DS_GEN);
while (buf_left >= sizeof(struct mwifiex_ie_types_header)) {
type = le16_to_cpu(tlv->type);
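Replacing the one-element tlv_buffer[1] with a C99 flexible array member is what lets the follow-up hunk drop the "- 1" from the buffer-length arithmetic: sizeof() no longer counts a phantom byte of payload. A standalone illustration of the layout difference; the struct names here are invented.

```c
#include <stdio.h>
#include <stdint.h>

struct rsp_old {
	uint32_t reserved;
	uint8_t  tlv_buffer[1];	/* legacy one-element array */
};

struct rsp_new {
	uint32_t reserved;
	uint8_t  tlv_buffer[];	/* flexible array member */
};

int main(void)
{
	/* The old layout includes one payload byte (plus any padding) in
	 * sizeof(), so length math needed a "- 1" correction; the new
	 * layout counts only the fixed header. */
	printf("old header size: %zu\n", sizeof(struct rsp_old));
	printf("new header size: %zu\n", sizeof(struct rsp_new));
	return 0;
}
```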
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index bb291fe314fb..9d5561f44134 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -929,14 +929,19 @@ void mt76_update_survey(struct mt76_phy *phy)
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
-void mt76_set_channel(struct mt76_phy *phy)
+int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
+ bool offchannel)
{
struct mt76_dev *dev = phy->dev;
- struct ieee80211_hw *hw = phy->hw;
- struct cfg80211_chan_def *chandef = &hw->conf.chandef;
- bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
int timeout = HZ / 5;
+ int ret;
+ cancel_delayed_work_sync(&phy->mac_work);
+
+ mutex_lock(&dev->mutex);
+ set_bit(MT76_RESET, &phy->state);
+
+ mt76_worker_disable(&dev->tx_worker);
wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
mt76_update_survey(phy);
@@ -946,14 +951,34 @@ void mt76_set_channel(struct mt76_phy *phy)
phy->chandef = *chandef;
phy->chan_state = mt76_channel_state(phy, chandef->chan);
+ phy->offchannel = offchannel;
if (!offchannel)
phy->main_chan = chandef->chan;
if (chandef->chan != phy->main_chan)
memset(phy->chan_state, 0, sizeof(*phy->chan_state));
+ mt76_worker_enable(&dev->tx_worker);
+
+ ret = dev->drv->set_channel(phy);
+
+ clear_bit(MT76_RESET, &phy->state);
+ mt76_worker_schedule(&dev->tx_worker);
+
+ mutex_unlock(&dev->mutex);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(mt76_set_channel);
+
+int mt76_update_channel(struct mt76_phy *phy)
+{
+ struct ieee80211_hw *hw = phy->hw;
+ struct cfg80211_chan_def *chandef = &hw->conf.chandef;
+ bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
+
+ return mt76_set_channel(phy, chandef, offchannel);
+}
+EXPORT_SYMBOL_GPL(mt76_update_channel);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
@@ -1484,21 +1509,32 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
+ enum mt76_sta_event ev;
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE)
return mt76_sta_add(phy, vif, sta);
- if (old_state == IEEE80211_STA_AUTH &&
- new_state == IEEE80211_STA_ASSOC &&
- dev->drv->sta_assoc)
- dev->drv->sta_assoc(dev, vif, sta);
-
if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)
mt76_sta_remove(dev, vif, sta);
- return 0;
+ if (!dev->drv->sta_event)
+ return 0;
+
+ if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC)
+ ev = MT76_STA_EVENT_ASSOC;
+ else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED)
+ ev = MT76_STA_EVENT_AUTHORIZE;
+ else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH)
+ ev = MT76_STA_EVENT_DISASSOC;
+ else
+ return 0;
+
+ return dev->drv->sta_event(dev, vif, sta, ev);
}
EXPORT_SYMBOL_GPL(mt76_sta_state);
@@ -1521,6 +1557,7 @@ void mt76_wcid_init(struct mt76_wcid *wcid)
{
INIT_LIST_HEAD(&wcid->tx_list);
skb_queue_head_init(&wcid->tx_pending);
+ skb_queue_head_init(&wcid->tx_offchannel);
INIT_LIST_HEAD(&wcid->list);
idr_init(&wcid->pktid);
@@ -1529,7 +1566,7 @@ EXPORT_SYMBOL_GPL(mt76_wcid_init);
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
- struct mt76_phy *phy = dev->phys[wcid->phy_idx];
+ struct mt76_phy *phy = mt76_dev_phy(dev, wcid->phy_idx);
struct ieee80211_hw *hw;
struct sk_buff_head list;
struct sk_buff *skb;
@@ -1697,14 +1734,15 @@ int mt76_get_rate(struct mt76_dev *dev,
struct ieee80211_supported_band *sband,
int idx, bool cck)
{
+ bool is_2g = sband->band == NL80211_BAND_2GHZ;
int i, offset = 0, len = sband->n_bitrates;
if (cck) {
- if (sband != &dev->phy.sband_2g.sband)
+ if (!is_2g)
return 0;
idx &= ~BIT(2); /* short preamble */
- } else if (sband == &dev->phy.sband_2g.sband) {
+ } else if (is_2g) {
offset = 4;
}
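mt76_sta_state() above now folds the three interesting mac80211 state transitions into a single driver callback taking an event enum. A compact userspace model of that transition-to-event mapping follows; enum and function names are illustrative, not the mt76 API.

```c
#include <stdio.h>

enum sta_state { STA_NOTEXIST, STA_NONE, STA_AUTH, STA_ASSOC, STA_AUTHORIZED };
enum sta_event { EVT_NONE, EVT_ASSOC, EVT_AUTHORIZE, EVT_DISASSOC };

/* Map an (old, new) station state pair to the one event the driver
 * cares about; every other transition is handled generically. */
static enum sta_event sta_transition_event(enum sta_state old_state,
					   enum sta_state new_state)
{
	if (old_state == STA_AUTH && new_state == STA_ASSOC)
		return EVT_ASSOC;
	if (old_state == STA_ASSOC && new_state == STA_AUTHORIZED)
		return EVT_AUTHORIZE;
	if (old_state == STA_ASSOC && new_state == STA_AUTH)
		return EVT_DISASSOC;
	return EVT_NONE;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       sta_transition_event(STA_AUTH, STA_ASSOC),
	       sta_transition_event(STA_ASSOC, STA_AUTHORIZED),
	       sta_transition_event(STA_ASSOC, STA_AUTH),
	       sta_transition_event(STA_NONE, STA_NOTEXIST));
	return 0;
}
```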
diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c
index a8cafa39a56d..98da82b74094 100644
--- a/drivers/net/wireless/mediatek/mt76/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mcu.c
@@ -73,6 +73,8 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
int cmd, bool wait_resp,
struct sk_buff **ret_skb)
{
+ unsigned int retry = 0;
+ struct sk_buff *orig_skb = NULL;
unsigned long expires;
int ret, seq;
@@ -81,6 +83,14 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
mutex_lock(&dev->mcu.mutex);
+ if (dev->mcu_ops->mcu_skb_prepare_msg) {
+ ret = dev->mcu_ops->mcu_skb_prepare_msg(dev, skb, cmd, &seq);
+ if (ret < 0)
+ goto out;
+ }
+
+retry:
+ orig_skb = skb_get(skb);
ret = dev->mcu_ops->mcu_skb_send_msg(dev, skb, cmd, &seq);
if (ret < 0)
goto out;
@@ -94,6 +104,14 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
do {
skb = mt76_mcu_get_response(dev, expires);
+ if (!skb && !test_bit(MT76_MCU_RESET, &dev->phy.state) &&
+ retry++ < dev->mcu_ops->max_retry) {
+ dev_err(dev->dev, "Retry message %08x (seq %d)\n",
+ cmd, seq);
+ skb = orig_skb;
+ goto retry;
+ }
+
ret = dev->mcu_ops->mcu_parse_response(dev, cmd, skb, seq);
if (!ret && ret_skb)
*ret_skb = skb;
@@ -101,7 +119,9 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
dev_kfree_skb(skb);
} while (ret == -EAGAIN);
+
out:
+ dev_kfree_skb(orig_skb);
mutex_unlock(&dev->mcu.mutex);
return ret;
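The mcu.c hunk keeps an extra reference to the request skb (skb_get()) so the same message can be resent when no response arrives, and it drops that reference exactly once at the exit label; the send hook itself consumes one reference per transmission. Below is a simplified userspace model of that retry-with-refcount shape, using a toy buffer type instead of sk_buff, with invented names throughout.

```c
#include <stdio.h>
#include <stdlib.h>

struct buf {
	int refs;
	const char *payload;
};

static struct buf *buf_get(struct buf *b) { b->refs++; return b; }

static void buf_put(struct buf *b)
{
	if (b && --b->refs == 0)
		free(b);
}

/* The transport consumes one reference per transmission attempt, just
 * like the driver's send hook frees the skb it was handed. */
static int transport_send(struct buf *b, int attempt)
{
	printf("tx \"%s\" (attempt %d)\n", b->payload, attempt);
	buf_put(b);
	return attempt >= 1 ? 0 : -1;	/* pretend the first attempt times out */
}

/* Takes ownership of msg; keeps an extra reference so the same message
 * can be retransmitted, and drops that reference exactly once on exit. */
static int send_with_retry(struct buf *msg, int max_retry)
{
	struct buf *orig;
	int attempt = 0, ret;

retry:
	orig = buf_get(msg);
	ret = transport_send(msg, attempt);
	if (ret && attempt++ < max_retry) {
		msg = orig;
		goto retry;
	}
	buf_put(orig);
	return ret;
}

int main(void)
{
	struct buf *msg = calloc(1, sizeof(*msg));

	msg->refs = 1;
	msg->payload = "GET_VERSION";
	return send_with_retry(msg, 3) ? 1 : 0;
}
```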
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 4a58a78d5ed2..0b75a45ad2e8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -162,8 +162,8 @@ enum mt76_dfs_state {
struct mt76_queue_buf {
dma_addr_t addr;
- u16 len;
- bool skip_unmap;
+ u16 len:15,
+ skip_unmap:1;
};
struct mt76_tx_info {
@@ -230,11 +230,14 @@ struct mt76_queue {
};
struct mt76_mcu_ops {
+ unsigned int max_retry;
u32 headroom;
u32 tailroom;
int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
int len, bool wait_resp);
+ int (*mcu_skb_prepare_msg)(struct mt76_dev *dev, struct sk_buff *skb,
+ int cmd, int *seq);
int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
int cmd, int *seq);
int (*mcu_parse_response)(struct mt76_dev *dev, int cmd,
@@ -347,6 +350,7 @@ struct mt76_wcid {
u8 hw_key_idx2;
u8 sta:1;
+ u8 sta_disabled:1;
u8 amsdu:1;
u8 phy_idx:2;
u8 link_id:4;
@@ -361,6 +365,7 @@ struct mt76_wcid {
struct list_head tx_list;
struct sk_buff_head tx_pending;
+ struct sk_buff_head tx_offchannel;
struct list_head list;
struct idr pktid;
@@ -466,6 +471,12 @@ enum {
MT76_STATE_WED_RESET,
};
+enum mt76_sta_event {
+ MT76_STA_EVENT_ASSOC,
+ MT76_STA_EVENT_AUTHORIZE,
+ MT76_STA_EVENT_DISASSOC,
+};
+
struct mt76_hw_cap {
bool has_2ghz;
bool has_5ghz;
@@ -487,6 +498,7 @@ struct mt76_driver_ops {
u8 mcs_rates;
void (*update_survey)(struct mt76_phy *phy);
+ int (*set_channel)(struct mt76_phy *phy);
int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
@@ -511,8 +523,8 @@ struct mt76_driver_ops {
int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
- void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+ int (*sta_event)(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -768,6 +780,7 @@ struct mt76_phy {
struct cfg80211_chan_def chandef;
struct ieee80211_channel *main_chan;
+ bool offchannel;
struct mt76_channel_state *chan_state;
enum mt76_dfs_state dfs_state;
@@ -1370,7 +1383,7 @@ void mt76_release_buffered_frames(struct ieee80211_hw *hw,
enum ieee80211_frame_release_type reason,
bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
-void mt76_set_channel(struct mt76_phy *phy);
+int mt76_update_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_phy *phy);
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
@@ -1484,6 +1497,8 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_phy *phy);
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_entry *e);
+int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
+ bool offchannel);
/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
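The mt76_queue_buf change in the header above folds the skip_unmap flag into the top bit of the 16-bit length field, so the flag costs no extra storage. A tiny demonstration of the layout difference as seen with GCC or Clang on typical ABIs; the struct names are not the driver's.

```c
#include <stdint.h>
#include <stdio.h>

struct qbuf_separate {
	uint16_t len;
	_Bool skip_unmap;	/* takes its own byte plus padding */
};

struct qbuf_packed {
	uint16_t len:15,
		 skip_unmap:1;	/* shares storage with the length */
};

int main(void)
{
	printf("separate: %zu bytes, packed: %zu bytes\n",
	       sizeof(struct qbuf_separate), sizeof(struct qbuf_packed));
	return 0;
}
```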
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
index c223f7c19e6d..6457ee06bb5a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
@@ -107,7 +107,7 @@ void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t)
struct sk_buff *skb;
int i, nframes;
- if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (dev->mphy.offchannel)
return;
data.dev = dev;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index ea017f22fff2..863e5770df51 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -29,7 +29,7 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
struct ieee80211_sta *sta;
struct mt7603_sta *msta;
struct mt76_wcid *wcid;
- u8 tid = 0, hwq = 0;
+ u8 qid, tid = 0, hwq = 0;
void *priv;
int idx;
u32 val;
@@ -57,7 +57,7 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
if (ieee80211_is_data_qos(hdr->frame_control)) {
tid = *ieee80211_get_qos_ctl(hdr) &
IEEE80211_QOS_CTL_TAG1D_MASK;
- u8 qid = tid_to_ac[tid];
+ qid = tid_to_ac[tid];
hwq = wmm_queue_map[qid];
skb_set_queue_mapping(skb, qid);
} else if (ieee80211_is_data(hdr->frame_control)) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
index d951cb81df83..f5a6b03bc61d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
@@ -181,6 +181,7 @@ int mt7603_eeprom_init(struct mt7603_dev *dev)
is_mt7688(dev))
dev->mphy.antenna_mask = 1;
+ dev->mphy.chainmask = dev->mphy.antenna_mask;
mt76_eeprom_override(&dev->mphy);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index 6c55c72f28a2..86617a3e4328 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -15,9 +15,10 @@ const struct mt76_driver_ops mt7603_drv_ops = {
.rx_poll_complete = mt7603_rx_poll_complete,
.sta_ps = mt7603_sta_ps,
.sta_add = mt7603_sta_add,
- .sta_assoc = mt7603_sta_assoc,
+ .sta_event = mt7603_sta_event,
.sta_remove = mt7603_sta_remove,
.update_survey = mt7603_update_channel,
+ .set_channel = mt7603_set_channel,
};
static void
@@ -456,11 +457,13 @@ mt7603_init_txpower(struct mt7603_dev *dev,
int target_power = eeprom[MT_EE_TX_POWER_0_START_2G + 2] & ~BIT(7);
u8 *rate_power = &eeprom[MT_EE_TX_POWER_CCK];
bool ext_pa = eeprom[MT_EE_NIC_CONF_0 + 1] & BIT(1);
+ u8 ext_pa_pwr;
int max_offset, cur_offset;
int i;
- if (ext_pa && is_mt7603(dev))
- target_power = eeprom[MT_EE_TX_POWER_TSSI_OFF] & ~BIT(7);
+ ext_pa_pwr = eeprom[MT_EE_TX_POWER_TSSI_OFF];
+ if (ext_pa && is_mt7603(dev) && ext_pa_pwr != 0 && ext_pa_pwr != 0xff)
+ target_power = ext_pa_pwr & ~BIT(7);
if (target_power & BIT(6))
target_power = -(target_power & GENMASK(5, 0));
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index f35fa643c0da..574f74ad325d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -133,30 +133,24 @@ void mt7603_init_edcca(struct mt7603_dev *dev)
mt7603_edcca_set_strict(dev, false);
}
-static int
-mt7603_set_channel(struct ieee80211_hw *hw, struct cfg80211_chan_def *def)
+int mt7603_set_channel(struct mt76_phy *mphy)
{
- struct mt7603_dev *dev = hw->priv;
+ struct mt7603_dev *dev = container_of(mphy->dev, struct mt7603_dev, mt76);
+ struct cfg80211_chan_def *def = &mphy->chandef;
+
u8 *rssi_data = (u8 *)dev->mt76.eeprom.data;
int idx, ret;
u8 bw = MT_BW_20;
bool failed = false;
- ieee80211_stop_queues(hw);
- cancel_delayed_work_sync(&dev->mphy.mac_work);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
- mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mphy.state);
-
mt7603_beacon_set_timer(dev, -1, 0);
- mt76_set_channel(&dev->mphy);
mt7603_mac_stop(dev);
if (def->width == NL80211_CHAN_WIDTH_40)
bw = MT_BW_40;
- dev->mphy.chandef = *def;
mt76_rmw_field(dev, MT_AGG_BWCR, MT_AGG_BWCR_BW, bw);
ret = mt7603_mcu_set_channel(dev);
if (ret) {
@@ -180,10 +174,6 @@ mt7603_set_channel(struct ieee80211_hw *hw, struct cfg80211_chan_def *def)
mt7603_mac_set_timing(dev);
mt7603_mac_start(dev);
- clear_bit(MT76_RESET, &dev->mphy.state);
-
- mt76_txq_schedule_all(&dev->mphy);
-
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
msecs_to_jiffies(MT7603_WATCHDOG_TIME));
@@ -199,17 +189,14 @@ mt7603_set_channel(struct ieee80211_hw *hw, struct cfg80211_chan_def *def)
mt7603_init_edcca(dev);
out:
- if (!(mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+ if (!mphy->offchannel)
mt7603_beacon_set_timer(dev, -1, dev->mt76.beacon_int);
- mutex_unlock(&dev->mt76.mutex);
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
if (failed)
mt7603_mac_work(&dev->mphy.mac_work.work);
- ieee80211_wake_queues(hw);
-
return ret;
}
@@ -227,7 +214,7 @@ static int mt7603_set_sar_specs(struct ieee80211_hw *hw,
if (err)
return err;
- return mt7603_set_channel(hw, &mphy->chandef);
+ return mt76_update_channel(mphy);
}
static int
@@ -238,7 +225,7 @@ mt7603_config(struct ieee80211_hw *hw, u32 changed)
if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
IEEE80211_CONF_CHANGE_POWER))
- ret = mt7603_set_channel(hw, &hw->conf.chandef);
+ ret = mt76_update_channel(&dev->mphy);
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
mutex_lock(&dev->mt76.mutex);
@@ -368,13 +355,19 @@ mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
return ret;
}
-void
-mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+int
+mt7603_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
- mt7603_wtbl_update_cap(dev, sta);
+ if (ev == MT76_STA_EVENT_ASSOC) {
+ mutex_lock(&dev->mt76.mutex);
+ mt7603_wtbl_update_cap(dev, sta);
+ mutex_unlock(&dev->mt76.mutex);
+ }
+
+ return 0;
}
void
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
index 9e58df7042ad..55a034ccbacd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -213,6 +213,7 @@ void mt7603_mac_sta_poll(struct mt7603_dev *dev);
void mt7603_pse_client_reset(struct mt7603_dev *dev);
+int mt7603_set_channel(struct mt76_phy *mphy);
int mt7603_mcu_set_channel(struct mt7603_dev *dev);
int mt7603_mcu_set_eeprom(struct mt7603_dev *dev);
void mt7603_mcu_exit(struct mt7603_dev *dev);
@@ -245,8 +246,8 @@ void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
void mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
int mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-void mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+int mt7603_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index f7722f67db57..66ba3be27343 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -56,6 +56,9 @@ int mt7615_thermal_init(struct mt7615_dev *dev)
name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7615_%s",
wiphy_name(wiphy));
+ if (!name)
+ return -ENOMEM;
+
hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, dev,
mt7615_hwmon_groups);
return PTR_ERR_OR_ZERO(hwmon);
@@ -319,7 +322,7 @@ void mt7615_init_work(struct mt7615_dev *dev)
mt7615_mcu_set_eeprom(dev);
mt7615_mac_init(dev);
mt7615_phy_init(dev);
- mt7615_mcu_del_wtbl_all(dev);
+ mt76_connac_mcu_del_wtbl_all(&dev->mt76);
mt7615_check_offload_capability(dev);
}
EXPORT_SYMBOL_GPL(mt7615_init_work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 50e262c1622f..376975388007 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -282,19 +282,14 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
mt76_wcid_cleanup(&dev->mt76, &mvif->sta.wcid);
}
-int mt7615_set_channel(struct mt7615_phy *phy)
+int mt7615_set_channel(struct mt76_phy *mphy)
{
+ struct mt7615_phy *phy = mphy->priv;
struct mt7615_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
int ret;
- cancel_delayed_work_sync(&phy->mt76->mac_work);
-
- mt7615_mutex_acquire(dev);
-
- set_bit(MT76_RESET, &phy->mt76->state);
-
- mt76_set_channel(phy->mt76);
+ mt76_connac_pm_wake(mphy, &dev->pm);
if (is_mt7615(&dev->mt76) && dev->flash_eeprom) {
ret = mt7615_mcu_apply_rx_dcoc(phy);
@@ -325,11 +320,8 @@ int mt7615_set_channel(struct mt7615_phy *phy)
phy->chfreq = mt76_rr(dev, MT_CHFREQ(ext_phy));
out:
- clear_bit(MT76_RESET, &phy->mt76->state);
+ mt76_connac_power_save_sched(mphy, &dev->pm);
- mt7615_mutex_release(dev);
-
- mt76_worker_schedule(&dev->mt76.tx_worker);
if (!mt76_testmode_enabled(phy->mt76)) {
unsigned long timeout = mt7615_get_macwork_timeout(dev);
@@ -339,6 +331,7 @@ out:
return ret;
}
+EXPORT_SYMBOL_GPL(mt7615_set_channel);
static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
@@ -425,11 +418,7 @@ static int mt7615_set_sar_specs(struct ieee80211_hw *hw,
if (mt7615_firmware_offload(phy->dev))
return mt76_connac_mcu_set_rate_txpower(phy->mt76);
- ieee80211_stop_queues(hw);
- err = mt7615_set_channel(phy);
- ieee80211_wake_queues(hw);
-
- return err;
+ return mt76_update_channel(phy->mt76);
}
static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
@@ -448,9 +437,7 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
mt7615_mutex_release(dev);
}
#endif
- ieee80211_stop_queues(hw);
- ret = mt7615_set_channel(phy);
- ieee80211_wake_queues(hw);
+ ret = mt76_update_channel(phy->mt76);
}
mt7615_mutex_acquire(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index d50d967828be..96e34277fece 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -394,7 +394,7 @@ mt7615_mcu_rx_radar_detected(struct mt7615_dev *dev, struct sk_buff *skb)
if (mt76_phy_dfs_state(mphy) < MT_DFS_STATE_CAC)
return;
- ieee80211_radar_detected(mphy->hw);
+ ieee80211_radar_detected(mphy->hw, NULL);
dev->hw_pattern++;
}
@@ -847,6 +847,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct wtbl_req_hdr *wtbl_hdr;
struct mt7615_sta *msta;
bool new_entry = true;
+ int conn_state;
int cmd, err;
msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta;
@@ -863,8 +864,9 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
else
mvif->sta_added = true;
}
+ conn_state = enable ? CONN_STATE_PORT_SECURE : CONN_STATE_DISCONNECT;
mt76_connac_mcu_sta_basic_tlv(&dev->mt76, sskb, vif, link_sta,
- enable, new_entry);
+ conn_state, new_entry);
if (enable && sta)
mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0,
MT76_STA_INFO_STATE_ASSOC);
@@ -1878,16 +1880,6 @@ out:
sizeof(req), true);
}
-int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
-{
- struct wtbl_req_hdr req = {
- .operation = WTBL_RESET_ALL,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(WTBL_UPDATE),
- &req, sizeof(req), true);
-}
-
int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val)
{
struct {
@@ -2151,7 +2143,7 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
req.switch_reason = CH_SWITCH_NORMAL;
- else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ else if (phy->mt76->offchannel)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
NL80211_IFTYPE_AP))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index 87a956ea3ad7..dbb2c82407df 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -182,6 +182,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
.update_survey = mt7615_update_channel,
+ .set_channel = mt7615_set_channel,
};
struct mt76_bus_ops *bus_ops;
struct ieee80211_ops *ops;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index a20322aae967..530da48ce3ea 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -399,7 +399,6 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
struct ieee80211_tx_rate *rates);
void mt7615_pm_wake_work(struct work_struct *work);
void mt7615_pm_power_save_work(struct work_struct *work);
-int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev);
int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd);
int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
const struct ieee80211_tx_queue_params *params);
@@ -457,7 +456,7 @@ void mt7615_roc_work(struct work_struct *work);
void mt7615_roc_timer(struct timer_list *timer);
void mt7615_init_txpower(struct mt7615_dev *dev,
struct ieee80211_supported_band *sband);
-int mt7615_set_channel(struct mt7615_phy *phy);
+int mt7615_set_channel(struct mt76_phy *mphy);
void mt7615_init_work(struct mt7615_dev *dev);
int mt7615_mcu_restart(struct mt76_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
index 9692890ba51b..aebfc4576aa4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
@@ -87,6 +87,7 @@ static int mt7663s_probe(struct sdio_func *func,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
.update_survey = mt7615_update_channel,
+ .set_channel = mt7615_set_channel,
};
static const struct mt76_bus_ops mt7663s_ops = {
.rr = mt76s_rr,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
index a3d1cfa729ed..03f5af84424b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
@@ -141,7 +141,7 @@ mt7615_tm_init(struct mt7615_phy *phy)
mt7615_mcu_set_sku_en(phy, phy->mt76->test.state == MT76_TM_STATE_OFF);
mutex_unlock(&dev->mt76.mutex);
- mt7615_set_channel(phy);
+ mt76_update_channel(phy->mt76);
mt7615_ops.configure_filter(phy->mt76->hw, 0, &total_flags, 0);
mutex_lock(&dev->mt76.mutex);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
index 9335ca0776fe..5020af52c68c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -123,6 +123,7 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
.update_survey = mt7615_update_channel,
+ .set_channel = mt7615_set_channel,
};
static struct mt76_bus_ops bus_ops = {
.rr = mt7663u_rr,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
index 5f132115ebfc..eb4765365b8c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
@@ -355,4 +355,11 @@ enum tx_port_idx {
MT_TX_PORT_IDX_MCU
};
+enum tx_frag_idx {
+ MT_TX_FRAG_NONE,
+ MT_TX_FRAG_FIRST,
+ MT_TX_FRAG_MID,
+ MT_TX_FRAG_LAST
+};
+
#endif /* __MT76_CONNAC2_MAC_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
index 353e66069840..db0c29e65185 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
@@ -28,8 +28,6 @@ enum {
#define MT_RXD0_MESH BIT(18)
#define MT_RXD0_MHCP BIT(19)
#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16)
-#define MT_RXD0_NORMAL_IP_SUM BIT(23)
-#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24)
#define MT_RXD0_SW_PKT_TYPE_MASK GENMASK(31, 16)
#define MT_RXD0_SW_PKT_TYPE_MAP 0x380F
@@ -80,6 +78,8 @@ enum {
#define MT_RXD3_NORMAL_BEACON_UC BIT(21)
#define MT_RXD3_NORMAL_CO_ANT BIT(22)
#define MT_RXD3_NORMAL_FCS_ERR BIT(24)
+#define MT_RXD3_NORMAL_IP_SUM BIT(26)
+#define MT_RXD3_NORMAL_UDP_TCP_SUM BIT(27)
#define MT_RXD3_NORMAL_VLAN2ETH BIT(31)
/* RXD DW4 */
@@ -197,6 +197,13 @@ enum tx_mgnt_type {
MT_TX_ADDBA,
};
+enum tx_frag_idx {
+ MT_TX_FRAG_NONE,
+ MT_TX_FRAG_FIRST,
+ MT_TX_FRAG_MID,
+ MT_TX_FRAG_LAST
+};
+
#define MT_CT_INFO_APPLY_TXD BIT(0)
#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1)
#define MT_CT_INFO_MGMT_FRAME BIT(2)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index b841bf628d02..a3db65254e37 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -391,6 +391,7 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
bool multicast = is_multicast_ether_addr(hdr->addr1);
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
__le16 fc = hdr->frame_control;
+ __le16 sc = hdr->seq_ctrl;
u8 fc_type, fc_stype;
u32 val;
@@ -432,6 +433,13 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
info->flags & IEEE80211_TX_CTL_USE_MINRATE)
val |= MT_TXD2_FIX_RATE;
+ if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST);
+ else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID);
+ else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST);
+
txwi[2] |= cpu_to_le32(val);
if (ieee80211_is_beacon(fc)) {
@@ -440,7 +448,7 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
}
if (info->flags & IEEE80211_TX_CTL_INJECTED) {
- u16 seqno = le16_to_cpu(hdr->seq_ctrl);
+ u16 seqno = le16_to_cpu(sc);
if (ieee80211_is_back_req(hdr->frame_control)) {
struct ieee80211_bar *bar;
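The txwi hunk above classifies each frame's position in a fragment burst from two 802.11 header facts: whether More Fragments is set and whether the fragment number is zero. A standalone model of that mapping; the enum values and helper name are invented for illustration.

```c
#include <stdbool.h>
#include <stdio.h>

enum tx_frag_idx {
	TX_FRAG_NONE,	/* unfragmented frame */
	TX_FRAG_FIRST,
	TX_FRAG_MID,
	TX_FRAG_LAST,
};

static enum tx_frag_idx classify_frag(bool more_frags, bool first_frag)
{
	if (more_frags && first_frag)
		return TX_FRAG_FIRST;
	if (more_frags && !first_frag)
		return TX_FRAG_MID;
	if (!more_frags && !first_frag)
		return TX_FRAG_LAST;
	return TX_FRAG_NONE;	/* no more fragments and fragment number 0 */
}

int main(void)
{
	printf("%d %d %d %d\n",
	       classify_frag(false, true),	/* NONE  */
	       classify_frag(true, true),	/* FIRST */
	       classify_frag(true, false),	/* MID   */
	       classify_frag(false, false));	/* LAST  */
	return 0;
}
```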
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 4dce03ddbfa4..864246f94088 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -283,7 +283,7 @@ __mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
};
struct sk_buff *skb;
- if (wcid && !wcid->sta)
+ if (wcid && !wcid->sta && !wcid->sta_disabled)
hdr.muar_idx = 0xe;
mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
@@ -371,7 +371,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_omac_tlv);
void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta,
- bool enable, bool newly)
+ int conn_state, bool newly)
{
struct sta_rec_basic *basic;
struct tlv *tlv;
@@ -382,13 +382,9 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
basic = (struct sta_rec_basic *)tlv;
basic->extra_info = cpu_to_le16(EXTRA_INFO_VER);
- if (enable) {
- if (newly)
- basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
- basic->conn_state = CONN_STATE_PORT_SECURE;
- } else {
- basic->conn_state = CONN_STATE_DISCONNECT;
- }
+ if (newly && conn_state != CONN_STATE_DISCONNECT)
+ basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
+ basic->conn_state = conn_state;
if (!link_sta) {
basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
@@ -1051,15 +1047,18 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
struct wtbl_req_hdr *wtbl_hdr;
struct tlv *sta_wtbl;
struct sk_buff *skb;
+ int conn_state;
skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
+ conn_state = info->enable ? CONN_STATE_PORT_SECURE :
+ CONN_STATE_DISCONNECT;
link_sta = info->sta ? &info->sta->deflink : NULL;
if (info->sta || !info->offload_fw)
mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif,
- link_sta, info->enable,
+ link_sta, conn_state,
info->newly);
if (info->sta && info->enable)
mt76_connac_mcu_sta_tlv(phy, skb, info->sta,
@@ -2850,6 +2849,17 @@ int mt76_connac_mcu_restart(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_restart);
+int mt76_connac_mcu_del_wtbl_all(struct mt76_dev *dev)
+{
+ struct wtbl_req_hdr req = {
+ .operation = WTBL_RESET_ALL,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_EXT_CMD(WTBL_UPDATE),
+ &req, sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_del_wtbl_all);
+
int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
u8 rx_sel, u8 val)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index 4242d436de26..1b0e80dfc346 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -115,21 +115,26 @@ struct mt76_connac2_mcu_uni_txd {
} __packed __aligned(4);
struct mt76_connac2_mcu_rxd {
- __le32 rxd[6];
+ /* New members MUST be added within the struct_group() macro below. */
+ struct_group_tagged(mt76_connac2_mcu_rxd_hdr, hdr,
+ __le32 rxd[6];
- __le16 len;
- __le16 pkt_type_id;
+ __le16 len;
+ __le16 pkt_type_id;
- u8 eid;
- u8 seq;
- u8 option;
- u8 rsv;
- u8 ext_eid;
- u8 rsv1[2];
- u8 s2d_index;
+ u8 eid;
+ u8 seq;
+ u8 option;
+ u8 rsv;
+ u8 ext_eid;
+ u8 rsv1[2];
+ u8 s2d_index;
+ );
u8 tlv[];
};
+static_assert(offsetof(struct mt76_connac2_mcu_rxd, tlv) == sizeof(struct mt76_connac2_mcu_rxd_hdr),
+ "struct member likely outside of struct_group_tagged()");
struct mt76_connac2_patch_hdr {
char build_date[16];
@@ -1898,7 +1903,7 @@ int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif);
void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta,
- bool enable, bool newly);
+ int state, bool newly);
void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, void *sta_wtbl,
@@ -2032,6 +2037,7 @@ void mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb,
void *sta_wtbl, void *wtbl_tlv);
int mt76_connac_mcu_set_pm(struct mt76_dev *dev, int band, int enter);
int mt76_connac_mcu_restart(struct mt76_dev *dev);
+int mt76_connac_mcu_del_wtbl_all(struct mt76_dev *dev);
int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
u8 rx_sel, u8 val);
int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb);
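The struct_group_tagged() rework above gives the fixed MCU RX header its own tagged type so the static_assert can catch members accidentally added outside the header, ahead of the flexible TLV array. Below is a simplified userspace model of that idea; struct_group_tagged() here is a stripped-down stand-in rather than the kernel's macro, and the struct names are illustrative.

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel macro: the same members appear both
 * anonymously (so rxd->len still works) and inside a named, tagged
 * struct whose size covers exactly the fixed header. */
#define struct_group_tagged(TAG, NAME, ...) \
	union { \
		struct { __VA_ARGS__ }; \
		struct TAG { __VA_ARGS__ } NAME; \
	}

struct mcu_rxd {
	struct_group_tagged(mcu_rxd_hdr, hdr,
		uint32_t rxd[6];
		uint16_t len;
		uint16_t pkt_type_id;
	);
	uint8_t tlv[];	/* variable-length TLVs start right after the header */
};

/* Fails to compile if a fixed member is added outside the group. */
static_assert(offsetof(struct mcu_rxd, tlv) == sizeof(struct mcu_rxd_hdr),
	      "struct member likely outside of struct_group_tagged()");

int main(void)
{
	printf("fixed header is %zu bytes\n", sizeof(struct mcu_rxd_hdr));
	return 0;
}
```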
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index 07380cce8755..4aa2dcedc874 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -8,16 +8,15 @@
#include <linux/etherdevice.h>
#include "mt76x0.h"
-static void
-mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
+int mt76x0_set_channel(struct mt76_phy *mphy)
{
- cancel_delayed_work_sync(&dev->cal_work);
+ struct mt76x02_dev *dev = container_of(mphy->dev, struct mt76x02_dev, mt76);
+
mt76x02_pre_tbtt_enable(dev, false);
if (mt76_is_mmio(&dev->mt76))
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
- mt76_set_channel(&dev->mphy);
- mt76x0_phy_set_channel(dev, chandef);
+ mt76x0_phy_set_channel(dev, &mphy->chandef);
mt76x02_mac_cc_reset(dev);
mt76x02_edcca_init(dev);
@@ -28,8 +27,9 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
}
mt76x02_pre_tbtt_enable(dev, true);
- mt76_txq_schedule_all(&dev->mphy);
+ return 0;
}
+EXPORT_SYMBOL_GPL(mt76x0_set_channel);
int mt76x0_set_sar_specs(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar)
@@ -61,13 +61,10 @@ int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt76x02_dev *dev = hw->priv;
- mutex_lock(&dev->mt76.mutex);
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
+ mt76_update_channel(&dev->mphy);
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- mt76x0_set_channel(dev, &hw->conf.chandef);
- ieee80211_wake_queues(hw);
- }
+ mutex_lock(&dev->mt76.mutex);
if (changed & IEEE80211_CONF_CHANGE_POWER) {
struct mt76_phy *mphy = &dev->mphy;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
index 99dcb8feb9f7..50f755344968 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -49,6 +49,7 @@ void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset);
void mt76x0_mac_stop(struct mt76x02_dev *dev);
int mt76x0_config(struct ieee80211_hw *hw, u32 changed);
+int mt76x0_set_channel(struct mt76_phy *mphy);
int mt76x0_set_sar_specs(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 2ecee7c5c80d..1eb955f3ca13 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -159,6 +159,7 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
MT_DRV_SW_RX_AIRTIME,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
+ .set_channel = mt76x0_set_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
.rx_skb = mt76x02_queue_rx_skb,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 390f502e97f0..b031c500b741 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -217,6 +217,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
.drv_flags = MT_DRV_SW_RX_AIRTIME,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
+ .set_channel = mt76x0_set_channel,
.tx_prepare_skb = mt76x02u_tx_prepare_skb,
.tx_complete_skb = mt76x02u_tx_complete_skb,
.tx_status_data = mt76x02_tx_status_data,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
index 024a5c0a5a57..7a07636d09c6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -630,7 +630,7 @@ static void mt76x02_dfs_tasklet(struct tasklet_struct *t)
radar_detected = mt76x02_dfs_check_detection(dev);
if (radar_detected) {
/* sw detector rx radar pattern */
- ieee80211_radar_detected(dev->mt76.hw);
+ ieee80211_radar_detected(dev->mt76.hw, NULL);
mt76x02_dfs_detector_reset(dev);
return;
@@ -658,7 +658,7 @@ static void mt76x02_dfs_tasklet(struct tasklet_struct *t)
/* hw detector rx radar pattern */
dfs_pd->stats[i].hw_pattern++;
- ieee80211_radar_detected(dev->mt76.hw);
+ ieee80211_radar_detected(dev->mt76.hw, NULL);
mt76x02_dfs_detector_reset(dev);
return;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 35b7ebc2c9c6..4a49a3036a46 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -22,7 +22,7 @@ static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t)
struct sk_buff *skb;
int i;
- if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (dev->mphy.offchannel)
return;
__skb_queue_head_init(&data.q);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 29b9a15f8dbe..0e1ede9314d8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -188,10 +188,7 @@ static void mt76x02u_pre_tbtt_work(struct work_struct *work)
struct sk_buff *skb;
int nbeacons;
- if (!dev->mt76.beacon_mask)
- return;
-
- if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ if (!dev->mt76.beacon_mask || dev->mphy.offchannel)
return;
__skb_queue_head_init(&data.q);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
index be1217329a77..f051721bb00e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -47,6 +47,8 @@ void mt76x2_phy_power_on(struct mt76x02_dev *dev);
void mt76x2_stop_hardware(struct mt76x02_dev *dev);
int mt76x2_eeprom_init(struct mt76x02_dev *dev);
int mt76x2_apply_calibration_data(struct mt76x02_dev *dev, int channel);
+int mt76x2e_set_channel(struct mt76_phy *phy);
+int mt76x2u_set_channel(struct mt76_phy *phy);
void mt76x2_phy_set_antenna(struct mt76x02_dev *dev);
int mt76x2_phy_start(struct mt76x02_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index 30959746e924..67c9d1caa0bd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -25,6 +25,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
MT_DRV_SW_RX_AIRTIME,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
+ .set_channel = mt76x2e_set_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
.rx_skb = mt76x02_queue_rx_skb,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index 6accea551319..eb70130d2711 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -32,33 +32,25 @@ mt76x2_stop(struct ieee80211_hw *hw, bool suspend)
mt76x2_stop_hardware(dev);
}
-static void
-mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
+int mt76x2e_set_channel(struct mt76_phy *phy)
{
- cancel_delayed_work_sync(&dev->cal_work);
+ struct mt76x02_dev *dev = container_of(phy->dev, struct mt76x02_dev, mt76);
+
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
- mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mphy.state);
-
- mt76_set_channel(&dev->mphy);
-
mt76x2_mac_stop(dev, true);
- mt76x2_phy_set_channel(dev, chandef);
+ mt76x2_phy_set_channel(dev, &phy->chandef);
mt76x02_mac_cc_reset(dev);
mt76x02_dfs_init_params(dev);
mt76x2_mac_resume(dev);
- clear_bit(MT76_RESET, &dev->mphy.state);
- mutex_unlock(&dev->mt76.mutex);
-
tasklet_enable(&dev->dfs_pd.dfs_tasklet);
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
- mt76_txq_schedule_all(&dev->mphy);
+ return 0;
}
static int
@@ -95,11 +87,8 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
mutex_unlock(&dev->mt76.mutex);
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- mt76x2_set_channel(dev, &hw->conf.chandef);
- ieee80211_wake_queues(hw);
- }
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
+ mt76_update_channel(&dev->mphy);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index e92bb871f231..e832ad53e239 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -32,6 +32,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
.drv_flags = MT_DRV_SW_RX_AIRTIME,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
+ .set_channel = mt76x2u_set_channel,
.tx_prepare_skb = mt76x02u_tx_prepare_skb,
.tx_complete_skb = mt76x02u_tx_complete_skb,
.tx_status_data = mt76x02_tx_status_data,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index ba0241c36672..83e7061b10e2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -31,32 +31,20 @@ static void mt76x2u_stop(struct ieee80211_hw *hw, bool suspend)
mt76x2u_stop_hw(dev);
}
-static int
-mt76x2u_set_channel(struct mt76x02_dev *dev,
- struct cfg80211_chan_def *chandef)
+int mt76x2u_set_channel(struct mt76_phy *mphy)
{
+ struct mt76x02_dev *dev = container_of(mphy->dev, struct mt76x02_dev, mt76);
int err;
- cancel_delayed_work_sync(&dev->cal_work);
mt76x02_pre_tbtt_enable(dev, false);
-
- mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &dev->mphy.state);
-
- mt76_set_channel(&dev->mphy);
-
mt76x2_mac_stop(dev, false);
- err = mt76x2u_phy_set_channel(dev, chandef);
+ err = mt76x2u_phy_set_channel(dev, &mphy->chandef);
mt76x02_mac_cc_reset(dev);
mt76x2_mac_resume(dev);
- clear_bit(MT76_RESET, &dev->mphy.state);
- mutex_unlock(&dev->mt76.mutex);
-
mt76x02_pre_tbtt_enable(dev, true);
- mt76_txq_schedule_all(&dev->mphy);
return err;
}
@@ -93,11 +81,8 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
mutex_unlock(&dev->mt76.mutex);
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- err = mt76x2u_set_channel(dev, &hw->conf.chandef);
- ieee80211_wake_queues(hw);
- }
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
+ mt76_update_channel(&dev->mphy);
return err;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index a978f434dc5e..6bef96e3d2a3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -194,6 +194,8 @@ static int mt7915_thermal_init(struct mt7915_phy *phy)
name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7915_%s",
wiphy_name(wiphy));
+ if (!name)
+ return -ENOMEM;
cdev = thermal_cooling_device_register(name, phy, &mt7915_thermal_ops);
if (!IS_ERR(cdev)) {
@@ -398,6 +400,7 @@ mt7915_init_wiphy(struct mt7915_phy *phy)
ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(hw, SUPPORTS_TX_FRAG);
hw->max_tx_fragments = 4;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 8008ce3fa6c7..cf77ce0c8759 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -1448,6 +1448,7 @@ mt7915_mac_full_reset(struct mt7915_dev *dev)
dev->recovery.hw_full_reset = true;
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
ieee80211_stop_queues(mt76_hw(dev));
if (ext_phy)
@@ -1462,26 +1463,27 @@ mt7915_mac_full_reset(struct mt7915_dev *dev)
if (!mt7915_mac_restart(dev))
break;
}
- mutex_unlock(&dev->mt76.mutex);
if (i == 10)
dev_err(dev->mt76.dev, "chip full reset failed\n");
- ieee80211_restart_hw(mt76_hw(dev));
- if (ext_phy)
- ieee80211_restart_hw(ext_phy->hw);
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
+ while (!list_empty(&dev->mt76.sta_poll_list))
+ list_del_init(dev->mt76.sta_poll_list.next);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
- ieee80211_wake_queues(mt76_hw(dev));
- if (ext_phy)
- ieee80211_wake_queues(ext_phy->hw);
+ memset(dev->mt76.wcid_mask, 0, sizeof(dev->mt76.wcid_mask));
+ dev->mt76.vif_mask = 0;
+ i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
+ dev->mt76.global_wcid.idx = i;
dev->recovery.hw_full_reset = false;
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
- MT7915_WATCHDOG_TIME);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ ieee80211_restart_hw(mt76_hw(dev));
if (ext_phy)
- ieee80211_queue_delayed_work(ext_phy->hw,
- &ext_phy->mac_work,
- MT7915_WATCHDOG_TIME);
+ ieee80211_restart_hw(ext_phy->hw);
}
/* system error recovery */
@@ -1537,12 +1539,14 @@ void mt7915_mac_reset_work(struct work_struct *work)
set_bit(MT76_RESET, &phy2->mt76->state);
cancel_delayed_work_sync(&phy2->mt76->mac_work);
}
+
+ mutex_lock(&dev->mt76.mutex);
+
mt76_worker_disable(&dev->mt76.tx_worker);
mt76_for_each_q_rx(&dev->mt76, i)
napi_disable(&dev->mt76.napi[i]);
napi_disable(&dev->mt76.tx_napi);
- mutex_lock(&dev->mt76.mutex);
if (mtk_wed_device_active(&dev->mt76.mmio.wed))
mtk_wed_device_stop(&dev->mt76.mmio.wed);
@@ -1692,6 +1696,11 @@ void mt7915_reset(struct mt7915_dev *dev)
return;
}
+ if ((READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA)) {
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ wake_up(&dev->mt76.mcu.wait);
+ }
+
queue_work(dev->mt76.wq, &dev->reset_work);
wake_up(&dev->reset_wait);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index 049223df9beb..d75e8dea1fbd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -245,7 +245,9 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx);
phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
- idx = MT7915_WTBL_RESERVED - mvif->mt76.idx;
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, mt7915_wtbl_size(dev));
+ if (idx < 0)
+ return -ENOSPC;
INIT_LIST_HEAD(&mvif->sta.rc_list);
INIT_LIST_HEAD(&mvif->sta.wcid.poll_list);
@@ -272,7 +274,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
memset(&mvif->cap, -1, sizeof(mvif->cap));
mt7915_mcu_add_bss_info(phy, vif, true);
- mt7915_mcu_add_sta(dev, vif, NULL, true);
+ mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_PORT_SECURE, true);
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
out:
@@ -291,7 +293,8 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
int idx = msta->wcid.idx;
mt7915_mcu_add_bss_info(phy, vif, false);
- mt7915_mcu_add_sta(dev, vif, NULL, false);
+ mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_DISCONNECT, false);
+ mt76_wcid_mask_clear(dev->mt76.wcid_mask, mvif->sta.wcid.idx);
mutex_lock(&dev->mt76.mutex);
mt76_testmode_reset(phy->mt76, true);
@@ -317,18 +320,12 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
mt76_wcid_cleanup(&dev->mt76, &msta->wcid);
}
-int mt7915_set_channel(struct mt7915_phy *phy)
+int mt7915_set_channel(struct mt76_phy *mphy)
{
+ struct mt7915_phy *phy = mphy->priv;
struct mt7915_dev *dev = phy->dev;
int ret;
- cancel_delayed_work_sync(&phy->mt76->mac_work);
-
- mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &phy->mt76->state);
-
- mt76_set_channel(phy->mt76);
-
if (dev->cal) {
ret = mt7915_mcu_apply_tx_dpd(phy);
if (ret)
@@ -347,11 +344,6 @@ int mt7915_set_channel(struct mt7915_phy *phy)
phy->noise = 0;
out:
- clear_bit(MT76_RESET, &phy->mt76->state);
- mutex_unlock(&dev->mt76.mutex);
-
- mt76_txq_schedule_all(phy->mt76);
-
if (!mt76_testmode_enabled(phy->mt76))
ieee80211_queue_delayed_work(phy->mt76->hw,
&phy->mt76->mac_work,
@@ -374,6 +366,9 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
int idx = key->keyidx;
int err = 0;
+ if (sta && !wcid->sta)
+ return -EOPNOTSUPP;
+
/* The hardware does not support per-STA RX GTK, fallback
* to software mode for these.
*/
@@ -464,11 +459,9 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
mutex_unlock(&dev->mt76.mutex);
}
#endif
- ieee80211_stop_queues(hw);
- ret = mt7915_set_channel(phy);
+ ret = mt76_update_channel(phy->mt76);
if (ret)
return ret;
- ieee80211_wake_queues(hw);
}
if (changed & (IEEE80211_CONF_CHANGE_POWER |
@@ -564,8 +557,7 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw,
MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
MT_WF_RFCR_DROP_RTS |
- MT_WF_RFCR_DROP_CTL_RSV |
- MT_WF_RFCR_DROP_NDPA);
+ MT_WF_RFCR_DROP_CTL_RSV);
*total_flags = flags;
rxfilter = phy->rxfilter;
@@ -633,7 +625,7 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
if (set_bss_info == 1)
mt7915_mcu_add_bss_info(phy, vif, true);
if (set_sta == 1)
- mt7915_mcu_add_sta(dev, vif, NULL, true);
+ mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_PORT_SECURE, false);
if (changed & BSS_CHANGED_ERP_CTS_PROT)
mt7915_mac_enable_rtscts(dev, vif, info->use_cts_prot);
@@ -668,7 +660,7 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
if (set_bss_info == 0)
mt7915_mcu_add_bss_info(phy, vif, false);
if (set_sta == 0)
- mt7915_mcu_add_sta(dev, vif, NULL, false);
+ mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_DISCONNECT, false);
mutex_unlock(&dev->mt76.mutex);
}
@@ -706,7 +698,7 @@ mt7915_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
err = mt7915_mcu_add_bss_info(phy, vif, true);
if (err)
goto out;
- err = mt7915_mcu_add_sta(dev, vif, NULL, true);
+ err = mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_PORT_SECURE, false);
out:
mutex_unlock(&dev->mt76.mutex);
@@ -720,7 +712,7 @@ mt7915_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt7915_dev *dev = mt7915_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
- mt7915_mcu_add_sta(dev, vif, NULL, false);
+ mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_DISCONNECT, false);
mutex_unlock(&dev->mt76.mutex);
}
@@ -743,8 +735,7 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
bool ext_phy = mvif->phy != &dev->phy;
- int ret, idx;
- u32 addr;
+ int idx;
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
if (idx < 0)
@@ -753,25 +744,61 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
INIT_LIST_HEAD(&msta->rc_list);
INIT_LIST_HEAD(&msta->wcid.poll_list);
msta->vif = mvif;
- msta->wcid.sta = 1;
+ msta->wcid.sta_disabled = 1;
msta->wcid.idx = idx;
msta->wcid.phy_idx = ext_phy;
- msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
msta->jiffies = jiffies;
ewma_avg_signal_init(&msta->avg_ack_signal);
mt7915_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_DISCONNECT, true);
- ret = mt7915_mcu_add_sta(dev, vif, sta, true);
- if (ret)
- return ret;
+ return 0;
+}
- addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 30);
- mt76_rmw_field(dev, addr, GENMASK(7, 0), 0xa0);
+int mt7915_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ int i, ret;
+ u32 addr;
+
+ switch (ev) {
+ case MT76_STA_EVENT_ASSOC:
+ ret = mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_CONNECT, true);
+ if (ret)
+ return ret;
+
+ addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 30);
+ mt76_rmw_field(dev, addr, GENMASK(7, 0), 0xa0);
+
+ ret = mt7915_mcu_add_rate_ctrl(dev, vif, sta, false);
+ if (ret)
+ return ret;
+
+ msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ msta->wcid.sta = 1;
+ msta->wcid.sta_disabled = 0;
+
+ return 0;
+
+ case MT76_STA_EVENT_AUTHORIZE:
+ return mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_PORT_SECURE, false);
- return mt7915_mcu_add_rate_ctrl(dev, vif, sta, false);
+ case MT76_STA_EVENT_DISASSOC:
+ for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++)
+ mt7915_mac_twt_teardown_flow(dev, msta, i);
+
+ mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_DISCONNECT, false);
+ msta->wcid.sta_disabled = 1;
+ msta->wcid.sta = 0;
+ return 0;
+ }
+
+ return 0;
}
void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
@@ -779,16 +806,10 @@ void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- int i;
-
- mt7915_mcu_add_sta(dev, vif, sta, false);
mt7915_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++)
- mt7915_mac_twt_teardown_flow(dev, msta, i);
-
spin_lock_bh(&mdev->sta_poll_lock);
if (!list_empty(&msta->wcid.poll_list))
list_del_init(&msta->wcid.poll_list);
@@ -896,22 +917,6 @@ mt7915_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
static int
-mt7915_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NOTEXIST,
- IEEE80211_STA_NONE);
-}
-
-static int
-mt7915_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NONE,
- IEEE80211_STA_NOTEXIST);
-}
-
-static int
mt7915_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
@@ -1089,8 +1094,7 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
struct rate_info *txrate = &msta->wcid.rate;
struct rate_info rxrate = {};
- if (is_mt7915(&phy->dev->mt76) &&
- !mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
+ if (!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
sinfo->rxrate = rxrate;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
}
@@ -1164,6 +1168,10 @@ static void mt7915_sta_rc_update(struct ieee80211_hw *hw,
{
struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct mt7915_dev *dev = phy->dev;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+
+ if (!msta->wcid.sta)
+ return;
mt7915_sta_rc_work(&changed, sta);
ieee80211_queue_work(hw, &dev->rc_work);
@@ -1207,6 +1215,9 @@ static void mt7915_sta_set_4addr(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags);
+ if (!msta->wcid.sta)
+ return;
+
mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
@@ -1223,6 +1234,9 @@ static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+ if (!msta->wcid.sta)
+ return;
+
mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
@@ -1579,6 +1593,12 @@ mt7915_twt_teardown_request(struct ieee80211_hw *hw,
}
static int
+mt7915_set_frag_threshold(struct ieee80211_hw *hw, u32 val)
+{
+ return 0;
+}
+
+static int
mt7915_set_radar_background(struct ieee80211_hw *hw,
struct cfg80211_chan_def *chandef)
{
@@ -1660,6 +1680,17 @@ mt7915_net_fill_forward_path(struct ieee80211_hw *hw,
}
#endif
+static void
+mt7915_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+
+ ieee80211_wake_queues(hw);
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
+ MT7915_WATCHDOG_TIME);
+}
+
const struct ieee80211_ops mt7915_ops = {
.add_chanctx = ieee80211_emulate_add_chanctx,
.remove_chanctx = ieee80211_emulate_remove_chanctx,
@@ -1676,8 +1707,7 @@ const struct ieee80211_ops mt7915_ops = {
.bss_info_changed = mt7915_bss_info_changed,
.start_ap = mt7915_start_ap,
.stop_ap = mt7915_stop_ap,
- .sta_add = mt7915_sta_add,
- .sta_remove = mt7915_sta_remove,
+ .sta_state = mt76_sta_state,
.sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
.sta_rc_update = mt7915_sta_rc_update,
.set_key = mt7915_set_key,
@@ -1708,6 +1738,7 @@ const struct ieee80211_ops mt7915_ops = {
.sta_set_decap_offload = mt7915_sta_set_decap_offload,
.add_twt_setup = mt7915_mac_add_twt_setup,
.twt_teardown_request = mt7915_twt_teardown_request,
+ .set_frag_threshold = mt7915_set_frag_threshold,
CFG80211_TESTMODE_CMD(mt76_testmode_cmd)
CFG80211_TESTMODE_DUMP(mt76_testmode_dump)
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -1718,4 +1749,5 @@ const struct ieee80211_ops mt7915_ops = {
.net_fill_forward_path = mt7915_net_fill_forward_path,
.net_setup_tc = mt76_wed_net_setup_tc,
#endif
+ .reconfig_complete = mt7915_reconfig_complete,
};
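For orientation, the main.c hunks above replace the driver's .sta_add/.sta_remove callbacks with mac80211's .sta_state (via mt76_sta_state) and route the per-state work through mt7915_mac_sta_event(). The following is a minimal standalone sketch of how station-state transitions map onto such driver events; the enum values and helper are local placeholders, not the kernel definitions.

/*
 * Illustrative sketch only: a self-contained model of mapping mac80211
 * sta_state transitions onto driver events, mirroring the flow used by
 * mt7915_mac_sta_event() above.  All names here are stand-ins.
 */
#include <stdio.h>

enum sta_state { STA_NOTEXIST, STA_NONE, STA_AUTH, STA_ASSOC, STA_AUTHORIZED };
enum sta_event { EV_NONE, EV_ASSOC, EV_AUTHORIZE, EV_DISASSOC };

/* Map an (old state -> new state) transition to a driver event. */
static enum sta_event sta_transition_event(enum sta_state old, enum sta_state new)
{
	if (old == STA_AUTH && new == STA_ASSOC)
		return EV_ASSOC;	/* push STA records and rate control */
	if (old == STA_ASSOC && new == STA_AUTHORIZED)
		return EV_AUTHORIZE;	/* mark the port secure */
	if (old == STA_ASSOC && new == STA_AUTH)
		return EV_DISASSOC;	/* tear down flows, disconnect record */
	return EV_NONE;
}

int main(void)
{
	printf("%d\n", sta_transition_event(STA_AUTH, STA_ASSOC));
	return 0;
}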
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 2185cd24e2e1..87d0dd040001 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -157,12 +157,21 @@ static int
mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq)
{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
struct mt76_connac2_mcu_rxd *rxd;
int ret = 0;
if (!skb) {
dev_err(mdev->dev, "Message %08x (seq %d) timeout\n",
cmd, seq);
+
+ if (!test_and_set_bit(MT76_MCU_RESET, &dev->mphy.state)) {
+ dev->recovery.restart = true;
+ wake_up(&dev->mt76.mcu.wait);
+ queue_work(dev->mt76.wq, &dev->reset_work);
+ wake_up(&dev->reset_wait);
+ }
+
return -ETIMEDOUT;
}
@@ -191,11 +200,6 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
enum mt76_mcuq_id qid;
- int ret;
-
- ret = mt76_connac2_mcu_fill_message(mdev, skb, cmd, wait_seq);
- if (ret)
- return ret;
if (cmd == MCU_CMD(FW_SCATTER))
qid = MT_MCUQ_FWDL;
@@ -293,7 +297,7 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
&dev->rdd2_chandef,
GFP_ATOMIC);
else
- ieee80211_radar_detected(mphy->hw);
+ ieee80211_radar_detected(mphy->hw, NULL);
dev->hw_pattern++;
}
@@ -690,13 +694,17 @@ int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
{
struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv;
struct mt7915_vif *mvif = msta->vif;
+ int ret;
+ mt76_worker_disable(&dev->mt76.tx_worker);
if (enable && !params->amsdu)
msta->wcid.amsdu = false;
+ ret = mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
+ MCU_EXT_CMD(STA_REC_UPDATE),
+ enable, true);
+ mt76_worker_enable(&dev->mt76.tx_worker);
- return mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
- MCU_EXT_CMD(STA_REC_UPDATE),
- enable, true);
+ return ret;
}
int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
@@ -1653,7 +1661,7 @@ mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif,
}
int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable)
+ struct ieee80211_sta *sta, int conn_state, bool newly)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct ieee80211_link_sta *link_sta;
@@ -1670,13 +1678,10 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(skb);
/* starec basic */
- mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta, enable,
- !rcu_access_pointer(dev->mt76.wcid[msta->wcid.idx]));
- if (!enable)
- goto out;
-
+ mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta,
+ conn_state, newly);
/* tag order is in accordance with firmware dependency. */
- if (sta) {
+ if (sta && conn_state != CONN_STATE_DISCONNECT) {
/* starec bfer */
mt7915_mcu_sta_bfer_tlv(dev, skb, vif, sta);
/* starec ht */
@@ -1687,12 +1692,17 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
mt76_connac_mcu_sta_uapsd(skb, vif, sta);
}
- ret = mt7915_mcu_sta_wtbl_tlv(dev, skb, vif, sta);
- if (ret) {
- dev_kfree_skb(skb);
- return ret;
+ if (newly || conn_state != CONN_STATE_DISCONNECT) {
+ ret = mt7915_mcu_sta_wtbl_tlv(dev, skb, vif, sta);
+ if (ret) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
}
+ if (conn_state == CONN_STATE_DISCONNECT)
+ goto out;
+
if (sta) {
/* starec amsdu */
mt7915_mcu_sta_amsdu_tlv(dev, skb, vif, sta);
@@ -2352,6 +2362,8 @@ int mt7915_mcu_init_firmware(struct mt7915_dev *dev)
if (ret)
return ret;
+ mt76_connac_mcu_del_wtbl_all(&dev->mt76);
+
if ((mtk_wed_device_active(&dev->mt76.mmio.wed) &&
is_mt7915(&dev->mt76)) ||
!mtk_wed_get_rx_capa(&dev->mt76.mmio.wed))
@@ -2376,7 +2388,9 @@ int mt7915_mcu_init_firmware(struct mt7915_dev *dev)
int mt7915_mcu_init(struct mt7915_dev *dev)
{
static const struct mt76_mcu_ops mt7915_mcu_ops = {
+ .max_retry = 3,
.headroom = sizeof(struct mt76_connac2_mcu_txd),
+ .mcu_skb_prepare_msg = mt76_connac2_mcu_fill_message,
.mcu_skb_send_msg = mt7915_mcu_send_message,
.mcu_parse_response = mt7915_mcu_parse_response,
};
@@ -2747,7 +2761,7 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
req.switch_reason = CH_SWITCH_NORMAL;
- else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL ||
+ else if (phy->mt76->offchannel ||
phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
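The mcu.c hunks above change mt7915_mcu_add_sta() from a boolean "enable" to an explicit connection state plus a "newly" flag, so a station record can exist in firmware while still disconnected. A standalone sketch of which TLV groups get attached for each state follows; the constants are local placeholders for the CONN_STATE_* values, not the kernel code.

/*
 * Sketch only: how a conn_state/newly pair could select the TLV groups,
 * following the branch structure of the hunk above.
 */
#include <stdbool.h>
#include <stdio.h>

enum conn_state { ST_DISCONNECT, ST_CONNECT, ST_PORT_SECURE };

static void add_sta_tlvs(enum conn_state state, bool newly)
{
	printf("basic tlv: always (state=%d, newly=%d)\n", state, newly);

	if (state != ST_DISCONNECT)
		printf("phy/ht/vht/uapsd tlvs\n");

	if (newly || state != ST_DISCONNECT)
		printf("wtbl tlv\n");

	if (state == ST_DISCONNECT)
		return;			/* nothing rate-related on teardown */

	printf("amsdu/he/muru/bfee/rate tlvs\n");
}

int main(void)
{
	add_sta_tlvs(ST_CONNECT, true);
	return 0;
}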
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index b41ac4aaced7..49476a4182fd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -29,7 +29,7 @@ struct mt7915_mcu_thermal_ctrl {
} __packed;
struct mt7915_mcu_thermal_notify {
- struct mt76_connac2_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd_hdr rxd;
struct mt7915_mcu_thermal_ctrl ctrl;
__le32 temperature;
@@ -37,7 +37,7 @@ struct mt7915_mcu_thermal_notify {
} __packed;
struct mt7915_mcu_csa_notify {
- struct mt76_connac2_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd_hdr rxd;
u8 omac_idx;
u8 csa_count;
@@ -46,7 +46,7 @@ struct mt7915_mcu_csa_notify {
} __packed;
struct mt7915_mcu_bcc_notify {
- struct mt76_connac2_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd_hdr rxd;
u8 band_idx;
u8 omac_idx;
@@ -55,7 +55,7 @@ struct mt7915_mcu_bcc_notify {
} __packed;
struct mt7915_mcu_rdd_report {
- struct mt76_connac2_mcu_rxd rxd;
+ struct mt76_connac2_mcu_rxd_hdr rxd;
u8 band_idx;
u8 long_detected;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index d6ecd698cdcd..44e112b8b5b3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -927,8 +927,10 @@ struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
.rx_check = mt7915_rx_check,
.rx_poll_complete = mt7915_rx_poll_complete,
.sta_add = mt7915_mac_sta_add,
+ .sta_event = mt7915_mac_sta_event,
.sta_remove = mt7915_mac_sta_remove,
.update_survey = mt7915_update_channel,
+ .set_channel = mt7915_set_channel,
};
struct mt7915_dev *dev;
struct mt76_dev *mdev;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index a30d08eb0656..ac0b1f0eb27c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -444,7 +444,7 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
struct ieee80211_vif *vif, int enable);
int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable);
+ struct ieee80211_sta *sta, int conn_state, bool newly);
int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
struct ieee80211_ampdu_params *params,
bool add);
@@ -463,7 +463,7 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool changed);
int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-int mt7915_set_channel(struct mt7915_phy *phy);
+int mt7915_set_channel(struct mt76_phy *mphy);
int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd);
int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif);
int mt7915_mcu_update_edca(struct mt7915_dev *dev, void *req);
@@ -560,6 +560,8 @@ void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
void mt7915_mac_set_timing(struct mt7915_phy *phy);
int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+int mt7915_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7915_mac_work(struct work_struct *work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
index 0d76ae31b376..d534fff5c952 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
@@ -404,6 +404,7 @@ static void
mt7915_tm_init(struct mt7915_phy *phy, bool en)
{
struct mt7915_dev *dev = phy->dev;
+ int state;
if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
return;
@@ -415,7 +416,8 @@ mt7915_tm_init(struct mt7915_phy *phy, bool en)
mt7915_tm_set_trx(phy, TM_MAC_TXRX, !en);
mt7915_mcu_add_bss_info(phy, phy->monitor_vif, en);
- mt7915_mcu_add_sta(dev, phy->monitor_vif, NULL, en);
+ state = en ? CONN_STATE_PORT_SECURE : CONN_STATE_DISCONNECT;
+ mt7915_mcu_add_sta(dev, phy->monitor_vif, NULL, state, true);
if (!en)
mt7915_tm_set_tam_arb(phy, en, 0);
@@ -425,7 +427,7 @@ static void
mt7915_tm_update_channel(struct mt7915_phy *phy)
{
mutex_unlock(&phy->dev->mt76.mutex);
- mt7915_set_channel(phy);
+ mt76_update_channel(phy->mt76);
mutex_lock(&phy->dev->mt76.mutex);
mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index ef0c721d26e3..d1d64fa7d35d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -52,6 +52,8 @@ static int mt7921_thermal_init(struct mt792x_phy *phy)
name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7921_%s",
wiphy_name(wiphy));
+ if (!name)
+ return -ENOMEM;
hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy,
mt7921_hwmon_groups);
@@ -83,7 +85,7 @@ mt7921_regd_channel_update(struct wiphy *wiphy, struct mt792x_dev *dev)
}
/* UNII-4 */
- if (IS_UNII_INVALID(0, 5850, 5925))
+ if (IS_UNII_INVALID(0, 5845, 5925))
ch->flags |= IEEE80211_CHAN_DISABLED;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 23b228804289..a7f5bfbc02ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -455,37 +455,30 @@ static int mt7921_cancel_remain_on_channel(struct ieee80211_hw *hw,
return mt7921_abort_roc(phy, mvif);
}
-static int mt7921_set_channel(struct mt792x_phy *phy)
+int mt7921_set_channel(struct mt76_phy *mphy)
{
+ struct mt792x_phy *phy = mphy->priv;
struct mt792x_dev *dev = phy->dev;
int ret;
- cancel_delayed_work_sync(&phy->mt76->mac_work);
-
- mt792x_mutex_acquire(dev);
- set_bit(MT76_RESET, &phy->mt76->state);
-
- mt76_set_channel(phy->mt76);
-
+ mt76_connac_pm_wake(mphy, &dev->pm);
ret = mt7921_mcu_set_chan_info(phy, MCU_EXT_CMD(CHANNEL_SWITCH));
if (ret)
goto out;
mt792x_mac_set_timeing(phy);
-
mt792x_mac_reset_counters(phy);
phy->noise = 0;
out:
- clear_bit(MT76_RESET, &phy->mt76->state);
- mt792x_mutex_release(dev);
+ mt76_connac_power_save_sched(mphy, &dev->pm);
- mt76_worker_schedule(&dev->mt76.tx_worker);
- ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mt76->mac_work,
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT792x_WATCHDOG_TIME);
return ret;
}
+EXPORT_SYMBOL_GPL(mt7921_set_channel);
static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
@@ -620,11 +613,9 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
int ret = 0;
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- ret = mt7921_set_channel(phy);
+ ret = mt76_update_channel(phy->mt76);
if (ret)
return ret;
- ieee80211_wake_queues(hw);
}
mt792x_mutex_acquire(dev);
@@ -831,13 +822,16 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt7921_mac_sta_add);
-void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+int mt7921_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev)
{
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ if (ev != MT76_STA_EVENT_ASSOC)
+ return 0;
+
mt792x_mutex_acquire(dev);
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
@@ -853,8 +847,10 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt7921_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC);
mt792x_mutex_release(dev);
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(mt7921_mac_sta_assoc);
+EXPORT_SYMBOL_GPL(mt7921_mac_sta_event);
void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
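The mt7921_set_channel() hunk above (like the mt7915 and mt7996 ones) strips the queue-stop, MT76_RESET and rescheduling boilerplate out of the driver callback; that bracketing now lives in a common helper invoked via mt76_update_channel(). The helper itself is not part of this patch, so the following is only a plausible shape of that split, with placeholder names.

/*
 * Sketch only (not the mt76 core code): a core wrapper owns the
 * bracketing, the driver hook does just the channel-specific work.
 */
#include <stdio.h>

struct phy;

struct driver_ops {
	int (*set_channel)(struct phy *phy);
};

struct phy {
	const struct driver_ops *ops;
	int in_reset;
};

static int update_channel(struct phy *phy)
{
	int ret;

	phy->in_reset = 1;		/* was: set_bit(MT76_RESET, ...) per driver */
	/* ... stop queues, program the new channel ... */
	ret = phy->ops->set_channel(phy);
	phy->in_reset = 0;		/* ... wake queues, reschedule mac work ... */

	return ret;
}

static int demo_set_channel(struct phy *phy)
{
	(void)phy;
	printf("driver channel switch\n");
	return 0;
}

int main(void)
{
	const struct driver_ops ops = { .set_channel = demo_set_channel };
	struct phy phy = { .ops = &ops };

	return update_channel(&phy);
}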
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 394fcd799345..02c1de8620a7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -890,7 +890,7 @@ int mt7921_mcu_set_chan_info(struct mt792x_phy *phy, int cmd)
if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
req.switch_reason = CH_SWITCH_NORMAL;
- else if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ else if (phy->mt76->offchannel)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if (!cfg80211_reg_can_beacon(dev->mt76.hw->wiphy, chandef,
NL80211_IFTYPE_AP))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 6c5392c5d207..16c89815c0b8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -186,6 +186,7 @@ int __mt7921_start(struct mt792x_phy *phy);
int mt7921_register_device(struct mt792x_dev *dev);
void mt7921_unregister_device(struct mt792x_dev *dev);
int mt7921_run_firmware(struct mt792x_dev *dev);
+int mt7921_set_channel(struct mt76_phy *mphy);
int mt7921_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7921_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta,
@@ -244,8 +245,8 @@ int mt7921_mac_init(struct mt792x_dev *dev);
bool mt7921_mac_wtbl_update(struct mt792x_dev *dev, int idx, u32 mask);
int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+int mt7921_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7921_mac_reset_work(struct work_struct *work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index a7430216a80d..67723c22aea6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -244,9 +244,10 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.rx_skb = mt7921_queue_rx_skb,
.rx_poll_complete = mt792x_rx_poll_complete,
.sta_add = mt7921_mac_sta_add,
- .sta_assoc = mt7921_mac_sta_assoc,
+ .sta_event = mt7921_mac_sta_event,
.sta_remove = mt7921_mac_sta_remove,
.update_survey = mt792x_update_channel,
+ .set_channel = mt7921_set_channel,
};
static const struct mt792x_hif_ops mt7921_pcie_ops = {
.init_reset = mt7921e_init_reset,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
index 004d942ee11a..95f526f7bb99 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -100,9 +100,10 @@ static int mt7921s_probe(struct sdio_func *func,
.rx_skb = mt7921_queue_rx_skb,
.rx_check = mt7921_rx_check,
.sta_add = mt7921_mac_sta_add,
- .sta_assoc = mt7921_mac_sta_assoc,
+ .sta_event = mt7921_mac_sta_event,
.sta_remove = mt7921_mac_sta_remove,
.update_survey = mt792x_update_channel,
+ .set_channel = mt7921_set_channel,
};
static const struct mt76_bus_ops mt7921s_ops = {
.rr = mt76s_rr,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
index 8b7c03c47598..8aa4f0203208 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -151,9 +151,10 @@ static int mt7921u_probe(struct usb_interface *usb_intf,
.rx_skb = mt7921_queue_rx_skb,
.rx_check = mt7921_rx_check,
.sta_add = mt7921_mac_sta_add,
- .sta_assoc = mt7921_mac_sta_assoc,
+ .sta_event = mt7921_mac_sta_event,
.sta_remove = mt7921_mac_sta_remove,
.update_survey = mt792x_update_channel,
+ .set_channel = mt7921_set_channel,
};
static const struct mt792x_hif_ops hif_ops = {
.mcu_init = mt7921u_mcu_init,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
index cf36750cf709..634c42bbf23f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
@@ -352,7 +352,7 @@ mt7925_mac_fill_rx_rate(struct mt792x_dev *dev,
static int
mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
{
- u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
+ u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
bool hdr_trans, unicast, insert_ccmp_hdr = false;
u8 chfreq, qos_ctl = 0, remove_pad, amsdu_info;
@@ -362,7 +362,6 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
struct mt792x_phy *phy = &dev->phy;
struct ieee80211_supported_band *sband;
u32 csum_status = *(u32 *)skb->cb;
- u32 rxd0 = le32_to_cpu(rxd[0]);
u32 rxd1 = le32_to_cpu(rxd[1]);
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 rxd3 = le32_to_cpu(rxd[3]);
@@ -420,7 +419,7 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
if (!sband->channels)
return -EINVAL;
- if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask &&
+ if (mt76_is_mmio(&dev->mt76) && (rxd3 & csum_mask) == csum_mask &&
!(csum_status & (BIT(0) | BIT(2) | BIT(3))))
skb->ip_summed = CHECKSUM_UNNECESSARY;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index 8c0768bf9343..791c8b00e112 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -439,6 +439,19 @@ static void mt7925_roc_iter(void *priv, u8 *mac,
mt7925_mcu_abort_roc(phy, &mvif->bss_conf, phy->roc_token_id);
}
+void mt7925_roc_abort_sync(struct mt792x_dev *dev)
+{
+ struct mt792x_phy *phy = &dev->phy;
+
+ del_timer_sync(&phy->roc_timer);
+ cancel_work_sync(&phy->roc_work);
+ if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
+ ieee80211_iterate_interfaces(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7925_roc_iter, (void *)phy);
+}
+EXPORT_SYMBOL_GPL(mt7925_roc_abort_sync);
+
void mt7925_roc_work(struct work_struct *work)
{
struct mt792x_phy *phy;
@@ -1078,23 +1091,26 @@ static void mt7925_mac_link_sta_assoc(struct mt76_dev *mdev,
mt792x_mutex_release(dev);
}
-void mt7925_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+int mt7925_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev)
{
+ struct ieee80211_link_sta *link_sta = &sta->deflink;
+
+ if (ev != MT76_STA_EVENT_ASSOC)
+ return 0;
+
if (ieee80211_vif_is_mld(vif)) {
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
- struct ieee80211_link_sta *link_sta;
link_sta = mt792x_sta_to_link_sta(vif, sta, msta->deflink_id);
-
mt7925_mac_set_links(mdev, vif);
-
- mt7925_mac_link_sta_assoc(mdev, vif, link_sta);
- } else {
- mt7925_mac_link_sta_assoc(mdev, vif, &sta->deflink);
}
+
+ mt7925_mac_link_sta_assoc(mdev, vif, link_sta);
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(mt7925_mac_sta_assoc);
+EXPORT_SYMBOL_GPL(mt7925_mac_sta_event);
static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev,
struct ieee80211_vif *vif,
@@ -1109,6 +1125,8 @@ static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev,
msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
mlink = mt792x_sta_to_link(msta, link_id);
+ mt7925_roc_abort_sync(dev);
+
mt76_connac_free_pending_tx_skbs(&dev->pm, &mlink->wcid);
mt76_connac_pm_wake(&dev->mphy, &dev->pm);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
index 9dc22fbe25d3..748ea6adbc6b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
@@ -638,6 +638,9 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
for (offset = 0; offset < len; offset += le32_to_cpu(clc->len)) {
clc = (const struct mt7925_clc *)(clc_base + offset);
+ if (clc->idx > ARRAY_SIZE(phy->clc))
+ break;
+
/* do not init buf again if chip reset triggered */
if (phy->clc[clc->idx])
continue;
@@ -1770,16 +1773,19 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
struct mt76_dev *dev = phy->dev;
struct sk_buff *skb;
+ int conn_state;
skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid,
MT7925_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
+ conn_state = info->enable ? CONN_STATE_PORT_SECURE :
+ CONN_STATE_DISCONNECT;
if (info->link_sta)
mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif,
info->link_sta,
- info->enable, info->newly);
+ conn_state, info->newly);
if (info->link_sta && info->enable) {
mt7925_mcu_sta_phy_tlv(skb, info->vif, info->link_sta);
mt7925_mcu_sta_ht_tlv(skb, info->link_sta);
@@ -2171,12 +2177,12 @@ void mt7925_mcu_bss_rlm_tlv(struct sk_buff *skb, struct mt76_phy *phy,
tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_RLM, sizeof(*req));
req = (struct bss_rlm_tlv *)tlv;
- req->control_channel = chandef->chan->hw_value,
- req->center_chan = ieee80211_frequency_to_channel(freq1),
- req->center_chan2 = ieee80211_frequency_to_channel(freq2),
- req->tx_streams = hweight8(phy->antenna_mask),
- req->ht_op_info = 4, /* set HT 40M allowed */
- req->rx_streams = hweight8(phy->antenna_mask),
+ req->control_channel = chandef->chan->hw_value;
+ req->center_chan = ieee80211_frequency_to_channel(freq1);
+ req->center_chan2 = ieee80211_frequency_to_channel(freq2);
+ req->tx_streams = hweight8(phy->antenna_mask);
+ req->ht_op_info = 4; /* set HT 40M allowed */
+ req->rx_streams = hweight8(phy->antenna_mask);
req->band = band;
switch (chandef->width) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
index 669f3a079d04..f5c02e5f5066 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
@@ -219,8 +219,8 @@ int mt7925_mac_init(struct mt792x_dev *dev);
int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
bool mt7925_mac_wtbl_update(struct mt792x_dev *dev, int idx, u32 mask);
-void mt7925_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+int mt7925_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev);
void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7925_mac_reset_work(struct work_struct *work);
@@ -307,6 +307,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
enum mt7925_roc_req type, u8 token_id);
int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
u8 token_id);
+void mt7925_roc_abort_sync(struct mt792x_dev *dev);
int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, int *wait_seq);
int mt7925_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
index 6e4f4e78c350..9aec675450f2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
@@ -279,7 +279,7 @@ static int mt7925_pci_probe(struct pci_dev *pdev,
.rx_skb = mt7925_queue_rx_skb,
.rx_poll_complete = mt792x_rx_poll_complete,
.sta_add = mt7925_mac_sta_add,
- .sta_assoc = mt7925_mac_sta_assoc,
+ .sta_event = mt7925_mac_sta_event,
.sta_remove = mt7925_mac_sta_remove,
.update_survey = mt792x_update_channel,
};
@@ -449,6 +449,8 @@ static int mt7925_pci_suspend(struct device *device)
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
+ mt7925_roc_abort_sync(dev);
+
err = mt792x_mcu_drv_pmctrl(dev);
if (err < 0)
goto restore_suspend;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
index 1e0f094fc905..682db1bab21c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
@@ -142,7 +142,7 @@ static int mt7925u_probe(struct usb_interface *usb_intf,
.rx_skb = mt7925_queue_rx_skb,
.rx_check = mt7925_rx_check,
.sta_add = mt7925_mac_sta_add,
- .sta_assoc = mt7925_mac_sta_assoc,
+ .sta_event = mt7925_mac_sta_event,
.sta_remove = mt7925_mac_sta_remove,
.update_survey = mt792x_update_channel,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
index 7fa74d59cc48..ab12616ec2b8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
@@ -68,7 +68,7 @@ struct mt792x_fw_features {
enum {
MT792x_CLC_POWER,
- MT792x_CLC_CHAN,
+ MT792x_CLC_POWER_EXT,
MT792x_CLC_MAX_NUM,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
index 283df84f1b43..5e96973226bb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
@@ -42,6 +42,7 @@ static const struct ieee80211_iface_combination if_comb[] = {
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80) |
BIT(NL80211_CHAN_WIDTH_160),
+ .beacon_int_min_gcd = 100,
}
};
@@ -941,8 +942,12 @@ void mt7996_set_stream_vht_txbf_caps(struct mt7996_phy *phy)
cap = &phy->mt76->sband_5g.sband.vht_cap.cap;
*cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
- IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
- FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, sts - 1);
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
+ if (is_mt7996(phy->mt76->dev))
+ *cap |= FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, 3);
+ else
+ *cap |= FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, 4);
*cap &= ~(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK |
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
@@ -987,9 +992,15 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
elem->phy_cap_info[2] |= c;
- c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
+ c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE;
+
+ if (is_mt7996(phy->mt76->dev))
+ c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
+ else
+ c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5;
+
elem->phy_cap_info[4] |= c;
/* do not support NG16 due to spec D4.0 changes subcarrier idx */
@@ -1011,8 +1022,6 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
return;
elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
- if (vif == NL80211_IFTYPE_AP)
- elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
sts - 1) |
@@ -1020,6 +1029,11 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
sts - 1);
elem->phy_cap_info[5] |= c;
+ if (vif != NL80211_IFTYPE_AP)
+ return;
+
+ elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+
c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
elem->phy_cap_info[6] |= c;
@@ -1179,12 +1193,12 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
IEEE80211_EHT_MAC_CAP0_OM_CONTROL;
eht_cap_elem->phy_cap_info[0] =
- IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ |
IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE;
- val = max_t(u8, sts - 1, 3);
+ /* Set the maximum capability regardless of the antenna configuration. */
+ val = is_mt7992(phy->mt76->dev) ? 4 : 3;
eht_cap_elem->phy_cap_info[0] |=
u8_encode_bits(u8_get_bits(val, BIT(0)),
IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK);
@@ -1193,30 +1207,36 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
u8_encode_bits(u8_get_bits(val, GENMASK(2, 1)),
IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) |
u8_encode_bits(val,
- IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK) |
- u8_encode_bits(val,
- IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK);
eht_cap_elem->phy_cap_info[2] =
u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK) |
- u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK) |
- u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK);
+ u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK);
+
+ if (band == NL80211_BAND_6GHZ) {
+ eht_cap_elem->phy_cap_info[0] |=
+ IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
+
+ eht_cap_elem->phy_cap_info[1] |=
+ u8_encode_bits(val,
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
+
+ eht_cap_elem->phy_cap_info[2] |=
+ u8_encode_bits(sts - 1,
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK);
+ }
eht_cap_elem->phy_cap_info[3] =
IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
- IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
- IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
- IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
- IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK;
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK;
eht_cap_elem->phy_cap_info[4] =
u8_encode_bits(min_t(int, sts - 1, 2),
IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK);
eht_cap_elem->phy_cap_info[5] =
- IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US,
IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK) |
u8_encode_bits(u8_get_bits(0x11, GENMASK(1, 0)),
@@ -1230,14 +1250,6 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK) |
u8_encode_bits(val, IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK);
- eht_cap_elem->phy_cap_info[7] =
- IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
- IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
- IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
- IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
- IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ |
- IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ;
-
val = u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_RX) |
u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_TX);
#define SET_EHT_MAX_NSS(_bw, _val) do { \
@@ -1248,8 +1260,29 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
SET_EHT_MAX_NSS(80, val);
SET_EHT_MAX_NSS(160, val);
- SET_EHT_MAX_NSS(320, val);
+ if (band == NL80211_BAND_6GHZ)
+ SET_EHT_MAX_NSS(320, val);
#undef SET_EHT_MAX_NSS
+
+ if (iftype != NL80211_IFTYPE_AP)
+ return;
+
+ eht_cap_elem->phy_cap_info[3] |=
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK;
+
+ eht_cap_elem->phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ;
+
+ if (band != NL80211_BAND_6GHZ)
+ return;
+
+ eht_cap_elem->phy_cap_info[7] |=
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ;
}
static void
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index bc7111a71f98..0d21414e2c88 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -435,7 +435,7 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 rxd3 = le32_to_cpu(rxd[3]);
u32 rxd4 = le32_to_cpu(rxd[4]);
- u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
+ u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
u32 csum_status = *(u32 *)skb->cb;
u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP;
bool is_mesh = (rxd0 & mesh_mask) == mesh_mask;
@@ -497,7 +497,7 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
if (!sband->channels)
return -EINVAL;
- if ((rxd0 & csum_mask) == csum_mask &&
+ if ((rxd3 & csum_mask) == csum_mask &&
!(csum_status & (BIT(0) | BIT(2) | BIT(3))))
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -746,7 +746,7 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
bool multicast = is_multicast_ether_addr(hdr->addr1);
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- __le16 fc = hdr->frame_control;
+ __le16 fc = hdr->frame_control, sc = hdr->seq_ctrl;
u8 fc_type, fc_stype;
u32 val;
@@ -780,6 +780,15 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
+ if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST);
+ else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID);
+ else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST);
+ else
+ val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_NONE);
+
txwi[2] |= cpu_to_le32(val);
txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
@@ -789,7 +798,7 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
}
if (info->flags & IEEE80211_TX_CTL_INJECTED) {
- u16 seqno = le16_to_cpu(hdr->seq_ctrl);
+ u16 seqno = le16_to_cpu(sc);
if (ieee80211_is_back_req(hdr->frame_control)) {
struct ieee80211_bar *bar;
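The TXWI hunk above classifies each frame as a first/middle/last/non-fragment from the MoreFrags bit and the fragment number in the sequence-control field. A standalone sketch of that classification follows; the names are local placeholders rather than the MT_TX_FRAG_* definitions.

/*
 * Sketch of the fragment classification added above: MoreFrags plus
 * "fragment number == 0" (the low 4 bits of seq_ctrl) select the type.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum frag_type { FRAG_NONE, FRAG_FIRST, FRAG_MID, FRAG_LAST };

static enum frag_type classify_frag(bool more_frags, uint16_t seq_ctrl)
{
	bool first = (seq_ctrl & 0x000f) == 0;	/* fragment number == 0 */

	if (more_frags && first)
		return FRAG_FIRST;
	if (more_frags && !first)
		return FRAG_MID;
	if (!more_frags && !first)
		return FRAG_LAST;
	return FRAG_NONE;
}

int main(void)
{
	printf("%d\n", classify_frag(true, 0x0000));	/* FIRST */
	printf("%d\n", classify_frag(false, 0x0003));	/* LAST */
	return 0;
}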
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index bce082038219..39f071ece35e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -206,7 +206,7 @@ static int mt7996_add_interface(struct ieee80211_hw *hw,
mvif->mt76.omac_idx = idx;
mvif->phy = phy;
mvif->mt76.band_idx = band_idx;
- mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
+ mvif->mt76.wmm_idx = vif->type == NL80211_IFTYPE_AP ? 0 : 3;
ret = mt7996_mcu_add_dev_info(phy, vif, true);
if (ret)
@@ -291,22 +291,19 @@ static void mt7996_remove_interface(struct ieee80211_hw *hw,
mt76_wcid_cleanup(&dev->mt76, &msta->wcid);
}
-int mt7996_set_channel(struct mt7996_phy *phy)
+int mt7996_set_channel(struct mt76_phy *mphy)
{
- struct mt7996_dev *dev = phy->dev;
+ struct mt7996_phy *phy = mphy->priv;
int ret;
- cancel_delayed_work_sync(&phy->mt76->mac_work);
-
- mutex_lock(&dev->mt76.mutex);
- set_bit(MT76_RESET, &phy->mt76->state);
-
- mt76_set_channel(phy->mt76);
-
ret = mt7996_mcu_set_chan_info(phy, UNI_CHANNEL_SWITCH);
if (ret)
goto out;
+ ret = mt7996_mcu_set_chan_info(phy, UNI_CHANNEL_RX_PATH);
+ if (ret)
+ goto out;
+
ret = mt7996_dfs_init_radar_detector(phy);
mt7996_mac_cca_stats_reset(phy);
@@ -314,13 +311,7 @@ int mt7996_set_channel(struct mt7996_phy *phy)
phy->noise = 0;
out:
- clear_bit(MT76_RESET, &phy->mt76->state);
- mutex_unlock(&dev->mt76.mutex);
-
- mt76_txq_schedule_all(phy->mt76);
-
- ieee80211_queue_delayed_work(phy->mt76->hw,
- &phy->mt76->mac_work,
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT7996_WATCHDOG_TIME);
return ret;
@@ -360,14 +351,14 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_SMS4:
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
- wcid_keyidx = &wcid->hw_key_idx2;
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
- fallthrough;
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- if (key->keyidx == 6 || key->keyidx == 7)
+ if (key->keyidx == 6 || key->keyidx == 7) {
+ wcid_keyidx = &wcid->hw_key_idx2;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
break;
+ }
fallthrough;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
@@ -411,11 +402,9 @@ static int mt7996_config(struct ieee80211_hw *hw, u32 changed)
int ret;
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ieee80211_stop_queues(hw);
- ret = mt7996_set_channel(phy);
+ ret = mt76_update_channel(phy->mt76);
if (ret)
return ret;
- ieee80211_wake_queues(hw);
}
if (changed & (IEEE80211_CONF_CHANGE_POWER |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index 2e4fa9f48dfb..6c445a9dbc03 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -371,7 +371,7 @@ mt7996_mcu_rx_radar_detected(struct mt7996_dev *dev, struct sk_buff *skb)
&dev->rdd2_chandef,
GFP_ATOMIC);
else
- ieee80211_radar_detected(mphy->hw);
+ ieee80211_radar_detected(mphy->hw, NULL);
dev->hw_pattern++;
}
@@ -735,7 +735,7 @@ void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb)
static struct tlv *
mt7996_mcu_add_uni_tlv(struct sk_buff *skb, u16 tag, u16 len)
{
- struct tlv *ptlv = skb_put(skb, len);
+ struct tlv *ptlv = skb_put_zero(skb, len);
ptlv->tag = cpu_to_le16(tag);
ptlv->len = cpu_to_le16(len);
@@ -822,7 +822,7 @@ mt7996_mcu_bss_mbssid_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct bss_info_uni_mbssid *mbssid;
struct tlv *tlv;
- if (!vif->bss_conf.bssid_indicator)
+ if (!vif->bss_conf.bssid_indicator && enable)
return;
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_11V_MBSSID, sizeof(*mbssid));
@@ -1429,10 +1429,10 @@ mt7996_is_ebf_supported(struct mt7996_phy *phy, struct ieee80211_vif *vif,
if (bfee)
return vif->bss_conf.eht_su_beamformee &&
- EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]);
+ EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]);
else
return vif->bss_conf.eht_su_beamformer &&
- EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]);
+ EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]);
}
if (sta->deflink.he_cap.has_he) {
@@ -1544,6 +1544,9 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
u8 nss_mcs = mt7996_mcu_get_sta_nss(mcs_map);
u8 snd_dim, sts;
+ if (!vc)
+ return;
+
bf->tx_mode = MT_PHY_TYPE_HE_SU;
mt7996_mcu_sta_sounding_rate(bf);
@@ -1653,7 +1656,7 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_phy *phy = mvif->phy;
- int tx_ant = hweight8(phy->mt76->chainmask) - 1;
+ int tx_ant = hweight16(phy->mt76->chainmask) - 1;
struct sta_rec_bf *bf;
struct tlv *tlv;
static const u8 matrix[4][4] = {
@@ -2160,6 +2163,7 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta;
struct mt7996_sta *msta;
struct sk_buff *skb;
+ int conn_state;
int ret;
msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->sta;
@@ -2172,8 +2176,9 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(skb);
/* starec basic */
+ conn_state = enable ? CONN_STATE_PORT_SECURE : CONN_STATE_DISCONNECT;
mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta,
- enable, newly);
+ conn_state, newly);
if (!enable)
goto out;
@@ -3460,7 +3465,7 @@ int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag)
if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
req.switch_reason = CH_SWITCH_NORMAL;
- else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL ||
+ else if (phy->mt76->offchannel ||
phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
@@ -3923,8 +3928,9 @@ int mt7996_mcu_set_txbf(struct mt7996_dev *dev, u8 action)
tlv = mt7996_mcu_add_uni_tlv(skb, action, sizeof(*req_mod_en));
req_mod_en = (struct bf_mod_en_ctrl *)tlv;
- req_mod_en->bf_num = 3;
- req_mod_en->bf_bitmap = GENMASK(2, 0);
+ req_mod_en->bf_num = mt7996_band_valid(dev, MT_BAND2) ? 3 : 2;
+ req_mod_en->bf_bitmap = mt7996_band_valid(dev, MT_BAND2) ?
+ GENMASK(2, 0) : GENMASK(1, 0);
break;
}
default:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index 928a9663b49e..40e45fb2b626 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -620,6 +620,7 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
.sta_add = mt7996_mac_sta_add,
.sta_remove = mt7996_mac_sta_remove,
.update_survey = mt7996_update_channel,
+ .set_channel = mt7996_set_channel,
};
struct mt7996_dev *dev;
struct mt76_dev *mdev;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index 177cfff31120..ab8c9070630b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -468,7 +468,7 @@ int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_he_obss_pd *he_obss_pd);
int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool changed);
-int mt7996_set_channel(struct mt7996_phy *phy);
+int mt7996_set_channel(struct mt76_phy *mphy);
int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag);
int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif);
int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 5cf6edee4d13..ce193e625666 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -313,6 +313,9 @@ __mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
return idx;
wcid = (struct mt76_wcid *)sta->drv_priv;
+ if (!wcid->sta)
+ return idx;
+
q->entry[idx].wcid = wcid->idx;
if (!non_aql)
@@ -330,6 +333,7 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct sk_buff_head *head;
if (mt76_testmode_enabled(phy)) {
ieee80211_free_txskb(phy->hw, skb);
@@ -345,9 +349,15 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
- spin_lock_bh(&wcid->tx_pending.lock);
- __skb_queue_tail(&wcid->tx_pending, skb);
- spin_unlock_bh(&wcid->tx_pending.lock);
+ if ((info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
+ (info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK))
+ head = &wcid->tx_offchannel;
+ else
+ head = &wcid->tx_pending;
+
+ spin_lock_bh(&head->lock);
+ __skb_queue_tail(head, skb);
+ spin_unlock_bh(&head->lock);
spin_lock_bh(&phy->tx_lock);
if (list_empty(&wcid->tx_list))
@@ -478,7 +488,7 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
return idx;
do {
- if (test_bit(MT76_RESET, &phy->state))
+ if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
return -EBUSY;
if (stop || mt76_txq_stopped(q))
@@ -522,7 +532,7 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
while (1) {
int n_frames = 0;
- if (test_bit(MT76_RESET, &phy->state))
+ if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
return -EBUSY;
if (dev->queue_ops->tx_cleanup &&
@@ -568,7 +578,7 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
int len;
- if (qid >= 4)
+ if (qid >= 4 || phy->offchannel)
return;
local_bh_disable();
@@ -586,7 +596,8 @@ void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
EXPORT_SYMBOL_GPL(mt76_txq_schedule);
static int
-mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
+mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid,
+ struct sk_buff_head *head)
{
struct mt76_dev *dev = phy->dev;
struct ieee80211_sta *sta;
@@ -594,8 +605,8 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
struct sk_buff *skb;
int ret = 0;
- spin_lock(&wcid->tx_pending.lock);
- while ((skb = skb_peek(&wcid->tx_pending)) != NULL) {
+ spin_lock(&head->lock);
+ while ((skb = skb_peek(head)) != NULL) {
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int qid = skb_get_queue_mapping(skb);
@@ -607,13 +618,13 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
qid = MT_TXQ_PSD;
q = phy->q_tx[qid];
- if (mt76_txq_stopped(q)) {
+ if (mt76_txq_stopped(q) || test_bit(MT76_RESET, &phy->state)) {
ret = -1;
break;
}
- __skb_unlink(skb, &wcid->tx_pending);
- spin_unlock(&wcid->tx_pending.lock);
+ __skb_unlink(skb, head);
+ spin_unlock(&head->lock);
sta = wcid_to_sta(wcid);
spin_lock(&q->lock);
@@ -621,15 +632,17 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid)
dev->queue_ops->kick(dev, q);
spin_unlock(&q->lock);
- spin_lock(&wcid->tx_pending.lock);
+ spin_lock(&head->lock);
}
- spin_unlock(&wcid->tx_pending.lock);
+ spin_unlock(&head->lock);
return ret;
}
static void mt76_txq_schedule_pending(struct mt76_phy *phy)
{
+ LIST_HEAD(tx_list);
+
if (list_empty(&phy->tx_list))
return;
@@ -637,22 +650,27 @@ static void mt76_txq_schedule_pending(struct mt76_phy *phy)
rcu_read_lock();
spin_lock(&phy->tx_lock);
- while (!list_empty(&phy->tx_list)) {
- struct mt76_wcid *wcid = NULL;
+ list_splice_init(&phy->tx_list, &tx_list);
+ while (!list_empty(&tx_list)) {
+ struct mt76_wcid *wcid;
int ret;
- wcid = list_first_entry(&phy->tx_list, struct mt76_wcid, tx_list);
+ wcid = list_first_entry(&tx_list, struct mt76_wcid, tx_list);
list_del_init(&wcid->tx_list);
spin_unlock(&phy->tx_lock);
- ret = mt76_txq_schedule_pending_wcid(phy, wcid);
+ ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_offchannel);
+ if (ret >= 0 && !phy->offchannel)
+ ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_pending);
spin_lock(&phy->tx_lock);
- if (ret) {
- if (list_empty(&wcid->tx_list))
- list_add_tail(&wcid->tx_list, &phy->tx_list);
+ if (!skb_queue_empty(&wcid->tx_pending) &&
+ !skb_queue_empty(&wcid->tx_offchannel) &&
+ list_empty(&wcid->tx_list))
+ list_add_tail(&wcid->tx_list, &phy->tx_list);
+
+ if (ret < 0)
break;
- }
}
spin_unlock(&phy->tx_lock);
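The tx.c hunks above split each station's pending frames into two lists: frames flagged for off-channel transmission (or flagged to bypass the configured rate mask) go to a separate queue so they can still be flushed while the regular queue stays parked during a scan or reset. A minimal standalone model of the queue selection follows; the struct and flag names are placeholders, not the mt76 types.

/*
 * Sketch only: selecting the per-station queue the way mt76_tx() does
 * above, with local stand-ins for the mac80211 flags.
 */
#include <stdbool.h>
#include <stdio.h>

struct sta_queues {
	const char *tx_pending;
	const char *tx_offchannel;
};

static const char *pick_queue(const struct sta_queues *q,
			      bool offchan_flag, bool no_rate_mask)
{
	if (offchan_flag || no_rate_mask)
		return q->tx_offchannel;
	return q->tx_pending;
}

int main(void)
{
	struct sta_queues q = { "pending", "offchannel" };

	printf("%s\n", pick_queue(&q, true, false));	/* offchannel */
	printf("%s\n", pick_queue(&q, false, false));	/* pending */
	return 0;
}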
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index 3c48e1a57b24..bba53307b960 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -384,6 +384,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
struct wilc_join_bss_param *param;
u8 rates_len = 0;
int ies_len;
+ u64 ies_tsf;
int ret;
param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -399,6 +400,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
return NULL;
}
ies_len = ies->len;
+ ies_tsf = ies->tsf;
rcu_read_unlock();
param->beacon_period = cpu_to_le16(bss->beacon_interval);
@@ -454,7 +456,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
(u8 *)&noa_attr, sizeof(noa_attr));
if (ret > 0) {
- param->tsf_lo = cpu_to_le32(ies->tsf);
+ param->tsf_lo = cpu_to_le32(ies_tsf);
param->noa_enabled = 1;
param->idx = noa_attr.index;
if (noa_attr.oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT) {
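The hif.c hunk above copies ies->tsf (alongside the existing ies->len) into a local while still inside rcu_read_lock(), so the RCU-protected ies pointer is no longer dereferenced after rcu_read_unlock(). A generic sketch of that copy-before-unlock pattern; struct cfg, shared_cfg and use_cfg() are placeholders, not wilc1000 code:

#include <linux/rcupdate.h>
#include <linux/printk.h>
#include <linux/types.h>

struct cfg {
        u32 len;
        u64 tsf;
};

static struct cfg __rcu *shared_cfg;

static void use_cfg(void)
{
        const struct cfg *c;
        u64 tsf;
        u32 len;

        rcu_read_lock();
        c = rcu_dereference(shared_cfg);
        if (!c) {
                rcu_read_unlock();
                return;
        }
        len = c->len;   /* snapshot what is needed ... */
        tsf = c->tsf;   /* ... while the pointer is still protected */
        rcu_read_unlock();

        /* only the local copies are used once the read section has ended */
        pr_info("len=%u tsf=%llu\n", len, tsf);
}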
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 683a35c682a8..b4da05d5a498 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -174,19 +174,18 @@ static int wilc_sdio_probe(struct sdio_func *func,
wilc->bus_data = sdio_priv;
wilc->dev = &func->dev;
- wilc->rtc_clk = devm_clk_get_optional(&func->card->dev, "rtc");
+ wilc->rtc_clk = devm_clk_get_optional_enabled(&func->card->dev, "rtc");
if (IS_ERR(wilc->rtc_clk)) {
ret = PTR_ERR(wilc->rtc_clk);
goto dispose_irq;
}
- clk_prepare_enable(wilc->rtc_clk);
wilc_sdio_init(wilc, false);
ret = wilc_load_mac_from_nv(wilc);
if (ret) {
pr_err("Can not retrieve MAC address from chip\n");
- goto clk_disable_unprepare;
+ goto dispose_irq;
}
wilc_sdio_deinit(wilc);
@@ -195,14 +194,12 @@ static int wilc_sdio_probe(struct sdio_func *func,
NL80211_IFTYPE_STATION, false);
if (IS_ERR(vif)) {
ret = PTR_ERR(vif);
- goto clk_disable_unprepare;
+ goto dispose_irq;
}
dev_info(&func->dev, "Driver Initializing success\n");
return 0;
-clk_disable_unprepare:
- clk_disable_unprepare(wilc->rtc_clk);
dispose_irq:
irq_dispose_mapping(wilc->dev_irq_num);
wilc_netdev_cleanup(wilc);
@@ -217,7 +214,6 @@ static void wilc_sdio_remove(struct sdio_func *func)
struct wilc *wilc = sdio_get_drvdata(func);
struct wilc_sdio *sdio_priv = wilc->bus_data;
- clk_disable_unprepare(wilc->rtc_clk);
wilc_netdev_cleanup(wilc);
kfree(sdio_priv->cmd53_buf);
kfree(sdio_priv);
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 5ff940c53ad9..05b577b1068e 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -228,12 +228,11 @@ static int wilc_bus_probe(struct spi_device *spi)
if (ret < 0)
goto netdev_cleanup;
- wilc->rtc_clk = devm_clk_get_optional(&spi->dev, "rtc");
+ wilc->rtc_clk = devm_clk_get_optional_enabled(&spi->dev, "rtc");
if (IS_ERR(wilc->rtc_clk)) {
ret = PTR_ERR(wilc->rtc_clk);
goto netdev_cleanup;
}
- clk_prepare_enable(wilc->rtc_clk);
dev_info(&spi->dev, "Selected CRC config: crc7=%s, crc16=%s\n",
enable_crc7 ? "on" : "off", enable_crc16 ? "on" : "off");
@@ -266,7 +265,6 @@ static int wilc_bus_probe(struct spi_device *spi)
return 0;
power_down:
- clk_disable_unprepare(wilc->rtc_clk);
wilc_wlan_power(wilc, false);
netdev_cleanup:
wilc_netdev_cleanup(wilc);
@@ -280,7 +278,6 @@ static void wilc_bus_remove(struct spi_device *spi)
struct wilc *wilc = spi_get_drvdata(spi);
struct wilc_spi *spi_priv = wilc->bus_data;
- clk_disable_unprepare(wilc->rtc_clk);
wilc_netdev_cleanup(wilc);
kfree(spi_priv);
}
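Both wilc1000 bus front ends (SDIO and SPI) above replace devm_clk_get_optional() plus a manual clk_prepare_enable() with devm_clk_get_optional_enabled(), so devres disables and unprepares the clock automatically and the explicit cleanup in the error paths and remove() can be dropped. A minimal probe-style sketch of the two styles; my_probe() and the device pointer are illustrative only:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int my_probe(struct device *dev)
{
        struct clk *clk;

        /*
         * Old pattern: get the clock, enable it by hand, and remember to
         * call clk_disable_unprepare() on every error path and in remove():
         *
         *      clk = devm_clk_get_optional(dev, "rtc");
         *      if (IS_ERR(clk))
         *              return PTR_ERR(clk);
         *      clk_prepare_enable(clk);
         */

        /* New pattern: the clock is enabled now and torn down by devres. */
        clk = devm_clk_get_optional_enabled(dev, "rtc");
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        return 0;
}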
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 663d77770fce..8b97accf6638 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -837,7 +837,7 @@ static int qtnf_channel_switch(struct wiphy *wiphy, struct net_device *dev,
static int qtnf_start_radar_detection(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms)
+ u32 cac_time_ms, int link_id)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
int ret;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 76b07db284f8..71840f41b73c 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -520,21 +520,21 @@ static int qtnf_event_handle_radar(struct qtnf_vif *vif,
cfg80211_radar_event(wiphy, &chandef, GFP_KERNEL);
break;
case QLINK_RADAR_CAC_FINISHED:
- if (!vif->wdev.cac_started)
+ if (!vif->wdev.links[0].cac_started)
break;
cfg80211_cac_event(vif->netdev, &chandef,
- NL80211_RADAR_CAC_FINISHED, GFP_KERNEL);
+ NL80211_RADAR_CAC_FINISHED, GFP_KERNEL, 0);
break;
case QLINK_RADAR_CAC_ABORTED:
- if (!vif->wdev.cac_started)
+ if (!vif->wdev.links[0].cac_started)
break;
cfg80211_cac_event(vif->netdev, &chandef,
- NL80211_RADAR_CAC_ABORTED, GFP_KERNEL);
+ NL80211_RADAR_CAC_ABORTED, GFP_KERNEL, 0);
break;
case QLINK_RADAR_CAC_STARTED:
- if (vif->wdev.cac_started)
+ if (vif->wdev.links[0].cac_started)
break;
if (!wiphy_ext_feature_isset(wiphy,
@@ -542,7 +542,7 @@ static int qtnf_event_handle_radar(struct qtnf_vif *vif,
break;
cfg80211_cac_event(vif->netdev, &chandef,
- NL80211_RADAR_CAC_STARTED, GFP_KERNEL);
+ NL80211_RADAR_CAC_STARTED, GFP_KERNEL, 0);
break;
default:
pr_warn("%s: unhandled radar event %u\n",
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index de3332eb7a22..a99776af56c2 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -2194,7 +2194,6 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
u8 table_case, tdma_case;
- bool wl_cpt_test = false, bt_cpt_test = false;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2202,29 +2201,16 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
if (efuse->share_ant) {
/* Shared-Ant */
- if (wl_cpt_test) {
- if (coex_stat->wl_gl_busy) {
- table_case = 20;
- tdma_case = 17;
- } else {
- table_case = 10;
- tdma_case = 15;
- }
- } else if (bt_cpt_test) {
- table_case = 26;
- tdma_case = 26;
- } else {
- if (coex_stat->wl_gl_busy &&
- coex_stat->wl_noisy_level == 0)
- table_case = 14;
- else
- table_case = 10;
+ if (coex_stat->wl_gl_busy &&
+ coex_stat->wl_noisy_level == 0)
+ table_case = 14;
+ else
+ table_case = 10;
- if (coex_stat->wl_gl_busy)
- tdma_case = 15;
- else
- tdma_case = 20;
- }
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 15;
+ else
+ tdma_case = 20;
} else {
/* Non-Shared-Ant */
table_case = 112;
@@ -2235,11 +2221,7 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
tdma_case = 120;
}
- if (wl_cpt_test)
- rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[1]);
- else
- rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
-
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
rtw_coex_table(rtwdev, false, table_case);
rtw_coex_tdma(rtwdev, false, tdma_case);
}
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index ab7d414d0ba6..b9b0114e253b 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -1468,10 +1468,12 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
val |= BIT_ENSWBCN >> 8;
rtw_write8(rtwdev, REG_CR + 1, val);
- val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
- bckp[1] = val;
- val &= ~(BIT_EN_BCNQ_DL >> 16);
- rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) {
+ val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
+ bckp[1] = val;
+ val &= ~(BIT_EN_BCNQ_DL >> 16);
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);
+ }
ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size);
if (ret) {
@@ -1496,7 +1498,8 @@ restore:
rsvd_pg_head = rtwdev->fifo.rsvd_boundary;
rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2,
rsvd_pg_head | BIT_BCN_VALID_V1);
- rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE)
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
rtw_write8(rtwdev, REG_CR + 1, bckp[0]);
return ret;
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 63326b352738..b39e90fb66b4 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -167,6 +167,12 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
mutex_lock(&rtwdev->mutex);
+ rtwvif->mac_id = rtw_acquire_macid(rtwdev);
+ if (rtwvif->mac_id >= RTW_MAX_MAC_ID_NUM) {
+ mutex_unlock(&rtwdev->mutex);
+ return -ENOSPC;
+ }
+
port = find_first_zero_bit(rtwdev->hw_port, RTW_PORT_NUM);
if (port >= RTW_PORT_NUM) {
mutex_unlock(&rtwdev->mutex);
@@ -214,7 +220,8 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
- rtw_dbg(rtwdev, RTW_DBG_STATE, "start vif %pM on port %d\n", vif->addr, rtwvif->port);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "start vif %pM mac_id %d on port %d\n",
+ vif->addr, rtwvif->mac_id, rtwvif->port);
return 0;
}
@@ -225,7 +232,8 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
u32 config = 0;
- rtw_dbg(rtwdev, RTW_DBG_STATE, "stop vif %pM on port %d\n", vif->addr, rtwvif->port);
+ rtw_dbg(rtwdev, RTW_DBG_STATE, "stop vif %pM mac_id %d on port %d\n",
+ vif->addr, rtwvif->mac_id, rtwvif->port);
mutex_lock(&rtwdev->mutex);
@@ -242,6 +250,7 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
config |= PORT_SET_BCN_CTRL;
rtw_vif_port_config(rtwdev, rtwvif, config);
clear_bit(rtwvif->port, rtwdev->hw_port);
+ rtw_release_macid(rtwdev, rtwvif->mac_id);
rtw_recalc_lps(rtwdev, NULL);
mutex_unlock(&rtwdev->mutex);
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index b0c9b0ff7017..bbdef38c7e34 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -311,17 +311,6 @@ static void rtw_ips_work(struct work_struct *work)
mutex_unlock(&rtwdev->mutex);
}
-static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
-{
- unsigned long mac_id;
-
- mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
- if (mac_id < RTW_MAX_MAC_ID_NUM)
- set_bit(mac_id, rtwdev->mac_id_map);
-
- return mac_id;
-}
-
static void rtw_sta_rc_work(struct work_struct *work)
{
struct rtw_sta_info *si = container_of(work, struct rtw_sta_info,
@@ -340,12 +329,14 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
int i;
- si->mac_id = rtw_acquire_macid(rtwdev);
- if (si->mac_id >= RTW_MAX_MAC_ID_NUM)
- return -ENOSPC;
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ si->mac_id = rtwvif->mac_id;
+ } else {
+ si->mac_id = rtw_acquire_macid(rtwdev);
+ if (si->mac_id >= RTW_MAX_MAC_ID_NUM)
+ return -ENOSPC;
+ }
- if (vif->type == NL80211_IFTYPE_STATION && vif->cfg.assoc == 0)
- rtwvif->mac_id = si->mac_id;
si->rtwdev = rtwdev;
si->sta = sta;
si->vif = vif;
@@ -370,11 +361,13 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
bool fw_exist)
{
struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ struct ieee80211_vif *vif = si->vif;
int i;
cancel_work_sync(&si->rc_work);
- rtw_release_macid(rtwdev, si->mac_id);
+ if (vif->type != NL80211_IFTYPE_STATION)
+ rtw_release_macid(rtwdev, si->mac_id);
if (fw_exist)
rtw_fw_media_status_report(rtwdev, si->mac_id, false);
@@ -614,6 +607,8 @@ static void rtw_reset_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
rtw_bf_disassoc(rtwdev, vif, NULL);
rtw_vif_assoc_changed(rtwvif, NULL);
rtw_txq_cleanup(rtwdev, vif->txq);
+
+ rtw_release_macid(rtwdev, rtwvif->mac_id);
}
void rtw_fw_recovery(struct rtw_dev *rtwdev)
@@ -2139,7 +2134,6 @@ int rtw_core_init(struct rtw_dev *rtwdev)
rtwdev->sec.total_cam_num = 32;
rtwdev->hal.current_channel = 1;
rtwdev->dm_info.fix_rate = U8_MAX;
- set_bit(RTW_BC_MC_MACID, rtwdev->mac_id_map);
rtw_stats_init(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 12b564ad3a58..945117afe143 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -742,7 +742,6 @@ struct rtw_txq {
unsigned long flags;
};
-#define RTW_BC_MC_MACID 1
DECLARE_EWMA(rssi, 10, 16);
struct rtw_sta_info {
@@ -805,7 +804,7 @@ struct rtw_bf_info {
struct rtw_vif {
enum rtw_net_type net_type;
u16 aid;
- u8 mac_id; /* for STA mode only */
+ u8 mac_id;
u8 mac_addr[ETH_ALEN];
u8 bssid[ETH_ALEN];
u8 port;
@@ -2131,6 +2130,17 @@ static inline bool rtw_chip_has_tx_stbc(struct rtw_dev *rtwdev)
return rtwdev->chip->tx_stbc;
}
+static inline u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
+{
+ unsigned long mac_id;
+
+ mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
+ if (mac_id < RTW_MAX_MAC_ID_NUM)
+ set_bit(mac_id, rtwdev->mac_id_map);
+
+ return mac_id;
+}
+
static inline void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id)
{
clear_bit(mac_id, rtwdev->mac_id_map);
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index c02ac673be32..dae7ca148865 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -46,7 +46,8 @@ void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb)
le32_encode_bits(pkt_info->ls, RTW_TX_DESC_W0_LS) |
le32_encode_bits(pkt_info->dis_qselseq, RTW_TX_DESC_W0_DISQSELSEQ);
- tx_desc->w1 = le32_encode_bits(pkt_info->qsel, RTW_TX_DESC_W1_QSEL) |
+ tx_desc->w1 = le32_encode_bits(pkt_info->mac_id, RTW_TX_DESC_W1_MACID) |
+ le32_encode_bits(pkt_info->qsel, RTW_TX_DESC_W1_QSEL) |
le32_encode_bits(pkt_info->rate_id, RTW_TX_DESC_W1_RATE_ID) |
le32_encode_bits(pkt_info->sec_type, RTW_TX_DESC_W1_SEC_TYPE) |
le32_encode_bits(pkt_info->pkt_offset, RTW_TX_DESC_W1_PKT_OFFSET) |
@@ -401,14 +402,18 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
const struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_vif *vif = info->control.vif;
struct rtw_sta_info *si;
- struct ieee80211_vif *vif = NULL;
+ struct rtw_vif *rtwvif;
__le16 fc = hdr->frame_control;
bool bmc;
if (sta) {
si = (struct rtw_sta_info *)sta->drv_priv;
- vif = si->vif;
+ pkt_info->mac_id = si->mac_id;
+ } else if (vif) {
+ rtwvif = (struct rtw_vif *)vif->drv_priv;
+ pkt_info->mac_id = rtwvif->mac_id;
}
if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc))
diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h
index 324189606257..3d544fd7f60f 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.h
+++ b/drivers/net/wireless/realtek/rtw88/tx.h
@@ -27,6 +27,7 @@ struct rtw_tx_desc {
#define RTW_TX_DESC_W0_BMC BIT(24)
#define RTW_TX_DESC_W0_LS BIT(26)
#define RTW_TX_DESC_W0_DISQSELSEQ BIT(31)
+#define RTW_TX_DESC_W1_MACID GENMASK(7, 0)
#define RTW_TX_DESC_W1_QSEL GENMASK(12, 8)
#define RTW_TX_DESC_W1_RATE_ID GENMASK(20, 16)
#define RTW_TX_DESC_W1_SEC_TYPE GENMASK(23, 22)
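The new RTW_TX_DESC_W1_MACID field above is packed into the descriptor with le32_encode_bits() against a GENMASK() layout, the usual way rtw88 builds little-endian hardware words. A short self-contained sketch with hypothetical field names (DEMO_W1_MACID, DEMO_W1_QSEL):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_W1_MACID   GENMASK(7, 0)
#define DEMO_W1_QSEL    GENMASK(12, 8)

struct demo_tx_desc {
        __le32 w0;
        __le32 w1;
};

static void demo_fill_w1(struct demo_tx_desc *d, u8 mac_id, u8 qsel)
{
        /* each value is shifted into its field and stored little-endian */
        d->w1 = le32_encode_bits(mac_id, DEMO_W1_MACID) |
                le32_encode_bits(qsel, DEMO_W1_QSEL);
}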
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index a67e16ded91d..7070c85e2c28 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -191,7 +191,7 @@ void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx cur;
if (chandef) {
- cur = atomic_cmpxchg(&hal->roc_entity_idx,
+ cur = atomic_cmpxchg(&hal->roc_chanctx_idx,
RTW89_CHANCTX_IDLE, idx);
if (cur != RTW89_CHANCTX_IDLE) {
rtw89_debug(rtwdev, RTW89_DBG_TXRX,
@@ -201,7 +201,7 @@ void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
hal->roc_chandef = *chandef;
} else {
- cur = atomic_cmpxchg(&hal->roc_entity_idx, idx,
+ cur = atomic_cmpxchg(&hal->roc_chanctx_idx, idx,
RTW89_CHANCTX_IDLE);
if (cur == idx)
return;
@@ -230,7 +230,7 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev)
hal->entity_pause = false;
bitmap_zero(hal->entity_map, NUM_OF_RTW89_CHANCTX);
bitmap_zero(hal->changes, NUM_OF_RTW89_CHANCTX_CHANGES);
- atomic_set(&hal->roc_entity_idx, RTW89_CHANCTX_IDLE);
+ atomic_set(&hal->roc_chanctx_idx, RTW89_CHANCTX_IDLE);
rtw89_config_default_chandef(rtwdev);
}
@@ -2395,11 +2395,11 @@ static void rtw89_swap_chanctx(struct rtw89_dev *rtwdev,
rtwvif->chanctx_idx = idx1;
}
- cur = atomic_read(&hal->roc_entity_idx);
+ cur = atomic_read(&hal->roc_chanctx_idx);
if (cur == idx1)
- atomic_set(&hal->roc_entity_idx, idx2);
+ atomic_set(&hal->roc_chanctx_idx, idx2);
else if (cur == idx2)
- atomic_set(&hal->roc_entity_idx, idx1);
+ atomic_set(&hal->roc_chanctx_idx, idx1);
}
int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
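The roc_entity_idx field renamed above to roc_chanctx_idx is claimed and released through atomic_cmpxchg() against an idle sentinel, so only one caller can own the remain-on-channel slot at a time. A generic sketch of that claim/release idiom, with placeholder names (SLOT_IDLE, active_idx, claim_slot, release_slot):

#include <linux/atomic.h>
#include <linux/types.h>

#define SLOT_IDLE       -1

static atomic_t active_idx = ATOMIC_INIT(SLOT_IDLE);

/* returns true only for the caller that moved the slot from idle to idx */
static bool claim_slot(int idx)
{
        return atomic_cmpxchg(&active_idx, SLOT_IDLE, idx) == SLOT_IDLE;
}

/* only the current owner (idx) can put the slot back to idle */
static void release_slot(int idx)
{
        atomic_cmpxchg(&active_idx, idx, SLOT_IDLE);
}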
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index c1f6fcef904b..df51b29142aa 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -129,6 +129,13 @@ static const u32 cxtbl[] = {
static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
/* firmware version must be in decreasing order for each chip */
+ {RTL8852BT, RTW89_FW_VER_CODE(0, 29, 90, 0),
+ .fcxbtcrpt = 7, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
+ .fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7,
+ .fcxbtver = 7, .fcxbtscan = 7, .fcxbtafh = 7, .fcxbtdevinfo = 7,
+ .fwlrole = 7, .frptmap = 3, .fcxctrl = 7, .fcxinit = 7,
+ .fwevntrptl = 1, .drvinfo_type = 1, .info_buf = 1800, .max_role_num = 6,
+ },
{RTL8922A, RTW89_FW_VER_CODE(0, 35, 8, 0),
.fcxbtcrpt = 8, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
.fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7,
@@ -1351,6 +1358,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
pfinfo = &pfwinfo->rpt_ctrl.finfo.v8;
pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v8);
break;
+ } else if (ver->fcxbtcrpt == 7) {
+ pfinfo = &pfwinfo->rpt_ctrl.finfo.v7;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v7);
+ break;
} else {
goto err;
}
@@ -1655,6 +1666,38 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
pfwinfo->event[BTF_EVNT_RPT]);
dm->error.map.bt_rfk_timeout = bt->rfk_info.map.timeout;
+ } else if (ver->fcxbtcrpt == 7) {
+ prpt->v7 = pfwinfo->rpt_ctrl.finfo.v7;
+ pfwinfo->rpt_en_map = le32_to_cpu(prpt->v7.rpt_info.en);
+ wl->ver_info.fw_coex = le32_to_cpu(prpt->v7.rpt_info.cx_ver);
+ wl->ver_info.fw = le32_to_cpu(prpt->v7.rpt_info.fw_ver);
+
+ for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ memcpy(&dm->gnt.band[i], &prpt->v7.gnt_val[i][0],
+ sizeof(dm->gnt.band[i]));
+
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_TX] =
+ le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_HI_TX_V105]);
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_RX] =
+ le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_HI_RX_V105]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_TX] =
+ le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_LO_TX_V105]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_RX] =
+ le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_LO_RX_V105]);
+
+ val1 = le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_POLLUTED_V105]);
+ if (val1 > btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW])
+ val1 -= btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW]; /* diff */
+
+ btc->cx.cnt_bt[BTC_BCNT_POLUT_DIFF] = val1;
+ btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW] =
+ le16_to_cpu(prpt->v7.bt_cnt[BTC_BCNT_POLLUTED_V105]);
+
+ val1 = pfwinfo->event[BTF_EVNT_RPT];
+ _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_HANG, 0);
+ _chk_btc_err(rtwdev, BTC_DCNT_RPT_HANG, val1);
+ _chk_btc_err(rtwdev, BTC_DCNT_WL_FW_VER_MATCH, 0);
+ _chk_btc_err(rtwdev, BTC_DCNT_BTTX_HANG, 0);
} else if (ver->fcxbtcrpt == 8) {
prpt->v8 = pfwinfo->rpt_ctrl.finfo.v8;
pfwinfo->rpt_en_map = le32_to_cpu(prpt->v8.rpt_info.en);
@@ -2397,7 +2440,7 @@ static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev,
if (val == fwinfo->rpt_en_map)
return;
- if (btc->ver->fcxbtcrpt == 8) {
+ if (btc->ver->fcxbtcrpt == 7 || btc->ver->fcxbtcrpt == 8) {
r.v8.type = SET_REPORT_EN;
r.v8.fver = btc->ver->fcxbtcrpt;
r.v8.len = sizeof(r.v8.map);
@@ -2567,6 +2610,10 @@ static void _fw_set_drv_info(struct rtw89_dev *rtwdev, u8 type)
rtw89_fw_h2c_cxdrv_role_v1(rtwdev, type);
else if (ver->fwlrole == 2)
rtw89_fw_h2c_cxdrv_role_v2(rtwdev, type);
+ else if (ver->fwlrole == 7)
+ rtw89_fw_h2c_cxdrv_role_v7(rtwdev, type);
+ else if (ver->fwlrole == 8)
+ rtw89_fw_h2c_cxdrv_role_v8(rtwdev, type);
break;
case CXDRVINFO_CTRL:
if (ver->drvinfo_type == 1)
@@ -2748,7 +2795,7 @@ static void _set_gnt_v1(struct rtw89_dev *rtwdev, u8 phy_map,
rtw89_mac_cfg_gnt_v2(rtwdev, &dm->gnt);
}
-#define BTC_TDMA_WLROLE_MAX 2
+#define BTC_TDMA_WLROLE_MAX 3
static void _set_bt_ignore_wlan_act(struct rtw89_dev *rtwdev, u8 enable)
{
@@ -2998,10 +3045,12 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_wl_active_role *r;
struct rtw89_btc_wl_active_role_v1 *r1;
struct rtw89_btc_wl_active_role_v2 *r2;
+ struct rtw89_btc_wl_active_role_v7 *r7;
struct rtw89_btc_wl_rlink *rlink;
u8 en = 0, i, ch = 0, bw = 0;
u8 mode, connect_cnt;
@@ -3018,6 +3067,9 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
} else if (ver->fwlrole == 2) {
mode = wl_rinfo_v2->link_mode;
connect_cnt = wl_rinfo_v2->connect_cnt;
+ } else if (ver->fwlrole == 7) {
+ mode = wl_rinfo_v7->link_mode;
+ connect_cnt = wl_rinfo_v7->connect_cnt;
} else if (ver->fwlrole == 8) {
mode = wl_rinfo_v8->link_mode;
connect_cnt = wl_rinfo_v8->connect_cnt;
@@ -3036,6 +3088,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
r = &wl_rinfo->active_role[i];
r1 = &wl_rinfo_v1->active_role_v1[i];
r2 = &wl_rinfo_v2->active_role_v2[i];
+ r7 = &wl_rinfo_v7->active_role[i];
rlink = &wl_rinfo_v8->rlink[i][0];
if (ver->fwlrole == 0 &&
@@ -3056,6 +3109,12 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
ch = r2->ch;
bw = r2->bw;
break;
+ } else if (ver->fwlrole == 7 &&
+ (r7->role == RTW89_WIFI_ROLE_P2P_GO ||
+ r7->role == RTW89_WIFI_ROLE_P2P_CLIENT)) {
+ ch = r7->ch;
+ bw = r7->bw;
+ break;
} else if (ver->fwlrole == 8 &&
(rlink->role == RTW89_WIFI_ROLE_P2P_GO ||
rlink->role == RTW89_WIFI_ROLE_P2P_CLIENT)) {
@@ -3071,6 +3130,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
r = &wl_rinfo->active_role[i];
r1 = &wl_rinfo_v1->active_role_v1[i];
r2 = &wl_rinfo_v2->active_role_v2[i];
+ r7 = &wl_rinfo_v7->active_role[i];
rlink = &wl_rinfo_v8->rlink[i][0];
if (ver->fwlrole == 0 &&
@@ -3088,6 +3148,11 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
ch = r2->ch;
bw = r2->bw;
break;
+ } else if (ver->fwlrole == 7 &&
+ r7->connected && r7->band == RTW89_BAND_2G) {
+ ch = r7->ch;
+ bw = r7->bw;
+ break;
} else if (ver->fwlrole == 8 &&
rlink->connected && rlink->rf_band == RTW89_BAND_2G) {
ch = rlink->ch;
@@ -3146,6 +3211,7 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
struct rtw89_btc_bt_hid_desc *hid = &bt_linfo->hid_desc;
@@ -3164,6 +3230,8 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
connect_cnt = wl_rinfo_v1->connect_cnt;
else if (ver->fwlrole == 2)
connect_cnt = wl_rinfo_v2->connect_cnt;
+ else if (ver->fwlrole == 7)
+ connect_cnt = wl_rinfo_v7->connect_cnt;
else if (ver->fwlrole == 8)
connect_cnt = wl_rinfo_v8->connect_cnt;
@@ -4082,6 +4150,8 @@ static void _set_ant_v0(struct rtw89_dev *rtwdev, bool force_exec,
dbcc_chg = wl->role_info_v1.dbcc_chg;
else if (btc->ver->fwlrole == 2)
dbcc_chg = wl->role_info_v2.dbcc_chg;
+ else if (btc->ver->fwlrole == 7)
+ dbcc_chg = wl->role_info_v7.dbcc_chg;
else if (btc->ver->fwlrole == 8)
dbcc_chg = wl->role_info_v8.dbcc_chg;
@@ -4754,6 +4824,7 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_wl_role_info *wl_rinfo_v0 = &wl->role_info;
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
@@ -4775,6 +4846,8 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
wl_rinfo.link_mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
wl_rinfo.link_mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 7)
+ wl_rinfo.link_mode = wl_rinfo_v7->link_mode;
else if (ver->fwlrole == 8)
wl_rinfo.link_mode = wl_rinfo_v8->link_mode;
else
@@ -4790,6 +4863,8 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
wl_rinfo.dbcc_2g_phy = wl_rinfo_v1->dbcc_2g_phy;
} else if (ver->fwlrole == 2) {
wl_rinfo.dbcc_2g_phy = wl_rinfo_v2->dbcc_2g_phy;
+ } else if (ver->fwlrole == 7) {
+ wl_rinfo.dbcc_2g_phy = wl_rinfo_v7->dbcc_2g_phy;
} else if (ver->fwlrole == 8) {
wl_rinfo.dbcc_2g_phy = wl_rinfo_v8->dbcc_2g_phy;
} else {
@@ -4835,37 +4910,56 @@ static void _set_wl_preagc_ctrl(struct rtw89_dev *rtwdev)
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
- struct rtw89_btc_wl_role_info_v2 *wl_rinfo = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v2 *rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *rinfo_v7 = &wl->role_info_v7;
+ struct rtw89_btc_wl_role_info_v8 *rinfo_v8 = &wl->role_info_v8;
const struct rtw89_chip_info *chip = rtwdev->chip;
- const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_dm *dm = &btc->dm;
- u8 is_preagc, val;
+ u8 is_preagc, val, link_mode, dbcc_2g_phy;
+ u8 role_ver = rtwdev->btc.ver->fwlrole;
+ bool dbcc_en;
if (btc->manual_ctrl)
return;
- if (wl_rinfo->link_mode == BTC_WLINK_25G_MCC)
+ if (role_ver == 2) {
+ dbcc_en = rinfo_v2->dbcc_en;
+ link_mode = rinfo_v2->link_mode;
+ dbcc_2g_phy = rinfo_v2->dbcc_2g_phy;
+ } else if (role_ver == 7) {
+ dbcc_en = rinfo_v7->dbcc_en;
+ link_mode = rinfo_v7->link_mode;
+ dbcc_2g_phy = rinfo_v7->dbcc_2g_phy;
+ } else if (role_ver == 8) {
+ dbcc_en = rinfo_v8->dbcc_en;
+ link_mode = rinfo_v8->link_mode;
+ dbcc_2g_phy = rinfo_v8->dbcc_2g_phy;
+ } else {
+ return;
+ }
+
+ if (link_mode == BTC_WLINK_25G_MCC) {
is_preagc = BTC_PREAGC_BB_FWCTRL;
- else if (!(bt->run_patch_code && bt->enable.now))
+ } else if (!(bt->run_patch_code && bt->enable.now)) {
is_preagc = BTC_PREAGC_DISABLE;
- else if (wl_rinfo->link_mode == BTC_WLINK_5G)
+ } else if (link_mode == BTC_WLINK_5G) {
is_preagc = BTC_PREAGC_DISABLE;
- else if (wl_rinfo->link_mode == BTC_WLINK_NOLINK ||
- btc->cx.bt.link_info.profile_cnt.now == 0)
+ } else if (link_mode == BTC_WLINK_NOLINK ||
+ btc->cx.bt.link_info.profile_cnt.now == 0) {
is_preagc = BTC_PREAGC_DISABLE;
- else if (dm->tdma_now.type != CXTDMA_OFF &&
+ } else if (dm->tdma_now.type != CXTDMA_OFF &&
!bt_linfo->hfp_desc.exist &&
!bt_linfo->hid_desc.exist &&
- dm->fddt_train == BTC_FDDT_DISABLE)
+ dm->fddt_train == BTC_FDDT_DISABLE) {
is_preagc = BTC_PREAGC_DISABLE;
- else if (ver->fwlrole == 2 && wl_rinfo->dbcc_en &&
- wl_rinfo->dbcc_2g_phy != RTW89_PHY_1)
+ } else if (dbcc_en && (dbcc_2g_phy != RTW89_PHY_1)) {
is_preagc = BTC_PREAGC_DISABLE;
- else if (btc->ant_type == BTC_ANT_SHARED)
+ } else if (btc->ant_type == BTC_ANT_SHARED) {
is_preagc = BTC_PREAGC_DISABLE;
- else
+ } else {
is_preagc = BTC_PREAGC_ENABLE;
+ }
if (dm->wl_pre_agc_rb != dm->wl_pre_agc &&
dm->wl_pre_agc_rb != BTC_PREAGC_NOTFOUND) {
@@ -4968,6 +5062,7 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_txtime_data data = {.rtwdev = rtwdev};
u8 mode, igno_bt, tx_retry;
@@ -4984,6 +5079,8 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 7)
+ mode = wl_rinfo_v7->link_mode;
else if (ver->fwlrole == 8)
mode = wl_rinfo_v8->link_mode;
else
@@ -5043,6 +5140,7 @@ static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
bool bt_hi_lna_rx = false;
@@ -5054,6 +5152,8 @@ static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 7)
+ mode = wl_rinfo_v7->link_mode;
else if (ver->fwlrole == 8)
mode = wl_rinfo_v8->link_mode;
else
@@ -5359,15 +5459,26 @@ static void _action_wl_2g_scc_v2(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_dm *dm = &btc->dm;
- struct rtw89_btc_wl_role_info_v2 *wl_rinfo = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v2 *rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *rinfo_v7 = &wl->role_info_v7;
+ u32 dur, mrole_type, mrole_noa_duration;
u16 policy_type = BTC_CXP_OFF_BT;
- u32 dur;
+
+ if (btc->ver->fwlrole == 2) {
+ mrole_type = rinfo_v2->mrole_type;
+ mrole_noa_duration = rinfo_v2->mrole_noa_duration;
+ } else if (btc->ver->fwlrole == 7) {
+ mrole_type = rinfo_v7->mrole_type;
+ mrole_noa_duration = rinfo_v7->mrole_noa_duration;
+ } else {
+ return;
+ }
if (btc->ant_type == BTC_ANT_DEDICATED) {
policy_type = BTC_CXP_OFF_EQ0;
} else {
/* shared-antenna */
- switch (wl_rinfo->mrole_type) {
+ switch (mrole_type) {
case BTC_WLMROLE_STA_GC:
dm->wl_scc.null_role1 = RTW89_WIFI_ROLE_STATION;
dm->wl_scc.null_role2 = RTW89_WIFI_ROLE_P2P_CLIENT;
@@ -5385,7 +5496,7 @@ static void _action_wl_2g_scc_v2(struct rtw89_dev *rtwdev)
case BTC_WLMROLE_STA_GO_NOA:
dm->wl_scc.null_role1 = RTW89_WIFI_ROLE_STATION;
dm->wl_scc.null_role2 = RTW89_WIFI_ROLE_NONE;
- dur = wl_rinfo->mrole_noa_duration;
+ dur = mrole_noa_duration;
if (wl->status.map._4way) {
dm->wl_scc.ebt_null = 0;
@@ -5567,6 +5678,14 @@ _update_rssi_state(struct rtw89_dev *rtwdev, u8 pre_state, u8 rssi, u8 thresh)
return next_state;
}
+static void _wl_req_mac(struct rtw89_dev *rtwdev, u8 mac)
+{
+ if (mac == RTW89_MAC_0)
+ rtw89_write32_clr(rtwdev, R_AX_BTC_CFG, B_AX_WL_SRC);
+ else
+ rtw89_write32_set(rtwdev, R_AX_BTC_CFG, B_AX_WL_SRC);
+}
+
static
void _update_dbcc_band(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
@@ -6065,12 +6184,20 @@ static u8 _chk_dbcc(struct rtw89_dev *rtwdev, struct rtw89_btc_chdef *ch,
u8 *phy, u8 *role, u8 *dbcc_2g_phy)
{
struct rtw89_btc_wl_info *wl = &rtwdev->btc.cx.wl;
- struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &wl->role_info_v8;
+ struct rtw89_btc_wl_role_info_v7 *rinfo_v7 = &wl->role_info_v7;
+ struct rtw89_btc_wl_role_info_v8 *rinfo_v8 = &wl->role_info_v8;
bool is_2g_ch_exist = false, is_multi_role_in_2g_phy = false;
- u8 j, k, dbcc_2g_cid, dbcc_2g_cid2;
+ u8 j, k, dbcc_2g_cid, dbcc_2g_cid2, connect_cnt;
+
+ if (rtwdev->btc.ver->fwlrole == 7)
+ connect_cnt = rinfo_v7->connect_cnt;
+ else if (rtwdev->btc.ver->fwlrole == 8)
+ connect_cnt = rinfo_v8->connect_cnt;
+ else
+ return BTC_WLINK_NOLINK;
/* find out the 2G-PHY by connect-id ->ch */
- for (j = 0; j < wl_rinfo->connect_cnt; j++) {
+ for (j = 0; j < connect_cnt; j++) {
if (ch[j].center_ch <= 14) {
is_2g_ch_exist = true;
break;
@@ -6085,11 +6212,11 @@ static u8 _chk_dbcc(struct rtw89_dev *rtwdev, struct rtw89_btc_chdef *ch,
*dbcc_2g_phy = phy[dbcc_2g_cid];
/* connect_cnt <= 2 */
- if (wl_rinfo->connect_cnt < BTC_TDMA_WLROLE_MAX)
+ if (connect_cnt < BTC_TDMA_WLROLE_MAX)
return (_get_role_link_mode((role[dbcc_2g_cid])));
/* find the other-port in the 2G-PHY, ex: PHY-0:6G, PHY1: mcc/scc */
- for (k = 0; k < wl_rinfo->connect_cnt; k++) {
+ for (k = 0; k < connect_cnt; k++) {
if (k == dbcc_2g_cid)
continue;
@@ -6116,29 +6243,54 @@ static u8 _chk_dbcc(struct rtw89_dev *rtwdev, struct rtw89_btc_chdef *ch,
static void _update_role_link_mode(struct rtw89_dev *rtwdev,
bool client_joined, u32 noa)
{
- struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &rtwdev->btc.cx.wl.role_info_v8;
+ struct rtw89_btc_wl_role_info_v8 *rinfo_v8 = &rtwdev->btc.cx.wl.role_info_v8;
+ struct rtw89_btc_wl_role_info_v7 *rinfo_v7 = &rtwdev->btc.cx.wl.role_info_v7;
+ u8 role_ver = rtwdev->btc.ver->fwlrole;
u32 type = BTC_WLMROLE_NONE, dur = 0;
- u32 wl_role = wl_rinfo->role_map;
+ u8 link_mode, connect_cnt;
+ u32 wl_role;
+
+ if (role_ver == 7) {
+ wl_role = rinfo_v7->role_map;
+ link_mode = rinfo_v7->link_mode;
+ connect_cnt = rinfo_v7->connect_cnt;
+ } else if (role_ver == 8) {
+ wl_role = rinfo_v8->role_map;
+ link_mode = rinfo_v8->link_mode;
+ connect_cnt = rinfo_v8->connect_cnt;
+ } else {
+ return;
+ }
/* if no client_joined, don't care P2P-GO/AP role */
if (((wl_role & BIT(RTW89_WIFI_ROLE_P2P_GO)) ||
(wl_role & BIT(RTW89_WIFI_ROLE_AP))) && !client_joined) {
- if (wl_rinfo->link_mode == BTC_WLINK_2G_SCC) {
- wl_rinfo->link_mode = BTC_WLINK_2G_STA;
- wl_rinfo->connect_cnt--;
- } else if (wl_rinfo->link_mode == BTC_WLINK_2G_GO ||
- wl_rinfo->link_mode == BTC_WLINK_2G_AP) {
- wl_rinfo->link_mode = BTC_WLINK_NOLINK;
- wl_rinfo->connect_cnt--;
+ if (link_mode == BTC_WLINK_2G_SCC) {
+ if (role_ver == 7) {
+ rinfo_v7->link_mode = BTC_WLINK_2G_STA;
+ rinfo_v7->connect_cnt--;
+ } else if (role_ver == 8) {
+ rinfo_v8->link_mode = BTC_WLINK_2G_STA;
+ rinfo_v8->connect_cnt--;
+ }
+ } else if (link_mode == BTC_WLINK_2G_GO ||
+ link_mode == BTC_WLINK_2G_AP) {
+ if (role_ver == 7) {
+ rinfo_v7->link_mode = BTC_WLINK_NOLINK;
+ rinfo_v7->connect_cnt--;
+ } else if (role_ver == 8) {
+ rinfo_v8->link_mode = BTC_WLINK_NOLINK;
+ rinfo_v8->connect_cnt--;
+ }
}
}
/* Identify 2-Role type */
- if (wl_rinfo->connect_cnt >= 2 &&
- (wl_rinfo->link_mode == BTC_WLINK_2G_SCC ||
- wl_rinfo->link_mode == BTC_WLINK_2G_MCC ||
- wl_rinfo->link_mode == BTC_WLINK_25G_MCC ||
- wl_rinfo->link_mode == BTC_WLINK_5G)) {
+ if (connect_cnt >= 2 &&
+ (link_mode == BTC_WLINK_2G_SCC ||
+ link_mode == BTC_WLINK_2G_MCC ||
+ link_mode == BTC_WLINK_25G_MCC ||
+ link_mode == BTC_WLINK_5G)) {
if ((wl_role & BIT(RTW89_WIFI_ROLE_P2P_GO)) ||
(wl_role & BIT(RTW89_WIFI_ROLE_AP)))
type = noa ? BTC_WLMROLE_STA_GO_NOA : BTC_WLMROLE_STA_GO;
@@ -6150,8 +6302,167 @@ static void _update_role_link_mode(struct rtw89_dev *rtwdev,
dur = noa;
}
- wl_rinfo->mrole_type = type;
- wl_rinfo->mrole_noa_duration = dur;
+ if (role_ver == 7) {
+ rinfo_v7->mrole_type = type;
+ rinfo_v7->mrole_noa_duration = dur;
+ } else if (role_ver == 8) {
+ rinfo_v8->mrole_type = type;
+ rinfo_v8->mrole_noa_duration = dur;
+ }
+}
+
+static void _update_wl_info_v7(struct rtw89_dev *rtwdev, u8 rid)
+{
+ struct rtw89_btc_chdef cid_ch[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER];
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo = &wl->role_info_v7;
+ struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
+ struct rtw89_btc_wl_link_info *wl_linfo = wl->link_info;
+ struct rtw89_btc_wl_active_role_v7 *act_role = NULL;
+ u8 i, mode, cnt = 0, cnt_2g = 0, cnt_5g = 0, phy_now = RTW89_PHY_MAX, phy_dbcc;
+ bool b2g = false, b5g = false, client_joined = false, client_inc_2g = false;
+ u8 client_cnt_last[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
+ u8 cid_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
+ u8 cid_phy[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
+ u8 mac = RTW89_MAC_0, dbcc_2g_phy = RTW89_PHY_0;
+ u32 noa_duration = 0;
+
+ memset(wl_rinfo, 0, sizeof(*wl_rinfo));
+
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
+ if (!wl_linfo[i].active || wl_linfo[i].phy >= RTW89_PHY_MAX)
+ continue;
+
+ act_role = &wl_rinfo->active_role[i];
+ act_role->role = wl_linfo[i].role;
+
+ /* is this role connected? */
+ if (wl_linfo[i].connected == MLME_NO_LINK) {
+ act_role->connected = 0;
+ continue;
+ } else if (wl_linfo[i].connected == MLME_LINKING) {
+ continue;
+ }
+
+ cnt++;
+ act_role->connected = 1;
+ act_role->pid = wl_linfo[i].pid;
+ act_role->phy = wl_linfo[i].phy;
+ act_role->band = wl_linfo[i].band;
+ act_role->ch = wl_linfo[i].ch;
+ act_role->bw = wl_linfo[i].bw;
+ act_role->noa = wl_linfo[i].noa;
+ act_role->noa_dur = wl_linfo[i].noa_duration;
+ cid_ch[cnt - 1] = wl_linfo[i].chdef;
+ cid_phy[cnt - 1] = wl_linfo[i].phy;
+ cid_role[cnt - 1] = wl_linfo[i].role;
+ wl_rinfo->role_map |= BIT(wl_linfo[i].role);
+
+ if (rid == i)
+ phy_now = act_role->phy;
+
+ if (wl_linfo[i].role == RTW89_WIFI_ROLE_P2P_GO ||
+ wl_linfo[i].role == RTW89_WIFI_ROLE_AP) {
+ if (wl_linfo[i].client_cnt > 1)
+ client_joined = true;
+ if (client_cnt_last[i] < wl_linfo[i].client_cnt &&
+ wl_linfo[i].chdef.band == RTW89_BAND_2G)
+ client_inc_2g = true;
+ act_role->client_cnt = wl_linfo[i].client_cnt;
+ } else {
+ act_role->client_cnt = 0;
+ }
+
+ if (act_role->noa && act_role->noa_dur > 0)
+ noa_duration = act_role->noa_dur;
+
+ if (rtwdev->dbcc_en) {
+ phy_dbcc = wl_linfo[i].phy;
+ wl_dinfo->role[phy_dbcc] |= BIT(wl_linfo[i].role);
+ wl_dinfo->op_band[phy_dbcc] = wl_linfo[i].chdef.band;
+ }
+
+ if (wl_linfo[i].chdef.band != RTW89_BAND_2G) {
+ cnt_5g++;
+ b5g = true;
+ } else {
+ if (((wl_linfo[i].role == RTW89_WIFI_ROLE_P2P_GO ||
+ wl_linfo[i].role == RTW89_WIFI_ROLE_AP) &&
+ client_joined) ||
+ wl_linfo[i].role == RTW89_WIFI_ROLE_P2P_CLIENT)
+ wl_rinfo->p2p_2g = 1;
+
+ if ((wl_linfo[i].mode & BIT(BTC_WL_MODE_11B)) ||
+ (wl_linfo[i].mode & BIT(BTC_WL_MODE_11G)))
+ wl->bg_mode = 1;
+ else if (wl_linfo[i].mode & BIT(BTC_WL_MODE_HE))
+ wl->he_mode = true;
+
+ cnt_2g++;
+ b2g = true;
+ }
+
+ if (act_role->band == RTW89_BAND_5G && act_role->ch >= 100)
+ wl->is_5g_hi_channel = 1;
+ else
+ wl->is_5g_hi_channel = 0;
+ }
+
+ wl_rinfo->connect_cnt = cnt;
+ wl->client_cnt_inc_2g = client_inc_2g;
+
+ if (cnt == 0) {
+ mode = BTC_WLINK_NOLINK;
+ wl_rinfo->role_map = BIT(RTW89_WIFI_ROLE_NONE);
+ } else if (!b2g && b5g) {
+ mode = BTC_WLINK_5G;
+ } else if (wl_rinfo->role_map & BIT(RTW89_WIFI_ROLE_NAN)) {
+ mode = BTC_WLINK_2G_NAN;
+ } else if (cnt > BTC_TDMA_WLROLE_MAX) {
+ mode = BTC_WLINK_OTHER;
+ } else if (rtwdev->dbcc_en) {
+ mode = _chk_dbcc(rtwdev, cid_ch, cid_phy, cid_role, &dbcc_2g_phy);
+
+ /* correct 2G-located PHY band for gnt ctrl */
+ if (dbcc_2g_phy < RTW89_PHY_MAX)
+ wl_dinfo->op_band[dbcc_2g_phy] = RTW89_BAND_2G;
+ } else if (b2g && b5g && cnt == 2) {
+ mode = BTC_WLINK_25G_MCC;
+ } else if (!b5g && cnt == 2) { /* cnt_connect = 2 */
+ if (_chk_role_ch_group(&cid_ch[0], &cid_ch[cnt - 1]))
+ mode = BTC_WLINK_2G_SCC;
+ else
+ mode = BTC_WLINK_2G_MCC;
+ } else if (!b5g && cnt == 1) { /* cnt_connect = 1 */
+ mode = _get_role_link_mode(cid_role[0]);
+ } else {
+ mode = BTC_WLINK_NOLINK;
+ }
+
+ wl_rinfo->link_mode = mode;
+ _update_role_link_mode(rtwdev, client_joined, noa_duration);
+
+ /* todo DBCC related event */
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC] wl_info phy_now=%d\n", phy_now);
+
+ if (wl_rinfo->dbcc_en != rtwdev->dbcc_en) {
+ wl_rinfo->dbcc_chg = 1;
+ wl_rinfo->dbcc_en = rtwdev->dbcc_en;
+ btc->cx.cnt_wl[BTC_WCNT_DBCC_CHG]++;
+ }
+
+ if (rtwdev->dbcc_en) {
+ wl_rinfo->dbcc_2g_phy = dbcc_2g_phy;
+
+ if (dbcc_2g_phy == RTW89_PHY_1)
+ mac = RTW89_MAC_1;
+
+ _update_dbcc_band(rtwdev, RTW89_PHY_0);
+ _update_dbcc_band(rtwdev, RTW89_PHY_1);
+ }
+ _wl_req_mac(rtwdev, mac);
+ _fw_set_drv_info(rtwdev, CXDRVINFO_ROLE);
}
static void _update_wl_info_v8(struct rtw89_dev *rtwdev, u8 role_id, u8 rlink_id,
@@ -6496,6 +6807,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
u8 mode, igno_bt, always_freerun;
@@ -6511,6 +6823,8 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 7)
+ mode = wl_rinfo_v7->link_mode;
else if (ver->fwlrole == 8)
mode = wl_rinfo_v8->link_mode;
else
@@ -6657,7 +6971,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
_action_wl_2g_scc(rtwdev);
else if (ver->fwlrole == 1)
_action_wl_2g_scc_v1(rtwdev);
- else if (ver->fwlrole == 2)
+ else if (ver->fwlrole == 2 || ver->fwlrole == 7)
_action_wl_2g_scc_v2(rtwdev);
else if (ver->fwlrole == 8)
_action_wl_2g_scc_v8(rtwdev);
@@ -7250,6 +7564,9 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
} else if (ver->fwlrole == 2) {
*wlinfo = r;
_update_wl_info_v2(rtwdev);
+ } else if (ver->fwlrole == 7) {
+ *wlinfo = r;
+ _update_wl_info_v7(rtwdev, r.pid);
} else if (ver->fwlrole == 8) {
wlinfo = &wl->rlink_info[r.pid][rlink_id];
*wlinfo = r;
@@ -7856,6 +8173,7 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
u8 mode;
@@ -7870,6 +8188,8 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 7)
+ mode = wl_rinfo_v7->link_mode;
else if (ver->fwlrole == 8)
mode = wl_rinfo_v8->link_mode;
else
@@ -10288,6 +10608,108 @@ static void _show_summary_v105(struct rtw89_dev *rtwdev, struct seq_file *m)
cnt[BTC_NCNT_CUSTOMERIZE]);
}
+static void _show_summary_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo;
+ struct rtw89_btc_fbtc_rpt_ctrl_v7 *prptctrl = NULL;
+ struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
+ struct rtw89_btc_cx *cx = &rtwdev->btc.cx;
+ struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ u32 *cnt = rtwdev->btc.dm.cnt_notify;
+ u32 cnt_sum = 0;
+ u8 i;
+
+ if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
+ return;
+
+ seq_printf(m, "%s", "\n\r========== [Statistics] ==========");
+
+ pcinfo = &pfwinfo->rpt_ctrl.cinfo;
+ if (pcinfo->valid && wl->status.map.lps != BTC_LPS_RF_OFF &&
+ !wl->status.map.rf_off) {
+ prptctrl = &pfwinfo->rpt_ctrl.finfo.v7;
+
+ seq_printf(m,
+ "\n\r %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d),"
+ "c2h_cnt=%d(fw_send:%d, len:%d, max:%d), ",
+ "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
+ le16_to_cpu(prptctrl->rpt_info.cnt_h2c), pfwinfo->cnt_c2h,
+ le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
+ le16_to_cpu(prptctrl->rpt_info.len_c2h),
+ rtwdev->btc.ver->info_buf);
+
+ seq_printf(m, "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ le16_to_cpu(prptctrl->rpt_info.cnt),
+ le32_to_cpu(prptctrl->rpt_info.en));
+
+ if (dm->error.map.wl_fw_hang)
+ seq_puts(m, " (WL FW Hang!!)");
+
+ seq_printf(m, "\n\r %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
+ "[mailbox]", le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
+
+ seq_printf(m, "A2DP_empty:%d(stop:%d/tx:%d/ack:%d/nack:%d)",
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
+
+ seq_printf(m,
+ "\n\r %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d/time:%dms]",
+ "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT],
+ wl->rfk_info.proc_time);
+
+ seq_printf(m, ", bt_rfk[req:%d]",
+ le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
+
+ seq_printf(m, ", AOAC[RF_on:%d/RF_off:%d]",
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
+ } else {
+ seq_printf(m,
+ "\n\r %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d (lps=%d/rf_off=%d)",
+ "[summary]",
+ pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
+ pfwinfo->cnt_c2h,
+ wl->status.map.lps, wl->status.map.rf_off);
+ }
+
+ for (i = 0; i < BTC_NCNT_NUM; i++)
+ cnt_sum += dm->cnt_notify[i];
+
+ seq_printf(m,
+ "\n\r %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]",
+ cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+
+ seq_printf(m,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
+
+ seq_printf(m,
+ "\n\r %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, switch_chbw=%d, special_pkt=%d, ",
+ "[notify_cnt]",
+ cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
+ cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SWITCH_CHBW],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
+
+ seq_printf(m, "timer=%d, customerize=%d, hub_msg=%d, chg_fw=%d, send_cc=%d",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CUSTOMERIZE],
+ rtwdev->btc.hubmsg_cnt, cnt[BTC_NCNT_RESUME_DL_FW],
+ cnt[BTC_NCNT_COUNTRYCODE]);
+}
+
static void _show_summary_v8(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo;
@@ -10440,6 +10862,8 @@ void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_summary_v5(rtwdev, m);
else if (ver->fcxbtcrpt == 105)
_show_summary_v105(rtwdev, m);
+ else if (ver->fcxbtcrpt == 7)
+ _show_summary_v7(rtwdev, m);
else if (ver->fcxbtcrpt == 8)
_show_summary_v8(rtwdev, m);
}
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index 7b28f2c2a08e..4553810634c6 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -370,7 +370,7 @@ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev)
return;
}
- roc_idx = atomic_read(&hal->roc_entity_idx);
+ roc_idx = atomic_read(&hal->roc_chanctx_idx);
if (roc_idx != RTW89_CHANCTX_IDLE)
chanctx_idx = roc_idx;
@@ -409,7 +409,7 @@ int rtw89_set_channel(struct rtw89_dev *rtwdev)
return -EINVAL;
}
- roc_idx = atomic_read(&hal->roc_entity_idx);
+ roc_idx = atomic_read(&hal->roc_chanctx_idx);
if (roc_idx != RTW89_CHANCTX_IDLE)
chanctx_idx = roc_idx;
@@ -429,7 +429,7 @@ int rtw89_set_channel(struct rtw89_dev *rtwdev)
if (!entity_active || chan_rcd->band_changed) {
rtw89_btc_ntfy_switch_band(rtwdev, phy_idx, chan->band_type);
- rtw89_chip_rfk_band_changed(rtwdev, phy_idx);
+ rtw89_chip_rfk_band_changed(rtwdev, phy_idx, chan);
}
rtw89_set_entity_state(rtwdev, true);
@@ -1583,11 +1583,27 @@ static u16 rtw89_core_get_phy_status_ie_len(struct rtw89_dev *rtwdev,
return ie_len;
}
+static void rtw89_core_parse_phy_status_ie01_v2(struct rtw89_dev *rtwdev,
+ const struct rtw89_phy_sts_iehdr *iehdr,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ const struct rtw89_phy_sts_ie01_v2 *ie;
+ u8 *rpl_fd = phy_ppdu->rpl_fd;
+
+ ie = (const struct rtw89_phy_sts_ie01_v2 *)iehdr;
+ rpl_fd[RF_PATH_A] = le32_get_bits(ie->w8, RTW89_PHY_STS_IE01_V2_W8_RPL_FD_A);
+ rpl_fd[RF_PATH_B] = le32_get_bits(ie->w8, RTW89_PHY_STS_IE01_V2_W8_RPL_FD_B);
+ rpl_fd[RF_PATH_C] = le32_get_bits(ie->w9, RTW89_PHY_STS_IE01_V2_W9_RPL_FD_C);
+ rpl_fd[RF_PATH_D] = le32_get_bits(ie->w9, RTW89_PHY_STS_IE01_V2_W9_RPL_FD_D);
+
+ phy_ppdu->bw_idx = le32_get_bits(ie->w5, RTW89_PHY_STS_IE01_V2_W5_BW_IDX);
+}
+
static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev,
const struct rtw89_phy_sts_iehdr *iehdr,
struct rtw89_rx_phy_ppdu *phy_ppdu)
{
- const struct rtw89_phy_sts_ie0 *ie = (const struct rtw89_phy_sts_ie0 *)iehdr;
+ const struct rtw89_phy_sts_ie01 *ie = (const struct rtw89_phy_sts_ie01 *)iehdr;
s16 cfo;
u32 t;
@@ -1598,12 +1614,17 @@ static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev,
phy_ppdu->stbc = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_STBC);
}
+ if (!phy_ppdu->hdr_2_en)
+ phy_ppdu->rx_path_en =
+ le32_get_bits(ie->w0, RTW89_PHY_STS_IE01_W0_RX_PATH_EN);
+
if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6)
return;
if (!phy_ppdu->to_self)
return;
+ phy_ppdu->rpl_avg = le32_get_bits(ie->w0, RTW89_PHY_STS_IE01_W0_RSSI_AVG_FD);
phy_ppdu->ofdm.avg_snr = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_AVG_SNR);
phy_ppdu->ofdm.evm_max = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_EVM_MAX);
phy_ppdu->ofdm.evm_min = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_EVM_MIN);
@@ -1619,6 +1640,39 @@ static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev,
}
rtw89_phy_cfo_parse(rtwdev, cfo, phy_ppdu);
+
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
+ rtw89_core_parse_phy_status_ie01_v2(rtwdev, iehdr, phy_ppdu);
+}
+
+static void rtw89_core_parse_phy_status_ie00(struct rtw89_dev *rtwdev,
+ const struct rtw89_phy_sts_iehdr *iehdr,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ const struct rtw89_phy_sts_ie00 *ie = (const struct rtw89_phy_sts_ie00 *)iehdr;
+ u16 tmp_rpl;
+
+ tmp_rpl = le32_get_bits(ie->w0, RTW89_PHY_STS_IE00_W0_RPL);
+ phy_ppdu->rpl_avg = tmp_rpl >> 1;
+}
+
+static void rtw89_core_parse_phy_status_ie00_v2(struct rtw89_dev *rtwdev,
+ const struct rtw89_phy_sts_iehdr *iehdr,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ const struct rtw89_phy_sts_ie00_v2 *ie;
+ u8 *rpl_path = phy_ppdu->rpl_path;
+ u16 tmp_rpl[RF_PATH_MAX];
+ u8 i;
+
+ ie = (const struct rtw89_phy_sts_ie00_v2 *)iehdr;
+ tmp_rpl[RF_PATH_A] = le32_get_bits(ie->w4, RTW89_PHY_STS_IE00_V2_W4_RPL_TD_A);
+ tmp_rpl[RF_PATH_B] = le32_get_bits(ie->w4, RTW89_PHY_STS_IE00_V2_W4_RPL_TD_B);
+ tmp_rpl[RF_PATH_C] = le32_get_bits(ie->w4, RTW89_PHY_STS_IE00_V2_W4_RPL_TD_C);
+ tmp_rpl[RF_PATH_D] = le32_get_bits(ie->w5, RTW89_PHY_STS_IE00_V2_W5_RPL_TD_D);
+
+ for (i = 0; i < RF_PATH_MAX; i++)
+ rpl_path[i] = tmp_rpl[i] >> 1;
}
static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev,
@@ -1630,6 +1684,11 @@ static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev,
ie = le32_get_bits(iehdr->w0, RTW89_PHY_STS_IEHDR_TYPE);
switch (ie) {
+ case RTW89_PHYSTS_IE00_CMN_CCK:
+ rtw89_core_parse_phy_status_ie00(rtwdev, iehdr, phy_ppdu);
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
+ rtw89_core_parse_phy_status_ie00_v2(rtwdev, iehdr, phy_ppdu);
+ break;
case RTW89_PHYSTS_IE01_CMN_OFDM:
rtw89_core_parse_phy_status_ie01(rtwdev, iehdr, phy_ppdu);
break;
@@ -1640,6 +1699,13 @@ static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev,
return 0;
}
+static void rtw89_core_update_phy_ppdu_hdr_v2(struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ const struct rtw89_phy_sts_hdr_v2 *hdr = phy_ppdu->buf + PHY_STS_HDR_LEN;
+
+ phy_ppdu->rx_path_en = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_V2_W0_PATH_EN);
+}
+
static void rtw89_core_update_phy_ppdu(struct rtw89_rx_phy_ppdu *phy_ppdu)
{
const struct rtw89_phy_sts_hdr *hdr = phy_ppdu->buf;
@@ -1651,6 +1717,10 @@ static void rtw89_core_update_phy_ppdu(struct rtw89_rx_phy_ppdu *phy_ppdu)
rssi[RF_PATH_B] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_B);
rssi[RF_PATH_C] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_C);
rssi[RF_PATH_D] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_D);
+
+ phy_ppdu->hdr_2_en = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_W0_HDR_2_EN);
+ if (phy_ppdu->hdr_2_en)
+ rtw89_core_update_phy_ppdu_hdr_v2(phy_ppdu);
}
static int rtw89_core_rx_process_phy_ppdu(struct rtw89_dev *rtwdev,
@@ -1703,6 +1773,7 @@ static int rtw89_core_rx_parse_phy_sts(struct rtw89_dev *rtwdev,
}
}
+ rtw89_chip_convert_rpl_to_rssi(rtwdev, phy_ppdu);
rtw89_phy_antdiv_parse(rtwdev, phy_ppdu);
return 0;
@@ -4263,9 +4334,14 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
u8 rtw89_acquire_mac_id(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 mac_id_num = chip->support_macid_num;
+ u8 mac_id_num;
u8 mac_id;
+ if (rtwdev->support_mlo)
+ mac_id_num = chip->support_macid_num / chip->support_link_num;
+ else
+ mac_id_num = chip->support_macid_num;
+
mac_id = find_first_zero_bit(rtwdev->mac_id_map, mac_id_num);
if (mac_id == mac_id_num)
return RTW89_MAX_MAC_ID_NUM;
@@ -4315,6 +4391,8 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtw89_init_wait(&rtwdev->mcc.wait);
rtw89_init_wait(&rtwdev->mac.fw_ofld_wait);
+ rtw89_init_wait(&rtwdev->wow.wait);
+ rtw89_init_wait(&rtwdev->mac.ps_wait);
INIT_WORK(&rtwdev->c2h_work, rtw89_fw_c2h_work);
INIT_WORK(&rtwdev->ips_work, rtw89_ips_work);
@@ -4681,6 +4759,9 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
if (chip->chip_gen == RTW89_CHIP_BE)
hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
+ if (rtwdev->support_mlo)
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+
hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
hw->wiphy->max_scan_ssids = RTW89_SCANOFLD_MAX_SSID;
@@ -4767,6 +4848,8 @@ EXPORT_SYMBOL(rtw89_core_register);
void rtw89_core_unregister(struct rtw89_dev *rtwdev)
{
rtw89_core_unregister_hw(rtwdev);
+
+ rtw89_debugfs_deinit(rtwdev);
}
EXPORT_SYMBOL(rtw89_core_unregister);
@@ -4781,6 +4864,7 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
struct ieee80211_ops *ops;
u32 driver_data_size;
int fw_format = -1;
+ bool support_mlo;
bool no_chanctx;
firmware = rtw89_early_fw_feature_recognize(device, chip, &early_fw, &fw_format);
@@ -4809,6 +4893,14 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
if (!hw)
goto err;
+ /* TODO: When driver MLO arch. is done, determine whether to support MLO
+ * according to the following conditions.
+ * 1. run with chanctx_ops
+ * 2. chip->support_link_num != 0
+ * 3. FW feature supports AP_LINK_PS
+ */
+ support_mlo = false;
+
hw->wiphy->iface_combinations = rtw89_iface_combs;
if (no_chanctx || chip->support_chanctx_num == 1)
@@ -4823,9 +4915,12 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
rtwdev->chip = chip;
rtwdev->fw.req.firmware = firmware;
rtwdev->fw.fw_format = fw_format;
+ rtwdev->support_mlo = support_mlo;
- rtw89_debug(rtwdev, RTW89_DBG_FW, "probe driver %s chanctx\n",
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN, "probe driver %s chanctx\n",
no_chanctx ? "without" : "with");
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN, "probe driver %s MLO cap\n",
+ support_mlo ? "with" : "without");
return rtwdev;
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index b42a33b9868a..4ed9034fdb46 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -21,6 +21,7 @@ struct rtw89_efuse_block_cfg;
struct rtw89_h2c_rf_tssi;
struct rtw89_fw_txpwr_track_cfg;
struct rtw89_phy_rfk_log_fmt;
+struct rtw89_debugfs;
extern const struct ieee80211_ops rtw89_ops;
@@ -796,6 +797,11 @@ struct rtw89_rx_phy_ppdu {
u8 chan_idx;
u8 ie;
u16 rate;
+ u8 rpl_avg;
+ u8 rpl_path[RF_PATH_MAX];
+ u8 rpl_fd[RF_PATH_MAX];
+ u8 bw_idx;
+ u8 rx_path_en;
struct {
bool has;
u8 avg_snr;
@@ -808,6 +814,7 @@ struct rtw89_rx_phy_ppdu {
bool stbc;
bool to_self;
bool valid;
+ bool hdr_2_en;
};
enum rtw89_mac_idx {
@@ -1589,6 +1596,23 @@ struct rtw89_btc_wl_active_role_v2 {
u32 noa_duration; /* ms */
};
+struct rtw89_btc_wl_active_role_v7 {
+ u8 connected;
+ u8 pid;
+ u8 phy;
+ u8 noa;
+
+ u8 band;
+ u8 client_ps;
+ u8 bw;
+ u8 role;
+
+ u8 ch;
+ u8 noa_dur;
+ u8 client_cnt;
+ u8 rsvd2;
+} __packed;
+
struct rtw89_btc_wl_role_info_bpos {
u16 none: 1;
u16 station: 1;
@@ -1670,6 +1694,22 @@ struct rtw89_btc_wl_rlink { /* H2C info, struct size must be n*4 bytes */
} __packed;
#define RTW89_BE_BTC_WL_MAX_ROLE_NUMBER 6
+struct rtw89_btc_wl_role_info_v7 { /* struct size must be n*4 bytes */
+ u8 connect_cnt;
+ u8 link_mode;
+ u8 link_mode_chg;
+ u8 p2p_2g;
+
+ struct rtw89_btc_wl_active_role_v7 active_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER];
+
+ u32 role_map;
+ u32 mrole_type; /* btc_wl_mrole_type */
+ u32 mrole_noa_duration; /* ms */
+ u32 dbcc_en;
+ u32 dbcc_chg;
+ u32 dbcc_2g_phy; /* which phy operate in 2G, HW_PHY_0 or HW_PHY_1 */
+} __packed;
+
struct rtw89_btc_wl_role_info_v8 { /* H2C info, struct size must be n*4 bytes */
u8 connect_cnt;
u8 link_mode;
@@ -1833,6 +1873,7 @@ struct rtw89_btc_wl_info {
struct rtw89_btc_wl_role_info role_info;
struct rtw89_btc_wl_role_info_v1 role_info_v1;
struct rtw89_btc_wl_role_info_v2 role_info_v2;
+ struct rtw89_btc_wl_role_info_v7 role_info_v7;
struct rtw89_btc_wl_role_info_v8 role_info_v8;
struct rtw89_btc_wl_scan_info scan_info;
struct rtw89_btc_wl_dbcc_info dbcc_info;
@@ -1850,8 +1891,10 @@ struct rtw89_btc_wl_info {
bool is_5g_hi_channel;
bool pta_reg_mac_chg;
bool bg_mode;
+ bool he_mode;
bool scbd_change;
bool fw_ver_mismatch;
+ bool client_cnt_inc_2g;
u32 scbd;
};
@@ -2202,6 +2245,19 @@ struct rtw89_btc_fbtc_rpt_ctrl_v105 {
struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info;
} __packed;
+struct rtw89_btc_fbtc_rpt_ctrl_v7 {
+ u8 fver;
+ u8 rsvd0;
+ u8 rsvd1;
+ u8 rsvd2;
+
+ u8 gnt_val[RTW89_PHY_MAX][4];
+ __le16 bt_cnt[BTC_BCNT_STA_MAX_V105];
+
+ struct rtw89_btc_fbtc_rpt_ctrl_info_v8 rpt_info;
+ struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info;
+} __packed;
+
struct rtw89_btc_fbtc_rpt_ctrl_v8 {
u8 fver;
u8 rsvd0;
@@ -2220,6 +2276,7 @@ union rtw89_btc_fbtc_rpt_ctrl_ver_info {
struct rtw89_btc_fbtc_rpt_ctrl_v4 v4;
struct rtw89_btc_fbtc_rpt_ctrl_v5 v5;
struct rtw89_btc_fbtc_rpt_ctrl_v105 v105;
+ struct rtw89_btc_fbtc_rpt_ctrl_v7 v7;
struct rtw89_btc_fbtc_rpt_ctrl_v8 v8;
};
@@ -3544,7 +3601,8 @@ struct rtw89_chip_ops {
void (*rfk_init_late)(struct rtw89_dev *rtwdev);
void (*rfk_channel)(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
void (*rfk_band_changed)(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx);
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
void (*rfk_scan)(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool start);
void (*rfk_track)(struct rtw89_dev *rtwdev);
@@ -3561,11 +3619,15 @@ struct rtw89_chip_ops {
void (*query_ppdu)(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct ieee80211_rx_status *status);
+ void (*convert_rpl_to_rssi)(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu);
void (*ctrl_nbtg_bt_tx)(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx);
void (*cfg_txrx_path)(struct rtw89_dev *rtwdev);
void (*set_txpwr_ul_tb_offset)(struct rtw89_dev *rtwdev,
s8 pw_ofst, enum rtw89_mac_idx mac_idx);
+ void (*digital_pwr_comp)(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
int (*pwr_on_func)(struct rtw89_dev *rtwdev);
int (*pwr_off_func)(struct rtw89_dev *rtwdev);
void (*query_rxdesc)(struct rtw89_dev *rtwdev,
@@ -3916,16 +3978,22 @@ struct rtw89_txpwr_conf {
const void *data;
};
+static inline bool rtw89_txpwr_entcpy(void *entry, const void *cursor, u8 size,
+ const struct rtw89_txpwr_conf *conf)
+{
+ u8 valid_size = min(size, conf->ent_sz);
+
+ memcpy(entry, cursor, valid_size);
+ return true;
+}
+
#define rtw89_txpwr_conf_valid(conf) (!!(conf)->data)
#define rtw89_for_each_in_txpwr_conf(entry, cursor, conf) \
- for (typecheck(const void *, cursor), (cursor) = (conf)->data, \
- memcpy(&(entry), cursor, \
- min_t(u8, sizeof(entry), (conf)->ent_sz)); \
+ for (typecheck(const void *, cursor), (cursor) = (conf)->data; \
(cursor) < (conf)->data + (conf)->num_ents * (conf)->ent_sz; \
- (cursor) += (conf)->ent_sz, \
- memcpy(&(entry), cursor, \
- min_t(u8, sizeof(entry), (conf)->ent_sz)))
+ (cursor) += (conf)->ent_sz) \
+ if (rtw89_txpwr_entcpy(&(entry), cursor, sizeof(entry), conf))
struct rtw89_txpwr_byrate_data {
struct rtw89_txpwr_conf conf;
@@ -4178,6 +4246,7 @@ struct rtw89_chip_info {
u8 wde_qempty_mgq_grpsel;
u32 rf_base_addr[2];
u8 support_macid_num;
+ u8 support_link_num;
u8 support_chanctx_num;
u8 support_bands;
u16 support_bandwidths;
@@ -4342,6 +4411,8 @@ struct rtw89_mac_info {
/* see RTW89_FW_OFLD_WAIT_COND series for wait condition */
struct rtw89_wait_info fw_ofld_wait;
+ /* see RTW89_PS_WAIT_COND series for wait condition */
+ struct rtw89_wait_info ps_wait;
};
enum rtw89_fwdl_check_type {
@@ -4594,7 +4665,7 @@ struct rtw89_hal {
bool ant_diversity_fixed;
bool support_cckpd;
bool support_igi;
- atomic_t roc_entity_idx;
+ atomic_t roc_chanctx_idx;
DECLARE_BITMAP(changes, NUM_OF_RTW89_CHANCTX_CHANGES);
DECLARE_BITMAP(entity_map, NUM_OF_RTW89_CHANCTX);
@@ -5312,6 +5383,9 @@ struct rtw89_wow_param {
u8 ptk_keyidx;
u8 akm;
+ /* see RTW89_WOW_WAIT_COND series for wait condition */
+ struct rtw89_wait_info wait;
+
bool pno_inited;
struct list_head pno_pkt_list;
struct cfg80211_sched_scan_request *nd_config;
@@ -5421,6 +5495,7 @@ struct rtw89_dev {
const struct ieee80211_ops *ops;
bool dbcc_en;
+ bool support_mlo;
enum rtw89_mlo_dbcc_mode mlo_dbcc_mode;
struct rtw89_hw_scan_info scan_info;
const struct rtw89_chip_info *chip;
@@ -5527,6 +5602,8 @@ struct rtw89_dev {
struct napi_struct napi;
int napi_budget_countdown;
+ struct rtw89_debugfs *debugfs;
+
/* HCI related data, keep last */
u8 priv[] __aligned(sizeof(void *));
};
@@ -6053,7 +6130,7 @@ const struct cfg80211_chan_def *rtw89_chandef_get(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx idx)
{
struct rtw89_hal *hal = &rtwdev->hal;
- enum rtw89_chanctx_idx roc_idx = atomic_read(&hal->roc_entity_idx);
+ enum rtw89_chanctx_idx roc_idx = atomic_read(&hal->roc_chanctx_idx);
if (roc_idx == idx)
return &hal->roc_chandef;
@@ -6172,12 +6249,13 @@ static inline void rtw89_chip_rfk_channel(struct rtw89_dev *rtwdev,
}
static inline void rtw89_chip_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
if (chip->ops->rfk_band_changed)
- chip->ops->rfk_band_changed(rtwdev, phy_idx);
+ chip->ops->rfk_band_changed(rtwdev, phy_idx, chan);
}
static inline void rtw89_chip_rfk_scan(struct rtw89_dev *rtwdev,
@@ -6243,6 +6321,15 @@ static inline void rtw89_chip_query_ppdu(struct rtw89_dev *rtwdev,
chip->ops->query_ppdu(rtwdev, phy_ppdu, status);
}
+static inline void rtw89_chip_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->convert_rpl_to_rssi)
+ chip->ops->convert_rpl_to_rssi(rtwdev, phy_ppdu);
+}
+
static inline void rtw89_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx)
{
@@ -6274,6 +6361,15 @@ void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
chip->ops->set_txpwr_ul_tb_offset(rtwdev, 0, rtwvif->mac_idx);
}
+static inline void rtw89_chip_digital_pwr_comp(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->digital_pwr_comp)
+ chip->ops->digital_pwr_comp(rtwdev, phy_idx);
+}
+
static inline void rtw89_load_txpwr_table(struct rtw89_dev *rtwdev,
const struct rtw89_txpwr_table *tbl)
{
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index bb65b814585a..29f85210f919 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -52,6 +52,27 @@ struct rtw89_debugfs_priv {
};
};
+struct rtw89_debugfs {
+ struct rtw89_debugfs_priv read_reg;
+ struct rtw89_debugfs_priv write_reg;
+ struct rtw89_debugfs_priv read_rf;
+ struct rtw89_debugfs_priv write_rf;
+ struct rtw89_debugfs_priv rf_reg_dump;
+ struct rtw89_debugfs_priv txpwr_table;
+ struct rtw89_debugfs_priv mac_reg_dump;
+ struct rtw89_debugfs_priv mac_mem_dump;
+ struct rtw89_debugfs_priv mac_dbg_port_dump;
+ struct rtw89_debugfs_priv send_h2c;
+ struct rtw89_debugfs_priv early_h2c;
+ struct rtw89_debugfs_priv fw_crash;
+ struct rtw89_debugfs_priv btc_info;
+ struct rtw89_debugfs_priv btc_manual;
+ struct rtw89_debugfs_priv fw_log_manual;
+ struct rtw89_debugfs_priv phy_info;
+ struct rtw89_debugfs_priv stations;
+ struct rtw89_debugfs_priv disable_dm;
+};
+
static const u16 rtw89_rate_info_bw_to_mhz_map[] = {
[RATE_INFO_BW_20] = 20,
[RATE_INFO_BW_40] = 40,
@@ -3463,9 +3484,9 @@ static ssize_t rtw89_debug_priv_btc_manual_set(struct file *filp,
return count;
}
-static ssize_t rtw89_debug_fw_log_manual_set(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+static ssize_t rtw89_debug_priv_fw_log_manual_set(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *loff)
{
struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
@@ -3854,92 +3875,55 @@ rtw89_debug_priv_disable_dm_set(struct file *filp, const char __user *user_buf,
return count;
}
-static struct rtw89_debugfs_priv rtw89_debug_priv_read_reg = {
- .cb_read = rtw89_debug_priv_read_reg_get,
- .cb_write = rtw89_debug_priv_read_reg_select,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_write_reg = {
- .cb_write = rtw89_debug_priv_write_reg_set,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_read_rf = {
- .cb_read = rtw89_debug_priv_read_rf_get,
- .cb_write = rtw89_debug_priv_read_rf_select,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_write_rf = {
- .cb_write = rtw89_debug_priv_write_rf_set,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_rf_reg_dump = {
- .cb_read = rtw89_debug_priv_rf_reg_dump_get,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_txpwr_table = {
- .cb_read = rtw89_debug_priv_txpwr_table_get,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_mac_reg_dump = {
- .cb_read = rtw89_debug_priv_mac_reg_dump_get,
- .cb_write = rtw89_debug_priv_mac_reg_dump_select,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_mac_mem_dump = {
- .cb_read = rtw89_debug_priv_mac_mem_dump_get,
- .cb_write = rtw89_debug_priv_mac_mem_dump_select,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_mac_dbg_port_dump = {
- .cb_read = rtw89_debug_priv_mac_dbg_port_dump_get,
- .cb_write = rtw89_debug_priv_mac_dbg_port_dump_select,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_send_h2c = {
- .cb_write = rtw89_debug_priv_send_h2c_set,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_early_h2c = {
- .cb_read = rtw89_debug_priv_early_h2c_get,
- .cb_write = rtw89_debug_priv_early_h2c_set,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_fw_crash = {
- .cb_read = rtw89_debug_priv_fw_crash_get,
- .cb_write = rtw89_debug_priv_fw_crash_set,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_btc_info = {
- .cb_read = rtw89_debug_priv_btc_info_get,
-};
-
-static struct rtw89_debugfs_priv rtw89_debug_priv_btc_manual = {
- .cb_write = rtw89_debug_priv_btc_manual_set,
-};
+#define rtw89_debug_priv_get(name) \
+{ \
+ .cb_read = rtw89_debug_priv_ ##name## _get, \
+}
-static struct rtw89_debugfs_priv rtw89_debug_priv_fw_log_manual = {
- .cb_write = rtw89_debug_fw_log_manual_set,
-};
+#define rtw89_debug_priv_set(name) \
+{ \
+ .cb_write = rtw89_debug_priv_ ##name## _set, \
+}
-static struct rtw89_debugfs_priv rtw89_debug_priv_phy_info = {
- .cb_read = rtw89_debug_priv_phy_info_get,
-};
+#define rtw89_debug_priv_select_and_get(name) \
+{ \
+ .cb_write = rtw89_debug_priv_ ##name## _select, \
+ .cb_read = rtw89_debug_priv_ ##name## _get, \
+}
-static struct rtw89_debugfs_priv rtw89_debug_priv_stations = {
- .cb_read = rtw89_debug_priv_stations_get,
-};
+#define rtw89_debug_priv_set_and_get(name) \
+{ \
+ .cb_write = rtw89_debug_priv_ ##name## _set, \
+ .cb_read = rtw89_debug_priv_ ##name## _get, \
+}
-static struct rtw89_debugfs_priv rtw89_debug_priv_disable_dm = {
- .cb_read = rtw89_debug_priv_disable_dm_get,
- .cb_write = rtw89_debug_priv_disable_dm_set,
+static const struct rtw89_debugfs rtw89_debugfs_templ = {
+ .read_reg = rtw89_debug_priv_select_and_get(read_reg),
+ .write_reg = rtw89_debug_priv_set(write_reg),
+ .read_rf = rtw89_debug_priv_select_and_get(read_rf),
+ .write_rf = rtw89_debug_priv_set(write_rf),
+ .rf_reg_dump = rtw89_debug_priv_get(rf_reg_dump),
+ .txpwr_table = rtw89_debug_priv_get(txpwr_table),
+ .mac_reg_dump = rtw89_debug_priv_select_and_get(mac_reg_dump),
+ .mac_mem_dump = rtw89_debug_priv_select_and_get(mac_mem_dump),
+ .mac_dbg_port_dump = rtw89_debug_priv_select_and_get(mac_dbg_port_dump),
+ .send_h2c = rtw89_debug_priv_set(send_h2c),
+ .early_h2c = rtw89_debug_priv_set_and_get(early_h2c),
+ .fw_crash = rtw89_debug_priv_set_and_get(fw_crash),
+ .btc_info = rtw89_debug_priv_get(btc_info),
+ .btc_manual = rtw89_debug_priv_set(btc_manual),
+ .fw_log_manual = rtw89_debug_priv_set(fw_log_manual),
+ .phy_info = rtw89_debug_priv_get(phy_info),
+ .stations = rtw89_debug_priv_get(stations),
+ .disable_dm = rtw89_debug_priv_set_and_get(disable_dm),
};
#define rtw89_debugfs_add(name, mode, fopname, parent) \
do { \
- rtw89_debug_priv_ ##name.rtwdev = rtwdev; \
- if (!debugfs_create_file(#name, mode, \
- parent, &rtw89_debug_priv_ ##name, \
- &file_ops_ ##fopname)) \
+ struct rtw89_debugfs_priv *priv = &rtwdev->debugfs->name; \
+ priv->rtwdev = rtwdev; \
+ if (IS_ERR(debugfs_create_file(#name, mode, parent, priv, \
+ &file_ops_ ##fopname))) \
pr_debug("Unable to initialize debugfs:%s\n", #name); \
} while (0)
@@ -3950,13 +3934,9 @@ static struct rtw89_debugfs_priv rtw89_debug_priv_disable_dm = {
#define rtw89_debugfs_add_r(name) \
rtw89_debugfs_add(name, S_IFREG | 0444, single_r, debugfs_topdir)
-void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
+static
+void rtw89_debugfs_add_sec0(struct rtw89_dev *rtwdev, struct dentry *debugfs_topdir)
{
- struct dentry *debugfs_topdir;
-
- debugfs_topdir = debugfs_create_dir("rtw89",
- rtwdev->hw->wiphy->debugfsdir);
-
rtw89_debugfs_add_rw(read_reg);
rtw89_debugfs_add_w(write_reg);
rtw89_debugfs_add_rw(read_rf);
@@ -3966,6 +3946,11 @@ void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
rtw89_debugfs_add_rw(mac_reg_dump);
rtw89_debugfs_add_rw(mac_mem_dump);
rtw89_debugfs_add_rw(mac_dbg_port_dump);
+}
+
+static
+void rtw89_debugfs_add_sec1(struct rtw89_dev *rtwdev, struct dentry *debugfs_topdir)
+{
rtw89_debugfs_add_w(send_h2c);
rtw89_debugfs_add_rw(early_h2c);
rtw89_debugfs_add_rw(fw_crash);
@@ -3976,6 +3961,27 @@ void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
rtw89_debugfs_add_r(stations);
rtw89_debugfs_add_rw(disable_dm);
}
+
+void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
+{
+ struct dentry *debugfs_topdir;
+
+ rtwdev->debugfs = kmemdup(&rtw89_debugfs_templ,
+ sizeof(rtw89_debugfs_templ), GFP_KERNEL);
+ if (!rtwdev->debugfs)
+ return;
+
+ debugfs_topdir = debugfs_create_dir("rtw89",
+ rtwdev->hw->wiphy->debugfsdir);
+
+ rtw89_debugfs_add_sec0(rtwdev, debugfs_topdir);
+ rtw89_debugfs_add_sec1(rtwdev, debugfs_topdir);
+}
+
+void rtw89_debugfs_deinit(struct rtw89_dev *rtwdev)
+{
+ kfree(rtwdev->debugfs);
+}
#endif
#ifdef CONFIG_RTW89_DEBUGMSG
diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h
index 800ea59873a1..fc690f7c55dc 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.h
+++ b/drivers/net/wireless/realtek/rtw89/debug.h
@@ -49,8 +49,10 @@ enum rtw89_debug_mac_reg_sel {
#ifdef CONFIG_RTW89_DEBUGFS
void rtw89_debugfs_init(struct rtw89_dev *rtwdev);
+void rtw89_debugfs_deinit(struct rtw89_dev *rtwdev);
#else
static inline void rtw89_debugfs_init(struct rtw89_dev *rtwdev) {}
+static inline void rtw89_debugfs_deinit(struct rtw89_dev *rtwdev) {}
#endif
#define rtw89_info(rtwdev, a...) dev_info((rtwdev)->dev, ##a)
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 47aa365991c1..d9b0e7ebe619 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -4325,6 +4325,52 @@ fail:
return ret;
}
+int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_role_info_v7 *role = &btc->cx.wl.role_info_v7;
+ struct rtw89_h2c_cxrole_v7 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role_v7\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cxrole_v7 *)skb->data;
+
+ h2c->hdr.type = type;
+ h2c->hdr.ver = btc->ver->fwlrole;
+ h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
+ memcpy(&h2c->_u8, role, sizeof(h2c->_u8));
+ h2c->_u32.role_map = cpu_to_le32(role->role_map);
+ h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type);
+ h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration);
+ h2c->_u32.dbcc_en = cpu_to_le32(role->dbcc_en);
+ h2c->_u32.dbcc_chg = cpu_to_le32(role->dbcc_chg);
+ h2c->_u32.dbcc_2g_phy = cpu_to_le32(role->dbcc_2g_phy);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, BTFC_SET,
+ SET_DRV_INFO, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -4343,6 +4389,7 @@ int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type)
h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data;
h2c->hdr.type = type;
+ h2c->hdr.ver = btc->ver->fwlrole;
h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
memcpy(&h2c->_u8, role, sizeof(h2c->_u8));
h2c->_u32.role_map = cpu_to_le32(role->role_map);
@@ -4423,7 +4470,7 @@ int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type)
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
- rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl_v7\n");
return -ENOMEM;
}
skb_put(skb, len);
@@ -5267,10 +5314,8 @@ fail:
}
int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
- enum rtw89_tssi_mode tssi_mode)
+ const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- RTW89_CHANCTX_0);
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_h2c_rf_tssi *h2c;
u32 len = sizeof(*h2c);
@@ -5314,7 +5359,8 @@ fail:
return ret;
}
-int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
struct rtw89_h2c_rf_iqk *h2c;
u32 len = sizeof(*h2c);
@@ -5349,10 +5395,9 @@ fail:
return ret;
}
-int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- RTW89_CHANCTX_0);
struct rtw89_h2c_rf_dpk *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
@@ -5392,10 +5437,9 @@ fail:
return ret;
}
-int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- RTW89_CHANCTX_0);
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_h2c_rf_txgapk *h2c;
u32 len = sizeof(*h2c);
@@ -5436,7 +5480,8 @@ fail:
return ret;
}
-int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
struct rtw89_h2c_rf_dack *h2c;
u32 len = sizeof(*h2c);
@@ -5472,10 +5517,9 @@ fail:
return ret;
}
-int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
- RTW89_CHANCTX_0);
struct rtw89_h2c_rf_rxdck *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
@@ -7132,10 +7176,10 @@ fail:
int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool enable)
{
+ struct rtw89_wait_info *wait = &rtwdev->mac.ps_wait;
struct rtw89_h2c_fwips *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
- int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
@@ -7154,25 +7198,15 @@ int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
H2C_FUNC_IPS_CFG, 0, 1,
len);
- ret = rtw89_h2c_tx(rtwdev, skb, false);
- if (ret) {
- rtw89_err(rtwdev, "failed to send h2c\n");
- goto fail;
- }
- return 0;
-fail:
- dev_kfree_skb_any(skb);
-
- return ret;
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_PS_WAIT_COND_IPS_CFG);
}
int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev)
{
- struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_wait_info *wait = &rtwdev->wow.wait;
struct rtw89_h2c_wow_aoac *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
- unsigned int cond;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
@@ -7191,8 +7225,7 @@ int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev)
H2C_FUNC_AOAC_REPORT_REQ, 1, 0,
len);
- cond = RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ);
- return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC);
}
/* Return < 0, if failures happen during waiting for the condition.
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 663eda5d0452..ad47e77d740b 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -2147,6 +2147,30 @@ struct rtw89_h2c_cxctrl_v7 {
#define H2C_LEN_CXDRVHDR sizeof(struct rtw89_h2c_cxhdr)
#define H2C_LEN_CXDRVHDR_V7 sizeof(struct rtw89_h2c_cxhdr_v7)
+struct rtw89_btc_wl_role_info_v7_u8 {
+ u8 connect_cnt;
+ u8 link_mode;
+ u8 link_mode_chg;
+ u8 p2p_2g;
+
+ struct rtw89_btc_wl_active_role_v7 active_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER];
+} __packed;
+
+struct rtw89_btc_wl_role_info_v7_u32 {
+ __le32 role_map;
+ __le32 mrole_type;
+ __le32 mrole_noa_duration;
+ __le32 dbcc_en;
+ __le32 dbcc_chg;
+ __le32 dbcc_2g_phy;
+} __packed;
+
+struct rtw89_h2c_cxrole_v7 {
+ struct rtw89_h2c_cxhdr_v7 hdr;
+ struct rtw89_btc_wl_role_info_v7_u8 _u8;
+ struct rtw89_btc_wl_role_info_v7_u32 _u32;
+} __packed;
+
struct rtw89_btc_wl_role_info_v8_u8 {
u8 connect_cnt;
u8 link_mode;
@@ -2168,7 +2192,7 @@ struct rtw89_btc_wl_role_info_v8_u32 {
} __packed;
struct rtw89_h2c_cxrole_v8 {
- struct rtw89_h2c_cxhdr hdr;
+ struct rtw89_h2c_cxhdr_v7 hdr;
struct rtw89_btc_wl_role_info_v8_u8 _u8;
struct rtw89_btc_wl_role_info_v8_u32 _u32;
} __packed;
@@ -3991,14 +4015,27 @@ enum rtw89_wow_h2c_func {
NUM_OF_RTW89_WOW_H2C_FUNC,
};
-#define RTW89_WOW_WAIT_COND(func) \
- (NUM_OF_RTW89_WOW_H2C_FUNC + (func))
+#define RTW89_WOW_WAIT_COND(tag, func) \
+ ((tag) * NUM_OF_RTW89_WOW_H2C_FUNC + (func))
+
+#define RTW89_WOW_WAIT_COND_AOAC \
+ RTW89_WOW_WAIT_COND(0 /* don't care */, H2C_FUNC_AOAC_REPORT_REQ)
/* CLASS 2 - PS */
#define H2C_CL_MAC_PS 0x2
-#define H2C_FUNC_MAC_LPS_PARM 0x0
-#define H2C_FUNC_P2P_ACT 0x1
-#define H2C_FUNC_IPS_CFG 0x3
+enum rtw89_ps_h2c_func {
+ H2C_FUNC_MAC_LPS_PARM = 0x0,
+ H2C_FUNC_P2P_ACT = 0x1,
+ H2C_FUNC_IPS_CFG = 0x3,
+
+ NUM_OF_RTW89_PS_H2C_FUNC,
+};
+
+#define RTW89_PS_WAIT_COND(tag, func) \
+ ((tag) * NUM_OF_RTW89_PS_H2C_FUNC + (func))
+
+#define RTW89_PS_WAIT_COND_IPS_CFG \
+ RTW89_PS_WAIT_COND(0 /* don't care */, H2C_FUNC_IPS_CFG)
/* CLASS 3 - FW download */
#define H2C_CL_MAC_FWDL 0x3
@@ -4426,6 +4463,7 @@ int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type);
@@ -4453,12 +4491,17 @@ int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
- enum rtw89_tssi_mode tssi_mode);
-int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+ const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode);
+int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
+int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
+int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
+int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
+int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan);
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack);
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index 89875f59d722..c70a23a763b0 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -2742,6 +2742,7 @@ bool rtw89_mac_is_qta_dbcc(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
static int ptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val, reg;
int ret;
@@ -2780,6 +2781,12 @@ static int ptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
B_AX_SPE_RPT_PATH_MASK, FWD_TO_WLCPU);
}
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_AGG_LEN_VHT_0, mac_idx);
+ rtw89_write32_mask(rtwdev, reg,
+ B_AX_AMPDU_MAX_LEN_VHT_MASK, 0x3FF80);
+ }
+
return 0;
}
@@ -4880,6 +4887,7 @@ rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 le
{
/* N.B. This will run in interrupt context. */
struct rtw89_wait_info *fw_ofld_wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_wait_info *ps_wait = &rtwdev->mac.ps_wait;
const struct rtw89_c2h_done_ack *c2h =
(const struct rtw89_c2h_done_ack *)skb_c2h->data;
u8 h2c_cat = le32_get_bits(c2h->w2, RTW89_C2H_DONE_ACK_W2_CAT);
@@ -4900,6 +4908,18 @@ rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 le
switch (h2c_class) {
default:
return;
+ case H2C_CL_MAC_PS:
+ switch (h2c_func) {
+ default:
+ return;
+ case H2C_FUNC_IPS_CFG:
+ cond = RTW89_PS_WAIT_COND_IPS_CFG;
+ break;
+ }
+
+ data.err = !!h2c_return;
+ rtw89_complete_cond(ps_wait, cond, &data);
+ return;
case H2C_CL_MAC_FW_OFLD:
switch (h2c_func) {
default:
@@ -5158,11 +5178,10 @@ rtw89_mac_c2h_wow_aoac_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 le
{
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
- struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_wait_info *wait = &rtw_wow->wait;
const struct rtw89_c2h_wow_aoac_report *c2h =
(const struct rtw89_c2h_wow_aoac_report *)skb->data;
struct rtw89_completion_data data = {};
- unsigned int cond;
aoac_rpt->rpt_ver = c2h->rpt_ver;
aoac_rpt->sec_type = c2h->sec_type;
@@ -5180,8 +5199,7 @@ rtw89_mac_c2h_wow_aoac_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 le
aoac_rpt->igtk_ipn = le64_to_cpu(c2h->igtk_ipn);
memcpy(aoac_rpt->igtk, c2h->igtk, sizeof(aoac_rpt->igtk));
- cond = RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ);
- rtw89_complete_cond(wait, cond, &data);
+ rtw89_complete_cond(wait, RTW89_WOW_WAIT_COND_AOAC, &data);
}
static void
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index 9d3be36ffb6e..67c2a4507124 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -421,7 +421,6 @@ enum rtw89_mac_c2h_mrc_func {
enum rtw89_mac_c2h_wow_func {
RTW89_MAC_C2H_FUNC_AOAC_REPORT,
- RTW89_MAC_C2H_FUNC_READ_WOW_CAM,
NUM_OF_RTW89_MAC_C2H_FUNC_WOW,
};
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index aa4fc9115995..c7165e757842 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -356,8 +356,8 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
csi_mode = RTW89_RA_RPT_MODE_HT;
ra_mask |= ((u64)sta->deflink.ht_cap.mcs.rx_mask[3] << 48) |
((u64)sta->deflink.ht_cap.mcs.rx_mask[2] << 36) |
- (sta->deflink.ht_cap.mcs.rx_mask[1] << 24) |
- (sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
+ ((u64)sta->deflink.ht_cap.mcs.rx_mask[1] << 24) |
+ ((u64)sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
high_rate_masks = rtw89_ra_mask_ht_rates;
if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
stbc_en = 1;
@@ -3084,6 +3084,7 @@ EXPORT_SYMBOL(rtw89_phy_rfk_pre_ntfy_and_wait);
int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
enum rtw89_tssi_mode tssi_mode,
unsigned int ms)
{
@@ -3091,7 +3092,7 @@ int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, tssi_mode);
+ ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, chan, tssi_mode);
if (ret)
return ret;
@@ -3101,13 +3102,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_tssi_and_wait);
int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms)
{
int ret;
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx);
+ ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx, chan);
if (ret)
return ret;
@@ -3117,13 +3119,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_iqk_and_wait);
int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms)
{
int ret;
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx);
+ ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx, chan);
if (ret)
return ret;
@@ -3133,13 +3136,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_dpk_and_wait);
int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms)
{
int ret;
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx);
+ ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx, chan);
if (ret)
return ret;
@@ -3149,13 +3153,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_txgapk_and_wait);
int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms)
{
int ret;
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx);
+ ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx, chan);
if (ret)
return ret;
@@ -3165,13 +3170,14 @@ EXPORT_SYMBOL(rtw89_phy_rfk_dack_and_wait);
int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms)
{
int ret;
rtw89_phy_rfk_report_prep(rtwdev);
- ret = rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx);
+ ret = rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx, chan);
if (ret)
return ret;
@@ -5395,7 +5401,7 @@ static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev)
memcpy(dig->igi_rssi_th, igi_rssi_th, sizeof(dig->igi_rssi_th));
}
-static const u8 pd_low_th_offset = 20, dynamic_igi_min = 0x20;
+static const u8 pd_low_th_offset = 16, dynamic_igi_min = 0x20;
static const u8 igi_max_performance_mode = 0x5a;
static const u8 dynamic_pd_threshold_max;
@@ -5693,38 +5699,47 @@ void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev)
}
#define IGI_RSSI_MIN 10
+#define ABS_IGI_MIN 0xc
void rtw89_phy_dig(struct rtw89_dev *rtwdev)
{
struct rtw89_dig_info *dig = &rtwdev->dig;
bool is_linked = rtwdev->total_sta_assoc > 0;
+ u8 igi_min;
if (unlikely(dig->bypass_dig)) {
dig->bypass_dig = false;
return;
}
+ rtw89_phy_dig_update_rssi_info(rtwdev);
+
if (!dig->is_linked_pre && is_linked) {
rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n");
rtw89_phy_dig_update_para(rtwdev);
+ dig->igi_fa_rssi = dig->igi_rssi;
} else if (dig->is_linked_pre && !is_linked) {
rtw89_debug(rtwdev, RTW89_DBG_DIG, "First disconnected\n");
rtw89_phy_dig_update_para(rtwdev);
+ dig->igi_fa_rssi = dig->igi_rssi;
}
dig->is_linked_pre = is_linked;
rtw89_phy_dig_igi_offset_by_env(rtwdev);
- rtw89_phy_dig_update_rssi_info(rtwdev);
- dig->dyn_igi_min = (dig->igi_rssi > IGI_RSSI_MIN) ?
- dig->igi_rssi - IGI_RSSI_MIN : 0;
- dig->dyn_igi_max = dig->dyn_igi_min + IGI_OFFSET_MAX;
- dig->igi_fa_rssi = dig->dyn_igi_min + dig->fa_rssi_ofst;
+ igi_min = max_t(int, dig->igi_rssi - IGI_RSSI_MIN, 0);
+ dig->dyn_igi_max = min(igi_min + IGI_OFFSET_MAX, igi_max_performance_mode);
+ dig->dyn_igi_min = max(igi_min, ABS_IGI_MIN);
- dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min,
- dig->dyn_igi_max);
+ if (dig->dyn_igi_max >= dig->dyn_igi_min) {
+ dig->igi_fa_rssi += dig->fa_rssi_ofst;
+ dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min,
+ dig->dyn_igi_max);
+ } else {
+ dig->igi_fa_rssi = dig->dyn_igi_max;
+ }
rtw89_debug(rtwdev, RTW89_DBG_DIG,
- "rssi=%03d, dyn(max,min)=(%d,%d), final_rssi=%d\n",
+ "rssi=%03d, dyn_joint(max,min)=(%d,%d), final_rssi=%d\n",
dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min,
dig->igi_fa_rssi);
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index 512f17d808fe..6dd8ec46939a 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -907,22 +907,28 @@ int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev,
unsigned int ms);
int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
enum rtw89_tssi_mode tssi_mode,
unsigned int ms);
int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms);
int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms);
int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms);
int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms);
int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan,
unsigned int ms);
void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index 92074b73ebeb..aebd6404f802 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -98,10 +98,10 @@ static void __rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif);
}
-static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, u8 mac_id)
+static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
struct rtw89_lps_parm lps_param = {
- .macid = mac_id,
+ .macid = rtwvif->mac_id,
.psmode = RTW89_MAC_AX_PS_MODE_ACTIVE,
.lastrpwm = RTW89_LAST_RPWM_ACTIVE,
};
@@ -109,6 +109,7 @@ static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, u8 mac_id)
rtw89_fw_h2c_lps_parm(rtwdev, &lps_param);
rtw89_fw_leave_lps_check(rtwdev, 0);
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
+ rtw89_chip_digital_pwr_comp(rtwdev, rtwvif->phy_idx);
}
void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
@@ -137,7 +138,7 @@ static void rtw89_leave_lps_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwv
rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT)
return;
- __rtw89_leave_lps(rtwdev, rtwvif->mac_id);
+ __rtw89_leave_lps(rtwdev, rtwvif);
}
void rtw89_leave_lps(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 7afec48c4056..69678eab2309 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -2506,6 +2506,10 @@
#define B_AX_RTS_TXTIME_TH_MASK GENMASK(15, 8)
#define B_AX_RTS_LEN_TH_MASK GENMASK(7, 0)
+#define R_AX_AGG_LEN_VHT_0 0xC618
+#define R_AX_AGG_LEN_VHT_0_C1 0xE618
+#define B_AX_AMPDU_MAX_LEN_VHT_MASK GENMASK(19, 0)
+
#define S_AX_CTS2S_TH_SEC_256B 1
#define R_AX_SIFS_SETTING 0xC624
#define R_AX_SIFS_SETTING_C1 0xE624
@@ -6044,6 +6048,9 @@
#define R_BE_WP_PAGE_INFO1 0xB7AC
#define B_BE_WP_AVAL_PG_MASK GENMASK(28, 16)
+#define R_BE_LTPC_T0_PATH0 0xBA28
+#define R_BE_LTPC_T0_PATH1 0xBB28
+
#define R_BE_CMAC_SHARE_FUNC_EN 0x0E000
#define B_BE_CMAC_SHARE_CRPRT BIT(31)
#define B_BE_CMAC_SHARE_EN BIT(30)
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index e6463035a7af..1679bd408ef3 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -1587,29 +1587,31 @@ static void rtw8851b_rfk_init(struct rtw89_dev *rtwdev)
rtw8851b_aack(rtwdev);
rtw8851b_rck(rtwdev);
rtw8851b_dack(rtwdev);
- rtw8851b_rx_dck(rtwdev, RTW89_PHY_0);
+ rtw8851b_rx_dck(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
}
static void rtw8851b_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
- rtw8851b_rx_dck(rtwdev, phy_idx);
- rtw8851b_iqk(rtwdev, phy_idx);
- rtw8851b_tssi(rtwdev, phy_idx, true);
- rtw8851b_dpk(rtwdev, phy_idx);
+ rtw8851b_rx_dck(rtwdev, phy_idx, chanctx_idx);
+ rtw8851b_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw8851b_tssi(rtwdev, phy_idx, true, chanctx_idx);
+ rtw8851b_dpk(rtwdev, phy_idx, chanctx_idx);
}
static void rtw8851b_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw8851b_tssi_scan(rtwdev, phy_idx);
+ rtw8851b_tssi_scan(rtwdev, phy_idx, chan);
}
static void rtw8851b_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool start)
{
- rtw8851b_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx);
+ rtw8851b_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx, rtwvif->chanctx_idx);
}
static void rtw8851b_rfk_track(struct rtw89_dev *rtwdev)
@@ -2385,9 +2387,11 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.get_thermal = rtw8851b_get_thermal,
.ctrl_btg_bt_rx = rtw8851b_ctrl_btg_bt_rx,
.query_ppdu = rtw8851b_query_ppdu,
+ .convert_rpl_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8851b_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8851b_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8851b_set_txpwr_ul_tb_offset,
+ .digital_pwr_comp = NULL,
.pwr_on_func = rtw8851b_pwr_on_func,
.pwr_off_func = rtw8851b_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -2462,6 +2466,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.dig_regs = &rtw8851b_dig_regs,
.tssi_dbw_table = NULL,
.support_macid_num = RTW89_MAX_MAC_ID_NUM,
+ .support_link_num = 0,
.support_chanctx_num = 0,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
index 7942f334066c..364e36354225 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
@@ -521,9 +521,10 @@ static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
}
static void _rx_dck_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, bool is_afe)
+ enum rtw89_rf_path path, bool is_afe,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[RX_DCK] ==== S%d RX DCK (%s / CH%d / %s / by %s)====\n", path,
@@ -574,7 +575,8 @@ static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 rf
_rxbb_ofst_swap(rtwdev, path, rf_mode);
}
-static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe)
+static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u32 rf_reg5;
u8 path;
@@ -584,7 +586,7 @@ static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_af
0x2, rtwdev->hal.cv);
for (path = 0; path < RF_PATH_NUM_8851B; path++) {
- _rx_dck_info(rtwdev, phy, path, is_afe);
+ _rx_dck_info(rtwdev, phy, path, is_afe, chanctx_idx);
rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
@@ -1481,9 +1483,9 @@ static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev,
}
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- u8 path)
+ u8 path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 idx = 0;
@@ -1586,10 +1588,11 @@ static void _iqk_init(struct rtw89_dev *rtwdev)
}
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
u32 backup_rf_val[RTW8851B_IQK_SS][BACKUP_RF_REGS_NR];
u32 backup_bb_val[BACKUP_BB_REGS_NR];
@@ -1602,7 +1605,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
iqk_info->version = RTW8851B_IQK_VER;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
@@ -1618,9 +1621,10 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
BTC_WRFK_ONESHOT_STOP);
}
-static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ bool force, enum rtw89_chanctx_idx chanctx_idx)
{
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
}
static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 *reg,
@@ -1746,9 +1750,9 @@ static void _dpk_init(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
}
static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 kidx = dpk->cur_idx[path];
@@ -2449,7 +2453,8 @@ _error:
}
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy, u8 kpath)
+ enum rtw89_phy_idx phy, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u32 kip_bkup[RF_PATH_NUM_8851B][DPK_KIP_REG_NUM_8851B] = {};
@@ -2465,7 +2470,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
continue;
_dpk_bkup_kip(rtwdev, dpk_kip_reg, kip_bkup, path);
_dpk_bkup_rf(rtwdev, dpk_rf_reg, rf_bkup, path);
- _dpk_information(rtwdev, phy, path);
+ _dpk_information(rtwdev, phy, path, chanctx_idx);
_dpk_init(rtwdev, path);
if (rtwdev->is_tssi_mode[path])
@@ -2505,13 +2510,14 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
_dpk_kip_pwr_clk_onoff(rtwdev, false);
}
-static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
+static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] ****** 8851B DPK Start (Ver: 0x%x, Cv: %d) ******\n",
DPK_VER_8851B, rtwdev->hal.cv);
- _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
+ _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy), chanctx_idx);
}
static void _dpk_track(struct rtw89_dev *rtwdev)
@@ -2617,9 +2623,8 @@ static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
}
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser(rtwdev, &rtw8851b_tssi_sys_defs_tbl);
@@ -2650,7 +2655,7 @@ static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
#define RTW8851B_TSSI_GET_VAL(ptr, idx) \
({ \
@@ -2664,7 +2669,6 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
__val; \
})
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
@@ -2755,9 +2759,8 @@ static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx
}
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
@@ -2766,9 +2769,9 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, bool all)
+ enum rtw89_rf_path path, bool all,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
@@ -2944,10 +2947,9 @@ static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
}
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u32 gidx, gidx_1st, gidx_2nd;
u8 ch = chan->channel;
s8 de_1st;
@@ -2980,10 +2982,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u32 tgidx, tgidx_1st, tgidx_2nd;
u8 ch = chan->channel;
s8 tde_1st;
@@ -3017,10 +3018,10 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
return val;
}
-static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u8 gidx;
s8 ofdm_de;
@@ -3033,7 +3034,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
for (i = RF_PATH_A; i < RTW8851B_TSSI_PATH_NR; i++) {
gidx = _tssi_get_cck_group(rtwdev, ch);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = tssi_info->tssi_cck[i][gidx] + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3049,8 +3050,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
_TSSI_DE_MASK));
- ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = ofdm_de + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3096,10 +3097,10 @@ static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_p
}
static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, enum rtw89_rf_path path)
+ enum rtw89_phy_idx phy, enum rtw89_rf_path path,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 channel = chan->channel;
u8 band;
@@ -3255,9 +3256,10 @@ void rtw8851b_dack(struct rtw89_dev *rtwdev)
_dac_cal(rtwdev, false);
}
-void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
@@ -3265,30 +3267,32 @@ void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_iqk_init(rtwdev);
- _iqk(rtwdev, phy_idx, false);
+ _iqk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
-void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
- _rx_dck(rtwdev, phy_idx, false);
+ _rx_dck(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
-void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
@@ -3297,7 +3301,7 @@ void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
rtwdev->dpk.is_dpk_enable = true;
rtwdev->dpk.is_dpk_reload_en = false;
- _dpk(rtwdev, phy_idx, false);
+ _dpk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
@@ -3308,9 +3312,11 @@ void rtw8851b_dpk_track(struct rtw89_dev *rtwdev)
_dpk_track(rtwdev);
}
-void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en)
+void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_A, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_A, chanctx_idx);
u8 i;
rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
@@ -3319,26 +3325,26 @@ void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_e
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8851B; i++) {
- _tssi_set_sys(rtwdev, phy, i);
+ _tssi_set_sys(rtwdev, phy, i, chan);
_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
_tssi_set_dck(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
_tssi_set_dac_gain_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
_tssi_set_tssi_slope(rtwdev, phy, i);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 channel = chan->channel;
u32 i;
@@ -3348,20 +3354,21 @@ void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8851B; i++) {
- _tssi_set_sys(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_set_sys(rtwdev, phy, i, chan);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
static void rtw8851b_tssi_default_txagc(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, bool enable)
+ enum rtw89_phy_idx phy, bool enable,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 channel = chan->channel;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n",
@@ -3379,7 +3386,7 @@ static void rtw8851b_tssi_default_txagc(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
- _tssi_alimentk_done(rtwdev, phy, RF_PATH_A);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_A, chan);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x\n",
@@ -3391,12 +3398,13 @@ static void rtw8851b_tssi_default_txagc(struct rtw89_dev *rtwdev,
}
void rtw8851b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
if (scan_start)
- rtw8851b_tssi_default_txagc(rtwdev, phy_idx, true);
+ rtw8851b_tssi_default_txagc(rtwdev, phy_idx, true, chanctx_idx);
else
- rtw8851b_tssi_default_txagc(rtwdev, phy_idx, false);
+ rtw8851b_tssi_default_txagc(rtwdev, phy_idx, false, chanctx_idx);
}
static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h
index b66a23d6d367..ea7df628256b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h
@@ -12,15 +12,21 @@ void rtw8851b_lck_init(struct rtw89_dev *rtwdev);
void rtw8851b_lck_track(struct rtw89_dev *rtwdev);
void rtw8851b_rck(struct rtw89_dev *rtwdev);
void rtw8851b_dack(struct rtw89_dev *rtwdev);
-void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8851b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8851b_dpk_init(struct rtw89_dev *rtwdev);
-void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8851b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8851b_dpk_track(struct rtw89_dev *rtwdev);
-void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en);
-void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8851b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8851b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan);
void rtw8851b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx);
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8851b_set_channel_rf(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 7ea388fa0657..dde96bd63021 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -337,6 +337,11 @@ static const struct rtw89_pwr_cfg rtw8852a_pwroff[] = {
PWR_INTF_MSK_PCIE,
PWR_BASE_MAC,
PWR_CMD_WRITE, BIT(0), 0},
+ {0x0092,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_PCIE,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(4), BIT(4)},
{0x0005,
PWR_CV_MSK_ALL,
PWR_INTF_MSK_PCIE,
@@ -1341,24 +1346,26 @@ static void rtw8852a_rfk_init(struct rtw89_dev *rtwdev)
rtwdev->is_tssi_mode[RF_PATH_B] = false;
rtw8852a_rck(rtwdev);
- rtw8852a_dack(rtwdev);
- rtw8852a_rx_dck(rtwdev, RTW89_PHY_0, true);
+ rtw8852a_dack(rtwdev, RTW89_CHANCTX_0);
+ rtw8852a_rx_dck(rtwdev, RTW89_PHY_0, true, RTW89_CHANCTX_0);
}
static void rtw8852a_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
- rtw8852a_rx_dck(rtwdev, phy_idx, true);
- rtw8852a_iqk(rtwdev, phy_idx);
- rtw8852a_tssi(rtwdev, phy_idx);
- rtw8852a_dpk(rtwdev, phy_idx);
+ rtw8852a_rx_dck(rtwdev, phy_idx, true, chanctx_idx);
+ rtw8852a_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw8852a_tssi(rtwdev, phy_idx, chanctx_idx);
+ rtw8852a_dpk(rtwdev, phy_idx, chanctx_idx);
}
static void rtw8852a_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw8852a_tssi_scan(rtwdev, phy_idx);
+ rtw8852a_tssi_scan(rtwdev, phy_idx, chan);
}
static void rtw8852a_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
@@ -1544,10 +1551,8 @@ static void rtw8852a_start_pmac_tx(struct rtw89_dev *rtwdev,
void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
struct rtw8852a_bb_pmac_info *tx_info,
- enum rtw89_phy_idx idx)
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
-
if (!tx_info->en_pmac_tx) {
rtw8852a_stop_pmac_tx(rtwdev, tx_info, idx);
rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx);
@@ -1569,7 +1574,7 @@ void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
void rtw8852a_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx)
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan)
{
struct rtw8852a_bb_pmac_info tx_info = {0};
@@ -1579,7 +1584,7 @@ void rtw8852a_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
tx_info.tx_cnt = tx_cnt;
tx_info.period = period;
tx_info.tx_time = tx_time;
- rtw8852a_bb_set_pmac_tx(rtwdev, &tx_info, idx);
+ rtw8852a_bb_set_pmac_tx(rtwdev, &tx_info, idx, chan);
}
void rtw8852a_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
@@ -2108,9 +2113,11 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.get_thermal = rtw8852a_get_thermal,
.ctrl_btg_bt_rx = rtw8852a_ctrl_btg_bt_rx,
.query_ppdu = rtw8852a_query_ppdu,
+ .convert_rpl_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8852a_ctrl_nbtg_bt_tx,
.cfg_txrx_path = NULL,
.set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset,
+ .digital_pwr_comp = NULL,
.pwr_on_func = NULL,
.pwr_off_func = NULL,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -2177,6 +2184,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.dig_regs = &rtw8852a_dig_regs,
.tssi_dbw_table = NULL,
.support_macid_num = RTW89_MAX_MAC_ID_NUM,
+ .support_link_num = 0,
.support_chanctx_num = 1,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.h b/drivers/net/wireless/realtek/rtw89/rtw8852a.h
index ea82fed7b7be..d6c1acd09238 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.h
@@ -97,10 +97,10 @@ extern const struct rtw89_chip_info rtw8852a_chip_info;
void rtw8852a_bb_set_plcp_tx(struct rtw89_dev *rtwdev);
void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
struct rtw8852a_bb_pmac_info *tx_info,
- enum rtw89_phy_idx idx);
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan);
void rtw8852a_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx);
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan);
void rtw8852a_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
enum rtw89_phy_idx idx);
void rtw8852a_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
index 6bae8bc07e93..9db8713ac99b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
@@ -493,11 +493,12 @@ static void _dack(struct rtw89_dev *rtwdev)
_dack_s1(rtwdev);
}
-static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
+static void _dac_cal(struct rtw89_dev *rtwdev, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dack_info *dack = &rtwdev->dack;
u32 rf0_0, rf1_0;
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB, chanctx_idx);
dack->dack_done = false;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
@@ -799,12 +800,13 @@ static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
}
static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path, u8 ktype)
+ enum rtw89_phy_idx phy_idx, u8 path, u8 ktype,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
bool fail = false;
u32 iqk_cmd = 0x0;
- u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy_idx, path, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy_idx, path, chanctx_idx);
u32 addr_rfc_ctl = 0x0;
if (path == RF_PATH_A)
@@ -888,7 +890,8 @@ static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
}
static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
static const u32 rxgn_a[4] = {0x18C, 0x1A0, 0x28C, 0x2A0};
@@ -927,7 +930,7 @@ static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp);
rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN, 0x1);
rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
- fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK, chanctx_idx);
rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(16 + gp + path * 4), fail);
}
@@ -952,7 +955,8 @@ static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
}
static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 group = 0x0;
@@ -991,7 +995,7 @@ static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
B_CFIR_LUT_GP, group);
rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
- fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK, chanctx_idx);
switch (iqk_info->iqk_band[path]) {
case RTW89_BAND_2G:
@@ -1040,7 +1044,8 @@ static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
}
static bool _txk_group_sel(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
static const u32 a_txgain[4] = {0xE466, 0x646D, 0xE4E2, 0x64ED};
static const u32 g_txgain[4] = {0x60e8, 0x60f0, 0x61e8, 0x61ED};
@@ -1083,7 +1088,7 @@ static bool _txk_group_sel(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
B_CFIR_LUT_GP, gp);
rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
- fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK, chanctx_idx);
rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(8 + gp + path * 4), fail);
}
@@ -1098,7 +1103,8 @@ static bool _txk_group_sel(struct rtw89_dev *rtwdev,
}
static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 group = 0x2;
@@ -1131,7 +1137,7 @@ static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, group);
rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
- fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK, chanctx_idx);
if (!fail) {
tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
iqk_info->nb_txcfir[path] = tmp | 0x2;
@@ -1179,7 +1185,8 @@ static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
}
static bool _iqk_lok(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 rf0 = 0x0;
@@ -1210,11 +1217,11 @@ static bool _iqk_lok(struct rtw89_dev *rtwdev,
rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
- tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE);
+ tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE, chanctx_idx);
iqk_info->lok_cor_fail[0][path] = tmp;
fsleep(10);
rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
- tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE);
+ tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE, chanctx_idx);
iqk_info->lok_fin_fail[0][path] = tmp;
fail = _lok_finetune_check(rtwdev, path);
return fail;
@@ -1321,7 +1328,8 @@ static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
}
static
-void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
bool lok_is_fail = false;
@@ -1333,30 +1341,35 @@ void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
for (i = 0; i < 3; i++) {
_lok_res_table(rtwdev, path, ibias++);
_iqk_txk_setting(rtwdev, path);
- lok_is_fail = _iqk_lok(rtwdev, phy_idx, path);
+ lok_is_fail = _iqk_lok(rtwdev, phy_idx, path, chanctx_idx);
if (!lok_is_fail)
break;
}
if (iqk_info->is_nbiqk)
- iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path);
+ iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path,
+ chanctx_idx);
else
- iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path);
+ iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path,
+ chanctx_idx);
_iqk_rxclk_setting(rtwdev, path);
_iqk_rxk_setting(rtwdev, path);
if (iqk_info->is_nbiqk || rtwdev->dbcc_en || iqk_info->iqk_band[path] == RTW89_BAND_2G)
- iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path);
+ iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path,
+ chanctx_idx);
else
- iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path);
+ iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path,
+ chanctx_idx);
_iqk_info_iqk(rtwdev, phy_idx, path);
}
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, u8 path)
+ enum rtw89_phy_idx phy, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u32 reg_rf18 = 0x0, reg_35c = 0x0;
u8 idx = 0;
u8 get_empty_table = false;
@@ -1413,9 +1426,9 @@ static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
}
static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
- u8 path)
+ u8 path, enum rtw89_chanctx_idx chanctx_idx)
{
- _iqk_by_path(rtwdev, phy_idx, path);
+ _iqk_by_path(rtwdev, phy_idx, path, chanctx_idx);
}
static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
@@ -1513,7 +1526,8 @@ static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
rtw89_rfk_parser(rtwdev, tbl);
}
-static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path)
+static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 phy_idx = 0x0;
@@ -1525,10 +1539,10 @@ static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path)
else
phy_idx = RTW89_PHY_1;
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_iqk_macbb_setting(rtwdev, phy_idx, path);
_iqk_preset(rtwdev, path);
- _iqk_start_iqk(rtwdev, phy_idx, path);
+ _iqk_start_iqk(rtwdev, phy_idx, path, chanctx_idx);
_iqk_restore(rtwdev, path);
_iqk_afebb_restore(rtwdev, phy_idx, path);
}
@@ -1607,12 +1621,13 @@ static void _iqk_init(struct rtw89_dev *rtwdev)
}
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 backup_bb_val[BACKUP_BB_REGS_NR];
u32 backup_rf_val[RTW8852A_IQK_SS][BACKUP_RF_REGS_NR];
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
@@ -1622,12 +1637,12 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
iqk_info->version = RTW8852A_IQK_VER;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
_iqk_macbb_setting(rtwdev, phy_idx, path);
_iqk_preset(rtwdev, path);
- _iqk_start_iqk(rtwdev, phy_idx, path);
+ _iqk_start_iqk(rtwdev, phy_idx, path, chanctx_idx);
_iqk_restore(rtwdev, path);
_iqk_afebb_restore(rtwdev, phy_idx, path);
_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
@@ -1635,18 +1650,19 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
switch (_kpath(rtwdev, phy_idx)) {
case RF_A:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
break;
case RF_B:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
case RF_AB:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
default:
break;
@@ -1656,9 +1672,10 @@ static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool forc
#define RXDCK_VER_8852A 0xe
static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, bool is_afe)
+ enum rtw89_rf_path path, bool is_afe,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path, chanctx_idx);
u32 ori_val;
rtw89_debug(rtwdev, RTW89_DBG_RFK,
@@ -1704,7 +1721,7 @@ static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- bool is_afe)
+ bool is_afe, enum rtw89_chanctx_idx chanctx_idx)
{
u8 path, kpath, dck_tune;
u32 rf_reg5;
@@ -1732,7 +1749,7 @@ static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
- _set_rx_dck(rtwdev, phy, path, is_afe);
+ _set_rx_dck(rtwdev, phy, path, is_afe, chanctx_idx);
rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune);
rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
@@ -1800,9 +1817,10 @@ static void _dpk_reload_kip(struct rtw89_dev *rtwdev, u32 *reg,
}
static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, enum rtw8852a_dpk_id id)
+ enum rtw89_rf_path path, enum rtw8852a_dpk_id id,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path, chanctx_idx);
u16 dpk_cmd = 0x0;
u32 val;
int ret;
@@ -1841,18 +1859,19 @@ static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static void _dpk_rx_dck(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_EN_TIA_IDA, 0x3);
- _set_rx_dck(rtwdev, phy, path, false);
+ _set_rx_dck(rtwdev, phy, path, false, chanctx_idx);
}
static void _dpk_information(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 kidx = dpk->cur_idx[path];
dpk->bp[path][kidx].band = chan->band_type;
@@ -1967,7 +1986,8 @@ static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u8 cur_rxbb;
@@ -1997,7 +2017,7 @@ static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
- _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);
+ _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK, chanctx_idx);
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD));
@@ -2186,10 +2206,11 @@ static bool _dpk_sync_check(struct rtw89_dev *rtwdev,
}
static bool _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 kidx)
+ enum rtw89_rf_path path, u8 kidx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
_dpk_tpg_sel(rtwdev, path, kidx);
- _dpk_one_shot(rtwdev, phy, path, SYNC);
+ _dpk_one_shot(rtwdev, phy, path, SYNC, chanctx_idx);
return _dpk_sync_check(rtwdev, path); /*1= fail*/
}
@@ -2242,10 +2263,10 @@ static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
static void _dpk_gainloss(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy, enum rtw89_rf_path path,
- u8 kidx)
+ u8 kidx, enum rtw89_chanctx_idx chanctx_idx)
{
_dpk_table_select(rtwdev, path, kidx, 1);
- _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);
+ _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS, chanctx_idx);
}
#define DPK_TXAGC_LOWER 0x2e
@@ -2322,7 +2343,7 @@ static u8 _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
- bool loss_only)
+ bool loss_only, enum rtw89_chanctx_idx chanctx_idx)
{
#define DPK_AGC_ADJ_LMT 6
#define DPK_DGAIN_UPPER 1922
@@ -2330,7 +2351,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
#define DPK_RXBB_UPPER 0x1f
#define DPK_RXBB_LOWER 0
#define DPK_GL_CRIT 7
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
u8 agc_cnt = 0;
bool limited_rxbb = false;
@@ -2344,7 +2365,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
do {
switch (step) {
case DPK_AGC_STEP_SYNC_DGAIN:
- if (_dpk_sync(rtwdev, phy, path, kidx)) {
+ if (_dpk_sync(rtwdev, phy, path, kidx, chanctx_idx)) {
tmp_txagc = DPK_TXAGC_INVAL;
goout = true;
break;
@@ -2380,7 +2401,8 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
if (chan->band_width < RTW89_CHANNEL_WIDTH_80)
_dpk_bypass_rxcfir(rtwdev, path, true);
else
- _dpk_lbk_rxiqk(rtwdev, phy, path);
+ _dpk_lbk_rxiqk(rtwdev, phy, path,
+ chanctx_idx);
}
if (dgain > DPK_DGAIN_UPPER || dgain < DPK_DGAIN_LOWER)
step = DPK_AGC_STEP_SYNC_DGAIN;
@@ -2391,7 +2413,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
break;
case DPK_AGC_STEP_GAIN_LOSS_IDX:
- _dpk_gainloss(rtwdev, phy, path, kidx);
+ _dpk_gainloss(rtwdev, phy, path, kidx, chanctx_idx);
tmp_gl_idx = _dpk_gainloss_read(rtwdev);
if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
@@ -2475,11 +2497,12 @@ static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
}
static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 kidx, u8 gain)
+ enum rtw89_rf_path path, u8 kidx, u8 gain,
+ enum rtw89_chanctx_idx chanctx_idx)
{
_dpk_set_mdpd_para(rtwdev, 0x0);
_dpk_table_select(rtwdev, path, kidx, 1);
- _dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
+ _dpk_one_shot(rtwdev, phy, path, MDPK_IDL, chanctx_idx);
}
static void _dpk_fill_result(struct rtw89_dev *rtwdev,
@@ -2518,10 +2541,10 @@ static void _dpk_fill_result(struct rtw89_dev *rtwdev,
}
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
bool is_reload = false;
u8 idx, cur_band, cur_ch;
@@ -2545,7 +2568,8 @@ static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 gain)
+ enum rtw89_rf_path path, u8 gain,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 txagc = 0, kidx = dpk->cur_idx[path];
@@ -2558,16 +2582,16 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
_rf_direct_cntrl(rtwdev, path, false);
txagc = _dpk_set_tx_pwr(rtwdev, gain, path);
_dpk_rf_setting(rtwdev, gain, path, kidx);
- _dpk_rx_dck(rtwdev, phy, path);
+ _dpk_rx_dck(rtwdev, phy, path, chanctx_idx);
_dpk_kip_setting(rtwdev, path, kidx);
_dpk_manual_txcfir(rtwdev, path, true);
- txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
+ txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false, chanctx_idx);
if (txagc == DPK_TXAGC_INVAL)
is_fail = true;
_dpk_get_thermal(rtwdev, kidx, path);
- _dpk_idl_mpa(rtwdev, phy, path, kidx, gain);
+ _dpk_idl_mpa(rtwdev, phy, path, kidx, gain, chanctx_idx);
rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
_dpk_fill_result(rtwdev, path, kidx, gain, txagc);
_dpk_manual_txcfir(rtwdev, path, false);
@@ -2584,7 +2608,8 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy, u8 kpath)
+ enum rtw89_phy_idx phy, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u32 backup_bb_val[BACKUP_BB_REGS_NR];
@@ -2599,7 +2624,8 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
if (!(kpath & BIT(path)))
continue;
- reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path,
+ chanctx_idx);
if (!reloaded[path] && dpk->bp[path][0].ch != 0)
dpk->cur_idx[path] = !dpk->cur_idx[path];
else
@@ -2624,7 +2650,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
_dpk_tssi_pause(rtwdev, path, true);
_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
- _dpk_information(rtwdev, phy, path);
+ _dpk_information(rtwdev, phy, path, chanctx_idx);
}
_dpk_bb_afe_setting(rtwdev, phy, path, kpath);
@@ -2633,7 +2659,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
if (!(kpath & BIT(path)) || reloaded[path])
continue;
- is_fail = _dpk_main(rtwdev, phy, path, 1);
+ is_fail = _dpk_main(rtwdev, phy, path, 1, chanctx_idx);
_dpk_onoff(rtwdev, path, is_fail);
}
@@ -2652,10 +2678,11 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
}
}
-static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_fem_info *fem = &rtwdev->fem;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
rtw89_debug(rtwdev, RTW89_DBG_RFK,
@@ -2682,17 +2709,19 @@ static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
}
-static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
+static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool force, enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
RTW8852A_DPK_VER, rtwdev->hal.cv,
RTW8852A_RF_REL_VERSION);
- if (_dpk_bypass_check(rtwdev, phy))
+ if (_dpk_bypass_check(rtwdev, phy, chanctx_idx))
_dpk_force_bypass(rtwdev, phy);
else
- _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
+ _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy),
+ chanctx_idx);
}
static void _dpk_onoff(struct rtw89_dev *rtwdev,
@@ -2815,9 +2844,8 @@ static void _dpk_track(struct rtw89_dev *rtwdev)
}
static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
if (band == RTW89_BAND_2G)
@@ -2826,9 +2854,9 @@ static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
}
-static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_sys_defs_tbl);
@@ -2838,9 +2866,9 @@ static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
@@ -2869,7 +2897,7 @@ static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
#define __get_val(ptr, idx) \
({ \
@@ -2883,7 +2911,6 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
__val; \
})
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
@@ -3076,9 +3103,8 @@ static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
}
static void _tssi_pak(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 subband = chan->subband_type;
switch (subband) {
@@ -3252,10 +3278,9 @@ static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
}
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u32 gidx, gidx_1st, gidx_2nd;
s8 de_1st = 0;
@@ -3290,10 +3315,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u32 tgidx, tgidx_1st, tgidx_2nd;
s8 tde_1st = 0;
@@ -3328,11 +3352,10 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
}
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy)
+ enum rtw89_phy_idx phy, const struct rtw89_chan *chan)
{
#define __DE_MASK 0x003ff000
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
static const u32 r_cck_long[RF_PATH_NUM_8852A] = {0x5858, 0x7858};
static const u32 r_cck_short[RF_PATH_NUM_8852A] = {0x5860, 0x7860};
static const u32 r_mcs_20m[RF_PATH_NUM_8852A] = {0x5838, 0x7838};
@@ -3352,7 +3375,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
for (i = 0; i < RF_PATH_NUM_8852A; i++) {
gidx = _tssi_get_cck_group(rtwdev, ch);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = tssi_info->tssi_cck[i][gidx] + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3368,8 +3391,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
rtw89_phy_read32_mask(rtwdev, r_cck_long[i],
__DE_MASK));
- ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = ofdm_de + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3458,10 +3481,10 @@ static void _tssi_track(struct rtw89_dev *rtwdev)
}
}
-static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel, ch_tmp;
u8 bw = chan->band_width;
u8 band = chan->band_type;
@@ -3497,24 +3520,25 @@ static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- u8 path, s16 pwr_dbm, u8 enable)
+ u8 path, s16 pwr_dbm, u8 enable, const struct rtw89_chan *chan)
{
rtw8852a_bb_set_plcp_tx(rtwdev);
rtw8852a_bb_cfg_tx_path(rtwdev, path);
rtw8852a_bb_set_power(rtwdev, pwr_dbm, phy);
- rtw8852a_bb_set_pmac_pkt_tx(rtwdev, enable, 20, 5000, 0, phy);
+ rtw8852a_bb_set_pmac_pkt_tx(rtwdev, enable, 20, 5000, 0, phy, chan);
}
-static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
const struct rtw89_chip_info *mac_reg = rtwdev->chip;
u8 ch = chan->channel, ch_tmp;
u8 bw = chan->band_width;
u8 band = chan->band_type;
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0, chanctx_idx);
s8 power;
s16 xdbm;
u32 i, tx_counter = 0;
@@ -3546,9 +3570,9 @@ static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy));
tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
- _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, true);
+ _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, true, chan);
mdelay(15);
- _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, false);
+ _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, false, chan);
tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD) -
tx_counter;
@@ -3600,19 +3624,21 @@ void rtw8852a_rck(struct rtw89_dev *rtwdev)
_rck(rtwdev, path);
}
-void rtw8852a_dack(struct rtw89_dev *rtwdev)
+void rtw8852a_dack(struct rtw89_dev *rtwdev,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
- _dac_cal(rtwdev, false);
+ _dac_cal(rtwdev, false, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}
-void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
@@ -3620,34 +3646,35 @@ void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
_iqk_init(rtwdev);
if (rtwdev->dbcc_en)
- _iqk_dbcc(rtwdev, phy_idx);
+ _iqk_dbcc(rtwdev, phy_idx, chanctx_idx);
else
- _iqk(rtwdev, phy_idx, false);
+ _iqk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
- bool is_afe)
+ bool is_afe, enum rtw89_chanctx_idx chanctx_idx)
{
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
- _rx_dck(rtwdev, phy_idx, is_afe);
+ _rx_dck(rtwdev, phy_idx, is_afe, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
-void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
@@ -3655,7 +3682,7 @@ void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
rtwdev->dpk.is_dpk_enable = true;
rtwdev->dpk.is_dpk_reload_en = false;
- _dpk(rtwdev, phy_idx, false);
+ _dpk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
@@ -3666,8 +3693,10 @@ void rtw8852a_dpk_track(struct rtw89_dev *rtwdev)
_dpk_track(rtwdev);
}
-void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 i;
rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
@@ -3676,26 +3705,27 @@ void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy);
- _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, chan);
+ _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i, chan);
_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
_tssi_set_dck(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
_tssi_set_dac_gain_tbl(rtwdev, phy, i);
_tssi_slope_cal_org(rtwdev, phy, i);
_tssi_set_rf_gap_tbl(rtwdev, phy, i);
_tssi_set_slope(rtwdev, phy, i);
- _tssi_pak(rtwdev, phy, i);
+ _tssi_pak(rtwdev, phy, i, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
- _tssi_high_power(rtwdev, phy);
- _tssi_pre_tx(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
+ _tssi_high_power(rtwdev, phy, chan);
+ _tssi_pre_tx(rtwdev, phy, chanctx_idx);
}
-void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
u8 i;
@@ -3710,14 +3740,14 @@ void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
- _tssi_pak(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, chan);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
+ _tssi_pak(rtwdev, phy, i, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
void rtw8852a_tssi_track(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h
index fa058ccc8616..8761f2cc9359 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h
@@ -8,14 +8,19 @@
#include "core.h"
void rtw8852a_rck(struct rtw89_dev *rtwdev);
-void rtw8852a_dack(struct rtw89_dev *rtwdev);
-void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852a_dack(struct rtw89_dev *rtwdev,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
- bool is_afe);
-void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+ bool is_afe, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852a_dpk_track(struct rtw89_dev *rtwdev);
-void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
-void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan);
void rtw8852a_tssi_track(struct rtw89_dev *rtwdev);
void rtw8852a_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
enum rtw89_phy_idx phy_idx);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index f26979b89cc9..12be52f76427 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -558,30 +558,32 @@ static void rtw8852b_rfk_init(struct rtw89_dev *rtwdev)
rtw8852b_dpk_init(rtwdev);
rtw8852b_rck(rtwdev);
- rtw8852b_dack(rtwdev);
- rtw8852b_rx_dck(rtwdev, RTW89_PHY_0);
+ rtw8852b_dack(rtwdev, RTW89_CHANCTX_0);
+ rtw8852b_rx_dck(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
}
static void rtw8852b_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
- rtw8852b_rx_dck(rtwdev, phy_idx);
- rtw8852b_iqk(rtwdev, phy_idx);
- rtw8852b_tssi(rtwdev, phy_idx, true);
- rtw8852b_dpk(rtwdev, phy_idx);
+ rtw8852b_rx_dck(rtwdev, phy_idx, chanctx_idx);
+ rtw8852b_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw8852b_tssi(rtwdev, phy_idx, true, chanctx_idx);
+ rtw8852b_dpk(rtwdev, phy_idx, chanctx_idx);
}
static void rtw8852b_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw8852b_tssi_scan(rtwdev, phy_idx);
+ rtw8852b_tssi_scan(rtwdev, phy_idx, chan);
}
static void rtw8852b_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool start)
{
- rtw8852b_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx);
+ rtw8852b_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx, rtwvif->chanctx_idx);
}
static void rtw8852b_rfk_track(struct rtw89_dev *rtwdev)
@@ -739,9 +741,11 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.get_thermal = rtw8852bx_get_thermal,
.ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx,
.query_ppdu = rtw8852bx_query_ppdu,
+ .convert_rpl_to_rssi = rtw8852bx_convert_rpl_to_rssi,
.ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset,
+ .digital_pwr_comp = NULL,
.pwr_on_func = rtw8852b_pwr_on_func,
.pwr_off_func = rtw8852b_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -817,6 +821,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.dig_regs = &rtw8852b_dig_regs,
.tssi_dbw_table = NULL,
.support_macid_num = RTW89_MAX_MAC_ID_NUM,
+ .support_link_num = 0,
.support_chanctx_num = 0,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
index f364e76e2b34..ede0ca5426ae 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
@@ -1445,10 +1445,8 @@ static void rtw8852bx_start_pmac_tx(struct rtw89_dev *rtwdev,
static
void rtw8852bx_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
struct rtw8852bx_bb_pmac_info *tx_info,
- enum rtw89_phy_idx idx)
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
-
if (!tx_info->en_pmac_tx) {
rtw8852bx_stop_pmac_tx(rtwdev, tx_info, idx);
rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx);
@@ -1473,7 +1471,7 @@ void rtw8852bx_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
static
void __rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx)
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan)
{
struct rtw8852bx_bb_pmac_info tx_info = {0};
@@ -1484,7 +1482,7 @@ void __rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
tx_info.period = period;
tx_info.tx_time = tx_time;
- rtw8852bx_bb_set_pmac_tx(rtwdev, &tx_info, idx);
+ rtw8852bx_bb_set_pmac_tx(rtwdev, &tx_info, idx, chan);
}
static
@@ -1623,9 +1621,9 @@ static void __rtw8852bx_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
static
void __rtw8852bx_bb_ctrl_rx_path(struct rtw89_dev *rtwdev,
- enum rtw89_rf_path_bit rx_path)
+ enum rtw89_rf_path_bit rx_path,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u32 rst_mask0;
u32 rst_mask1;
@@ -1713,9 +1711,10 @@ static void rtw8852bx_bb_ctrl_rf_mode_rx_path(struct rtw89_dev *rtwdev,
static void __rtw8852bx_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_rf_path_bit rx_path = hal->antenna_rx ? hal->antenna_rx : RF_AB;
- rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path, chan);
rtw8852bx_bb_ctrl_rf_mode_rx_path(rtwdev, rx_path);
if (rtwdev->hal.rx_nss == 1) {
@@ -1948,6 +1947,19 @@ static void __rtw8852bx_query_ppdu(struct rtw89_dev *rtwdev,
rtw8852bx_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
}
+static void __rtw8852bx_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ u8 delta = phy_ppdu->rpl_avg - phy_ppdu->rssi_avg;
+ u8 *rssi = phy_ppdu->rssi;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++)
+ rssi[i] += delta;
+
+ phy_ppdu->rssi_avg = phy_ppdu->rpl_avg;
+}
+
static int __rtw8852bx_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
@@ -2030,6 +2042,7 @@ const struct rtw8852bx_info rtw8852bx_info = {
.ctrl_nbtg_bt_tx = __rtw8852bx_ctrl_nbtg_bt_tx,
.ctrl_btg_bt_rx = __rtw8852bx_ctrl_btg_bt_rx,
.query_ppdu = __rtw8852bx_query_ppdu,
+ .convert_rpl_to_rssi = __rtw8852bx_convert_rpl_to_rssi,
.read_efuse = __rtw8852bx_read_efuse,
.read_phycap = __rtw8852bx_read_phycap,
.power_trim = __rtw8852bx_power_trim,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h
index 801e7ab9f4fa..3dce5422f41e 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h
@@ -121,13 +121,14 @@ struct rtw8852bx_info {
void (*bb_cfg_txrx_path)(struct rtw89_dev *rtwdev);
void (*bb_cfg_tx_path)(struct rtw89_dev *rtwdev, u8 tx_path);
void (*bb_ctrl_rx_path)(struct rtw89_dev *rtwdev,
- enum rtw89_rf_path_bit rx_path);
+ enum rtw89_rf_path_bit rx_path,
+ const struct rtw89_chan *chan);
void (*bb_set_plcp_tx)(struct rtw89_dev *rtwdev);
void (*bb_set_power)(struct rtw89_dev *rtwdev, s16 pwr_dbm,
enum rtw89_phy_idx idx);
void (*bb_set_pmac_pkt_tx)(struct rtw89_dev *rtwdev, u8 enable,
u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx);
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan);
void (*bb_backup_tssi)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
struct rtw8852bx_bb_tssi_bak *bak);
void (*bb_restore_tssi)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
@@ -145,6 +146,8 @@ struct rtw8852bx_info {
void (*query_ppdu)(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct ieee80211_rx_status *status);
+ void (*convert_rpl_to_rssi)(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu);
int (*read_efuse)(struct rtw89_dev *rtwdev, u8 *log_map,
enum rtw89_efuse_block block);
int (*read_phycap)(struct rtw89_dev *rtwdev, u8 *phycap_map);
@@ -207,9 +210,10 @@ void rtw8852bx_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path)
static inline
void rtw8852bx_bb_ctrl_rx_path(struct rtw89_dev *rtwdev,
- enum rtw89_rf_path_bit rx_path)
+ enum rtw89_rf_path_bit rx_path,
+ const struct rtw89_chan *chan)
{
- rtw8852bx_info.bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_info.bb_ctrl_rx_path(rtwdev, rx_path, chan);
}
static inline
@@ -228,9 +232,10 @@ void rtw8852bx_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
static inline
void rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx)
+ enum rtw89_phy_idx idx, const struct rtw89_chan *chan)
{
- rtw8852bx_info.bb_set_pmac_pkt_tx(rtwdev, enable, tx_cnt, period, tx_time, idx);
+ rtw8852bx_info.bb_set_pmac_pkt_tx(rtwdev, enable, tx_cnt, period, tx_time, idx,
+ chan);
}
static inline
@@ -291,6 +296,13 @@ void rtw8852bx_query_ppdu(struct rtw89_dev *rtwdev,
}
static inline
+void rtw8852bx_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ rtw8852bx_info.convert_rpl_to_rssi(rtwdev, phy_ppdu);
+}
+
+static inline
int rtw8852bx_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
enum rtw89_efuse_block block)
{
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
index 776a45d1fe33..ef47a5facc83 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
@@ -1382,9 +1382,10 @@ static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u
_iqk_info_iqk(rtwdev, phy_idx, path);
}
-static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path)
+static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 reg_rf18;
u32 reg_35c;
@@ -1608,12 +1609,13 @@ static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx
}
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 backup_bb_val[BACKUP_BB_REGS_NR];
u32 backup_rf_val[RTW8852B_IQK_SS][BACKUP_RF_REGS_NR];
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
@@ -1623,7 +1625,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
iqk_info->version = RTW8852B_IQK_VER;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
@@ -1638,20 +1640,21 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u8 kpath = _kpath(rtwdev, phy_idx);
switch (kpath) {
case RF_A:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
break;
case RF_B:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
case RF_AB:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
default:
break;
@@ -1761,9 +1764,9 @@ static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 kidx = dpk->cur_idx[path];
@@ -1786,9 +1789,10 @@ static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 kpath)
+ enum rtw89_rf_path path, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_defs_tbl);
@@ -1803,9 +1807,10 @@ static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 kpath)
+ enum rtw89_rf_path path, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_restore_defs_tbl);
@@ -2217,9 +2222,9 @@ static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
- bool loss_only)
+ bool loss_only, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 step = DPK_AGC_STEP_SYNC_DGAIN;
u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
u8 goout = 0, agc_cnt = 0, limited_rxbb = 0;
@@ -2416,9 +2421,9 @@ static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
bool is_reload = false;
u8 idx, cur_band, cur_ch;
@@ -2443,7 +2448,8 @@ static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 gain)
+ enum rtw89_rf_path path, u8 gain,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 txagc = 0x38, kidx = dpk->cur_idx[path];
@@ -2464,7 +2470,7 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
_dpk_kip_set_rxagc(rtwdev, phy, path);
_dpk_table_select(rtwdev, path, kidx, gain);
- txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
+ txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false, chanctx_idx);
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust txagc = 0x%x\n", txagc);
if (txagc == 0xff) {
@@ -2491,7 +2497,8 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy, u8 kpath)
+ enum rtw89_phy_idx phy, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120};
@@ -2503,7 +2510,8 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
if (dpk->is_dpk_reload_en) {
for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
- reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path,
+ chanctx_idx);
if (!reloaded[path] && dpk->bp[path][0].ch)
dpk->cur_idx[path] = !dpk->cur_idx[path];
else
@@ -2519,19 +2527,19 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
_rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
- _dpk_information(rtwdev, phy, path);
+ _dpk_information(rtwdev, phy, path, chanctx_idx);
if (rtwdev->is_tssi_mode[path])
_dpk_tssi_pause(rtwdev, path, true);
}
- _dpk_bb_afe_setting(rtwdev, phy, path, kpath);
+ _dpk_bb_afe_setting(rtwdev, phy, path, kpath, chanctx_idx);
for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
- is_fail = _dpk_main(rtwdev, phy, path, 1);
+ is_fail = _dpk_main(rtwdev, phy, path, 1, chanctx_idx);
_dpk_onoff(rtwdev, path, is_fail);
}
- _dpk_bb_afe_restore(rtwdev, phy, path, kpath);
+ _dpk_bb_afe_restore(rtwdev, phy, path, kpath, chanctx_idx);
_rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
@@ -2543,9 +2551,10 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
}
}
-static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_fem_info *fem = &rtwdev->fem;
if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
@@ -2577,17 +2586,18 @@ static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
}
-static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
+static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
RTW8852B_DPK_VER, rtwdev->hal.cv,
RTW8852B_RF_REL_VERSION);
- if (_dpk_bypass_check(rtwdev, phy))
+ if (_dpk_bypass_check(rtwdev, phy, chanctx_idx))
_dpk_force_bypass(rtwdev, phy);
else
- _dpk_cal_select(rtwdev, force, phy, RF_AB);
+ _dpk_cal_select(rtwdev, force, phy, RF_AB, chanctx_idx);
}
static void _dpk_track(struct rtw89_dev *rtwdev)
@@ -2722,9 +2732,8 @@ static void _set_dpd_backoff(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
if (band == RTW89_BAND_2G)
@@ -2734,9 +2743,8 @@ static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser(rtwdev, &rtw8852b_tssi_sys_defs_tbl);
@@ -2778,7 +2786,7 @@ static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
#define RTW8852B_TSSI_GET_VAL(ptr, idx) \
({ \
@@ -2792,7 +2800,6 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
__val; \
})
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
@@ -2944,9 +2951,8 @@ static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx
}
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
if (path == RF_PATH_A)
@@ -2960,9 +2966,9 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, bool all)
+ enum rtw89_rf_path path, bool all,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
const struct rtw89_rfk_tbl *tbl = NULL;
u8 ch = chan->channel;
@@ -3231,10 +3237,9 @@ static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
}
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u32 gidx, gidx_1st, gidx_2nd;
s8 de_1st;
@@ -3267,10 +3272,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u32 tgidx, tgidx_1st, tgidx_2nd;
s8 tde_1st;
@@ -3304,10 +3308,10 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
return val;
}
-static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u8 gidx;
s8 ofdm_de;
@@ -3320,7 +3324,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) {
gidx = _tssi_get_cck_group(rtwdev, ch);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = tssi_info->tssi_cck[i][gidx] + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3336,8 +3340,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
_TSSI_DE_MASK));
- ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = ofdm_de + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3383,10 +3387,10 @@ static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_p
}
static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, enum rtw89_rf_path path)
+ enum rtw89_phy_idx phy, enum rtw89_rf_path path,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 channel = chan->channel;
u8 band;
@@ -3420,7 +3424,7 @@ static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm,
- u8 enable)
+ u8 enable, const struct rtw89_chan *chan)
{
enum rtw89_rf_path_bit rx_path;
@@ -3436,11 +3440,11 @@ static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
if (enable) {
rtw8852bx_bb_set_plcp_tx(rtwdev);
rtw8852bx_bb_cfg_tx_path(rtwdev, path);
- rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path, chan);
rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy);
}
- rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy);
+ rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy, chan);
}
static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
@@ -3494,7 +3498,7 @@ static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel)
static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, const s16 *power,
- u32 *tssi_cw_rpt)
+ u32 *tssi_cw_rpt, const struct rtw89_chan *chan)
{
u32 tx_counter, tx_counter_tmp;
const int retry = 100;
@@ -3513,9 +3517,10 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
_tssi_trigger[path], tmp, path);
if (j == 0)
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true, chan);
else
- _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true);
+ _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true,
+ chan);
tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
tx_counter_tmp -= tx_counter;
@@ -3546,14 +3551,14 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
"[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n",
k, path);
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);
return false;
}
tssi_cw_rpt[j] =
rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path], B_TSSI_CWRPT);
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);
tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
tx_counter_tmp -= tx_counter;
@@ -3567,14 +3572,13 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4,
0x78e4, 0x49c0, 0x0d18, 0x0d80};
static const s16 power_2g[4] = {48, 20, 4, 4};
static const s16 power_5g[4] = {48, 20, 4, 4};
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3;
u32 tssi_cw_rpt[RTW8852B_TSSI_PATH_NR] = {0};
u8 channel = chan->channel;
@@ -3635,7 +3639,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);
- ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt);
+ ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt, chan);
if (!ok)
goto out;
@@ -3755,18 +3759,19 @@ void rtw8852b_rck(struct rtw89_dev *rtwdev)
_rck(rtwdev, path);
}
-void rtw8852b_dack(struct rtw89_dev *rtwdev)
+void rtw8852b_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
_dac_cal(rtwdev, false);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}
-void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
@@ -3774,15 +3779,16 @@ void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_iqk_init(rtwdev);
- _iqk(rtwdev, phy_idx, false);
+ _iqk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
-void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
@@ -3795,9 +3801,10 @@ void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
-void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
@@ -3806,7 +3813,7 @@ void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
rtwdev->dpk.is_dpk_enable = true;
rtwdev->dpk.is_dpk_reload_en = false;
- _dpk(rtwdev, phy_idx, false);
+ _dpk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
@@ -3817,9 +3824,11 @@ void rtw8852b_dpk_track(struct rtw89_dev *rtwdev)
_dpk_track(rtwdev);
}
-void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en)
+void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB, chanctx_idx);
u32 tx_en;
u8 i;
@@ -3829,34 +3838,34 @@ void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_e
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8852B; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, i, chan);
_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
_tssi_set_dck(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
_tssi_set_dac_gain_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
_tssi_set_tssi_slope(rtwdev, phy, i);
rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
_tmac_tx_pause(rtwdev, phy, true);
if (hwtx_en)
- _tssi_alimentk(rtwdev, phy, i);
+ _tssi_alimentk(rtwdev, phy, i, chan);
_tmac_tx_pause(rtwdev, phy, false);
rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
u8 channel = chan->channel;
u8 band;
@@ -3879,24 +3888,25 @@ void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RTW8852B_TSSI_PATH_NR; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, i, chan);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
if (tssi_info->alignment_done[i][band])
- _tssi_alimentk_done(rtwdev, phy, i);
+ _tssi_alimentk_done(rtwdev, phy, i, chan);
else
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, bool enable)
+ enum rtw89_phy_idx phy, bool enable,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 channel = chan->channel;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n",
@@ -3904,7 +3914,7 @@ static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
if (enable) {
if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
- rtw8852b_tssi(rtwdev, phy, true);
+ rtw8852b_tssi(rtwdev, phy, true, chanctx_idx);
return;
}
@@ -3921,8 +3931,8 @@ static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
- _tssi_alimentk_done(rtwdev, phy, RF_PATH_A);
- _tssi_alimentk_done(rtwdev, phy, RF_PATH_B);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_A, chan);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_B, chan);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
@@ -3935,12 +3945,13 @@ static void rtw8852b_tssi_default_txagc(struct rtw89_dev *rtwdev,
}
void rtw8852b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
if (scan_start)
- rtw8852b_tssi_default_txagc(rtwdev, phy_idx, true);
+ rtw8852b_tssi_default_txagc(rtwdev, phy_idx, true, chanctx_idx);
else
- rtw8852b_tssi_default_txagc(rtwdev, phy_idx, false);
+ rtw8852b_tssi_default_txagc(rtwdev, phy_idx, false, chanctx_idx);
}
static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h
index f52832065600..c31ba446e6e0 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h
@@ -8,16 +8,22 @@
#include "core.h"
void rtw8852b_rck(struct rtw89_dev *rtwdev);
-void rtw8852b_dack(struct rtw89_dev *rtwdev);
-void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852b_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852b_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852b_dpk_init(struct rtw89_dev *rtwdev);
-void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852b_dpk_track(struct rtw89_dev *rtwdev);
-void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en);
-void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852b_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan);
void rtw8852b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx);
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852b_set_channel_rf(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
index fb98ef9dbc54..7dfdcb5964e1 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
@@ -480,12 +480,12 @@ static void rtw8852bt_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
}
static void rtw8852bt_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en,
- u8 phy_idx)
+ u8 phy_idx, const struct rtw89_chan *chan)
{
if (!rtwdev->dbcc_en) {
rtw8852bt_tssi_cont_en(rtwdev, en, RF_PATH_A);
rtw8852bt_tssi_cont_en(rtwdev, en, RF_PATH_B);
- rtw8852bt_tssi_scan(rtwdev, phy_idx);
+ rtw8852bt_tssi_scan(rtwdev, phy_idx, chan);
} else {
if (phy_idx == RTW89_PHY_0)
rtw8852bt_tssi_cont_en(rtwdev, en, RF_PATH_A);
@@ -511,14 +511,14 @@ static void rtw8852bt_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
if (enter) {
rtw89_chip_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL);
rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
- rtw8852bt_tssi_cont_en_phyidx(rtwdev, false, RTW89_PHY_0);
+ rtw8852bt_tssi_cont_en_phyidx(rtwdev, false, RTW89_PHY_0, chan);
rtw8852bt_adc_en(rtwdev, false);
fsleep(40);
rtw8852bt_bb_reset_en(rtwdev, chan->band_type, phy_idx, false);
} else {
rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
rtw8852bt_adc_en(rtwdev, true);
- rtw8852bt_tssi_cont_en_phyidx(rtwdev, true, RTW89_PHY_0);
+ rtw8852bt_tssi_cont_en_phyidx(rtwdev, true, RTW89_PHY_0, chan);
rtw8852bt_bb_reset_en(rtwdev, chan->band_type, phy_idx, true);
rtw89_chip_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en);
}
@@ -531,30 +531,32 @@ static void rtw8852bt_rfk_init(struct rtw89_dev *rtwdev)
rtw8852bt_dpk_init(rtwdev);
rtw8852bt_rck(rtwdev);
- rtw8852bt_dack(rtwdev);
- rtw8852bt_rx_dck(rtwdev, RTW89_PHY_0);
+ rtw8852bt_dack(rtwdev, RTW89_CHANCTX_0);
+ rtw8852bt_rx_dck(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
}
static void rtw8852bt_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
- rtw8852bt_rx_dck(rtwdev, phy_idx);
- rtw8852bt_iqk(rtwdev, phy_idx);
- rtw8852bt_tssi(rtwdev, phy_idx, true);
- rtw8852bt_dpk(rtwdev, phy_idx);
+ rtw8852bt_rx_dck(rtwdev, phy_idx, chanctx_idx);
+ rtw8852bt_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw8852bt_tssi(rtwdev, phy_idx, true, chanctx_idx);
+ rtw8852bt_dpk(rtwdev, phy_idx, chanctx_idx);
}
static void rtw8852bt_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw8852bt_tssi_scan(rtwdev, phy_idx);
+ rtw8852bt_tssi_scan(rtwdev, phy_idx, chan);
}
static void rtw8852bt_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool start)
{
- rtw8852bt_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx);
+ rtw8852bt_wifi_scan_notify(rtwdev, start, rtwvif->phy_idx, rtwvif->chanctx_idx);
}
static void rtw8852bt_rfk_track(struct rtw89_dev *rtwdev)
@@ -673,9 +675,11 @@ static const struct rtw89_chip_ops rtw8852bt_chip_ops = {
.get_thermal = rtw8852bx_get_thermal,
.ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx,
.query_ppdu = rtw8852bx_query_ppdu,
+ .convert_rpl_to_rssi = rtw8852bx_convert_rpl_to_rssi,
.ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset,
+ .digital_pwr_comp = NULL,
.pwr_on_func = rtw8852bt_pwr_on_func,
.pwr_off_func = rtw8852bt_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -750,6 +754,7 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
.dig_regs = &rtw8852bt_dig_regs,
.tssi_dbw_table = NULL,
.support_macid_num = RTW89_MAX_MAC_ID_NUM,
+ .support_link_num = 0,
.support_chanctx_num = 1,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
index 278f907fd895..336a83e1d46b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
@@ -1525,9 +1525,10 @@ static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u
lok_result, txk_result, rxk_result);
}
-static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path)
+static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 get_empty_table = false;
u32 reg_rf18;
@@ -1755,12 +1756,13 @@ static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx
}
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 backup_bb_val[BACKUP_BB_REGS_NR];
u32 backup_rf_val[RTW8852BT_SS][BACKUP_RF_REGS_NR];
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
@@ -1770,7 +1772,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
iqk_info->version = RTW8852BT_IQK_VER;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_rfk_backup_bb_reg(rtwdev, backup_bb_val);
_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
@@ -1785,20 +1787,21 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u8 kpath = _kpath(rtwdev, phy_idx);
switch (kpath) {
case RF_A:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
break;
case RF_B:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
case RF_AB:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
default:
break;
@@ -1879,9 +1882,9 @@ static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 kidx = dpk->cur_idx[path];
@@ -2277,9 +2280,9 @@ static bool _dpk_pas_read(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
- bool loss_only)
+ bool loss_only, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 goout = 0, agc_cnt = 0, limited_rxbb = 0, gl_cnt = 0;
u8 tmp_txagc, tmp_rxbb, tmp_gl_idx = 0;
@@ -2504,9 +2507,9 @@ static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 idx, cur_band, cur_ch;
bool is_reload = false;
@@ -2549,7 +2552,8 @@ void _drf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool i
}
static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, u8 gain)
+ enum rtw89_rf_path path, u8 gain,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 txagc = 0x38, kidx = dpk->cur_idx[path];
@@ -2569,7 +2573,7 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
_dpk_kip_set_rxagc(rtwdev, phy, path);
_dpk_table_select(rtwdev, path, kidx, gain);
- txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
+ txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false, chanctx_idx);
_rfk_get_thermal(rtwdev, kidx, path);
@@ -2601,7 +2605,8 @@ _error:
}
static void _dpk_cal_select(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, u8 kpath)
+ enum rtw89_phy_idx phy, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u32 backup_kip_val[BACKUP_KIP_REGS_NR];
@@ -2611,7 +2616,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev,
u8 path;
for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
- reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path, chanctx_idx);
if (!reloaded[path] && dpk->bp[path][0].ch != 0)
dpk->cur_idx[path] = !dpk->cur_idx[path];
else
@@ -2623,7 +2628,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev,
for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
- _dpk_information(rtwdev, phy, path);
+ _dpk_information(rtwdev, phy, path, chanctx_idx);
if (rtwdev->is_tssi_mode[path])
_dpk_tssi_pause(rtwdev, path, true);
}
@@ -2631,7 +2636,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev,
_rfk_bb_afe_setting(rtwdev, phy, path, kpath);
for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++)
- _dpk_main(rtwdev, phy, path, 1);
+ _dpk_main(rtwdev, phy, path, 1, chanctx_idx);
_rfk_bb_afe_restore(rtwdev, phy, path, kpath);
@@ -2646,9 +2651,10 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev,
}
}
-static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_fem_info *fem = &rtwdev->fem;
if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
@@ -2817,9 +2823,8 @@ static void _tssi_dpk_off(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
if (band == RTW89_BAND_2G)
@@ -2829,9 +2834,8 @@ static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
rtw89_rfk_parser(rtwdev, &rtw8852bt_tssi_sys_defs_tbl);
@@ -2878,7 +2882,7 @@ static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
#define RTW8852BT_TSSI_GET_VAL(ptr, idx) \
({ \
@@ -2893,7 +2897,6 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
})
struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
@@ -3047,9 +3050,8 @@ static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx
}
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
if (path == RF_PATH_A)
@@ -3063,9 +3065,9 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path, bool all)
+ enum rtw89_rf_path path, bool all,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
const struct rtw89_rfk_tbl *tbl = NULL;
u8 ch = chan->channel;
@@ -3310,10 +3312,9 @@ static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
}
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u32 gidx, gidx_1st, gidx_2nd;
s8 de_1st;
@@ -3346,10 +3347,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u32 tgidx, tgidx_1st, tgidx_2nd;
s8 tde_1st;
@@ -3383,10 +3383,10 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
return val;
}
-static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u8 gidx;
s8 ofdm_de;
@@ -3399,7 +3399,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) {
gidx = _tssi_get_cck_group(rtwdev, ch);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = tssi_info->tssi_cck[i][gidx] + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3415,8 +3415,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx p
rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
_TSSI_DE_MASK));
- ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = ofdm_de + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3463,10 +3463,10 @@ static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_p
}
static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, enum rtw89_rf_path path)
+ enum rtw89_phy_idx phy, enum rtw89_rf_path path,
+ const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 channel = chan->channel;
u8 band;
@@ -3500,7 +3500,7 @@ static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm,
- u8 enable)
+ u8 enable, const struct rtw89_chan *chan)
{
enum rtw89_rf_path_bit rx_path;
@@ -3516,11 +3516,11 @@ static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
if (enable) {
rtw8852bx_bb_set_plcp_tx(rtwdev);
rtw8852bx_bb_cfg_tx_path(rtwdev, path);
- rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path, chan);
rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy);
}
- rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy);
+ rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy, chan);
}
static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
@@ -3574,7 +3574,7 @@ static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel)
static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, const s16 *power,
- u32 *tssi_cw_rpt)
+ u32 *tssi_cw_rpt, const struct rtw89_chan *chan)
{
u32 tx_counter, tx_counter_tmp;
const int retry = 100;
@@ -3593,9 +3593,11 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
_tssi_trigger[path], tmp, path);
if (j == 0)
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true,
+ chan);
else
- _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true);
+ _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true,
+ chan);
tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
tx_counter_tmp -= tx_counter;
@@ -3626,7 +3628,7 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
"[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n",
k, path);
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);
return false;
}
@@ -3634,7 +3636,7 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path],
B_TSSI_CWRPT);
- _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false, chan);
tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
tx_counter_tmp -= tx_counter;
@@ -3648,14 +3650,13 @@ static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4,
0x78e4, 0x49c0, 0x0d18, 0x0d80};
static const s16 power_2g[4] = {48, 20, 4, -8};
static const s16 power_5g[4] = {48, 20, 4, 4};
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3;
u32 tssi_cw_rpt[RTW8852BT_TSSI_PATH_NR] = {};
u8 channel = chan->channel;
@@ -3701,7 +3702,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);
- ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt);
+ ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt, chan);
if (!ok)
goto out;
@@ -3833,18 +3834,19 @@ void rtw8852bt_rck(struct rtw89_dev *rtwdev)
_rck(rtwdev, path);
}
-void rtw8852bt_dack(struct rtw89_dev *rtwdev)
+void rtw8852bt_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
_dac_cal(rtwdev, false);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}
-void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
@@ -3852,15 +3854,16 @@ void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_iqk_init(rtwdev);
- _iqk(rtwdev, phy_idx, false);
+ _iqk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
}
-void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
@@ -3873,15 +3876,16 @@ void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
-void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] ****** DPK Start (Ver: 0x%x) ******\n", RTW8852BT_DPK_VER);
- if (_dpk_bypass_check(rtwdev, phy_idx))
+ if (_dpk_bypass_check(rtwdev, phy_idx, chanctx_idx))
_dpk_force_bypass(rtwdev, phy_idx);
else
- _dpk_cal_select(rtwdev, phy_idx, RF_AB);
+ _dpk_cal_select(rtwdev, phy_idx, RF_AB, chanctx_idx);
}
void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev)
@@ -3889,10 +3893,12 @@ void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev)
_dpk_track(rtwdev);
}
-void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en)
+void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
static const u32 reg[2] = {R_DPD_CH0A, R_DPD_CH0B};
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB, chanctx_idx);
u32 reg_backup[2] = {};
u32 tx_en;
u8 i;
@@ -3905,36 +3911,36 @@ void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, i, chan);
_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
_tssi_set_dck(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
_tssi_set_dac_gain_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
_tssi_set_tssi_slope(rtwdev, phy, i);
rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
_tmac_tx_pause(rtwdev, phy, true);
if (hwtx_en)
- _tssi_alimentk(rtwdev, phy, i);
+ _tssi_alimentk(rtwdev, phy, i, chan);
_tmac_tx_pause(rtwdev, phy, false);
rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
_tssi_reload_bb_registers(rtwdev, phy, reg, reg_backup, 2);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
u8 channel = chan->channel;
u8 band;
@@ -3957,24 +3963,25 @@ void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = RF_PATH_A; i < RTW8852BT_TSSI_PATH_NR; i++) {
- _tssi_rf_setting(rtwdev, phy, i);
- _tssi_set_sys(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_rf_setting(rtwdev, phy, i, chan);
+ _tssi_set_sys(rtwdev, phy, i, chan);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
if (tssi_info->alignment_done[i][band])
- _tssi_alimentk_done(rtwdev, phy, i);
+ _tssi_alimentk_done(rtwdev, phy, i, chan);
else
- _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_alignment_default(rtwdev, phy, i, true, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, bool enable)
+ enum rtw89_phy_idx phy, bool enable,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 channel = chan->channel;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n",
@@ -3996,8 +4003,8 @@ static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
- _tssi_alimentk_done(rtwdev, phy, RF_PATH_A);
- _tssi_alimentk_done(rtwdev, phy, RF_PATH_B);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_A, chan);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_B, chan);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
@@ -4010,12 +4017,13 @@ static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev,
}
void rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
if (scan_start)
- rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, true);
+ rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, true, chanctx_idx);
else
- rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, false);
+ rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, false, chanctx_idx);
}
static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h
index ef3d98f80400..e34560b4905f 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h
@@ -8,16 +8,22 @@
#include "core.h"
void rtw8852bt_rck(struct rtw89_dev *rtwdev);
-void rtw8852bt_dack(struct rtw89_dev *rtwdev);
-void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
-void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852bt_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852bt_dpk_init(struct rtw89_dev *rtwdev);
-void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev);
-void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en);
-void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool hwtx_en, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan);
void rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
- enum rtw89_phy_idx phy_idx);
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852bt_set_channel_rf(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index dc1da9ff055c..1c6e89ab0f4b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -1817,7 +1817,7 @@ static void rtw8852c_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
RTW89_SCH_TX_SEL_ALL);
rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, false);
rtw8852c_dfs_en(rtwdev, false);
- rtw8852c_tssi_cont_en_phyidx(rtwdev, false, phy_idx);
+ rtw8852c_tssi_cont_en_phyidx(rtwdev, false, phy_idx, chan);
rtw8852c_adc_en(rtwdev, false);
fsleep(40);
rtw8852c_bb_reset_en(rtwdev, chan->band_type, phy_idx, false);
@@ -1825,7 +1825,7 @@ static void rtw8852c_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, true);
rtw8852c_adc_en(rtwdev, true);
rtw8852c_dfs_en(rtwdev, true);
- rtw8852c_tssi_cont_en_phyidx(rtwdev, true, phy_idx);
+ rtw8852c_tssi_cont_en_phyidx(rtwdev, true, phy_idx, chan);
rtw8852c_bb_reset_en(rtwdev, chan->band_type, phy_idx, true);
rtw89_chip_resume_sch_tx(rtwdev, mac_idx, p->tx_en);
}
@@ -1842,26 +1842,28 @@ static void rtw8852c_rfk_init(struct rtw89_dev *rtwdev)
rtw8852c_dpk_init(rtwdev);
rtw8852c_rck(rtwdev);
- rtw8852c_dack(rtwdev);
+ rtw8852c_dack(rtwdev, RTW89_CHANCTX_0);
rtw8852c_rx_dck(rtwdev, RTW89_PHY_0, false);
}
static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
rtw8852c_mcc_get_ch_info(rtwdev, phy_idx);
rtw8852c_rx_dck(rtwdev, phy_idx, false);
- rtw8852c_iqk(rtwdev, phy_idx);
- rtw8852c_tssi(rtwdev, phy_idx);
- rtw8852c_dpk(rtwdev, phy_idx);
+ rtw8852c_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw8852c_tssi(rtwdev, phy_idx, chanctx_idx);
+ rtw8852c_dpk(rtwdev, phy_idx, chanctx_idx);
rtw89_fw_h2c_rf_ntfy_mcc(rtwdev);
}
static void rtw8852c_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw8852c_tssi_scan(rtwdev, phy_idx);
+ rtw8852c_tssi_scan(rtwdev, phy_idx, chan);
}
static void rtw8852c_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
@@ -2888,9 +2890,11 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.get_thermal = rtw8852c_get_thermal,
.ctrl_btg_bt_rx = rtw8852c_ctrl_btg_bt_rx,
.query_ppdu = rtw8852c_query_ppdu,
+ .convert_rpl_to_rssi = NULL,
.ctrl_nbtg_bt_tx = rtw8852c_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8852c_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset,
+ .digital_pwr_comp = NULL,
.pwr_on_func = rtw8852c_pwr_on_func,
.pwr_off_func = rtw8852c_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -2958,6 +2962,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.dig_regs = &rtw8852c_dig_regs,
.tssi_dbw_table = &rtw89_8852c_tssi_dbw_table,
.support_macid_num = RTW89_MAX_MAC_ID_NUM,
+ .support_link_num = 0,
.support_chanctx_num = 2,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
index 6e199e82690b..211c051c2967 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
@@ -5,6 +5,7 @@
#include "chan.h"
#include "coex.h"
#include "debug.h"
+#include "fw.h"
#include "phy.h"
#include "reg.h"
#include "rtw8852c.h"
@@ -584,11 +585,12 @@ static void _drck(struct rtw89_dev *rtwdev)
rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD));
}
-static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
+static void _dac_cal(struct rtw89_dev *rtwdev, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dack_info *dack = &rtwdev->dack;
u32 rf0_0, rf1_0;
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB, chanctx_idx);
dack->dack_done = false;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
@@ -1321,9 +1323,10 @@ static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u
}
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy, u8 path)
+ enum rtw89_phy_idx phy, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
@@ -1516,12 +1519,13 @@ static void _iqk_init(struct rtw89_dev *rtwdev)
}
static void _doiqk(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy_idx, u8 path)
+ enum rtw89_phy_idx phy_idx, u8 path,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u32 backup_bb_val[BACKUP_BB_REGS_NR];
u32 backup_rf_val[RTW8852C_IQK_SS][BACKUP_RF_REGS_NR];
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
@@ -1531,7 +1535,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
iqk_info->version = RTW8852C_IQK_VER;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
- _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_get_ch_info(rtwdev, phy_idx, path, chanctx_idx);
_rfk_backup_bb_reg(rtwdev, backup_bb_val);
_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
_iqk_macbb_setting(rtwdev, phy_idx, path);
@@ -1544,18 +1548,19 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
}
-static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
switch (_kpath(rtwdev, phy_idx)) {
case RF_A:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
break;
case RF_B:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
case RF_AB:
- _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
- _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A, chanctx_idx);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B, chanctx_idx);
break;
default:
break;
@@ -1901,9 +1906,9 @@ static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static void _dpk_information(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 kidx = dpk->cur_idx[path];
@@ -2495,9 +2500,9 @@ static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, enum rtw89_chanctx_idx chanctx_idx)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
bool is_reload = false;
u8 idx, cur_band, cur_ch;
@@ -2689,7 +2694,8 @@ static void _dpk_drf_direct_cntrl(struct rtw89_dev *rtwdev, u8 path, bool is_byb
}
static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
- enum rtw89_phy_idx phy, u8 kpath)
+ enum rtw89_phy_idx phy, u8 kpath,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120, 0xc0c4, 0xc0e8, 0xc0d4, 0xc0d8};
@@ -2705,7 +2711,8 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
if (!(kpath & BIT(path)))
continue;
- reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path,
+ chanctx_idx);
if (!reloaded[path] && dpk->bp[path][0].ch != 0)
dpk->cur_idx[path] = !dpk->cur_idx[path];
else
@@ -2722,7 +2729,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
path, dpk->cur_idx[path]);
_dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
_rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
- _dpk_information(rtwdev, phy, path);
+ _dpk_information(rtwdev, phy, path, chanctx_idx);
_dpk_init(rtwdev, path);
if (rtwdev->is_tssi_mode[path])
_dpk_tssi_pause(rtwdev, path, true);
@@ -2755,10 +2762,11 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
_dpk_kip_pwr_clk_onoff(rtwdev, false);
}
-static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
struct rtw89_fem_info *fem = &rtwdev->fem;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u8 band = chan->band_type;
if (rtwdev->hal.cv == CHIP_CAV && band != RTW89_BAND_2G) {
@@ -2790,17 +2798,18 @@ static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
}
}
-static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
+static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force,
+ enum rtw89_chanctx_idx chanctx_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
RTW8852C_DPK_VER, rtwdev->hal.cv,
RTW8852C_RF_REL_VERSION);
- if (_dpk_bypass_check(rtwdev, phy))
+ if (_dpk_bypass_check(rtwdev, phy, chanctx_idx))
_dpk_force_bypass(rtwdev, phy);
else
- _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
+ _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy), chanctx_idx);
if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_DCKC, RR_DCKC_CHK) == 0x1)
rtw8852c_rx_dck(rtwdev, phy, false);
@@ -2891,9 +2900,8 @@ static void _dpk_track(struct rtw89_dev *rtwdev)
}
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_bandwidth bw = chan->band_width;
enum rtw89_band band = chan->band_type;
u32 clk = 0x0;
@@ -2945,9 +2953,8 @@ static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
}
static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
if (path == RF_PATH_A) {
@@ -2972,7 +2979,7 @@ static void _tssi_set_bbgain_split(struct rtw89_dev *rtwdev, enum rtw89_phy_idx
}
static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
#define RTW8852C_TSSI_GET_VAL(ptr, idx) \
({ \
@@ -2985,8 +2992,8 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
} \
__val; \
})
+ struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
@@ -3001,56 +3008,88 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
switch (subband) {
default:
case RTW89_CH_2G:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_p;
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_2ga_n;
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_p;
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_2gb_n;
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_2ga_p;
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_2ga_n;
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_2gb_p;
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_2gb_n;
break;
case RTW89_CH_5G_BAND_1:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[0];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[0];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[0];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[0];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[0];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[0];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[0];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[0];
break;
case RTW89_CH_5G_BAND_3:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[1];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[1];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[1];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[1];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[1];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[1];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[1];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[1];
break;
case RTW89_CH_5G_BAND_4:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[2];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[2];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[2];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[2];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_p[2];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5ga_n[2];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_p[2];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_5gb_n[2];
break;
case RTW89_CH_6G_BAND_IDX0:
case RTW89_CH_6G_BAND_IDX1:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[0];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[0];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[0];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[0];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[0];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[0];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[0];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][0] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[0];
break;
case RTW89_CH_6G_BAND_IDX2:
case RTW89_CH_6G_BAND_IDX3:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[1];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[1];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[1];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[1];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[1];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[1];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[1];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][1] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[1];
break;
case RTW89_CH_6G_BAND_IDX4:
case RTW89_CH_6G_BAND_IDX5:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[2];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[2];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[2];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[2];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[2];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[2];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[2];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][2] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[2];
break;
case RTW89_CH_6G_BAND_IDX6:
case RTW89_CH_6G_BAND_IDX7:
- thm_up_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[3];
- thm_down_a = rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[3];
- thm_up_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[3];
- thm_down_b = rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[3];
+ thm_up_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][3] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_p[3];
+ thm_down_a = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][3] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6ga_n[3];
+ thm_up_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][3] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_p[3];
+ thm_down_b = trk ? trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][3] :
+ rtw89_8852c_trk_cfg.delta_swingidx_6gb_n[3];
break;
}
@@ -3158,9 +3197,8 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
}
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
if (path == RF_PATH_A) {
@@ -3175,9 +3213,9 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
}
static void _tssi_set_aligk_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
const struct rtw89_rfk_tbl *tbl;
@@ -3586,10 +3624,9 @@ static u32 _tssi_get_6g_trim_group(struct rtw89_dev *rtwdev, u8 ch)
}
static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
u8 ch = chan->channel;
u32 gidx, gidx_1st, gidx_2nd;
@@ -3650,10 +3687,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
enum rtw89_band band = chan->band_type;
u8 ch = chan->channel;
u32 tgidx, tgidx_1st, tgidx_2nd;
@@ -3715,10 +3751,9 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
}
static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy)
+ enum rtw89_phy_idx phy, const struct rtw89_chan *chan)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
u8 ch = chan->channel;
u8 gidx;
s8 ofdm_de;
@@ -3741,7 +3776,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
for (i = path; i < path_max; i++) {
gidx = _tssi_get_cck_group(rtwdev, ch);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = tssi_info->tssi_cck[i][gidx] + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3757,8 +3792,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
_TSSI_DE_MASK));
- ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
- trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i, chan);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i, chan);
val = ofdm_de + trim_de;
rtw89_debug(rtwdev, RTW89_DBG_TSSI,
@@ -3781,7 +3816,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
}
static void rtw8852c_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
- enum rtw89_rf_path path)
+ enum rtw89_rf_path path, const struct rtw89_chan *chan)
{
static const u32 tssi_trk[2] = {0x5818, 0x7818};
static const u32 tssi_en[2] = {0x5820, 0x7820};
@@ -3790,25 +3825,26 @@ static void rtw8852c_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x0);
rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x0);
if (rtwdev->dbcc_en && path == RF_PATH_B)
- _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_1);
+ _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_1, chan);
else
- _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_0);
+ _tssi_set_efuse_to_de(rtwdev, RTW89_PHY_0, chan);
} else {
rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x1);
rtw89_phy_write32_mask(rtwdev, tssi_en[path], BIT(31), 0x1);
}
}
-void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
+void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx,
+ const struct rtw89_chan *chan)
{
if (!rtwdev->dbcc_en) {
- rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A);
- rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A, chan);
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B, chan);
} else {
if (phy_idx == RTW89_PHY_0)
- rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_A, chan);
else
- rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ rtw8852c_tssi_cont_en(rtwdev, en, RF_PATH_B, chan);
}
}
@@ -4112,26 +4148,27 @@ void rtw8852c_rck(struct rtw89_dev *rtwdev)
_rck(rtwdev, path);
}
-void rtw8852c_dack(struct rtw89_dev *rtwdev)
+void rtw8852c_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx)
{
- u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
- _dac_cal(rtwdev, false);
+ _dac_cal(rtwdev, false, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
}
-void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
_iqk_init(rtwdev);
- _iqk(rtwdev, phy_idx, false);
+ _iqk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
@@ -4202,10 +4239,11 @@ void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_a
void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ enum rtw89_chanctx_idx chanctx_idx = RTW89_CHANCTX_0;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
u8 dck_channel;
u8 cur_thermal;
u32 tx_en;
@@ -4259,16 +4297,17 @@ void rtw8852c_dpk_init(struct rtw89_dev *rtwdev)
dpk->is_dpk_reload_en = false;
}
-void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx)
{
u32 tx_en;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0, chanctx_idx);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
_wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
- _dpk(rtwdev, phy_idx, false);
+ _dpk(rtwdev, phy_idx, false, chanctx_idx);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
@@ -4279,8 +4318,10 @@ void rtw8852c_dpk_track(struct rtw89_dev *rtwdev)
_dpk_track(rtwdev);
}
-void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
@@ -4298,23 +4339,24 @@ void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = path; i < path_max; i++) {
- _tssi_set_sys(rtwdev, phy, i);
+ _tssi_set_sys(rtwdev, phy, i, chan);
_tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
_tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
- _tssi_set_dck(rtwdev, phy, i);
+ _tssi_set_dck(rtwdev, phy, i, chan);
_tssi_set_bbgain_split(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_set_aligk_default(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_set_aligk_default(rtwdev, phy, i, chan);
_tssi_set_slope(rtwdev, phy, i);
_tssi_run_slope(rtwdev, phy, i);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
-void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan)
{
u32 i, path = RF_PATH_A, path_max = RF_PATH_NUM_8852C;
@@ -4339,15 +4381,15 @@ void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
_tssi_disable(rtwdev, phy);
for (i = path; i < path_max; i++) {
- _tssi_set_sys(rtwdev, phy, i);
- _tssi_set_dck(rtwdev, phy, i);
- _tssi_set_tmeter_tbl(rtwdev, phy, i);
- _tssi_slope_cal_org(rtwdev, phy, i);
- _tssi_set_aligk_default(rtwdev, phy, i);
+ _tssi_set_sys(rtwdev, phy, i, chan);
+ _tssi_set_dck(rtwdev, phy, i, chan);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i, chan);
+ _tssi_slope_cal_org(rtwdev, phy, i, chan);
+ _tssi_set_aligk_default(rtwdev, phy, i, chan);
}
_tssi_enable(rtwdev, phy);
- _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy, chan);
}
static void rtw8852c_tssi_default_txagc(struct rtw89_dev *rtwdev,
@@ -4422,7 +4464,7 @@ void rtw8852c_rfk_chanctx_cb(struct rtw89_dev *rtwdev,
dpk->is_dpk_enable = true;
for (path = 0; path < RTW8852C_DPK_RF_PATH; path++)
_dpk_onoff(rtwdev, path, false);
- rtw8852c_dpk(rtwdev, RTW89_PHY_0);
+ rtw8852c_dpk(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
break;
default:
break;
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
index 6605137e61aa..306dd0a0be73 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
@@ -9,16 +9,21 @@
void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
void rtw8852c_rck(struct rtw89_dev *rtwdev);
-void rtw8852c_dack(struct rtw89_dev *rtwdev);
-void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852c_dack(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool is_afe);
void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev);
void rtw8852c_dpk_init(struct rtw89_dev *rtwdev);
-void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_chanctx_idx chanctx_idx);
void rtw8852c_dpk_track(struct rtw89_dev *rtwdev);
-void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
-void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
-void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx);
+void rtw8852c_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_chanctx_idx chanctx_idx);
+void rtw8852c_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan);
+void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx,
+ const struct rtw89_chan *chan);
void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
enum rtw89_phy_idx phy_idx);
void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 6004a622d244..63b1ff2f98ed 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -1689,6 +1689,60 @@ static int rtw8922a_ctrl_rx_path_tmac(struct rtw89_dev *rtwdev,
return 0;
}
+#define DIGITAL_PWR_COMP_REG_NUM 22
+static const u32 rtw8922a_digital_pwr_comp_val[][DIGITAL_PWR_COMP_REG_NUM] = {
+ {0x012C0096, 0x044C02BC, 0x00322710, 0x015E0096, 0x03C8028A,
+ 0x0BB80708, 0x17701194, 0x02020100, 0x03030303, 0x01000303,
+ 0x05030302, 0x06060605, 0x06050300, 0x0A090807, 0x02000B0B,
+ 0x09080604, 0x0D0D0C0B, 0x08060400, 0x110F0C0B, 0x05001111,
+ 0x0D0C0907, 0x12121210},
+ {0x012C0096, 0x044C02BC, 0x00322710, 0x015E0096, 0x03C8028A,
+ 0x0BB80708, 0x17701194, 0x04030201, 0x05050505, 0x01000505,
+ 0x07060504, 0x09090908, 0x09070400, 0x0E0D0C0B, 0x03000E0E,
+ 0x0D0B0907, 0x1010100F, 0x0B080500, 0x1512100D, 0x05001515,
+ 0x100D0B08, 0x15151512},
+};
+
+static void rtw8922a_set_digital_pwr_comp(struct rtw89_dev *rtwdev,
+ bool enable, u8 nss,
+ enum rtw89_rf_path path)
+{
+ static const u32 ltpc_t0[2] = {R_BE_LTPC_T0_PATH0, R_BE_LTPC_T0_PATH1};
+ const u32 *digital_pwr_comp;
+ u32 addr, val;
+ u32 i;
+
+ if (nss == 1)
+ digital_pwr_comp = rtw8922a_digital_pwr_comp_val[0];
+ else
+ digital_pwr_comp = rtw8922a_digital_pwr_comp_val[1];
+
+ addr = ltpc_t0[path];
+ for (i = 0; i < DIGITAL_PWR_COMP_REG_NUM; i++, addr += 4) {
+ val = enable ? digital_pwr_comp[i] : 0;
+ rtw89_phy_write32(rtwdev, addr, val);
+ }
+}
+
+static void rtw8922a_digital_pwr_comp(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ bool enable = chan->band_type != RTW89_BAND_2G;
+ u8 path;
+
+ if (rtwdev->mlo_dbcc_mode == MLO_1_PLUS_1_1RF) {
+ if (phy_idx == RTW89_PHY_0)
+ path = RF_PATH_A;
+ else
+ path = RF_PATH_B;
+ rtw8922a_set_digital_pwr_comp(rtwdev, enable, 1, path);
+ } else {
+ rtw8922a_set_digital_pwr_comp(rtwdev, enable, 2, RF_PATH_A);
+ rtw8922a_set_digital_pwr_comp(rtwdev, enable, 2, RF_PATH_B);
+ }
+}
+
static int rtw8922a_ctrl_mlo(struct rtw89_dev *rtwdev, enum rtw89_mlo_dbcc_mode mode)
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
@@ -1810,11 +1864,13 @@ static void rtw8922a_pre_set_channel_bb(struct rtw89_dev *rtwdev,
}
static void rtw8922a_post_set_channel_bb(struct rtw89_dev *rtwdev,
- enum rtw89_mlo_dbcc_mode mode)
+ enum rtw89_mlo_dbcc_mode mode,
+ enum rtw89_phy_idx phy_idx)
{
if (!rtwdev->dbcc_en)
return;
+ rtw8922a_digital_pwr_comp(rtwdev, phy_idx);
rtw8922a_ctrl_mlo(rtwdev, mode);
}
@@ -1921,7 +1977,7 @@ static void rtw8922a_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
rtw8922a_hal_reset(rtwdev, phy_idx, mac_idx, chan->band_type, &p->tx_en, enter);
if (!enter) {
- rtw8922a_post_set_channel_bb(rtwdev, rtwdev->mlo_dbcc_mode);
+ rtw8922a_post_set_channel_bb(rtwdev, rtwdev->mlo_dbcc_mode, phy_idx);
rtw8922a_post_set_channel_rf(rtwdev, phy_idx);
}
}
@@ -1937,10 +1993,12 @@ static void rtw8922a_rfk_init(struct rtw89_dev *rtwdev)
static void rtw8922a_rfk_init_late(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+
rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, RTW89_PHY_0, 5);
- rtw89_phy_rfk_dack_and_wait(rtwdev, RTW89_PHY_0, 58);
- rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, 32);
+ rtw89_phy_rfk_dack_and_wait(rtwdev, RTW89_PHY_0, chan, 58);
+ rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, chan, 32);
}
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
@@ -1964,8 +2022,10 @@ static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
static void rtw8922a_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
+ enum rtw89_chanctx_idx chanctx_idx = rtwvif->chanctx_idx;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
enum rtw89_phy_idx phy_idx = rtwvif->phy_idx;
- u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, RTW89_CHANCTX_0);
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB, chanctx_idx);
u32 tx_en;
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_CHLK, BTC_WRFK_START);
@@ -1973,20 +2033,21 @@ static void rtw8922a_rfk_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtw
_wait_rx_mode(rtwdev, RF_AB);
rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, phy_idx, 5);
- rtw89_phy_rfk_txgapk_and_wait(rtwdev, phy_idx, 54);
- rtw89_phy_rfk_iqk_and_wait(rtwdev, phy_idx, 84);
- rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, RTW89_TSSI_NORMAL, 6);
- rtw89_phy_rfk_dpk_and_wait(rtwdev, phy_idx, 34);
- rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, 32);
+ rtw89_phy_rfk_txgapk_and_wait(rtwdev, phy_idx, chan, 54);
+ rtw89_phy_rfk_iqk_and_wait(rtwdev, phy_idx, chan, 84);
+ rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, chan, RTW89_TSSI_NORMAL, 6);
+ rtw89_phy_rfk_dpk_and_wait(rtwdev, phy_idx, chan, 34);
+ rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, chan, 32);
rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_CHLK, BTC_WRFK_STOP);
}
static void rtw8922a_rfk_band_changed(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
+ enum rtw89_phy_idx phy_idx,
+ const struct rtw89_chan *chan)
{
- rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, RTW89_TSSI_SCAN, 6);
+ rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, chan, RTW89_TSSI_SCAN, 6);
}
static void rtw8922a_rfk_scan(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
@@ -2436,6 +2497,38 @@ static void rtw8922a_query_ppdu(struct rtw89_dev *rtwdev,
rtw8922a_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
}
+static void rtw8922a_convert_rpl_to_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ /* Mapping to BW: 5, 10, 20, 40, 80, 160, 80_80 */
+ static const u8 bw_compensate[] = {0, 0, 0, 6, 12, 18, 0};
+ u8 *rssi = phy_ppdu->rssi;
+ u8 compensate = 0;
+ u16 rpl_tmp;
+ u8 i;
+
+ if (phy_ppdu->bw_idx < ARRAY_SIZE(bw_compensate))
+ compensate = bw_compensate[phy_ppdu->bw_idx];
+
+ for (i = 0; i < RF_PATH_NUM_8922A; i++) {
+ if (!(phy_ppdu->rx_path_en & BIT(i))) {
+ rssi[i] = 0;
+ phy_ppdu->rpl_path[i] = 0;
+ phy_ppdu->rpl_fd[i] = 0;
+ }
+ if (phy_ppdu->rate >= RTW89_HW_RATE_OFDM6) {
+ rpl_tmp = phy_ppdu->rpl_fd[i];
+ if (rpl_tmp)
+ rpl_tmp += compensate;
+
+ phy_ppdu->rpl_path[i] = rpl_tmp;
+ }
+ rssi[i] = phy_ppdu->rpl_path[i];
+ }
+
+ phy_ppdu->rssi_avg = phy_ppdu->rpl_avg;
+}
+
static int rtw8922a_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
{
rtw89_write8_set(rtwdev, R_BE_FEN_RST_ENABLE,
@@ -2455,10 +2548,12 @@ static int rtw8922a_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support rtw_wowlan_stub_8922a = {
- .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_NET_DETECT,
.n_patterns = RTW89_MAX_PATTERN_NUM,
.pattern_max_len = RTW89_MAX_PATTERN_SIZE,
.pattern_min_len = 1,
+ .max_nd_match_sets = RTW89_SCANOFLD_MAX_SSID,
};
#endif
@@ -2491,9 +2586,11 @@ static const struct rtw89_chip_ops rtw8922a_chip_ops = {
.get_thermal = rtw8922a_get_thermal,
.ctrl_btg_bt_rx = rtw8922a_ctrl_btg_bt_rx,
.query_ppdu = rtw8922a_query_ppdu,
+ .convert_rpl_to_rssi = rtw8922a_convert_rpl_to_rssi,
.ctrl_nbtg_bt_tx = rtw8922a_ctrl_nbtg_bt_tx,
.cfg_txrx_path = rtw8922a_bb_cfg_txrx_path,
.set_txpwr_ul_tb_offset = NULL,
+ .digital_pwr_comp = rtw8922a_digital_pwr_comp,
.pwr_on_func = rtw8922a_pwr_on_func,
.pwr_off_func = rtw8922a_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc_v2,
@@ -2559,6 +2656,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.dig_regs = &rtw8922a_dig_regs,
.tssi_dbw_table = NULL,
.support_macid_num = 32,
+ .support_link_num = 2,
.support_chanctx_num = 2,
.support_rnr = true,
.support_bands = BIT(NL80211_BAND_2GHZ) |
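The bandwidth compensation performed by rtw8922a_convert_rpl_to_rssi() above can be checked with a small stand-alone sketch; the compensation table and the bw_idx ordering come from the hunk, while the sample RPL value and the program itself are purely illustrative.

    /* Stand-alone sketch of the per-bandwidth RPL compensation (illustrative values). */
    #include <stdio.h>

    int main(void)
    {
        /* Indexed by bw_idx: 5, 10, 20, 40, 80, 160, 80+80 MHz */
        static const unsigned char bw_compensate[] = {0, 0, 0, 6, 12, 18, 0};
        unsigned int bw_idx = 4;   /* 80 MHz PPDU (assumed) */
        unsigned int rpl_fd = 100; /* frequency-domain RPL reported by the PHY (assumed) */
        unsigned int rpl_path = rpl_fd ? rpl_fd + bw_compensate[bw_idx] : 0;

        /* For OFDM and faster rates the per-path RSSI is taken from rpl_path. */
        printf("rpl_path = %u\n", rpl_path); /* prints 112 */
        return 0;
    }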
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index a33280ec2a25..b2e47829983f 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -441,6 +441,7 @@ struct rtw89_phy_sts_hdr {
} __packed;
#define RTW89_PHY_STS_HDR_W0_IE_MAP GENMASK(4, 0)
+#define RTW89_PHY_STS_HDR_W0_HDR_2_EN BIT(5)
#define RTW89_PHY_STS_HDR_W0_VALID BIT(7)
#define RTW89_PHY_STS_HDR_W0_LEN GENMASK(15, 8)
#define RTW89_PHY_STS_HDR_W0_RSSI_AVG GENMASK(31, 24)
@@ -449,6 +450,13 @@ struct rtw89_phy_sts_hdr {
#define RTW89_PHY_STS_HDR_W1_RSSI_C GENMASK(23, 16)
#define RTW89_PHY_STS_HDR_W1_RSSI_D GENMASK(31, 24)
+struct rtw89_phy_sts_hdr_v2 {
+ __le32 w0;
+ __le32 w1;
+} __packed;
+
+#define RTW89_PHY_STS_HDR_V2_W0_PATH_EN GENMASK(20, 16)
+
struct rtw89_phy_sts_iehdr {
__le32 w0;
};
@@ -552,13 +560,43 @@ struct rtw89_phy_sts_iehdr {
#define BE_RXD_HDR_OFFSET_MASK GENMASK(20, 16)
#define BE_RXD_WL_HD_IV_LEN_MASK GENMASK(26, 21)
-struct rtw89_phy_sts_ie0 {
+struct rtw89_phy_sts_ie00 {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+} __packed;
+
+#define RTW89_PHY_STS_IE00_W0_RPL GENMASK(15, 7)
+
+struct rtw89_phy_sts_ie00_v2 {
__le32 w0;
__le32 w1;
__le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+} __packed;
+
+#define RTW89_PHY_STS_IE00_V2_W4_RPL_TD_A GENMASK(8, 0)
+#define RTW89_PHY_STS_IE00_V2_W4_RPL_TD_B GENMASK(17, 9)
+#define RTW89_PHY_STS_IE00_V2_W4_RPL_TD_C GENMASK(26, 18)
+#define RTW89_PHY_STS_IE00_V2_W5_RPL_TD_D GENMASK(8, 0)
+
+struct rtw89_phy_sts_ie01 {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
} __packed;
#define RTW89_PHY_STS_IE01_W0_CH_IDX GENMASK(23, 16)
+#define RTW89_PHY_STS_IE01_W0_RSSI_AVG_FD GENMASK(15, 8)
+#define RTW89_PHY_STS_IE01_W0_RX_PATH_EN GENMASK(31, 28)
#define RTW89_PHY_STS_IE01_W1_FD_CFO GENMASK(19, 8)
#define RTW89_PHY_STS_IE01_W1_PREMB_CFO GENMASK(31, 20)
#define RTW89_PHY_STS_IE01_W2_AVG_SNR GENMASK(5, 0)
@@ -567,6 +605,25 @@ struct rtw89_phy_sts_ie0 {
#define RTW89_PHY_STS_IE01_W2_LDPC BIT(28)
#define RTW89_PHY_STS_IE01_W2_STBC BIT(30)
+struct rtw89_phy_sts_ie01_v2 {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ __le32 w8;
+ __le32 w9;
+} __packed;
+
+#define RTW89_PHY_STS_IE01_V2_W5_BW_IDX GENMASK(31, 29)
+#define RTW89_PHY_STS_IE01_V2_W8_RPL_FD_A GENMASK(11, 4)
+#define RTW89_PHY_STS_IE01_V2_W8_RPL_FD_B GENMASK(23, 16)
+#define RTW89_PHY_STS_IE01_V2_W9_RPL_FD_C GENMASK(11, 4)
+#define RTW89_PHY_STS_IE01_V2_W9_RPL_FD_D GENMASK(23, 16)
+
enum rtw89_tx_channel {
RTW89_TXCH_ACH0 = 0,
RTW89_TXCH_ACH1 = 1,
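As a hedged illustration of how the new v2 PHY status layouts might be consumed, the sketch below extracts two of the fields defined above with the kernel's bitfield helpers; the function name and buffer handling are hypothetical, only the struct and GENMASK definitions come from this hunk.

    #include <linux/bitfield.h>
    #include <linux/types.h>

    /* Hypothetical decode helper for a struct rtw89_phy_sts_ie01_v2 blob. */
    static void example_parse_ie01_v2(const struct rtw89_phy_sts_ie01_v2 *ie,
                                      u8 *bw_idx, u16 *rpl_fd_a)
    {
        /* Bandwidth index of the PPDU (feeds the bw_compensate[] lookup). */
        *bw_idx = le32_get_bits(ie->w5, RTW89_PHY_STS_IE01_V2_W5_BW_IDX);

        /* Frequency-domain RPL for path A, later converted to RSSI. */
        *rpl_fd_a = le32_get_bits(ie->w8, RTW89_PHY_STS_IE01_V2_W8_RPL_FD_A);
    }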
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index 0f0f4beec4d9..86e24e07780d 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -1438,6 +1438,7 @@ static int rtw89_pno_scan_offload(struct rtw89_dev *rtwdev, bool enable)
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
+ int interval = rtw_wow->nd_config->scan_plans[0].interval;
struct rtw89_scan_option opt = {};
int ret;
@@ -1457,7 +1458,7 @@ static int rtw89_pno_scan_offload(struct rtw89_dev *rtwdev, bool enable)
opt.enable = enable;
opt.repeat = RTW89_SCAN_NORMAL;
- opt.norm_pd = 10; /* in unit of 100ms */
+ opt.norm_pd = max(interval, 1) * 10; /* in unit of 100ms */
opt.delay = max(rtw_wow->nd_config->delay, 1);
if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
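For reference, the new period calculation ties the offloaded PNO scan cadence to the first sched-scan plan: assuming the plan interval is expressed in seconds (as in cfg80211), an interval of 30 gives opt.norm_pd = max(30, 1) * 10 = 300, i.e. 300 * 100 ms = 30 s between scan rounds, while a zero interval still falls back to the previous fixed value of 10 (1 s).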
diff --git a/drivers/net/wireless/rsi/rsi_debugfs.h b/drivers/net/wireless/rsi/rsi_debugfs.h
index a6a28640ad40..bbc1200dbb62 100644
--- a/drivers/net/wireless/rsi/rsi_debugfs.h
+++ b/drivers/net/wireless/rsi/rsi_debugfs.h
@@ -39,7 +39,6 @@ struct rsi_dbg_files {
struct rsi_debugfs {
struct dentry *subdir;
- struct rsi_dbg_ops *dfs_get_ops;
struct dentry *rsi_files[MAX_DEBUGFS_ENTRIES];
};
int rsi_init_dbgfs(struct rsi_hw *adapter);
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index 34d95f458e1a..a9f090e15cbb 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -142,7 +142,7 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl)
wl18xx_radar_type_decode(mbox->radar_type));
if (!wl->radar_debug_mode)
- ieee80211_radar_detected(wl->hw);
+ ieee80211_radar_detected(wl->hw, NULL);
}
if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index 5fe9e4e26142..f0e528abb1b4 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -1146,7 +1146,7 @@ static int hwsim_write_simulate_radar(void *dat, u64 val)
{
struct mac80211_hwsim_data *data = dat;
- ieee80211_radar_detected(data->hw);
+ ieee80211_radar_detected(data->hw, NULL);
return 0;
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 0dc8bcc664f2..983909a600ad 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4437,7 +4437,8 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
{
- dev_warn(ctrl->device, "resetting controller due to AER\n");
+ dev_warn(ctrl->device,
+ "resetting controller due to persistent internal error\n");
nvme_reset_ctrl(ctrl);
}
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 91d9eb3c22ef..518e22dd4f9b 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -616,7 +616,9 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
blk_set_stacking_limits(&lim);
lim.dma_alignment = 3;
lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL;
- if (head->ids.csi != NVME_CSI_ZNS)
+ if (head->ids.csi == NVME_CSI_ZNS)
+ lim.features |= BLK_FEAT_ZONED;
+ else
lim.max_zone_append_sectors = 0;
head->disk = blk_alloc_disk(&lim, ctrl->numa_node);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 6cd9395ba9ec..c0533f3f64cb 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2508,6 +2508,12 @@ static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev)
static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
{
+ if (!dev->ctrl.tagset) {
+ nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops,
+ nvme_pci_nr_maps(dev), sizeof(struct nvme_iod));
+ return;
+ }
+
blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
/* free previously allocated queues that are no longer usable */
nvme_free_queues(dev, dev->online_queues);
@@ -2967,6 +2973,17 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
dmi_match(DMI_BOARD_NAME, "NS5x_7xPU") ||
dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1"))
return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
+ } else if (pdev->vendor == 0x144d && pdev->device == 0xa80d) {
+ /*
+ * Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND
+ * because of high power consumption (> 2 Watt) in s2idle
+ * sleep. Only some boards with Intel CPU are affected.
+ */
+ if (dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
+ dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
+ dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
+ dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
+ return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND;
}
/*
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index f7e1156ac7ec..85006b2df8ae 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -587,6 +587,16 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req)
u16 status = 0;
int i = 0;
+ /*
+ * NSID values 0xFFFFFFFE and NVME_NSID_ALL are invalid
+ * See NVMe Base Specification, Active Namespace ID list (CNS 02h).
+ */
+ if (min_nsid == 0xFFFFFFFE || min_nsid == NVME_NSID_ALL) {
+ req->error_loc = offsetof(struct nvme_identify, nsid);
+ status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
+ goto out;
+ }
+
list = kzalloc(buf_size, GFP_KERNEL);
if (!list) {
status = NVME_SC_INTERNAL;
diff --git a/drivers/nvme/target/debugfs.c b/drivers/nvme/target/debugfs.c
index cb2befc8619e..220c7391fc19 100644
--- a/drivers/nvme/target/debugfs.c
+++ b/drivers/nvme/target/debugfs.c
@@ -13,7 +13,7 @@
#include "nvmet.h"
#include "debugfs.h"
-struct dentry *nvmet_debugfs;
+static struct dentry *nvmet_debugfs;
#define NVMET_DEBUGFS_ATTR(field) \
static int field##_open(struct inode *inode, struct file *file) \
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 5bff0d5464d1..7c51c2a8c109 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -2146,8 +2146,10 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
}
queue->nr_cmds = sq->size * 2;
- if (nvmet_tcp_alloc_cmds(queue))
+ if (nvmet_tcp_alloc_cmds(queue)) {
+ queue->nr_cmds = 0;
return NVME_SC_INTERNAL;
+ }
return 0;
}
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 516dfd861b9f..33ffa2aa4c11 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -1276,13 +1276,13 @@ void nvmem_device_put(struct nvmem_device *nvmem)
EXPORT_SYMBOL_GPL(nvmem_device_put);
/**
- * devm_nvmem_device_get() - Get nvmem cell of device form a given id
+ * devm_nvmem_device_get() - Get nvmem device of device form a given id
*
* @dev: Device that requests the nvmem device.
* @id: name id for the requested nvmem device.
*
- * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell
- * on success. The nvmem_cell will be freed by the automatically once the
+ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
+ * on success. The nvmem_device will be freed automatically once the
* device is freed.
*/
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
diff --git a/drivers/nvmem/u-boot-env.c b/drivers/nvmem/u-boot-env.c
index 936e39b20b38..593f0bf4a395 100644
--- a/drivers/nvmem/u-boot-env.c
+++ b/drivers/nvmem/u-boot-env.c
@@ -176,6 +176,13 @@ static int u_boot_env_parse(struct u_boot_env *priv)
data_offset = offsetof(struct u_boot_env_image_broadcom, data);
break;
}
+
+ if (dev_size < data_offset) {
+ dev_err(dev, "Device too small for u-boot-env\n");
+ err = -EIO;
+ goto err_kfree;
+ }
+
crc32_addr = (__le32 *)(buf + crc32_offset);
crc32 = le32_to_cpu(*crc32_addr);
crc32_data_len = dev_size - crc32_data_offset;
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 5f4598246a87..494f8860220d 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -1061,6 +1061,27 @@ static int _set_opp_bw(const struct opp_table *opp_table,
return 0;
}
+static int _set_opp_level(struct device *dev, struct dev_pm_opp *opp)
+{
+ unsigned int level = 0;
+ int ret = 0;
+
+ if (opp) {
+ if (opp->level == OPP_LEVEL_UNSET)
+ return 0;
+
+ level = opp->level;
+ }
+
+ /* Request a new performance state through the device's PM domain. */
+ ret = dev_pm_domain_set_performance_state(dev, level);
+ if (ret)
+ dev_err(dev, "Failed to set performance state %u (%d)\n", level,
+ ret);
+
+ return ret;
+}
+
/* This is only called for PM domain for now */
static int _set_required_opps(struct device *dev, struct opp_table *opp_table,
struct dev_pm_opp *opp, bool up)
@@ -1091,7 +1112,7 @@ static int _set_required_opps(struct device *dev, struct opp_table *opp_table,
if (devs[index]) {
required_opp = opp ? opp->required_opps[index] : NULL;
- ret = dev_pm_opp_set_opp(devs[index], required_opp);
+ ret = _set_opp_level(devs[index], required_opp);
if (ret)
return ret;
}
@@ -1102,27 +1123,6 @@ static int _set_required_opps(struct device *dev, struct opp_table *opp_table,
return 0;
}
-static int _set_opp_level(struct device *dev, struct dev_pm_opp *opp)
-{
- unsigned int level = 0;
- int ret = 0;
-
- if (opp) {
- if (opp->level == OPP_LEVEL_UNSET)
- return 0;
-
- level = opp->level;
- }
-
- /* Request a new performance state through the device's PM domain. */
- ret = dev_pm_domain_set_performance_state(dev, level);
- if (ret)
- dev_err(dev, "Failed to set performance state %u (%d)\n", level,
- ret);
-
- return ret;
-}
-
static void _find_current_opp(struct device *dev, struct opp_table *opp_table)
{
struct dev_pm_opp *opp = ERR_PTR(-ENODEV);
@@ -2457,18 +2457,6 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
}
}
- /*
- * Add the virtual genpd device as a user of the OPP table, so
- * we can call dev_pm_opp_set_opp() on it directly.
- *
- * This will be automatically removed when the OPP table is
- * removed, don't need to handle that here.
- */
- if (!_add_opp_dev(virt_dev, opp_table->required_opp_tables[index])) {
- ret = -ENOMEM;
- goto err;
- }
-
opp_table->required_devs[index] = virt_dev;
index++;
name++;
diff --git a/drivers/pci/pwrctl/core.c b/drivers/pci/pwrctl/core.c
index feca26ad2f6a..01d913b60316 100644
--- a/drivers/pci/pwrctl/core.c
+++ b/drivers/pci/pwrctl/core.c
@@ -48,6 +48,28 @@ static int pci_pwrctl_notify(struct notifier_block *nb, unsigned long action,
return NOTIFY_DONE;
}
+static void rescan_work_func(struct work_struct *work)
+{
+ struct pci_pwrctl *pwrctl = container_of(work, struct pci_pwrctl, work);
+
+ pci_lock_rescan_remove();
+ pci_rescan_bus(to_pci_dev(pwrctl->dev->parent)->bus);
+ pci_unlock_rescan_remove();
+}
+
+/**
+ * pci_pwrctl_init() - Initialize the PCI power control context struct
+ *
+ * @pwrctl: PCI power control data
+ * @dev: Parent device
+ */
+void pci_pwrctl_init(struct pci_pwrctl *pwrctl, struct device *dev)
+{
+ pwrctl->dev = dev;
+ INIT_WORK(&pwrctl->work, rescan_work_func);
+}
+EXPORT_SYMBOL_GPL(pci_pwrctl_init);
+
/**
* pci_pwrctl_device_set_ready() - Notify the pwrctl subsystem that the PCI
* device is powered-up and ready to be detected.
@@ -74,9 +96,7 @@ int pci_pwrctl_device_set_ready(struct pci_pwrctl *pwrctl)
if (ret)
return ret;
- pci_lock_rescan_remove();
- pci_rescan_bus(to_pci_dev(pwrctl->dev->parent)->bus);
- pci_unlock_rescan_remove();
+ schedule_work(&pwrctl->work);
return 0;
}
diff --git a/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c b/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c
index c7a113a76c0c..f07758c9edad 100644
--- a/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c
+++ b/drivers/pci/pwrctl/pci-pwrctl-pwrseq.c
@@ -50,7 +50,7 @@ static int pci_pwrctl_pwrseq_probe(struct platform_device *pdev)
if (ret)
return ret;
- data->ctx.dev = dev;
+ pci_pwrctl_init(&data->ctx, dev);
ret = devm_pci_pwrctl_device_set_ready(dev, &data->ctx);
if (ret)
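A minimal sketch of how a power-control driver is expected to use the new initializer before marking the device ready; the probe function and private structure are hypothetical, while pci_pwrctl_init() and devm_pci_pwrctl_device_set_ready() are the interfaces touched by this series.

    #include <linux/pci-pwrctl.h>
    #include <linux/platform_device.h>

    struct my_pwrctl {                    /* hypothetical driver state */
        struct pci_pwrctl ctx;
    };

    static int my_pwrctl_probe(struct platform_device *pdev)
    {
        struct my_pwrctl *data;

        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
            return -ENOMEM;

        /* Sets ctx.dev and prepares the deferred bus-rescan work item. */
        pci_pwrctl_init(&data->ctx, &pdev->dev);

        /* ... power up the downstream PCI device here ... */

        /* Registers the bus notifier and schedules the rescan from a workqueue. */
        return devm_pci_pwrctl_device_set_ready(&pdev->dev, &data->ctx);
    }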
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 910387e5bdbf..4770cb87e3f0 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -1,7 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
#include "pci.h"
static void pci_free_resources(struct pci_dev *dev)
@@ -14,12 +17,25 @@ static void pci_free_resources(struct pci_dev *dev)
}
}
+static int pci_pwrctl_unregister(struct device *dev, void *data)
+{
+ struct device_node *pci_node = data, *plat_node = dev_of_node(dev);
+
+ if (dev_is_platform(dev) && plat_node && plat_node == pci_node) {
+ of_device_unregister(to_platform_device(dev));
+ of_node_clear_flag(plat_node, OF_POPULATED);
+ }
+
+ return 0;
+}
+
static void pci_stop_dev(struct pci_dev *dev)
{
pci_pme_active(dev, false);
if (pci_dev_is_added(dev)) {
- of_platform_depopulate(&dev->dev);
+ device_for_each_child(dev->dev.parent, dev_of_node(&dev->dev),
+ pci_pwrctl_unregister);
device_release_driver(&dev->dev);
pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev);
diff --git a/drivers/pinctrl/qcom/pinctrl-x1e80100.c b/drivers/pinctrl/qcom/pinctrl-x1e80100.c
index 65ed933f05ce..abfcdd3da9e8 100644
--- a/drivers/pinctrl/qcom/pinctrl-x1e80100.c
+++ b/drivers/pinctrl/qcom/pinctrl-x1e80100.c
@@ -1839,7 +1839,9 @@ static const struct msm_pinctrl_soc_data x1e80100_pinctrl = {
.ngroups = ARRAY_SIZE(x1e80100_groups),
.ngpios = 239,
.wakeirq_map = x1e80100_pdc_map,
- .nwakeirq_map = ARRAY_SIZE(x1e80100_pdc_map),
+ /* TODO: Enabling PDC currently breaks GPIO interrupts */
+ .nwakeirq_map = 0,
+ /* .nwakeirq_map = ARRAY_SIZE(x1e80100_pdc_map), */
.egpio_func = 9,
};
diff --git a/drivers/platform/cznic/Kconfig b/drivers/platform/cznic/Kconfig
index a111eca8ff57..49c383eb6785 100644
--- a/drivers/platform/cznic/Kconfig
+++ b/drivers/platform/cznic/Kconfig
@@ -70,7 +70,7 @@ config TURRIS_OMNIA_MCU_TRNG
bool "Turris Omnia MCU true random number generator"
default y
depends on TURRIS_OMNIA_MCU_GPIO
- depends on HW_RANDOM
+ depends on HW_RANDOM=y || HW_RANDOM=TURRIS_OMNIA_MCU
help
Say Y here to add support for the true random number generator
provided by CZ.NIC's Turris Omnia MCU.
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 37636e5a38e3..321a658e4554 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -1793,6 +1793,16 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
goto error;
}
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_OOBE)) {
+ /*
+ * Disable OOBE state, so that e.g. the keyboard backlight
+ * works.
+ */
+ rv = asus_wmi_set_devstate(ASUS_WMI_DEVID_OOBE, 1, NULL);
+ if (rv)
+ goto error;
+ }
+
error:
if (rv)
asus_wmi_led_exit(asus);
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index cf845ee1c7b1..ebd81846e2d5 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -337,7 +337,8 @@ static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc)
}
if (pcc->num_sifr < hkey->package.count) {
- pr_err("SQTY reports bad SINF length\n");
+ pr_err("SQTY reports bad SINF length SQTY: %lu SINF-pkg-count: %u\n",
+ pcc->num_sifr, hkey->package.count);
status = AE_ERROR;
goto end;
}
@@ -773,6 +774,24 @@ static DEVICE_ATTR_RW(dc_brightness);
static DEVICE_ATTR_RW(current_brightness);
static DEVICE_ATTR_RW(cdpower);
+static umode_t pcc_sysfs_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pcc_acpi *pcc = acpi_driver_data(acpi);
+
+ if (attr == &dev_attr_mute.attr)
+ return (pcc->num_sifr > SINF_MUTE) ? attr->mode : 0;
+
+ if (attr == &dev_attr_eco_mode.attr)
+ return (pcc->num_sifr > SINF_ECO_MODE) ? attr->mode : 0;
+
+ if (attr == &dev_attr_current_brightness.attr)
+ return (pcc->num_sifr > SINF_CUR_BRIGHT) ? attr->mode : 0;
+
+ return attr->mode;
+}
+
static struct attribute *pcc_sysfs_entries[] = {
&dev_attr_numbatt.attr,
&dev_attr_lcdtype.attr,
@@ -787,8 +806,9 @@ static struct attribute *pcc_sysfs_entries[] = {
};
static const struct attribute_group pcc_attr_group = {
- .name = NULL, /* put in device directory */
- .attrs = pcc_sysfs_entries,
+ .name = NULL, /* put in device directory */
+ .attrs = pcc_sysfs_entries,
+ .is_visible = pcc_sysfs_is_visible,
};
@@ -941,12 +961,15 @@ static int acpi_pcc_hotkey_resume(struct device *dev)
if (!pcc)
return -EINVAL;
- acpi_pcc_write_sset(pcc, SINF_MUTE, pcc->mute);
- acpi_pcc_write_sset(pcc, SINF_ECO_MODE, pcc->eco_mode);
+ if (pcc->num_sifr > SINF_MUTE)
+ acpi_pcc_write_sset(pcc, SINF_MUTE, pcc->mute);
+ if (pcc->num_sifr > SINF_ECO_MODE)
+ acpi_pcc_write_sset(pcc, SINF_ECO_MODE, pcc->eco_mode);
acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_key);
acpi_pcc_write_sset(pcc, SINF_AC_CUR_BRIGHT, pcc->ac_brightness);
acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, pcc->dc_brightness);
- acpi_pcc_write_sset(pcc, SINF_CUR_BRIGHT, pcc->current_brightness);
+ if (pcc->num_sifr > SINF_CUR_BRIGHT)
+ acpi_pcc_write_sset(pcc, SINF_CUR_BRIGHT, pcc->current_brightness);
return 0;
}
@@ -963,11 +986,21 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
num_sifr = acpi_pcc_get_sqty(device);
- if (num_sifr < 0 || num_sifr > 255) {
- pr_err("num_sifr out of range");
+ /*
+ * pcc->sinf is expected to at least have the AC+DC brightness entries.
+ * Accesses to higher SINF entries are checked against num_sifr.
+ */
+ if (num_sifr <= SINF_DC_CUR_BRIGHT || num_sifr > 255) {
+ pr_err("num_sifr %d out of range %d - 255\n", num_sifr, SINF_DC_CUR_BRIGHT + 1);
return -ENODEV;
}
+ /*
+ * Some DSDTs have an off-by-one bug where the SINF package count is
+ * one higher than the SQTY-reported value, so allocate 1 extra entry.
+ */
+ num_sifr++;
+
pcc = kzalloc(sizeof(struct pcc_acpi), GFP_KERNEL);
if (!pcc) {
pr_err("Couldn't allocate mem for pcc");
@@ -1020,11 +1053,14 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, 0);
pcc->sticky_key = 0;
- pcc->eco_mode = pcc->sinf[SINF_ECO_MODE];
- pcc->mute = pcc->sinf[SINF_MUTE];
pcc->ac_brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
pcc->dc_brightness = pcc->sinf[SINF_DC_CUR_BRIGHT];
- pcc->current_brightness = pcc->sinf[SINF_CUR_BRIGHT];
+ if (pcc->num_sifr > SINF_MUTE)
+ pcc->mute = pcc->sinf[SINF_MUTE];
+ if (pcc->num_sifr > SINF_ECO_MODE)
+ pcc->eco_mode = pcc->sinf[SINF_ECO_MODE];
+ if (pcc->num_sifr > SINF_CUR_BRIGHT)
+ pcc->current_brightness = pcc->sinf[SINF_CUR_BRIGHT];
/* add sysfs attributes */
result = sysfs_create_group(&device->dev.kobj, &pcc_attr_group);
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index fd754a99cf2e..f85eb41cb084 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -412,7 +412,7 @@ static int stm32_pwm_enable(struct stm32_pwm *priv, unsigned int ch)
/* Enable channel */
mask = TIM_CCER_CCxE(ch + 1);
if (priv->have_complementary_output)
- mask |= TIM_CCER_CCxNE(ch);
+ mask |= TIM_CCER_CCxNE(ch + 1);
regmap_set_bits(priv->regmap, TIM_CCER, mask);
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index a7a5cdcc6590..47e7d7e6d920 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -114,7 +114,7 @@ static int ad9834_write_frequency(struct ad9834_state *st,
clk_freq = clk_get_rate(st->mclk);
- if (fout > (clk_freq / 2))
+ if (!clk_freq || fout > (clk_freq / 2))
return -EINVAL;
regval = ad9834_calc_freqreg(clk_freq, fout);
diff --git a/drivers/staging/media/atomisp/pci/sh_css_frac.h b/drivers/staging/media/atomisp/pci/sh_css_frac.h
index b90b5b330dfa..8ba65161f7a9 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_frac.h
+++ b/drivers/staging/media/atomisp/pci/sh_css_frac.h
@@ -32,12 +32,24 @@
#define uISP_VAL_MAX ((unsigned int)((1 << uISP_REG_BIT) - 1))
/* a:fraction bits for 16bit precision, b:fraction bits for ISP precision */
-#define sDIGIT_FITTING(v, a, b) \
- min_t(int, max_t(int, (((v) >> sSHIFT) >> max(sFRACTION_BITS_FITTING(a) - (b), 0)), \
- sISP_VAL_MIN), sISP_VAL_MAX)
-#define uDIGIT_FITTING(v, a, b) \
- min((unsigned int)max((unsigned)(((v) >> uSHIFT) \
- >> max((int)(uFRACTION_BITS_FITTING(a) - (b)), 0)), \
- uISP_VAL_MIN), uISP_VAL_MAX)
+static inline int sDIGIT_FITTING(int v, int a, int b)
+{
+ int fit_shift = sFRACTION_BITS_FITTING(a) - b;
+
+ v >>= sSHIFT;
+ v >>= fit_shift > 0 ? fit_shift : 0;
+
+ return clamp_t(int, v, sISP_VAL_MIN, sISP_VAL_MAX);
+}
+
+static inline unsigned int uDIGIT_FITTING(unsigned int v, int a, int b)
+{
+ int fit_shift = uFRACTION_BITS_FITTING(a) - b;
+
+ v >>= uSHIFT;
+ v >>= fit_shift > 0 ? fit_shift : 0;
+
+ return clamp_t(unsigned int, v, uISP_VAL_MIN, uISP_VAL_MAX);
+}
#endif /* __SH_CSS_FRAC_H */
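The converted helpers keep the same shift-then-clamp behaviour as the old macros, just in a type-checked inline form. A stand-alone sketch with placeholder precision constants (the real uSHIFT and uFRACTION_BITS_FITTING() values live earlier in this header and are assumptions here) behaves as follows:

    #include <stdio.h>

    /* Placeholder precision parameters, illustrative only. */
    #define uSHIFT                    2
    #define uFRACTION_BITS_FITTING(a) ((a) - uSHIFT)
    #define uISP_VAL_MAX              8191u

    static unsigned int uDIGIT_FITTING(unsigned int v, int a, int b)
    {
        int fit_shift = uFRACTION_BITS_FITTING(a) - b;

        v >>= uSHIFT;
        v >>= fit_shift > 0 ? fit_shift : 0; /* never shift by a negative amount */

        return v > uISP_VAL_MAX ? uISP_VAL_MAX : v; /* clamp to ISP range */
    }

    int main(void)
    {
        /* 16 fraction bits fitted down to 13 ISP fraction bits. */
        printf("%u\n", uDIGIT_FITTING(0xffffu, 16, 13)); /* prints 8191 */
        return 0;
    }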
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 02c9064284e1..9a5919434c4e 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -1026,6 +1026,9 @@ static int ufs_mtk_init(struct ufs_hba *hba)
if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+ if (host->caps & UFS_MTK_CAP_DISABLE_MCQ)
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_LSDBS_CAP;
+
ufs_mtk_init_clocks(hba);
/*
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index b45653752301..870409599411 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -106,10 +106,11 @@ static void hv_uio_channel_cb(void *context)
/*
* Callback from vmbus_event when channel is rescinded.
+ * It is meant for rescind of primary channels only.
*/
static void hv_uio_rescind(struct vmbus_channel *channel)
{
- struct hv_device *hv_dev = channel->primary_channel->device_obj;
+ struct hv_device *hv_dev = channel->device_obj;
struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
/*
@@ -120,6 +121,14 @@ static void hv_uio_rescind(struct vmbus_channel *channel)
/* Wake up reader */
uio_event_notify(&pdata->info);
+
+ /*
+ * With rescind callback registered, rescind path will not unregister the device
+ * from vmbus when the primary channel is rescinded.
+ * Without it, rescind handling is incomplete and next onoffer msg does not come.
+ * Unregister the device from vmbus here.
+ */
+ vmbus_device_unregister(channel->device_obj);
}
/* Sysfs API to allow mmap of the ring buffers
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index ccc3895dbd7f..9eb085f359ce 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1387,6 +1387,21 @@ static int dwc3_core_init(struct dwc3 *dwc)
}
/*
+ * STAR 9001285599: This issue affects DWC_usb3 version 3.20a
+ * only. If the PM TIMER ECM is enabled through GUCTL2[19], the
+ * link compliance test (TD7.21) may fail. If the ECN is not
+ * enabled (GUCTL2[19] = 0), the controller will use the old timer
+ * value (5us), which is still acceptable for the link compliance
+ * test. Therefore, do not enable PM TIMER ECM in 3.20a by
+ * setting GUCTL2[19] by default; instead, use GUCTL2[19] = 0.
+ */
+ if (DWC3_VER_IS(DWC3, 320A)) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
+ reg &= ~DWC3_GUCTL2_LC_TIMER;
+ dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
+ }
+
+ /*
* When configured in HOST mode, after issuing U3/L2 exit controller
* fails to send proper CRC checksum in CRC5 field. Because of this
* behaviour Transaction Error is generated, resulting in reset and
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 1e561fd8b86e..c71240e8f7c7 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -421,6 +421,7 @@
/* Global User Control Register 2 */
#define DWC3_GUCTL2_RST_ACTBITLATER BIT(14)
+#define DWC3_GUCTL2_LC_TIMER BIT(19)
/* Global User Control Register 3 */
#define DWC3_GUCTL3_SPLITDISABLE BIT(14)
@@ -1269,6 +1270,7 @@ struct dwc3 {
#define DWC3_REVISION_290A 0x5533290a
#define DWC3_REVISION_300A 0x5533300a
#define DWC3_REVISION_310A 0x5533310a
+#define DWC3_REVISION_320A 0x5533320a
#define DWC3_REVISION_330A 0x5533330a
#define DWC31_REVISION_ANY 0x0
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 89fc690fdf34..291bc549935b 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -287,6 +287,23 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async);
*
* Caller should handle locking. This function will issue @cmd with given
* @params to @dep and wait for its completion.
+ *
+ * According to the programming guide, if the link state is in L1/L2/U3,
+ * then sending the Start Transfer command may not complete. The
+ * programming guide suggests bringing the link state back to ON/U0 by
+ * performing remote wakeup prior to sending the command. However, don't
+ * initiate remote wakeup when the user/function does not send wakeup
+ * request via wakeup ops. Send the command when it's allowed.
+ *
+ * Notes:
+ * For L1 link state, issuing a command requires the clearing of
+ * GUSB2PHYCFG.SUSPENDUSB2, which turns on the signal required to complete
+ * the given command (usually within 50us). This should happen within the
+ * command timeout set by driver. No additional step is needed.
+ *
+ * For L2 or U3 link state, the gadget is in USB suspend. Care should be
+ * taken when sending Start Transfer command to ensure that it's done after
+ * USB resume.
*/
int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
struct dwc3_gadget_ep_cmd_params *params)
@@ -327,30 +344,6 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
}
- if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
- int link_state;
-
- /*
- * Initiate remote wakeup if the link state is in U3 when
- * operating in SS/SSP or L1/L2 when operating in HS/FS. If the
- * link state is in U1/U2, no remote wakeup is needed. The Start
- * Transfer command will initiate the link recovery.
- */
- link_state = dwc3_gadget_get_link_state(dwc);
- switch (link_state) {
- case DWC3_LINK_STATE_U2:
- if (dwc->gadget->speed >= USB_SPEED_SUPER)
- break;
-
- fallthrough;
- case DWC3_LINK_STATE_U3:
- ret = __dwc3_gadget_wakeup(dwc, false);
- dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
- ret);
- break;
- }
- }
-
/*
* For some commands such as Update Transfer command, DEPCMDPARn
* registers are reserved. Since the driver often sends Update Transfer
diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
index 0eed0e03842c..d394affb7072 100644
--- a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
+++ b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
@@ -2251,7 +2251,6 @@ static int cdns2_gadget_start(struct cdns2_device *pdev)
{
u32 max_speed;
void *buf;
- int val;
int ret;
pdev->usb_regs = pdev->regs;
@@ -2261,14 +2260,9 @@ static int cdns2_gadget_start(struct cdns2_device *pdev)
pdev->adma_regs = pdev->regs + CDNS2_ADMA_REGS_OFFSET;
/* Reset controller. */
- set_reg_bit_8(&pdev->usb_regs->cpuctrl, CPUCTRL_SW_RST);
-
- ret = readl_poll_timeout_atomic(&pdev->usb_regs->cpuctrl, val,
- !(val & CPUCTRL_SW_RST), 1, 10000);
- if (ret) {
- dev_err(pdev->dev, "Error: reset controller timeout\n");
- return -EINVAL;
- }
+ writeb(CPUCTRL_SW_RST | CPUCTRL_UPCLK | CPUCTRL_WUEN,
+ &pdev->usb_regs->cpuctrl);
+ usleep_range(5, 10);
usb_initialize_gadget(pdev->dev, &pdev->gadget, NULL);
diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h
index 71e2f62d653a..b5d5ec12e986 100644
--- a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h
+++ b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.h
@@ -292,8 +292,17 @@ struct cdns2_usb_regs {
#define SPEEDCTRL_HSDISABLE BIT(7)
/* CPUCTRL- bitmasks. */
+/* UP clock enable */
+#define CPUCTRL_UPCLK BIT(0)
/* Controller reset bit. */
#define CPUCTRL_SW_RST BIT(1)
+/**
+ * If the wuen bit is ‘1’, the upclken is automatically set to ‘1’ after
+ * detecting rising edge of wuintereq interrupt. If the wuen bit is ‘0’,
+ * the wuintereq interrupt is ignored.
+ */
+#define CPUCTRL_WUEN BIT(7)
+
/**
* struct cdns2_adma_regs - ADMA controller registers.
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 4039851551c1..17155ed17fdf 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -965,10 +965,20 @@ static void ucsi_unregister_plug(struct ucsi_connector *con)
static int ucsi_register_cable(struct ucsi_connector *con)
{
+ struct ucsi_cable_property cable_prop;
struct typec_cable *cable;
struct typec_cable_desc desc = {};
+ u64 command;
+ int ret;
+
+ command = UCSI_GET_CABLE_PROPERTY | UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_send_command(con->ucsi, command, &cable_prop, sizeof(cable_prop));
+ if (ret < 0) {
+ dev_err(con->ucsi->dev, "GET_CABLE_PROPERTY failed (%d)\n", ret);
+ return ret;
+ }
- switch (UCSI_CABLE_PROP_FLAG_PLUG_TYPE(con->cable_prop.flags)) {
+ switch (UCSI_CABLE_PROP_FLAG_PLUG_TYPE(cable_prop.flags)) {
case UCSI_CABLE_PROPERTY_PLUG_TYPE_A:
desc.type = USB_PLUG_TYPE_A;
break;
@@ -984,10 +994,10 @@ static int ucsi_register_cable(struct ucsi_connector *con)
}
desc.identity = &con->cable_identity;
- desc.active = !!(UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE &
- con->cable_prop.flags);
- desc.pd_revision = UCSI_CABLE_PROP_FLAG_PD_MAJOR_REV_AS_BCD(
- con->cable_prop.flags);
+ desc.active = !!(UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE & cable_prop.flags);
+
+ if (con->ucsi->version >= UCSI_VERSION_2_1)
+ desc.pd_revision = UCSI_CABLE_PROP_FLAG_PD_MAJOR_REV_AS_BCD(cable_prop.flags);
cable = typec_register_cable(con->port, &desc);
if (IS_ERR(cable)) {
@@ -1012,6 +1022,27 @@ static void ucsi_unregister_cable(struct ucsi_connector *con)
con->cable = NULL;
}
+static int ucsi_check_connector_capability(struct ucsi_connector *con)
+{
+ u64 command;
+ int ret;
+
+ if (!con->partner || con->ucsi->version < UCSI_VERSION_2_1)
+ return 0;
+
+ command = UCSI_GET_CONNECTOR_CAPABILITY | UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_send_command(con->ucsi, command, &con->cap, sizeof(con->cap));
+ if (ret < 0) {
+ dev_err(con->ucsi->dev, "GET_CONNECTOR_CAPABILITY failed (%d)\n", ret);
+ return ret;
+ }
+
+ typec_partner_set_pd_revision(con->partner,
+ UCSI_CONCAP_FLAG_PARTNER_PD_MAJOR_REV_AS_BCD(con->cap.flags));
+
+ return ret;
+}
+
static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
{
switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
@@ -1021,6 +1052,7 @@ static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
ucsi_partner_task(con, ucsi_get_src_pdos, 30, 0);
ucsi_partner_task(con, ucsi_check_altmodes, 30, HZ);
ucsi_partner_task(con, ucsi_register_partner_pdos, 1, HZ);
+ ucsi_partner_task(con, ucsi_check_connector_capability, 1, HZ);
break;
case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
con->rdo = 0;
@@ -1064,7 +1096,6 @@ static int ucsi_register_partner(struct ucsi_connector *con)
desc.identity = &con->partner_identity;
desc.usb_pd = pwr_opmode == UCSI_CONSTAT_PWR_OPMODE_PD;
- desc.pd_revision = UCSI_CONCAP_FLAG_PARTNER_PD_MAJOR_REV_AS_BCD(con->cap.flags);
partner = typec_register_partner(con->port, &desc);
if (IS_ERR(partner)) {
@@ -1141,27 +1172,6 @@ static void ucsi_partner_change(struct ucsi_connector *con)
con->num, u_role);
}
-static int ucsi_check_connector_capability(struct ucsi_connector *con)
-{
- u64 command;
- int ret;
-
- if (!con->partner || con->ucsi->version < UCSI_VERSION_2_0)
- return 0;
-
- command = UCSI_GET_CONNECTOR_CAPABILITY | UCSI_CONNECTOR_NUMBER(con->num);
- ret = ucsi_send_command(con->ucsi, command, &con->cap, sizeof(con->cap));
- if (ret < 0) {
- dev_err(con->ucsi->dev, "GET_CONNECTOR_CAPABILITY failed (%d)\n", ret);
- return ret;
- }
-
- typec_partner_set_pd_revision(con->partner,
- UCSI_CONCAP_FLAG_PARTNER_PD_MAJOR_REV_AS_BCD(con->cap.flags));
-
- return ret;
-}
-
static int ucsi_check_connection(struct ucsi_connector *con)
{
u8 prev_flags = con->status.flags;
@@ -1193,21 +1203,11 @@ static int ucsi_check_connection(struct ucsi_connector *con)
static int ucsi_check_cable(struct ucsi_connector *con)
{
- u64 command;
int ret, num_plug_am;
if (con->cable)
return 0;
- command = UCSI_GET_CABLE_PROPERTY | UCSI_CONNECTOR_NUMBER(con->num);
- ret = ucsi_send_command(con->ucsi, command, &con->cable_prop,
- sizeof(con->cable_prop));
- if (ret < 0) {
- dev_err(con->ucsi->dev, "GET_CABLE_PROPERTY failed (%d)\n",
- ret);
- return ret;
- }
-
ret = ucsi_register_cable(con);
if (ret < 0)
return ret;
@@ -1283,15 +1283,16 @@ static void ucsi_handle_connector_change(struct work_struct *work)
if (con->status.flags & UCSI_CONSTAT_CONNECTED) {
ucsi_register_partner(con);
ucsi_partner_task(con, ucsi_check_connection, 1, HZ);
- ucsi_partner_task(con, ucsi_check_connector_capability, 1, HZ);
if (con->ucsi->cap.features & UCSI_CAP_GET_PD_MESSAGE)
ucsi_partner_task(con, ucsi_get_partner_identity, 1, HZ);
if (con->ucsi->cap.features & UCSI_CAP_CABLE_DETAILS)
ucsi_partner_task(con, ucsi_check_cable, 1, HZ);
if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) ==
- UCSI_CONSTAT_PWR_OPMODE_PD)
+ UCSI_CONSTAT_PWR_OPMODE_PD) {
ucsi_partner_task(con, ucsi_register_partner_pdos, 1, HZ);
+ ucsi_partner_task(con, ucsi_check_connector_capability, 1, HZ);
+ }
} else {
ucsi_unregister_partner(con);
}
@@ -1706,6 +1707,7 @@ static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
ucsi_register_device_pdos(con);
ucsi_get_src_pdos(con);
ucsi_check_altmodes(con);
+ ucsi_check_connector_capability(con);
}
trace_ucsi_register_port(con->num, &con->status);
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 57129f3c0814..5a3481d36d7a 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -465,7 +465,6 @@ struct ucsi_connector {
struct ucsi_connector_status status;
struct ucsi_connector_capability cap;
- struct ucsi_cable_property cable_prop;
struct power_supply *psy;
struct power_supply_desc psy_desc;
u32 rdo;
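
The hunks above stop caching the cable property in struct ucsi_connector and only trust the PD-revision encoding on UCSI 2.1 or newer PPMs. A minimal sketch of that gating, assuming only the symbols already used in the hunks; the wrapper function is hypothetical.

static void example_fill_cable_desc(struct ucsi_connector *con,
				    struct ucsi_cable_property *prop,
				    struct typec_cable_desc *desc)
{
	desc->active = !!(UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE & prop->flags);

	/*
	 * The PD major revision field is only defined from UCSI 2.1 on;
	 * older PPMs leave it undefined, so don't report it.
	 */
	if (con->ucsi->version >= UCSI_VERSION_2_1)
		desc->pd_revision =
			UCSI_CABLE_PROP_FLAG_PD_MAJOR_REV_AS_BCD(prop->flags);
}
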
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index ba46f1c1d78a..dc3a4024aab6 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -1968,8 +1968,8 @@ static void bch2_do_discards_fast_work(struct work_struct *work)
break;
}
- bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
percpu_ref_put(&ca->io_ref);
+ bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
}
static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket)
@@ -1979,18 +1979,18 @@ static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket)
if (discard_in_flight_add(ca, bucket, false))
return;
- if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard_fast))
return;
- if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard_fast))
- goto put_ioref;
+ if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
+ goto put_ref;
if (queue_work(c->write_ref_wq, &ca->discard_fast_work))
return;
- bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
-put_ioref:
percpu_ref_put(&ca->io_ref);
+put_ref:
+ bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
}
static int invalidate_one_bucket(struct btree_trans *trans,
@@ -2132,26 +2132,26 @@ static void bch2_do_invalidates_work(struct work_struct *work)
bch2_trans_iter_exit(trans, &iter);
err:
bch2_trans_put(trans);
- bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
percpu_ref_put(&ca->io_ref);
+ bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}
void bch2_dev_do_invalidates(struct bch_dev *ca)
{
struct bch_fs *c = ca->fs;
- if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate))
return;
- if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate))
- goto put_ioref;
+ if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
+ goto put_ref;
if (queue_work(c->write_ref_wq, &ca->invalidate_work))
return;
- bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
-put_ioref:
percpu_ref_put(&ca->io_ref);
+put_ref:
+ bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}
void bch2_do_invalidates(struct bch_fs *c)
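
The reordering above makes the filesystem-level write ref the outermost resource: it is taken before the per-device io ref and dropped after it, so acquisition and release stay symmetric. A self-contained sketch of that acquire-in-order, release-in-reverse shape, using hypothetical stand-in helpers rather than bcachefs APIs:

#include <stdbool.h>

/* Hypothetical stand-ins for bch2_write_ref_tryget()/bch2_dev_get_ioref(). */
static bool outer_ref_tryget(void) { return true; }
static bool inner_ref_tryget(void) { return true; }
static void outer_ref_put(void) { }
static void inner_ref_put(void) { }
static bool hand_off_to_worker(void) { return false; }

/* Acquire outer then inner; on any failure, release in reverse order. */
static void lifo_ref_example(void)
{
	if (!outer_ref_tryget())
		return;
	if (!inner_ref_tryget())
		goto put_outer;
	if (hand_off_to_worker())
		return;	/* the worker now owns both references */

	inner_ref_put();
put_outer:
	outer_ref_put();
}
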
diff --git a/fs/bcachefs/btree_journal_iter.c b/fs/bcachefs/btree_journal_iter.c
index 74933490aaba..c1657182c275 100644
--- a/fs/bcachefs/btree_journal_iter.c
+++ b/fs/bcachefs/btree_journal_iter.c
@@ -530,6 +530,8 @@ static void __journal_keys_sort(struct journal_keys *keys)
{
sort(keys->data, keys->nr, sizeof(keys->data[0]), journal_sort_key_cmp, NULL);
+ cond_resched();
+
struct journal_key *dst = keys->data;
darray_for_each(*keys, src) {
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 20219c1e6ddf..721bbe1dffc1 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -100,12 +100,13 @@ static int bch2_check_fix_ptr(struct btree_trans *trans,
struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
if (!ca) {
- if (fsck_err(trans, ptr_to_invalid_device,
- "pointer to missing device %u\n"
- "while marking %s",
- p.ptr.dev,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ if (fsck_err_on(p.ptr.dev != BCH_SB_MEMBER_INVALID,
+ trans, ptr_to_invalid_device,
+ "pointer to missing device %u\n"
+ "while marking %s",
+ p.ptr.dev,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
*do_update = true;
return 0;
}
@@ -562,7 +563,7 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
struct bch_fs *c = trans->c;
struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
if (unlikely(!ca)) {
- if (insert)
+ if (insert && p.ptr.dev != BCH_SB_MEMBER_INVALID)
ret = -EIO;
goto err;
}
diff --git a/fs/bcachefs/ec.h b/fs/bcachefs/ec.h
index 90962b3c0130..9baf3411a8f9 100644
--- a/fs/bcachefs/ec.h
+++ b/fs/bcachefs/ec.h
@@ -97,7 +97,9 @@ static inline bool __bch2_ptr_matches_stripe(const struct bch_extent_ptr *stripe
const struct bch_extent_ptr *data_ptr,
unsigned sectors)
{
- return data_ptr->dev == stripe_ptr->dev &&
+ return (data_ptr->dev == stripe_ptr->dev ||
+ data_ptr->dev == BCH_SB_MEMBER_INVALID ||
+ stripe_ptr->dev == BCH_SB_MEMBER_INVALID) &&
data_ptr->gen == stripe_ptr->gen &&
data_ptr->offset >= stripe_ptr->offset &&
data_ptr->offset < stripe_ptr->offset + sectors;
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index eb31bda19544..324303bf4353 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -781,14 +781,17 @@ static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
/*
* Returns pointer to the next entry after the one being dropped:
*/
-union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s k,
- struct bch_extent_ptr *ptr)
+void bch2_bkey_drop_ptr_noerror(struct bkey_s k, struct bch_extent_ptr *ptr)
{
struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
union bch_extent_entry *entry = to_entry(ptr), *next;
- union bch_extent_entry *ret = entry;
bool drop_crc = true;
+ if (k.k->type == KEY_TYPE_stripe) {
+ ptr->dev = BCH_SB_MEMBER_INVALID;
+ return;
+ }
+
EBUG_ON(ptr < &ptrs.start->ptr ||
ptr >= &ptrs.end->ptr);
EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
@@ -811,21 +814,16 @@ union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s k,
break;
if ((extent_entry_is_crc(entry) && drop_crc) ||
- extent_entry_is_stripe_ptr(entry)) {
- ret = (void *) ret - extent_entry_bytes(entry);
+ extent_entry_is_stripe_ptr(entry))
extent_entry_drop(k, entry);
- }
}
-
- return ret;
}
-union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
- struct bch_extent_ptr *ptr)
+void bch2_bkey_drop_ptr(struct bkey_s k, struct bch_extent_ptr *ptr)
{
bool have_dirty = bch2_bkey_dirty_devs(k.s_c).nr;
- union bch_extent_entry *ret =
- bch2_bkey_drop_ptr_noerror(k, ptr);
+
+ bch2_bkey_drop_ptr_noerror(k, ptr);
/*
* If we deleted all the dirty pointers and there's still cached
@@ -837,14 +835,10 @@ union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
!bch2_bkey_dirty_devs(k.s_c).nr) {
k.k->type = KEY_TYPE_error;
set_bkey_val_u64s(k.k, 0);
- ret = NULL;
} else if (!bch2_bkey_nr_ptrs(k.s_c)) {
k.k->type = KEY_TYPE_deleted;
set_bkey_val_u64s(k.k, 0);
- ret = NULL;
}
-
- return ret;
}
void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
index 709dd83183be..42a7c6d820a0 100644
--- a/fs/bcachefs/extents.h
+++ b/fs/bcachefs/extents.h
@@ -649,26 +649,21 @@ static inline void bch2_bkey_append_ptr(struct bkey_i *k, struct bch_extent_ptr
void bch2_extent_ptr_decoded_append(struct bkey_i *,
struct extent_ptr_decoded *);
-union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s,
- struct bch_extent_ptr *);
-union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s,
- struct bch_extent_ptr *);
+void bch2_bkey_drop_ptr_noerror(struct bkey_s, struct bch_extent_ptr *);
+void bch2_bkey_drop_ptr(struct bkey_s, struct bch_extent_ptr *);
#define bch2_bkey_drop_ptrs(_k, _ptr, _cond) \
do { \
- struct bkey_ptrs _ptrs = bch2_bkey_ptrs(_k); \
+ __label__ _again; \
+ struct bkey_ptrs _ptrs; \
+_again: \
+ _ptrs = bch2_bkey_ptrs(_k); \
\
- struct bch_extent_ptr *_ptr = &_ptrs.start->ptr; \
- \
- while ((_ptr = bkey_ptr_next(_ptrs, _ptr))) { \
+ bkey_for_each_ptr(_ptrs, _ptr) \
if (_cond) { \
- _ptr = (void *) bch2_bkey_drop_ptr(_k, _ptr); \
- _ptrs = bch2_bkey_ptrs(_k); \
- continue; \
+ bch2_bkey_drop_ptr(_k, _ptr); \
+ goto _again; \
} \
- \
- (_ptr)++; \
- } \
} while (0)
bool bch2_bkey_matches_ptr(struct bch_fs *, struct bkey_s_c,
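
bch2_bkey_drop_ptrs() now restarts the whole walk after each drop because removing an entry invalidates the pointer iterator. A small stand-alone illustration of the same restart-after-mutation pattern on a plain array (a hypothetical helper, not bcachefs code):

#include <stddef.h>

static void drop_matching(int *vals, size_t *nr, int bad)
{
	size_t i;
again:
	for (i = 0; i < *nr; i++) {
		if (vals[i] == bad) {
			/* remove vals[i] by shifting the tail down */
			for (size_t j = i + 1; j < *nr; j++)
				vals[j - 1] = vals[j];
			(*nr)--;
			goto again;	/* iterator is stale, rescan */
		}
	}
}
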
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index 94c392abef65..257f07656e5f 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -177,6 +177,14 @@ static unsigned bch2_inode_hash(subvol_inum inum)
return jhash_3words(inum.subvol, inum.inum >> 32, inum.inum, JHASH_INITVAL);
}
+struct bch_inode_info *__bch2_inode_hash_find(struct bch_fs *c, subvol_inum inum)
+{
+ return to_bch_ei(ilookup5_nowait(c->vfs_sb,
+ bch2_inode_hash(inum),
+ bch2_iget5_test,
+ &inum));
+}
+
static struct bch_inode_info *bch2_inode_insert(struct bch_fs *c, struct bch_inode_info *inode)
{
subvol_inum inum = inode_inum(inode);
diff --git a/fs/bcachefs/fs.h b/fs/bcachefs/fs.h
index c3af7225ff69..990ec43e0365 100644
--- a/fs/bcachefs/fs.h
+++ b/fs/bcachefs/fs.h
@@ -56,6 +56,8 @@ static inline subvol_inum inode_inum(struct bch_inode_info *inode)
};
}
+struct bch_inode_info *__bch2_inode_hash_find(struct bch_fs *, subvol_inum);
+
/*
* Set if we've gotten a btree error for this inode, and thus the vfs inode and
* btree inode may be inconsistent:
@@ -194,6 +196,11 @@ int bch2_vfs_init(void);
#define bch2_inode_update_after_write(_trans, _inode, _inode_u, _fields) ({ do {} while (0); })
+static inline struct bch_inode_info *__bch2_inode_hash_find(struct bch_fs *c, subvol_inum inum)
+{
+ return NULL;
+}
+
static inline void bch2_evict_subvolume_inodes(struct bch_fs *c,
snapshot_id_list *s) {}
static inline void bch2_vfs_exit(void) {}
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 83bd31b44aad..9b3470a97546 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -8,6 +8,7 @@
#include "darray.h"
#include "dirent.h"
#include "error.h"
+#include "fs.h"
#include "fs-common.h"
#include "fsck.h"
#include "inode.h"
@@ -962,6 +963,22 @@ fsck_err:
return ret;
}
+static bool bch2_inode_open(struct bch_fs *c, struct bpos p)
+{
+ subvol_inum inum = {
+ .subvol = snapshot_t(c, p.snapshot)->subvol,
+ .inum = p.offset,
+ };
+
+ /* snapshot tree corruption, can't safely delete */
+ if (!inum.subvol) {
+ bch_err_ratelimited(c, "%s(): snapshot %u has no subvol", __func__, p.snapshot);
+ return true;
+ }
+
+ return __bch2_inode_hash_find(c, inum) != NULL;
+}
+
static int check_inode(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k,
@@ -1040,6 +1057,7 @@ static int check_inode(struct btree_trans *trans,
}
if (u.bi_flags & BCH_INODE_unlinked &&
+ !bch2_inode_open(c, k.k->p) &&
(!c->sb.clean ||
fsck_err(trans, inode_unlinked_but_clean,
"filesystem marked clean, but inode %llu unlinked",
diff --git a/fs/bcachefs/replicas.c b/fs/bcachefs/replicas.c
index 12d4de65ae17..1f34c92a6d11 100644
--- a/fs/bcachefs/replicas.c
+++ b/fs/bcachefs/replicas.c
@@ -796,7 +796,7 @@ bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
nr_online += test_bit(e->devs[i], devs.d);
struct bch_dev *ca = bch2_dev_rcu(c, e->devs[i]);
- nr_failed += ca && ca->mi.state == BCH_MEMBER_STATE_failed;
+ nr_failed += !ca || ca->mi.state == BCH_MEMBER_STATE_failed;
}
rcu_read_unlock();
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index f393023a3ae2..33f2a64c14c9 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -461,7 +461,7 @@ STORE(bch2_fs)
sc.gfp_mask = GFP_KERNEL;
sc.nr_to_scan = strtoul_or_return(buf);
- c->btree_key_cache.shrink->scan_objects(c->btree_cache.shrink, &sc);
+ c->btree_key_cache.shrink->scan_objects(c->btree_key_cache.shrink, &sc);
}
if (attr == &sysfs_trigger_gc)
diff --git a/fs/libfs.c b/fs/libfs.c
index 02602d00939e..b64b4c44cfea 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -2117,12 +2117,12 @@ struct timespec64 simple_inode_init_ts(struct inode *inode)
}
EXPORT_SYMBOL(simple_inode_init_ts);
-static inline struct dentry *get_stashed_dentry(struct dentry *stashed)
+static inline struct dentry *get_stashed_dentry(struct dentry **stashed)
{
struct dentry *dentry;
guard(rcu)();
- dentry = READ_ONCE(stashed);
+ dentry = rcu_dereference(*stashed);
if (!dentry)
return NULL;
if (!lockref_get_not_dead(&dentry->d_lockref))
@@ -2219,7 +2219,7 @@ int path_from_stashed(struct dentry **stashed, struct vfsmount *mnt, void *data,
const struct stashed_operations *sops = mnt->mnt_sb->s_fs_info;
/* See if dentry can be reused. */
- path->dentry = get_stashed_dentry(*stashed);
+ path->dentry = get_stashed_dentry(stashed);
if (path->dentry) {
sops->put_data(data);
goto out_path;
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
index 943128507af5..d6ada4eba744 100644
--- a/fs/netfs/io.c
+++ b/fs/netfs/io.c
@@ -270,7 +270,7 @@ static void netfs_reset_subreq_iter(struct netfs_io_request *rreq,
if (count == remaining)
return;
- _debug("R=%08x[%u] ITER RESUB-MISMATCH %zx != %zx-%zx-%llx %x\n",
+ _debug("R=%08x[%u] ITER RESUB-MISMATCH %zx != %zx-%zx-%llx %x",
rreq->debug_id, subreq->debug_index,
iov_iter_count(&subreq->io_iter), subreq->transferred,
subreq->len, rreq->i_size,
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index 6dce70f17208..cfae2e918209 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -1261,16 +1261,32 @@ openRetry:
return rc;
}
+static void cifs_readv_worker(struct work_struct *work)
+{
+ struct cifs_io_subrequest *rdata =
+ container_of(work, struct cifs_io_subrequest, subreq.work);
+
+ netfs_subreq_terminated(&rdata->subreq,
+ (rdata->result == 0 || rdata->result == -EAGAIN) ?
+ rdata->got_bytes : rdata->result, true);
+}
+
static void
cifs_readv_callback(struct mid_q_entry *mid)
{
struct cifs_io_subrequest *rdata = mid->callback_data;
+ struct netfs_inode *ictx = netfs_inode(rdata->rreq->inode);
struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
struct smb_rqst rqst = { .rq_iov = rdata->iov,
.rq_nvec = 2,
.rq_iter = rdata->subreq.io_iter };
- struct cifs_credits credits = { .value = 1, .instance = 0 };
+ struct cifs_credits credits = {
+ .value = 1,
+ .instance = 0,
+ .rreq_debug_id = rdata->rreq->debug_id,
+ .rreq_debug_index = rdata->subreq.debug_index,
+ };
cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu\n",
__func__, mid->mid, mid->mid_state, rdata->result,
@@ -1282,6 +1298,7 @@ cifs_readv_callback(struct mid_q_entry *mid)
if (server->sign) {
int rc = 0;
+ iov_iter_truncate(&rqst.rq_iter, rdata->got_bytes);
rc = cifs_verify_signature(&rqst, server,
mid->sequence_number);
if (rc)
@@ -1306,13 +1323,21 @@ cifs_readv_callback(struct mid_q_entry *mid)
rdata->result = -EIO;
}
- if (rdata->result == 0 || rdata->result == -EAGAIN)
- iov_iter_advance(&rdata->subreq.io_iter, rdata->got_bytes);
+ if (rdata->result == -ENODATA) {
+ __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
+ rdata->result = 0;
+ } else {
+ if (rdata->got_bytes < rdata->actual_len &&
+ rdata->subreq.start + rdata->subreq.transferred + rdata->got_bytes ==
+ ictx->remote_i_size) {
+ __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
+ rdata->result = 0;
+ }
+ }
+
rdata->credits.value = 0;
- netfs_subreq_terminated(&rdata->subreq,
- (rdata->result == 0 || rdata->result == -EAGAIN) ?
- rdata->got_bytes : rdata->result,
- false);
+ INIT_WORK(&rdata->subreq.work, cifs_readv_worker);
+ queue_work(cifsiod_wq, &rdata->subreq.work);
release_mid(mid);
add_credits(server, &credits, 0);
}
@@ -1619,9 +1644,15 @@ static void
cifs_writev_callback(struct mid_q_entry *mid)
{
struct cifs_io_subrequest *wdata = mid->callback_data;
+ struct TCP_Server_Info *server = wdata->server;
struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink);
WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf;
- struct cifs_credits credits = { .value = 1, .instance = 0 };
+ struct cifs_credits credits = {
+ .value = 1,
+ .instance = 0,
+ .rreq_debug_id = wdata->rreq->debug_id,
+ .rreq_debug_index = wdata->subreq.debug_index,
+ };
ssize_t result;
size_t written;
@@ -1657,9 +1688,16 @@ cifs_writev_callback(struct mid_q_entry *mid)
break;
}
+ trace_smb3_rw_credits(credits.rreq_debug_id, credits.rreq_debug_index,
+ wdata->credits.value,
+ server->credits, server->in_flight,
+ 0, cifs_trace_rw_credits_write_response_clear);
wdata->credits.value = 0;
cifs_write_subrequest_terminated(wdata, result, true);
release_mid(mid);
+ trace_smb3_rw_credits(credits.rreq_debug_id, credits.rreq_debug_index, 0,
+ server->credits, server->in_flight,
+ credits.value, cifs_trace_rw_credits_write_response_add);
add_credits(tcon->ses->server, &credits, 0);
}
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index c1c14274930a..5375b0c1dfb9 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -657,6 +657,19 @@ static bool
server_unresponsive(struct TCP_Server_Info *server)
{
/*
+ * If we're in the process of mounting a share or reconnecting a session
+ * and the server abruptly shut down (e.g. socket wasn't closed, packet
+ * had been ACK'ed but no SMB response), don't wait longer than 20s to
+ * negotiate protocol.
+ */
+ spin_lock(&server->srv_lock);
+ if (server->tcpStatus == CifsInNegotiate &&
+ time_after(jiffies, server->lstrp + 20 * HZ)) {
+ spin_unlock(&server->srv_lock);
+ cifs_reconnect(server, false);
+ return true;
+ }
+ /*
* We need to wait 3 echo intervals to make sure we handle such
* situations right:
* 1s client sends a normal SMB request
@@ -667,7 +680,6 @@ server_unresponsive(struct TCP_Server_Info *server)
* 65s kernel_recvmsg times out, and we see that we haven't gotten
* a response in >60s.
*/
- spin_lock(&server->srv_lock);
if ((server->tcpStatus == CifsGood ||
server->tcpStatus == CifsNeedNegotiate) &&
(!server->ops->can_echo || server->ops->can_echo(server)) &&
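
The new check above bounds how long a mount or reconnect may sit in CifsInNegotiate without any server response. The timeout test itself is ordinary jiffies arithmetic; a minimal sketch, with the wrapper function being hypothetical:

#include <linux/jiffies.h>

static bool example_negotiate_timed_out(unsigned long lstrp)
{
	/* lstrp: time of the last response seen from the server */
	return time_after(jiffies, lstrp + 20 * HZ);
}
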
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index dd0afa23734c..73e2e6c230b7 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -172,6 +172,8 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr,
CIFS_I(inode)->time = 0; /* force reval */
return -ESTALE;
}
+ if (inode->i_state & I_NEW)
+ CIFS_I(inode)->netfs.zero_point = fattr->cf_eof;
cifs_revalidate_cache(inode, fattr);
diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
index 9f5bc41433c1..11a1c53c64e0 100644
--- a/fs/smb/client/smb2inode.c
+++ b/fs/smb/client/smb2inode.c
@@ -1106,6 +1106,8 @@ int smb2_rename_path(const unsigned int xid,
co, DELETE, SMB2_OP_RENAME, cfile, source_dentry);
if (rc == -EINVAL) {
cifs_dbg(FYI, "invalid lease key, resending request without lease");
+ cifs_get_writable_path(tcon, from_name,
+ FIND_WR_WITH_DELETE, &cfile);
rc = smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb,
co, DELETE, SMB2_OP_RENAME, cfile, NULL);
}
@@ -1149,6 +1151,7 @@ smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
cfile, NULL, NULL, dentry);
if (rc == -EINVAL) {
cifs_dbg(FYI, "invalid lease key, resending request without lease");
+ cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb,
full_path, &oparms, &in_iov,
&(int){SMB2_OP_SET_EOF}, 1,
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 4df84ebe8dbe..e6540072ffb0 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -316,7 +316,8 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
cifs_trace_rw_credits_no_adjust_up);
trace_smb3_too_many_credits(server->CurrentMid,
server->conn_id, server->hostname, 0, credits->value - new_val, 0);
- cifs_server_dbg(VFS, "request has less credits (%d) than required (%d)",
+ cifs_server_dbg(VFS, "R=%x[%x] request has less credits (%d) than required (%d)",
+ subreq->rreq->debug_id, subreq->subreq.debug_index,
credits->value, new_val);
return -EOPNOTSUPP;
@@ -338,8 +339,9 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
trace_smb3_reconnect_detected(server->CurrentMid,
server->conn_id, server->hostname, scredits,
credits->value - new_val, in_flight);
- cifs_server_dbg(VFS, "trying to return %d credits to old session\n",
- credits->value - new_val);
+ cifs_server_dbg(VFS, "R=%x[%x] trying to return %d credits to old session\n",
+ subreq->rreq->debug_id, subreq->subreq.debug_index,
+ credits->value - new_val);
return -EAGAIN;
}
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 5ac237c949a0..34b71e42fb10 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -484,6 +484,7 @@ static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp
/**
* kunit_kfree_const() - conditionally free test managed memory
+ * @test: The test context object.
* @x: pointer to the memory
*
* Calls kunit_kfree() only if @x is not in .rodata section.
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 6e76b9dba00e..8a78fabeafc3 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -80,10 +80,12 @@ static __always_inline bool context_tracking_guest_enter(void)
return context_tracking_enabled_this_cpu();
}
-static __always_inline void context_tracking_guest_exit(void)
+static __always_inline bool context_tracking_guest_exit(void)
{
if (context_tracking_enabled())
__ct_user_exit(CONTEXT_GUEST);
+
+ return context_tracking_enabled_this_cpu();
}
#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
@@ -98,7 +100,7 @@ static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline int ct_state(void) { return -1; }
static inline int __ct_state(void) { return -1; }
static __always_inline bool context_tracking_guest_enter(void) { return false; }
-static __always_inline void context_tracking_guest_exit(void) { }
+static __always_inline bool context_tracking_guest_exit(void) { return false; }
#define CT_WARN_ON(cond) do { } while (0)
#endif /* !CONFIG_CONTEXT_TRACKING_USER */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index b23c6d48392f..0d5125a3e31a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -485,7 +485,15 @@ static __always_inline void guest_state_enter_irqoff(void)
*/
static __always_inline void guest_context_exit_irqoff(void)
{
- context_tracking_guest_exit();
+ /*
+ * Guest mode is treated as a quiescent state, see
+ * guest_context_enter_irqoff() for more details.
+ */
+ if (!context_tracking_guest_exit()) {
+ instrumentation_begin();
+ rcu_virt_note_context_switch();
+ instrumentation_end();
+ }
}
/*
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index a94bc9e3af96..d0f7d1f36c5e 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1449,6 +1449,7 @@ enum {
MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
MLX5_CMD_STAT_RES_BUSY = 0x6,
+ MLX5_CMD_STAT_NOT_READY = 0x7,
MLX5_CMD_STAT_LIM_ERR = 0x8,
MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
MLX5_CMD_STAT_IX_ERR = 0xa,
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 3fb428ce7d1c..b744e554f014 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -342,4 +342,7 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
struct mlx5_pkt_reformat *reformat);
u32 mlx5_flow_table_id(struct mlx5_flow_table *ft);
+
+struct mlx5_flow_root_namespace *
+mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type);
#endif
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 234ad6f16e92..620a5c305123 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -80,23 +80,15 @@ enum {
enum {
MLX5_OBJ_TYPE_SW_ICM = 0x0008,
- MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT = 0x23,
-};
-
-enum {
- MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
- MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11),
- MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13),
- MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT =
- (1ULL << MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT),
- MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD = (1ULL << 39),
-};
-
-enum {
MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS = 0x001c,
MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018,
+ MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT = 0x23,
+ MLX5_OBJ_TYPE_STC = 0x0040,
+ MLX5_OBJ_TYPE_RTC = 0x0041,
+ MLX5_OBJ_TYPE_STE = 0x0042,
+ MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN = 0x0043,
MLX5_OBJ_TYPE_PAGE_TRACK = 0x46,
MLX5_OBJ_TYPE_MKEY = 0xff01,
MLX5_OBJ_TYPE_QP = 0xff02,
@@ -112,6 +104,16 @@ enum {
MLX5_OBJ_TYPE_RQT = 0xff0e,
MLX5_OBJ_TYPE_FLOW_COUNTER = 0xff0f,
MLX5_OBJ_TYPE_CQ = 0xff10,
+ MLX5_OBJ_TYPE_FT_ALIAS = 0xff15,
+};
+
+enum {
+ MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
+ MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11),
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13),
+ MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT =
+ (1ULL << MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT),
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD = (1ULL << 39),
};
enum {
@@ -313,6 +315,7 @@ enum {
MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e,
MLX5_CMD_OP_SYNC_CRYPTO = 0xb12,
MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS = 0xb16,
+ MLX5_CMD_OP_GENERATE_WQE = 0xb17,
MLX5_CMD_OP_MAX
};
@@ -485,7 +488,13 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_66[0x2];
u8 reformat_add_macsec[0x1];
u8 reformat_remove_macsec[0x1];
- u8 reserved_at_6a[0xe];
+ u8 reparse[0x1];
+ u8 reserved_at_6b[0x1];
+ u8 cross_vhca_object[0x1];
+ u8 reformat_l2_to_l3_audp_tunnel[0x1];
+ u8 reformat_l3_audp_tunnel_to_l2[0x1];
+ u8 ignore_flow_level_rtc_valid[0x1];
+ u8 reserved_at_70[0x8];
u8 log_max_ft_num[0x8];
u8 reserved_at_80[0x10];
@@ -522,7 +531,15 @@ struct mlx5_ifc_ipv6_layout_bits {
u8 ipv6[16][0x8];
};
+struct mlx5_ifc_ipv6_simple_layout_bits {
+ u8 ipv6_127_96[0x20];
+ u8 ipv6_95_64[0x20];
+ u8 ipv6_63_32[0x20];
+ u8 ipv6_31_0[0x20];
+};
+
union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
+ struct mlx5_ifc_ipv6_simple_layout_bits ipv6_simple_layout;
struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
u8 reserved_at_0[0x80];
@@ -911,7 +928,9 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 reserved_at_8[0x5];
u8 fdb_uplink_hairpin[0x1];
u8 fdb_multi_path_any_table_limit_regc[0x1];
- u8 reserved_at_f[0x3];
+ u8 reserved_at_f[0x1];
+ u8 fdb_dynamic_tunnel[0x1];
+ u8 reserved_at_11[0x1];
u8 fdb_multi_path_any_table[0x1];
u8 reserved_at_13[0x2];
u8 fdb_modify_header_fwd_to_table[0x1];
@@ -950,6 +969,73 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 reserved_at_1900[0x6700];
};
+struct mlx5_ifc_wqe_based_flow_table_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 log_max_num_ste[0x5];
+ u8 reserved_at_8[0x3];
+ u8 log_max_num_stc[0x5];
+ u8 reserved_at_10[0x3];
+ u8 log_max_num_rtc[0x5];
+ u8 reserved_at_18[0x3];
+ u8 log_max_num_header_modify_pattern[0x5];
+
+ u8 rtc_hash_split_table[0x1];
+ u8 rtc_linear_lookup_table[0x1];
+ u8 reserved_at_22[0x1];
+ u8 stc_alloc_log_granularity[0x5];
+ u8 reserved_at_28[0x3];
+ u8 stc_alloc_log_max[0x5];
+ u8 reserved_at_30[0x3];
+ u8 ste_alloc_log_granularity[0x5];
+ u8 reserved_at_38[0x3];
+ u8 ste_alloc_log_max[0x5];
+
+ u8 reserved_at_40[0xb];
+ u8 rtc_reparse_mode[0x5];
+ u8 reserved_at_50[0x3];
+ u8 rtc_index_mode[0x5];
+ u8 reserved_at_58[0x3];
+ u8 rtc_log_depth_max[0x5];
+
+ u8 reserved_at_60[0x10];
+ u8 ste_format[0x10];
+
+ u8 stc_action_type[0x80];
+
+ u8 header_insert_type[0x10];
+ u8 header_remove_type[0x10];
+
+ u8 trivial_match_definer[0x20];
+
+ u8 reserved_at_140[0x1b];
+ u8 rtc_max_num_hash_definer_gen_wqe[0x5];
+
+ u8 reserved_at_160[0x18];
+ u8 access_index_mode[0x8];
+
+ u8 reserved_at_180[0x10];
+ u8 ste_format_gen_wqe[0x10];
+
+ u8 linear_match_definer_reg_c3[0x20];
+
+ u8 fdb_jump_to_tir_stc[0x1];
+ u8 reserved_at_1c1[0x1f];
+};
+
+struct mlx5_ifc_esw_cap_bits {
+ u8 reserved_at_0[0x1d];
+ u8 merged_eswitch[0x1];
+ u8 reserved_at_1e[0x2];
+
+ u8 reserved_at_20[0x40];
+
+ u8 esw_manager_vport_number_valid[0x1];
+ u8 reserved_at_61[0xf];
+ u8 esw_manager_vport_number[0x10];
+
+ u8 reserved_at_80[0x780];
+};
+
enum {
MLX5_COUNTER_SOURCE_ESWITCH = 0x0,
MLX5_COUNTER_FLOW_ESWITCH = 0x1,
@@ -1027,7 +1113,8 @@ struct mlx5_ifc_qos_cap_bits {
u8 max_tsar_bw_share[0x20];
- u8 reserved_at_100[0x20];
+ u8 nic_element_type[0x10];
+ u8 nic_tsar_type[0x10];
u8 reserved_at_120[0x3];
u8 log_meter_aso_granularity[0x5];
@@ -1443,9 +1530,13 @@ enum {
};
enum {
+ MLX5_FLEX_IPV4_OVER_VXLAN_ENABLED = 1 << 0,
+ MLX5_FLEX_IPV6_OVER_VXLAN_ENABLED = 1 << 1,
+ MLX5_FLEX_IPV6_OVER_IP_ENABLED = 1 << 2,
MLX5_FLEX_PARSER_GENEVE_ENABLED = 1 << 3,
MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED = 1 << 4,
MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED = 1 << 5,
+ MLX5_FLEX_P_BIT_VXLAN_GPE_ENABLED = 1 << 6,
MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7,
MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8,
MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9,
@@ -1650,7 +1741,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 pci_sync_for_fw_update_event[0x1];
u8 reserved_at_1f2[0x6];
u8 init2_lag_tx_port_affinity[0x1];
- u8 reserved_at_1fa[0x3];
+ u8 reserved_at_1fa[0x2];
+ u8 wqe_based_flow_table_update_cap[0x1];
u8 cqe_version[0x4];
u8 compact_address_vector[0x1];
@@ -1764,7 +1856,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_328[0x2];
u8 relaxed_ordering_read[0x1];
u8 log_max_pd[0x5];
- u8 reserved_at_330[0x6];
+ u8 reserved_at_330[0x5];
+ u8 pcie_reset_using_hotreset_method[0x1];
u8 pci_sync_for_fw_update_with_driver_unload[0x1];
u8 vnic_env_cnt_steering_fail[0x1];
u8 vport_counter_local_loopback[0x1];
@@ -1959,7 +2052,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_760[0x3];
u8 log_max_num_header_modify_argument[0x5];
- u8 reserved_at_768[0x4];
+ u8 log_header_modify_argument_granularity_offset[0x4];
u8 log_header_modify_argument_granularity[0x4];
u8 reserved_at_770[0x3];
u8 log_header_modify_argument_max_alloc[0x5];
@@ -2006,7 +2099,8 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_140[0x60];
u8 flow_table_type_2_type[0x8];
- u8 reserved_at_1a8[0x3];
+ u8 reserved_at_1a8[0x2];
+ u8 format_select_dw_8_6_ext[0x1];
u8 log_min_mkey_entity_size[0x5];
u8 reserved_at_1b0[0x10];
@@ -2022,6 +2116,16 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_250[0x10];
u8 reserved_at_260[0x120];
+
+ u8 format_select_dw_gtpu_dw_0[0x8];
+ u8 format_select_dw_gtpu_dw_1[0x8];
+ u8 format_select_dw_gtpu_dw_2[0x8];
+ u8 format_select_dw_gtpu_first_ext_dw_0[0x8];
+
+ u8 generate_wqe_type[0x20];
+
+ u8 reserved_at_2c0[0xc0];
+
u8 reserved_at_380[0xb];
u8 min_mkey_log_entity_size_fixed_buffer[0x5];
u8 ec_vf_vport_base[0x10];
@@ -2037,9 +2141,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_400[0x1];
u8 min_mkey_log_entity_size_fixed_buffer_valid[0x1];
- u8 reserved_at_402[0x1e];
+ u8 reserved_at_402[0xe];
+ u8 return_reg_id[0x10];
- u8 reserved_at_420[0x20];
+ u8 reserved_at_420[0x1c];
+ u8 flow_table_hash_type[0x4];
u8 reserved_at_440[0x8];
u8 max_num_eqs_24b[0x18];
@@ -2086,7 +2192,7 @@ struct mlx5_ifc_extended_dest_format_bits {
u8 reserved_at_60[0x20];
};
-union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
+union mlx5_ifc_dest_format_flow_counter_list_auto_bits {
struct mlx5_ifc_extended_dest_format_bits extended_dest_format;
struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
};
@@ -2178,7 +2284,10 @@ struct mlx5_ifc_wq_bits {
u8 reserved_at_139[0x4];
u8 log_wqe_stride_size[0x3];
- u8 reserved_at_140[0x80];
+ u8 dbr_umem_id[0x20];
+ u8 wq_umem_id[0x20];
+
+ u8 wq_umem_offset[0x40];
u8 headers_mkey[0x20];
@@ -3562,6 +3671,8 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps;
struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
+ struct mlx5_ifc_wqe_based_flow_table_cap_bits wqe_based_flow_table_cap;
+ struct mlx5_ifc_esw_cap_bits esw_cap;
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
struct mlx5_ifc_port_selection_cap_bits port_selection_cap;
struct mlx5_ifc_qos_cap_bits qos_cap;
@@ -3678,7 +3789,7 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_1300[0x500];
- union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[];
+ union mlx5_ifc_dest_format_flow_counter_list_auto_bits destination[];
};
enum {
@@ -3919,7 +4030,8 @@ struct mlx5_ifc_sqc_bits {
u8 reg_umr[0x1];
u8 allow_swp[0x1];
u8 hairpin[0x1];
- u8 reserved_at_f[0xb];
+ u8 non_wire[0x1];
+ u8 reserved_at_10[0xa];
u8 ts_format[0x2];
u8 reserved_at_1c[0x4];
@@ -3966,6 +4078,7 @@ enum {
ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1,
ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2,
ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3,
+ ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP = 1 << 4,
};
struct mlx5_ifc_scheduling_context_bits {
@@ -4675,6 +4788,12 @@ enum {
TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
};
+enum {
+ TSAR_TYPE_CAP_MASK_DWRR = 1 << 0,
+ TSAR_TYPE_CAP_MASK_ROUND_ROBIN = 1 << 1,
+ TSAR_TYPE_CAP_MASK_ETS = 1 << 2,
+};
+
struct mlx5_ifc_tsar_element_bits {
u8 reserved_at_0[0x8];
u8 tsar_type[0x8];
@@ -4961,6 +5080,16 @@ struct mlx5_ifc_set_fte_in_bits {
struct mlx5_ifc_flow_context_bits flow_context;
};
+struct mlx5_ifc_dest_format_bits {
+ u8 destination_type[0x8];
+ u8 destination_id[0x18];
+
+ u8 destination_eswitch_owner_vhca_id_valid[0x1];
+ u8 packet_reformat[0x1];
+ u8 reserved_at_22[0xe];
+ u8 destination_eswitch_owner_vhca_id[0x10];
+};
+
struct mlx5_ifc_rts2rts_qp_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -6127,7 +6256,8 @@ struct mlx5_ifc_flow_table_context_bits {
u8 termination_table[0x1];
u8 table_miss_action[0x4];
u8 level[0x8];
- u8 reserved_at_10[0x8];
+ u8 rtc_valid[0x1];
+ u8 reserved_at_11[0x7];
u8 log_size[0x8];
u8 reserved_at_20[0x8];
@@ -6137,11 +6267,21 @@ struct mlx5_ifc_flow_table_context_bits {
u8 lag_master_next_table_id[0x18];
u8 reserved_at_60[0x60];
+ union {
+ struct {
+ u8 sw_owner_icm_root_1[0x40];
- u8 sw_owner_icm_root_1[0x40];
+ u8 sw_owner_icm_root_0[0x40];
+ } sws;
+ struct {
+ u8 rtc_id_0[0x20];
+
+ u8 rtc_id_1[0x20];
- u8 sw_owner_icm_root_0[0x40];
+ u8 reserved_at_100[0x40];
+ } hws;
+ };
};
struct mlx5_ifc_query_flow_table_out_bits {
@@ -8923,7 +9063,9 @@ struct mlx5_ifc_create_qp_in_bits {
struct mlx5_ifc_qpc_bits qpc;
- u8 reserved_at_800[0x60];
+ u8 wq_umem_offset[0x40];
+
+ u8 wq_umem_id[0x20];
u8 wq_umem_valid[0x1];
u8 reserved_at_861[0x1f];
@@ -11048,6 +11190,11 @@ struct mlx5_ifc_mcda_reg_bits {
};
enum {
+ MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE = 0,
+ MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET = 1,
+};
+
+enum {
MLX5_MFRL_REG_RESET_STATE_IDLE = 0,
MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION = 1,
MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS = 2,
@@ -11074,7 +11221,8 @@ struct mlx5_ifc_mfrl_reg_bits {
u8 pci_sync_for_fw_update_start[0x1];
u8 pci_sync_for_fw_update_resp[0x2];
u8 rst_type_sel[0x3];
- u8 reserved_at_28[0x4];
+ u8 pci_reset_req_method[0x3];
+ u8 reserved_at_2b[0x1];
u8 reset_state[0x4];
u8 reset_type[0x8];
u8 reset_level[0x8];
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index ad1ce650146c..fc7eeff99a8a 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -149,6 +149,7 @@ enum {
MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2,
MLX5_WQE_CTRL_SOLICITED = 1 << 1,
+ MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE = 1 << 5,
};
enum {
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 2e40a137dc12..e87b5e488325 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3567,6 +3567,17 @@ static inline void netdev_tx_reset_queue(struct netdev_queue *q)
}
/**
+ * netdev_tx_reset_subqueue - reset the BQL stats and state of a netdev queue
+ * @dev: network device
+ * @qid: stack index of the queue to reset
+ */
+static inline void netdev_tx_reset_subqueue(const struct net_device *dev,
+ u32 qid)
+{
+ netdev_tx_reset_queue(netdev_get_tx_queue(dev, qid));
+}
+
+/**
* netdev_reset_queue - reset the packets and bytes count of a network device
* @dev_queue: network device
*
@@ -3575,7 +3586,7 @@ static inline void netdev_tx_reset_queue(struct netdev_queue *q)
*/
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
- netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
+ netdev_tx_reset_subqueue(dev_queue, 0);
}
/**
@@ -3942,6 +3953,8 @@ u8 dev_xdp_prog_count(struct net_device *dev);
int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf);
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
+u32 dev_get_min_mp_channel_count(const struct net_device *dev);
+
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb);
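
netdev_tx_reset_subqueue() gives multi-queue drivers a per-queue way to clear BQL state. A minimal sketch of a driver reset path using it; the function below is hypothetical, while the helper and real_num_tx_queues are existing kernel symbols:

#include <linux/netdevice.h>

static void example_reset_all_tx_queues(struct net_device *dev)
{
	unsigned int qid;

	/* Clear BQL accounting for every in-use TX queue before re-arming. */
	for (qid = 0; qid < dev->real_num_tx_queues; qid++)
		netdev_tx_reset_subqueue(dev, qid);
}
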
diff --git a/include/linux/oa_tc6.h b/include/linux/oa_tc6.h
new file mode 100644
index 000000000000..15f58e3c56c7
--- /dev/null
+++ b/include/linux/oa_tc6.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * OPEN Alliance 10BASE-T1x MAC-PHY Serial Interface framework
+ *
+ * Link: https://opensig.org/download/document/OPEN_Alliance_10BASET1x_MAC-PHY_Serial_Interface_V1.1.pdf
+ *
+ * Author: Parthiban Veerasooran <[email protected]>
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/spi/spi.h>
+
+struct oa_tc6;
+
+struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev);
+void oa_tc6_exit(struct oa_tc6 *tc6);
+int oa_tc6_write_register(struct oa_tc6 *tc6, u32 address, u32 value);
+int oa_tc6_write_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
+ u8 length);
+int oa_tc6_read_register(struct oa_tc6 *tc6, u32 address, u32 *value);
+int oa_tc6_read_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
+ u8 length);
+netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, struct sk_buff *skb);
+int oa_tc6_zero_align_receive_frame_enable(struct oa_tc6 *tc6);
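
The new header exposes the OA TC6 framework API. A rough sketch of the expected call flow from a SPI MAC-PHY driver follows; the probe function and register address are placeholders, and the error handling assumes oa_tc6_init() returns NULL on failure, which is an assumption rather than something this header confirms.

#include <linux/oa_tc6.h>

static int example_oa_tc6_probe(struct spi_device *spi,
				struct net_device *netdev)
{
	struct oa_tc6 *tc6;
	u32 val;
	int ret;

	/* Bind the framework to this SPI device and its net_device. */
	tc6 = oa_tc6_init(spi, netdev);
	if (!tc6)
		return -ENODEV;

	/* Read back a register to sanity-check the SPI path
	 * (0x0004 is a placeholder address, not a real OA register).
	 */
	ret = oa_tc6_read_register(tc6, 0x0004, &val);
	if (ret) {
		oa_tc6_exit(tc6);
		return ret;
	}

	return 0;
}
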
diff --git a/include/linux/pci-pwrctl.h b/include/linux/pci-pwrctl.h
index 45e9cfe740e4..0d23dddf59ec 100644
--- a/include/linux/pci-pwrctl.h
+++ b/include/linux/pci-pwrctl.h
@@ -7,6 +7,7 @@
#define __PCI_PWRCTL_H__
#include <linux/notifier.h>
+#include <linux/workqueue.h>
struct device;
struct device_link;
@@ -41,8 +42,10 @@ struct pci_pwrctl {
/* Private: don't use. */
struct notifier_block nb;
struct device_link *link;
+ struct work_struct work;
};
+void pci_pwrctl_init(struct pci_pwrctl *pwrctl, struct device *dev);
int pci_pwrctl_device_set_ready(struct pci_pwrctl *pwrctl);
void pci_pwrctl_device_unset_ready(struct pci_pwrctl *pwrctl);
int devm_pci_pwrctl_device_set_ready(struct device *dev,
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 2381e07429a2..5c01048860c4 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -598,6 +598,8 @@ int phylink_fwnode_phy_connect(struct phylink *pl,
const struct fwnode_handle *fwnode,
u32 flags);
void phylink_disconnect_phy(struct phylink *);
+int phylink_set_fixed_link(struct phylink *,
+ const struct phylink_link_state *);
void phylink_mac_change(struct phylink *, bool up);
void phylink_pcs_change(struct phylink_pcs *, bool up);
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index 0aeeae1c1943..ae9bf7479e7b 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -62,6 +62,7 @@
#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021
#define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */
#define ASUS_WMI_DEVID_LIGHTBAR 0x00050025
+#define ASUS_WMI_DEVID_OOBE 0x0005002F
/* This can only be used to disable the screen, not re-enable */
#define ASUS_WMI_DEVID_SCREENPAD_POWER 0x00050031
/* Writing a brightness re-enables the screen if disabled */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index cf8f6ce06742..39f1d16f3628 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -827,6 +827,8 @@ enum skb_tstamp_type {
* @csum_level: indicates the number of consecutive checksums found in
* the packet minus one that have been verified as
* CHECKSUM_UNNECESSARY (max 3)
+ * @unreadable: indicates that at least one of the fragments in this skb
+ * is unreadable.
* @dst_pending_confirm: need to confirm neighbour
* @decrypted: Decrypted SKB
* @slow_gro: state present at GRO time, slower prepare step required
@@ -1008,7 +1010,7 @@ struct sk_buff {
#if IS_ENABLED(CONFIG_IP_SCTP)
__u8 csum_not_inet:1;
#endif
-
+ __u8 unreadable:1;
#if defined(CONFIG_NET_SCHED) || defined(CONFIG_NET_XGRESS)
__u16 tc_index; /* traffic control index */
#endif
@@ -1433,6 +1435,7 @@ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);
+int skb_copy_seq_read(struct skb_seq_state *st, int offset, void *to, int len);
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
unsigned int to, struct ts_config *config);
@@ -1823,6 +1826,12 @@ static inline void skb_zcopy_downgrade_managed(struct sk_buff *skb)
__skb_zcopy_downgrade_managed(skb);
}
+/* Return true if frags in this skb are readable by the host. */
+static inline bool skb_frags_readable(const struct sk_buff *skb)
+{
+ return !skb->unreadable;
+}
+
static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
skb->next = NULL;
@@ -2539,10 +2548,17 @@ static inline void skb_len_add(struct sk_buff *skb, int delta)
static inline void __skb_fill_netmem_desc(struct sk_buff *skb, int i,
netmem_ref netmem, int off, int size)
{
- struct page *page = netmem_to_page(netmem);
+ struct page *page;
__skb_fill_netmem_desc_noacc(skb_shinfo(skb), i, netmem, off, size);
+ if (netmem_is_net_iov(netmem)) {
+ skb->unreadable = true;
+ return;
+ }
+
+ page = netmem_to_page(netmem);
+
/* Propagate page pfmemalloc to the skb if we can. The problem is
* that not all callers have unique ownership of the page but rely
* on page_is_pfmemalloc doing the right thing(tm).
@@ -3523,21 +3539,58 @@ static inline void skb_frag_off_copy(skb_frag_t *fragto,
fragto->offset = fragfrom->offset;
}
+/* Return: true if the skb_frag contains a net_iov. */
+static inline bool skb_frag_is_net_iov(const skb_frag_t *frag)
+{
+ return netmem_is_net_iov(frag->netmem);
+}
+
+/**
+ * skb_frag_net_iov - retrieve the net_iov referred to by fragment
+ * @frag: the fragment
+ *
+ * Return: the &struct net_iov associated with @frag. Returns NULL if this
+ * frag has no associated net_iov.
+ */
+static inline struct net_iov *skb_frag_net_iov(const skb_frag_t *frag)
+{
+ if (!skb_frag_is_net_iov(frag))
+ return NULL;
+
+ return netmem_to_net_iov(frag->netmem);
+}
+
/**
* skb_frag_page - retrieve the page referred to by a paged fragment
* @frag: the paged fragment
*
- * Returns the &struct page associated with @frag.
+ * Return: the &struct page associated with @frag. Returns NULL if this frag
+ * has no associated page.
*/
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
+ if (skb_frag_is_net_iov(frag))
+ return NULL;
+
return netmem_to_page(frag->netmem);
}
+/**
+ * skb_frag_netmem - retrieve the netmem referred to by a fragment
+ * @frag: the fragment
+ *
+ * Return: the &netmem_ref associated with @frag.
+ */
+static inline netmem_ref skb_frag_netmem(const skb_frag_t *frag)
+{
+ return frag->netmem;
+}
+
int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
unsigned int headroom);
int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
struct bpf_prog *prog);
+
/**
* skb_frag_address - gets the address of the data contained in a paged fragment
* @frag: the paged fragment buffer
@@ -3547,6 +3600,9 @@ int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
*/
static inline void *skb_frag_address(const skb_frag_t *frag)
{
+ if (!skb_frag_page(frag))
+ return NULL;
+
return page_address(skb_frag_page(frag)) + skb_frag_off(frag);
}
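
With unreadable (net_iov backed) frags, skb_frag_page() and skb_frag_address() can now return NULL, so code that dereferences fragment memory should check skb_frags_readable() first. A minimal sketch; the accessor function below is hypothetical.

#include <linux/skbuff.h>

static void *example_frag_data(const struct sk_buff *skb, int i)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	if (!skb_frags_readable(skb))
		return NULL;	/* host CPU cannot dereference this memory */

	return skb_frag_address(frag);
}
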
diff --git a/include/linux/skbuff_ref.h b/include/linux/skbuff_ref.h
index 16c241a23472..0f3c58007488 100644
--- a/include/linux/skbuff_ref.h
+++ b/include/linux/skbuff_ref.h
@@ -34,14 +34,13 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
bool napi_pp_put_page(netmem_ref netmem);
-static inline void
-skb_page_unref(struct page *page, bool recycle)
+static inline void skb_page_unref(netmem_ref netmem, bool recycle)
{
#ifdef CONFIG_PAGE_POOL
- if (recycle && napi_pp_put_page(page_to_netmem(page)))
+ if (recycle && napi_pp_put_page(netmem))
return;
#endif
- put_page(page);
+ put_page(netmem_to_page(netmem));
}
/**
@@ -54,7 +53,7 @@ skb_page_unref(struct page *page, bool recycle)
*/
static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
{
- skb_page_unref(skb_frag_page(frag), recycle);
+ skb_page_unref(skb_frag_netmem(frag), recycle);
}
/**
diff --git a/include/linux/socket.h b/include/linux/socket.h
index df9cdb8bbfb8..d18cc47e89bd 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -327,6 +327,7 @@ struct ucred {
* plain text and require encryption
*/
+#define MSG_SOCK_DEVMEM 0x2000000 /* Receive devmem skbs as cmsg */
#define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */
#define MSG_SPLICE_PAGES 0x8000000 /* Splice the pages from the iterator in sendmsg() */
#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 338991c08f00..d79ff252cfdc 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -138,33 +138,6 @@ struct stmmac_txq_cfg {
int tbs_en;
};
-/* FPE link state */
-enum stmmac_fpe_state {
- FPE_STATE_OFF = 0,
- FPE_STATE_CAPABLE = 1,
- FPE_STATE_ENTERING_ON = 2,
- FPE_STATE_ON = 3,
-};
-
-/* FPE link-partner hand-shaking mPacket type */
-enum stmmac_mpacket_type {
- MPACKET_VERIFY = 0,
- MPACKET_RESPONSE = 1,
-};
-
-enum stmmac_fpe_task_state_t {
- __FPE_REMOVING,
- __FPE_TASK_SCHED,
-};
-
-struct stmmac_fpe_cfg {
- bool enable; /* FPE enable */
- bool hs_enable; /* FPE handshake enable */
- enum stmmac_fpe_state lp_fpe_state; /* Link Partner FPE state */
- enum stmmac_fpe_state lo_fpe_state; /* Local station FPE state */
- u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
-};
-
struct stmmac_safety_feature_cfg {
u32 tsoee;
u32 mrxpee;
@@ -232,7 +205,6 @@ struct plat_stmmacenet_data {
struct fwnode_handle *port_node;
struct device_node *mdio_node;
struct stmmac_dma_cfg *dma_cfg;
- struct stmmac_fpe_cfg *fpe_cfg;
struct stmmac_safety_feature_cfg *safety_feat_cfg;
int clk_csr;
int has_gmac;
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 6c395a2600e8..276ca543ef44 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -173,7 +173,8 @@ retry:
break;
case SKB_GSO_TCPV4:
case SKB_GSO_TCPV6:
- if (skb->csum_offset != offsetof(struct tcphdr, check))
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb->csum_offset != offsetof(struct tcphdr, check))
return -EINVAL;
break;
}
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 192d72c8b465..69ec1eb41a09 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4837,9 +4837,9 @@ struct cfg80211_ops {
int (*start_radar_detection)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms);
+ u32 cac_time_ms, int link_id);
void (*end_cac)(struct wiphy *wiphy,
- struct net_device *dev);
+ struct net_device *dev, unsigned int link_id);
int (*update_ft_ies)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_update_ft_ies_params *ftie);
int (*crit_proto_start)(struct wiphy *wiphy,
@@ -6194,9 +6194,6 @@ enum ieee80211_ap_reg_power {
* @address: The address for this device, valid only if @netdev is %NULL
* @is_running: true if this is a non-netdev device that has been started, e.g.
* the P2P Device.
- * @cac_started: true if DFS channel availability check has been started
- * @cac_start_time: timestamp (jiffies) when the dfs state was entered.
- * @cac_time_ms: CAC time in ms
* @ps: powersave mode is enabled
* @ps_timeout: dynamic powersave timeout
* @ap_unexpected_nlportid: (private) netlink port ID of application
@@ -6220,6 +6217,11 @@ enum ieee80211_ap_reg_power {
* unprotected beacon report
* @links: array of %IEEE80211_MLD_MAX_NUM_LINKS elements containing @addr
* @ap and @client for each link
+ * @links.cac_started: true if DFS channel availability check has been
+ * started
+ * @links.cac_start_time: timestamp (jiffies) when the dfs state was
+ * entered.
+ * @links.cac_time_ms: CAC time in ms
* @valid_links: bitmap describing what elements of @links are valid
*/
struct wireless_dev {
@@ -6261,11 +6263,6 @@ struct wireless_dev {
u32 owner_nlportid;
bool nl_owner_dead;
- /* FIXME: need to rework radar detection for MLO */
- bool cac_started;
- unsigned long cac_start_time;
- unsigned int cac_time_ms;
-
#ifdef CONFIG_CFG80211_WEXT
/* wext data */
struct {
@@ -6332,6 +6329,10 @@ struct wireless_dev {
struct cfg80211_internal_bss *current_bss;
} client;
};
+
+ bool cac_started;
+ unsigned long cac_start_time;
+ unsigned int cac_time_ms;
} links[IEEE80211_MLD_MAX_NUM_LINKS];
u16 valid_links;
};
@@ -8740,6 +8741,7 @@ void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac,
* @chandef: chandef for the current channel
* @event: type of event
* @gfp: context flags
+ * @link_id: valid link_id for MLO operation or 0 otherwise.
*
* This function is called when a Channel availability check (CAC) is finished
* or aborted. This must be called to notify the completion of a CAC process,
@@ -8747,7 +8749,8 @@ void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac,
*/
void cfg80211_cac_event(struct net_device *netdev,
const struct cfg80211_chan_def *chandef,
- enum nl80211_radar_event event, gfp_t gfp);
+ enum nl80211_radar_event event, gfp_t gfp,
+ unsigned int link_id);
/**
* cfg80211_background_cac_abort - Channel Availability Check offchan abort event
diff --git a/include/net/libeth/tx.h b/include/net/libeth/tx.h
new file mode 100644
index 000000000000..35614f9523f6
--- /dev/null
+++ b/include/net/libeth/tx.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef __LIBETH_TX_H
+#define __LIBETH_TX_H
+
+#include <linux/skbuff.h>
+
+#include <net/libeth/types.h>
+
+/* Tx buffer completion */
+
+/**
+ * enum libeth_sqe_type - type of &libeth_sqe to act on Tx completion
+ * @LIBETH_SQE_EMPTY: unused/empty, no action required
+ * @LIBETH_SQE_CTX: context descriptor with empty SQE, no action required
+ * @LIBETH_SQE_SLAB: kmalloc-allocated buffer, unmap and kfree()
+ * @LIBETH_SQE_FRAG: mapped skb frag, only unmap DMA
+ * @LIBETH_SQE_SKB: &sk_buff, unmap and napi_consume_skb(), update stats
+ */
+enum libeth_sqe_type {
+ LIBETH_SQE_EMPTY = 0U,
+ LIBETH_SQE_CTX,
+ LIBETH_SQE_SLAB,
+ LIBETH_SQE_FRAG,
+ LIBETH_SQE_SKB,
+};
+
+/**
+ * struct libeth_sqe - represents a Send Queue Element / Tx buffer
+ * @type: type of the buffer, see the enum above
+ * @rs_idx: index of the last buffer from the batch this one was sent in
+ * @raw: slab buffer to free via kfree()
+ * @skb: &sk_buff to consume
+ * @dma: DMA address to unmap
+ * @len: length of the mapped region to unmap
+ * @nr_frags: number of frags in the frame this buffer belongs to
+ * @packets: number of physical packets sent for this frame
+ * @bytes: number of physical bytes sent for this frame
+ * @priv: driver-private scratchpad
+ */
+struct libeth_sqe {
+ enum libeth_sqe_type type:32;
+ u32 rs_idx;
+
+ union {
+ void *raw;
+ struct sk_buff *skb;
+ };
+
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+
+ u32 nr_frags;
+ u32 packets;
+ u32 bytes;
+
+ unsigned long priv;
+} __aligned_largest;
+
+/**
+ * LIBETH_SQE_CHECK_PRIV - check the driver's private SQE data
+ * @p: type or name of the object the driver wants to fit into &libeth_sqe
+ *
+ * Make sure the driver's private data fits into libeth_sqe::priv. To be used
+ * right after its declaration.
+ */
+#define LIBETH_SQE_CHECK_PRIV(p) \
+ static_assert(sizeof(p) <= sizeof_field(struct libeth_sqe, priv))
+
+/**
+ * struct libeth_cq_pp - completion queue poll params
+ * @dev: &device to perform DMA unmapping
+ * @ss: onstack NAPI stats to fill
+ * @napi: whether it's called from the NAPI context
+ *
+ * libeth uses this structure to access objects needed for performing a full
+ * Tx completion operation without passing lots of arguments and changing the
+ * prototypes each time a new one is added.
+ */
+struct libeth_cq_pp {
+ struct device *dev;
+ struct libeth_sq_napi_stats *ss;
+
+ bool napi;
+};
+
+/**
+ * libeth_tx_complete - perform Tx completion for one SQE
+ * @sqe: SQE to complete
+ * @cp: poll params
+ *
+ * Do Tx complete for all the types of buffers, incl. freeing, unmapping,
+ * updating the stats etc.
+ */
+static inline void libeth_tx_complete(struct libeth_sqe *sqe,
+ const struct libeth_cq_pp *cp)
+{
+ switch (sqe->type) {
+ case LIBETH_SQE_EMPTY:
+ return;
+ case LIBETH_SQE_SKB:
+ case LIBETH_SQE_FRAG:
+ case LIBETH_SQE_SLAB:
+ dma_unmap_page(cp->dev, dma_unmap_addr(sqe, dma),
+ dma_unmap_len(sqe, len), DMA_TO_DEVICE);
+ break;
+ default:
+ break;
+ }
+
+ switch (sqe->type) {
+ case LIBETH_SQE_SKB:
+ cp->ss->packets += sqe->packets;
+ cp->ss->bytes += sqe->bytes;
+
+ napi_consume_skb(sqe->skb, cp->napi);
+ break;
+ case LIBETH_SQE_SLAB:
+ kfree(sqe->raw);
+ break;
+ default:
+ break;
+ }
+
+ sqe->type = LIBETH_SQE_EMPTY;
+}
+
+#endif /* __LIBETH_TX_H */
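
For illustration only (not part of this patch): a minimal sketch of how a driver's clean-up path might consume the completion helpers introduced above. The ring layout, its field names and my_ring_clean() are hypothetical; only struct libeth_sqe, struct libeth_cq_pp, struct libeth_sq_napi_stats and libeth_tx_complete() come from the new headers.

#include <net/libeth/tx.h>

struct my_tx_ring {
	struct libeth_sqe *bufs;	/* one SQE per Tx descriptor */
	struct device *dev;		/* device the buffers were mapped for */
	u32 next_to_clean;
	u32 count;
};

/* Reclaim completed SQEs up to (but not including) @done_idx. */
static u32 my_ring_clean(struct my_tx_ring *ring, u32 done_idx, bool napi)
{
	struct libeth_sq_napi_stats ss = { };
	struct libeth_cq_pp cp = {
		.dev	= ring->dev,
		.ss	= &ss,
		.napi	= napi,
	};
	u32 i = ring->next_to_clean;

	while (i != done_idx) {
		/* Unmaps/frees according to sqe->type, updates @ss for skbs
		 * and resets the SQE to LIBETH_SQE_EMPTY.
		 */
		libeth_tx_complete(&ring->bufs[i], &cp);
		if (++i == ring->count)
			i = 0;
	}

	ring->next_to_clean = i;
	return ss.packets;
}
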
diff --git a/include/net/libeth/types.h b/include/net/libeth/types.h
new file mode 100644
index 000000000000..603825e45133
--- /dev/null
+++ b/include/net/libeth/types.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef __LIBETH_TYPES_H
+#define __LIBETH_TYPES_H
+
+#include <linux/types.h>
+
+/**
+ * struct libeth_sq_napi_stats - "hot" counters to update in Tx completion loop
+ * @packets: completed frames counter
+ * @bytes: sum of bytes of completed frames above
+ * @raw: alias to access all the fields as an array
+ */
+struct libeth_sq_napi_stats {
+ union {
+ struct {
+ u32 packets;
+ u32 bytes;
+ };
+ DECLARE_FLEX_ARRAY(u32, raw);
+ };
+};
+
+#endif /* __LIBETH_TYPES_H */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index adfec877f392..954dff901b69 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -6748,8 +6748,11 @@ void ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp);
* ieee80211_radar_detected - inform that a radar was detected
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @chanctx_conf: Channel context on which radar is detected. Mandatory to
+ * pass a valid pointer during MLO; for non-MLO, %NULL can be passed.
*/
-void ieee80211_radar_detected(struct ieee80211_hw *hw);
+void ieee80211_radar_detected(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf);
/**
* ieee80211_chswitch_done - Complete channel switch process
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index 0bc4ab03f487..814b5f2e3ed5 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -223,6 +223,8 @@ static inline __be32 mptcp_reset_option(const struct sk_buff *skb)
return htonl(0u);
}
+
+void mptcp_active_detect_blackhole(struct sock *sk, bool expired);
#else
static inline void mptcp_init(void)
@@ -307,6 +309,8 @@ static inline struct request_sock *mptcp_subflow_reqsk_alloc(const struct reques
}
static inline __be32 mptcp_reset_option(const struct sk_buff *skb) { return htonl(0u); }
+
+static inline void mptcp_active_detect_blackhole(struct sock *sk, bool expired) { }
#endif /* CONFIG_MPTCP */
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h
index aa1716fb0e53..596836abf7bf 100644
--- a/include/net/netdev_rx_queue.h
+++ b/include/net/netdev_rx_queue.h
@@ -6,6 +6,7 @@
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <net/xdp.h>
+#include <net/page_pool/types.h>
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
@@ -25,6 +26,7 @@ struct netdev_rx_queue {
* Readers and writers must hold RTNL
*/
struct napi_struct *napi;
+ struct pp_memory_provider_params mp_params;
} ____cacheline_aligned_in_smp;
/*
@@ -43,7 +45,6 @@ __netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
return dev->_rx + rxq;
}
-#ifdef CONFIG_SYSFS
static inline unsigned int
get_netdev_rx_queue_index(struct netdev_rx_queue *queue)
{
@@ -53,5 +54,7 @@ get_netdev_rx_queue_index(struct netdev_rx_queue *queue)
BUG_ON(index >= dev->num_rx_queues);
return index;
}
-#endif
+
+int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq);
+
#endif
diff --git a/include/net/netmem.h b/include/net/netmem.h
index 46cc9b89ac79..8a6e20be4b9d 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -8,6 +8,54 @@
#ifndef _NET_NETMEM_H
#define _NET_NETMEM_H
+#include <linux/mm.h>
+#include <net/net_debug.h>
+
+/* net_iov */
+
+DECLARE_STATIC_KEY_FALSE(page_pool_mem_providers);
+
+/* We overload the LSB of the struct page pointer to indicate whether it's
+ * a page or net_iov.
+ */
+#define NET_IOV 0x01UL
+
+struct net_iov {
+ unsigned long __unused_padding;
+ unsigned long pp_magic;
+ struct page_pool *pp;
+ struct dmabuf_genpool_chunk_owner *owner;
+ unsigned long dma_addr;
+ atomic_long_t pp_ref_count;
+};
+
+/* These fields in struct page are used by the page_pool and net stack:
+ *
+ * struct {
+ * unsigned long pp_magic;
+ * struct page_pool *pp;
+ * unsigned long _pp_mapping_pad;
+ * unsigned long dma_addr;
+ * atomic_long_t pp_ref_count;
+ * };
+ *
+ * We mirror the page_pool fields here so the page_pool can access these fields
+ * without worrying whether the underlying fields belong to a page or net_iov.
+ *
+ * The non-net stack fields of struct page are private to the mm stack and must
+ * never be mirrored to net_iov.
+ */
+#define NET_IOV_ASSERT_OFFSET(pg, iov) \
+ static_assert(offsetof(struct page, pg) == \
+ offsetof(struct net_iov, iov))
+NET_IOV_ASSERT_OFFSET(pp_magic, pp_magic);
+NET_IOV_ASSERT_OFFSET(pp, pp);
+NET_IOV_ASSERT_OFFSET(dma_addr, dma_addr);
+NET_IOV_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
+#undef NET_IOV_ASSERT_OFFSET
+
+/* netmem */
+
/**
* typedef netmem_ref - a nonexistent type marking a reference to generic
* network memory.
@@ -19,20 +67,37 @@
*/
typedef unsigned long __bitwise netmem_ref;
+static inline bool netmem_is_net_iov(const netmem_ref netmem)
+{
+ return (__force unsigned long)netmem & NET_IOV;
+}
+
/* This conversion fails (returns NULL) if the netmem_ref is not struct page
* backed.
- *
- * Currently struct page is the only possible netmem, and this helper never
- * fails.
*/
static inline struct page *netmem_to_page(netmem_ref netmem)
{
+ if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
+ return NULL;
+
return (__force struct page *)netmem;
}
-/* Converting from page to netmem is always safe, because a page can always be
- * a netmem.
- */
+static inline struct net_iov *netmem_to_net_iov(netmem_ref netmem)
+{
+ if (netmem_is_net_iov(netmem))
+ return (struct net_iov *)((__force unsigned long)netmem &
+ ~NET_IOV);
+
+ DEBUG_NET_WARN_ON_ONCE(true);
+ return NULL;
+}
+
+static inline netmem_ref net_iov_to_netmem(struct net_iov *niov)
+{
+ return (__force netmem_ref)((unsigned long)niov | NET_IOV);
+}
+
static inline netmem_ref page_to_netmem(struct page *page)
{
return (__force netmem_ref)page;
@@ -40,17 +105,70 @@ static inline netmem_ref page_to_netmem(struct page *page)
static inline int netmem_ref_count(netmem_ref netmem)
{
+ /* The non-pp refcount of net_iov is always 1. On net_iov, we only
+ * support pp refcounting which uses the pp_ref_count field.
+ */
+ if (netmem_is_net_iov(netmem))
+ return 1;
+
return page_ref_count(netmem_to_page(netmem));
}
-static inline unsigned long netmem_to_pfn(netmem_ref netmem)
+static inline unsigned long netmem_pfn_trace(netmem_ref netmem)
{
+ if (netmem_is_net_iov(netmem))
+ return 0;
+
return page_to_pfn(netmem_to_page(netmem));
}
+static inline struct net_iov *__netmem_clear_lsb(netmem_ref netmem)
+{
+ return (struct net_iov *)((__force unsigned long)netmem & ~NET_IOV);
+}
+
+static inline struct page_pool *netmem_get_pp(netmem_ref netmem)
+{
+ return __netmem_clear_lsb(netmem)->pp;
+}
+
+static inline atomic_long_t *netmem_get_pp_ref_count_ref(netmem_ref netmem)
+{
+ return &__netmem_clear_lsb(netmem)->pp_ref_count;
+}
+
+static inline bool netmem_is_pref_nid(netmem_ref netmem, int pref_nid)
+{
+ /* NUMA node preference only makes sense if we're allocating
+ * system memory. Memory providers (which give us net_iovs)
+ * choose for us.
+ */
+ if (netmem_is_net_iov(netmem))
+ return true;
+
+ return page_to_nid(netmem_to_page(netmem)) == pref_nid;
+}
+
static inline netmem_ref netmem_compound_head(netmem_ref netmem)
{
+ /* niov are never compounded */
+ if (netmem_is_net_iov(netmem))
+ return netmem;
+
return page_to_netmem(compound_head(netmem_to_page(netmem)));
}
+static inline void *netmem_address(netmem_ref netmem)
+{
+ if (netmem_is_net_iov(netmem))
+ return NULL;
+
+ return page_address(netmem_to_page(netmem));
+}
+
+static inline unsigned long netmem_get_dma_addr(netmem_ref netmem)
+{
+ return __netmem_clear_lsb(netmem)->dma_addr;
+}
+
#endif /* _NET_NETMEM_H */
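
For illustration only (not part of this patch): a short sketch of the LSB-tagging scheme above. The helper names below (my_netmem_kaddr(), my_wrap()) are hypothetical; the conversions and the NET_IOV low-bit test are the ones defined in netmem.h.

#include <net/netmem.h>

/* Return a CPU-mappable address for page-backed netmem, NULL for net_iov. */
static void *my_netmem_kaddr(netmem_ref netmem)
{
	/* net_iov-backed memory (e.g. unreadable device memory) has no
	 * kernel mapping; netmem_address() encodes the same rule.
	 */
	if (netmem_is_net_iov(netmem))
		return NULL;

	return page_address(netmem_to_page(netmem));
}

/* Either backing type can be carried in the same netmem_ref handle; the
 * net_iov variant sets the NET_IOV bit in the pointer's LSB.
 */
static netmem_ref my_wrap(struct page *page, struct net_iov *niov)
{
	return page ? page_to_netmem(page) : net_iov_to_netmem(niov);
}
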
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index 2b43a893c619..793e6fd78bc5 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -216,7 +216,7 @@ page_pool_get_dma_dir(const struct page_pool *pool)
static inline void page_pool_fragment_netmem(netmem_ref netmem, long nr)
{
- atomic_long_set(&netmem_to_page(netmem)->pp_ref_count, nr);
+ atomic_long_set(netmem_get_pp_ref_count_ref(netmem), nr);
}
/**
@@ -244,7 +244,7 @@ static inline void page_pool_fragment_page(struct page *page, long nr)
static inline long page_pool_unref_netmem(netmem_ref netmem, long nr)
{
- struct page *page = netmem_to_page(netmem);
+ atomic_long_t *pp_ref_count = netmem_get_pp_ref_count_ref(netmem);
long ret;
/* If nr == pp_ref_count then we have cleared all remaining
@@ -261,19 +261,19 @@ static inline long page_pool_unref_netmem(netmem_ref netmem, long nr)
* initially, and only overwrite it when the page is partitioned into
* more than one piece.
*/
- if (atomic_long_read(&page->pp_ref_count) == nr) {
+ if (atomic_long_read(pp_ref_count) == nr) {
/* As we have ensured nr is always one for constant case using
* the BUILD_BUG_ON(), only need to handle the non-constant case
* here for pp_ref_count draining, which is a rare case.
*/
BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1);
if (!__builtin_constant_p(nr))
- atomic_long_set(&page->pp_ref_count, 1);
+ atomic_long_set(pp_ref_count, 1);
return 0;
}
- ret = atomic_long_sub_return(nr, &page->pp_ref_count);
+ ret = atomic_long_sub_return(nr, pp_ref_count);
WARN_ON(ret < 0);
/* We are the last user here too, reset pp_ref_count back to 1 to
@@ -282,7 +282,7 @@ static inline long page_pool_unref_netmem(netmem_ref netmem, long nr)
* page_pool_unref_page() currently.
*/
if (unlikely(!ret))
- atomic_long_set(&page->pp_ref_count, 1);
+ atomic_long_set(pp_ref_count, 1);
return ret;
}
@@ -401,9 +401,7 @@ static inline void page_pool_free_va(struct page_pool *pool, void *va,
static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem)
{
- struct page *page = netmem_to_page(netmem);
-
- dma_addr_t ret = page->dma_addr;
+ dma_addr_t ret = netmem_get_dma_addr(netmem);
if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
ret <<= PAGE_SHIFT;
@@ -423,24 +421,6 @@ static inline dma_addr_t page_pool_get_dma_addr(const struct page *page)
return page_pool_get_dma_addr_netmem(page_to_netmem((struct page *)page));
}
-static inline bool page_pool_set_dma_addr_netmem(netmem_ref netmem,
- dma_addr_t addr)
-{
- struct page *page = netmem_to_page(netmem);
-
- if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
- page->dma_addr = addr >> PAGE_SHIFT;
-
- /* We assume page alignment to shave off bottom bits,
- * if this "compression" doesn't work we need to drop.
- */
- return addr != (dma_addr_t)page->dma_addr << PAGE_SHIFT;
- }
-
- page->dma_addr = addr;
- return false;
-}
-
/**
* page_pool_dma_sync_for_cpu - sync Rx page for CPU after it's written by HW
* @pool: &page_pool the @page belongs to
@@ -463,11 +443,6 @@ static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool,
page_pool_get_dma_dir(pool));
}
-static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
-{
- return page_pool_set_dma_addr_netmem(page_to_netmem(page), addr);
-}
-
static inline bool page_pool_put(struct page_pool *pool)
{
return refcount_dec_and_test(&pool->user_cnt);
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index 50569fed7868..c022c410abe3 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -20,8 +20,18 @@
* device driver responsibility
*/
#define PP_FLAG_SYSTEM_POOL BIT(2) /* Global system page_pool */
+
+/* Allow unreadable (net_iov backed) netmem in this page_pool. Drivers setting
+ * this must be able to support unreadable netmem, where netmem_address() would
+ * return NULL. This flag should not be set for header page_pools.
+ *
+ * If the driver sets PP_FLAG_ALLOW_UNREADABLE_NETMEM, it should also set
+ * page_pool_params.slow.queue_idx.
+ */
+#define PP_FLAG_ALLOW_UNREADABLE_NETMEM BIT(3)
+
#define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | \
- PP_FLAG_SYSTEM_POOL)
+ PP_FLAG_SYSTEM_POOL | PP_FLAG_ALLOW_UNREADABLE_NETMEM)
/*
* Fast allocation side cache array/stack
@@ -57,7 +67,9 @@ struct pp_alloc_cache {
* @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
* @slow: params with slowpath access only (initialization and Netlink)
* @netdev: netdev this pool will serve (leave as NULL if none or multiple)
- * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL
+ * @queue_idx: queue idx this page_pool is being created for.
+ * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL,
+ * PP_FLAG_ALLOW_UNREADABLE_NETMEM.
*/
struct page_pool_params {
struct_group_tagged(page_pool_params_fast, fast,
@@ -72,6 +84,7 @@ struct page_pool_params {
);
struct_group_tagged(page_pool_params_slow, slow,
struct net_device *netdev;
+ unsigned int queue_idx;
unsigned int flags;
/* private: used by test code only */
void (*init_callback)(netmem_ref netmem, void *arg);
@@ -139,6 +152,10 @@ struct page_pool_stats {
*/
#define PAGE_POOL_FRAG_GROUP_ALIGN (4 * sizeof(long))
+struct pp_memory_provider_params {
+ void *mp_priv;
+};
+
struct page_pool {
struct page_pool_params_fast p;
@@ -197,6 +214,8 @@ struct page_pool {
*/
struct ptr_ring ring;
+ void *mp_priv;
+
#ifdef CONFIG_PAGE_POOL_STATS
/* recycle stats are per-cpu to avoid locking */
struct page_pool_recycle_stats __percpu *recycle_stats;
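
For illustration only (not part of this patch): a sketch of a driver opting a payload page_pool in to unreadable netmem. my_create_payload_pool() and its arguments are hypothetical; the flag, the slow-path queue_idx field and page_pool_create() are the ones visible in the page_pool headers.

#include <net/page_pool/helpers.h>
#include <net/page_pool/types.h>

static struct page_pool *my_create_payload_pool(struct net_device *netdev,
						struct device *dma_dev,
						unsigned int rxq_idx,
						unsigned int pool_size)
{
	struct page_pool_params pp = {
		/* DMA-mapped by the pool, no dev sync: matches what the
		 * dmabuf memory provider accepts in mp_dmabuf_devmem_init().
		 */
		.flags		= PP_FLAG_DMA_MAP |
				  PP_FLAG_ALLOW_UNREADABLE_NETMEM,
		.order		= 0,
		.pool_size	= pool_size,
		.nid		= NUMA_NO_NODE,
		.dev		= dma_dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.netdev		= netdev,
		.queue_idx	= rxq_idx,	/* required with the new flag */
	};

	/* Returned netmem may not be CPU-readable (netmem_address() == NULL),
	 * so this pool must only back payload buffers, never headers.
	 */
	return page_pool_create(&pp);
}
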
diff --git a/include/net/sock.h b/include/net/sock.h
index f51d61fab059..c58ca8dd561b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -337,6 +337,7 @@ struct sk_filter;
* @sk_txtime_report_errors: set report errors mode for SO_TXTIME
* @sk_txtime_unused: unused txtime flags
* @ns_tracker: tracker for netns reference
+ * @sk_user_frags: xarray of pages the user is holding a reference on.
*/
struct sock {
/*
@@ -542,6 +543,7 @@ struct sock {
#endif
struct rcu_head sk_rcu;
netns_tracker ns_tracker;
+ struct xarray sk_user_frags;
};
struct sock_bh_locked {
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 2aac11e7e1cc..f77f812bfbe7 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1069,7 +1069,8 @@ static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
/* skb_cmp_decrypted() not needed, use tcp_write_collapse_fence() */
return likely(tcp_skb_can_collapse_to(to) &&
mptcp_skb_can_collapse(to, from) &&
- skb_pure_zcopy_same(to, from));
+ skb_pure_zcopy_same(to, from) &&
+ skb_frags_readable(to) == skb_frags_readable(from));
}
static inline bool tcp_skb_can_collapse_rx(const struct sk_buff *to,
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 54cef89f6c1e..b6bfdc6416c7 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -67,27 +67,27 @@
- instance of a transformer, struct xfrm_state (=SA)
- template to clone xfrm_state, struct xfrm_tmpl
- SPD is plain linear list of xfrm_policy rules, ordered by priority.
+ SPD is organized as hash table (for policies that meet minimum address prefix
+ length setting, net->xfrm.policy_hthresh). Other policies are stored in
+ lists, sorted into rbtree ordered by destination and source address networks.
+ See net/xfrm/xfrm_policy.c for details.
+
(To be compatible with existing pfkeyv2 implementations,
many rules with priority of 0x7fffffff are allowed to exist and
such rules are ordered in an unpredictable way, thanks to bsd folks.)
- Lookup is plain linear search until the first match with selector.
-
If "action" is "block", then we prohibit the flow, otherwise:
if "xfrms_nr" is zero, the flow passes untransformed. Otherwise,
policy entry has list of up to XFRM_MAX_DEPTH transformations,
described by templates xfrm_tmpl. Each template is resolved
to a complete xfrm_state (see below) and we pack bundle of transformations
- to a dst_entry returned to requestor.
+ to a dst_entry returned to requester.
dst -. xfrm .-> xfrm_state #1
|---. child .-> dst -. xfrm .-> xfrm_state #2
|---. child .-> dst -. xfrm .-> xfrm_state #3
|---. child .-> NULL
- Bundles are cached at xrfm_policy struct (field ->bundles).
-
Resolution of xrfm_tmpl
-----------------------
@@ -526,6 +526,36 @@ struct xfrm_policy_queue {
unsigned long timeout;
};
+/**
+ * struct xfrm_policy - xfrm policy
+ * @xp_net: network namespace the policy lives in
+ * @bydst: hlist node for SPD hash table or rbtree list
+ * @byidx: hlist node for index hash table
+ * @lock: serialize changes to policy structure members
+ * @refcnt: reference count, freed once it reaches 0
+ * @pos: kernel internal tie-breaker to determine age of policy
+ * @timer: timer
+ * @genid: generation, used to invalidate old policies
+ * @priority: priority, set by userspace
+ * @index: policy index (autogenerated)
+ * @if_id: virtual xfrm interface id
+ * @mark: packet mark
+ * @selector: selector
+ * @lft: lifetime configuration data
+ * @curlft: lifetime state
+ * @walk: list head on pernet policy list
+ * @polq: queue to hold packets while acquire operation is in progress
+ * @bydst_reinsert: policy tree node needs to be merged
+ * @type: XFRM_POLICY_TYPE_MAIN or _SUB
+ * @action: XFRM_POLICY_ALLOW or _BLOCK
+ * @flags: XFRM_POLICY_LOCALOK, XFRM_POLICY_ICMP
+ * @xfrm_nr: number of used templates in @xfrm_vec
+ * @family: protocol family
+ * @security: SELinux security label
+ * @xfrm_vec: array of templates to resolve state
+ * @rcu: rcu head, used to defer memory release
+ * @xdo: hardware offload state
+ */
struct xfrm_policy {
possible_net_t xp_net;
struct hlist_node bydst;
@@ -555,7 +585,6 @@ struct xfrm_policy {
u16 family;
struct xfrm_sec_ctx *security;
struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
- struct hlist_node bydst_inexact_list;
struct rcu_head rcu;
struct xfrm_dev_offload xdo;
@@ -1016,7 +1045,7 @@ void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
struct xfrm_if_parms {
int link; /* ifindex of underlying L2 interface */
- u32 if_id; /* interface identifyer */
+ u32 if_id; /* interface identifier */
bool collect_md;
};
diff --git a/include/sound/sof/topology.h b/include/sound/sof/topology.h
index 3ba086f61983..449e93c25184 100644
--- a/include/sound/sof/topology.h
+++ b/include/sound/sof/topology.h
@@ -54,7 +54,7 @@ enum sof_comp_type {
struct sof_ipc_comp {
struct sof_ipc_cmd_hdr hdr;
uint32_t id;
- enum sof_comp_type type;
+ uint32_t type;
uint32_t pipeline_id;
uint32_t core;
diff --git a/include/trace/events/page_pool.h b/include/trace/events/page_pool.h
index 543e54e432a1..31825ed30032 100644
--- a/include/trace/events/page_pool.h
+++ b/include/trace/events/page_pool.h
@@ -57,12 +57,12 @@ TRACE_EVENT(page_pool_state_release,
__entry->pool = pool;
__entry->netmem = (__force unsigned long)netmem;
__entry->release = release;
- __entry->pfn = netmem_to_pfn(netmem);
+ __entry->pfn = netmem_pfn_trace(netmem);
),
- TP_printk("page_pool=%p netmem=%p pfn=0x%lx release=%u",
+ TP_printk("page_pool=%p netmem=%p is_net_iov=%lu pfn=0x%lx release=%u",
__entry->pool, (void *)__entry->netmem,
- __entry->pfn, __entry->release)
+ __entry->netmem & NET_IOV, __entry->pfn, __entry->release)
);
TRACE_EVENT(page_pool_state_hold,
@@ -83,12 +83,12 @@ TRACE_EVENT(page_pool_state_hold,
__entry->pool = pool;
__entry->netmem = (__force unsigned long)netmem;
__entry->hold = hold;
- __entry->pfn = netmem_to_pfn(netmem);
+ __entry->pfn = netmem_pfn_trace(netmem);
),
- TP_printk("page_pool=%p netmem=%p pfn=0x%lx hold=%u",
+ TP_printk("page_pool=%p netmem=%p is_net_iov=%lu pfn=0x%lx hold=%u",
__entry->pool, (void *)__entry->netmem,
- __entry->pfn, __entry->hold)
+ __entry->netmem & NET_IOV, __entry->pfn, __entry->hold)
);
TRACE_EVENT(page_pool_update_nid,
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 1c8bd8e186b8..a27c4b619dff 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -91,10 +91,10 @@ DEFINE_RST_REASON(FN, FN)
TRACE_EVENT(tcp_send_reset,
TP_PROTO(const struct sock *sk,
- const struct sk_buff *skb,
+ const struct sk_buff *skb__nullable,
const enum sk_rst_reason reason),
- TP_ARGS(sk, skb, reason),
+ TP_ARGS(sk, skb__nullable, reason),
TP_STRUCT__entry(
__field(const void *, skbaddr)
@@ -106,7 +106,7 @@ TRACE_EVENT(tcp_send_reset,
),
TP_fast_assign(
- __entry->skbaddr = skb;
+ __entry->skbaddr = skb__nullable;
__entry->skaddr = sk;
/* Zero means unknown state. */
__entry->state = sk ? sk->sk_state : 0;
@@ -118,13 +118,13 @@ TRACE_EVENT(tcp_send_reset,
const struct inet_sock *inet = inet_sk(sk);
TP_STORE_ADDR_PORTS(__entry, inet, sk);
- } else if (skb) {
- const struct tcphdr *th = (const struct tcphdr *)skb->data;
+ } else if (skb__nullable) {
+ const struct tcphdr *th = (const struct tcphdr *)skb__nullable->data;
/*
* We should reverse the 4-tuple of skb, so later
* it can print the right flow direction of rst.
*/
- TP_STORE_ADDR_PORTS_SKB(skb, th, entry->daddr, entry->saddr);
+ TP_STORE_ADDR_PORTS_SKB(skb__nullable, th, entry->daddr, entry->saddr);
}
__entry->reason = reason;
),
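
For illustration only (not part of this patch): with the tracepoint argument renamed to skb__nullable (and the matching "__nullable" handling added to btf_ctx_access() later in this patch), a BPF program attached to this tracepoint sees the argument as PTR_MAYBE_NULL and must check it before any dereference. The sketch below assumes a typical libbpf/vmlinux.h build setup.

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("tp_btf/tcp_send_reset")
int BPF_PROG(on_tcp_send_reset, const struct sock *sk,
	     const struct sk_buff *skb, enum sk_rst_reason reason)
{
	/* skb maps to the tracepoint's skb__nullable argument, so the
	 * verifier rejects the program unless it is NULL-checked first.
	 */
	if (!skb)
		return 0;

	bpf_printk("tcp_send_reset reason=%d skb len=%u", reason, skb->len);
	return 0;
}
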
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 8ce8a39a1e5f..3b4e3e815602 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -135,6 +135,12 @@
#define SO_PASSPIDFD 76
#define SO_PEERPIDFD 77
+#define SO_DEVMEM_LINEAR 78
+#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR
+#define SO_DEVMEM_DMABUF 79
+#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF
+#define SO_DEVMEM_DONTNEED 80
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h
index 926b1deb1116..e23a7f9b0eac 100644
--- a/include/uapi/drm/panthor_drm.h
+++ b/include/uapi/drm/panthor_drm.h
@@ -692,7 +692,11 @@ enum drm_panthor_group_priority {
/** @PANTHOR_GROUP_PRIORITY_MEDIUM: Medium priority group. */
PANTHOR_GROUP_PRIORITY_MEDIUM,
- /** @PANTHOR_GROUP_PRIORITY_HIGH: High priority group. */
+ /**
+ * @PANTHOR_GROUP_PRIORITY_HIGH: High priority group.
+ *
+ * Requires CAP_SYS_NICE or DRM_MASTER.
+ */
PANTHOR_GROUP_PRIORITY_HIGH,
};
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index 8254c937c9f4..0eca95ccb41e 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -140,25 +140,6 @@
#endif /* _NETINET_IN_H */
-/* Coordinate with glibc netipx/ipx.h header. */
-#if defined(__NETIPX_IPX_H)
-
-#define __UAPI_DEF_SOCKADDR_IPX 0
-#define __UAPI_DEF_IPX_ROUTE_DEFINITION 0
-#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 0
-#define __UAPI_DEF_IPX_CONFIG_DATA 0
-#define __UAPI_DEF_IPX_ROUTE_DEF 0
-
-#else /* defined(__NETIPX_IPX_H) */
-
-#define __UAPI_DEF_SOCKADDR_IPX 1
-#define __UAPI_DEF_IPX_ROUTE_DEFINITION 1
-#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1
-#define __UAPI_DEF_IPX_CONFIG_DATA 1
-#define __UAPI_DEF_IPX_ROUTE_DEF 1
-
-#endif /* defined(__NETIPX_IPX_H) */
-
/* Definitions for xattr.h */
#if defined(_SYS_XATTR_H)
#define __UAPI_DEF_XATTR 0
@@ -240,23 +221,6 @@
#define __UAPI_DEF_IP6_MTUINFO 1
#endif
-/* Definitions for ipx.h */
-#ifndef __UAPI_DEF_SOCKADDR_IPX
-#define __UAPI_DEF_SOCKADDR_IPX 1
-#endif
-#ifndef __UAPI_DEF_IPX_ROUTE_DEFINITION
-#define __UAPI_DEF_IPX_ROUTE_DEFINITION 1
-#endif
-#ifndef __UAPI_DEF_IPX_INTERFACE_DEFINITION
-#define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1
-#endif
-#ifndef __UAPI_DEF_IPX_CONFIG_DATA
-#define __UAPI_DEF_IPX_CONFIG_DATA 1
-#endif
-#ifndef __UAPI_DEF_IPX_ROUTE_DEF
-#define __UAPI_DEF_IPX_ROUTE_DEF 1
-#endif
-
/* Definitions for xattr.h */
#ifndef __UAPI_DEF_XATTR
#define __UAPI_DEF_XATTR 1
diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h
index c0c8ec995b06..f0d3f268240d 100644
--- a/include/uapi/linux/mdio.h
+++ b/include/uapi/linux/mdio.h
@@ -23,6 +23,7 @@
#define MDIO_MMD_DTEXS 5 /* DTE Extender Sublayer */
#define MDIO_MMD_TC 6 /* Transmission Convergence */
#define MDIO_MMD_AN 7 /* Auto-Negotiation */
+#define MDIO_MMD_POWER_UNIT 13 /* PHY Power Unit */
#define MDIO_MMD_C22EXT 29 /* Clause 22 extension */
#define MDIO_MMD_VEND1 30 /* Vendor specific 1 */
#define MDIO_MMD_VEND2 31 /* Vendor specific 2 */
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index a2c66b3d7f0f..858339d1c1c4 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -32,8 +32,9 @@ enum {
SOF_TIMESTAMPING_OPT_TX_SWHW = (1<<14),
SOF_TIMESTAMPING_BIND_PHC = (1 << 15),
SOF_TIMESTAMPING_OPT_ID_TCP = (1 << 16),
+ SOF_TIMESTAMPING_OPT_RX_FILTER = (1 << 17),
- SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_ID_TCP,
+ SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_RX_FILTER,
SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) |
SOF_TIMESTAMPING_LAST
};
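
For illustration only (not part of this patch): a userspace sketch of requesting the new SOF_TIMESTAMPING_OPT_RX_FILTER bit together with the usual generation and reporting flags. The exact filtering semantics are described in the timestamping documentation updated by this series; error handling here is minimal.

#include <linux/net_tstamp.h>
#include <stdio.h>
#include <sys/socket.h>

static int enable_rx_sw_timestamps(int fd)
{
	unsigned int val = SOF_TIMESTAMPING_RX_SOFTWARE |
			   SOF_TIMESTAMPING_SOFTWARE |
			   SOF_TIMESTAMPING_OPT_RX_FILTER;

	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val))) {
		perror("setsockopt(SO_TIMESTAMPING)");
		return -1;
	}

	return 0;
}
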
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
index 43742ac5b00d..7c308f04e7a0 100644
--- a/include/uapi/linux/netdev.h
+++ b/include/uapi/linux/netdev.h
@@ -93,6 +93,7 @@ enum {
NETDEV_A_PAGE_POOL_INFLIGHT,
NETDEV_A_PAGE_POOL_INFLIGHT_MEM,
NETDEV_A_PAGE_POOL_DETACH_TIME,
+ NETDEV_A_PAGE_POOL_DMABUF,
__NETDEV_A_PAGE_POOL_MAX,
NETDEV_A_PAGE_POOL_MAX = (__NETDEV_A_PAGE_POOL_MAX - 1)
@@ -131,6 +132,7 @@ enum {
NETDEV_A_QUEUE_IFINDEX,
NETDEV_A_QUEUE_TYPE,
NETDEV_A_QUEUE_NAPI_ID,
+ NETDEV_A_QUEUE_DMABUF,
__NETDEV_A_QUEUE_MAX,
NETDEV_A_QUEUE_MAX = (__NETDEV_A_QUEUE_MAX - 1)
@@ -174,6 +176,16 @@ enum {
};
enum {
+ NETDEV_A_DMABUF_IFINDEX = 1,
+ NETDEV_A_DMABUF_QUEUES,
+ NETDEV_A_DMABUF_FD,
+ NETDEV_A_DMABUF_ID,
+
+ __NETDEV_A_DMABUF_MAX,
+ NETDEV_A_DMABUF_MAX = (__NETDEV_A_DMABUF_MAX - 1)
+};
+
+enum {
NETDEV_CMD_DEV_GET = 1,
NETDEV_CMD_DEV_ADD_NTF,
NETDEV_CMD_DEV_DEL_NTF,
@@ -186,6 +198,7 @@ enum {
NETDEV_CMD_QUEUE_GET,
NETDEV_CMD_NAPI_GET,
NETDEV_CMD_QSTATS_GET,
+ NETDEV_CMD_BIND_RX,
__NETDEV_CMD_MAX,
NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
diff --git a/include/uapi/linux/uio.h b/include/uapi/linux/uio.h
index 059b1a9147f4..649739e0c404 100644
--- a/include/uapi/linux/uio.h
+++ b/include/uapi/linux/uio.h
@@ -20,6 +20,24 @@ struct iovec
__kernel_size_t iov_len; /* Must be size_t (1003.1g) */
};
+struct dmabuf_cmsg {
+ __u64 frag_offset; /* offset into the dmabuf where the frag starts.
+ */
+ __u32 frag_size; /* size of the frag. */
+ __u32 frag_token; /* token representing this frag for
+ * DEVMEM_DONTNEED.
+ */
+ __u32 dmabuf_id; /* dmabuf id this frag belongs to. */
+ __u32 flags; /* Currently unused. Reserved for future
+ * uses.
+ */
+};
+
+struct dmabuf_token {
+ __u32 token_start;
+ __u32 token_count;
+};
+
/*
* UIO_MAXIOV shall be at least 16 1003.1g (5.4.1.1)
*/
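
For illustration only (not part of this patch): a userspace sketch of consuming these structures on a devmem TCP socket. The structs are re-declared locally only to keep the sketch self-contained (they mirror the uapi additions above), the SO_/SCM_DEVMEM fallbacks use the numbers added to asm-generic/socket.h earlier in this diff, and the MSG_SOCK_DEVMEM recvmsg flag and its value come from elsewhere in this series and are assumed here. A real application would keep the tokens until it has finished reading the fragments out of the dma-buf.

#include <linux/types.h>
#include <string.h>
#include <sys/socket.h>

/* Mirrors the structs added to include/uapi/linux/uio.h above. */
struct dmabuf_cmsg {
	__u64 frag_offset;
	__u32 frag_size;
	__u32 frag_token;
	__u32 dmabuf_id;
	__u32 flags;
};

struct dmabuf_token {
	__u32 token_start;
	__u32 token_count;
};

#ifndef SO_DEVMEM_DMABUF
#define SO_DEVMEM_DMABUF	79
#define SCM_DEVMEM_DMABUF	SO_DEVMEM_DMABUF
#define SO_DEVMEM_DONTNEED	80
#endif
#ifndef MSG_SOCK_DEVMEM
#define MSG_SOCK_DEVMEM		0x2000000	/* assumed; see the series' selftests */
#endif

static int my_recv_devmem(int fd)
{
	char linear[4096];
	char ctrl[CMSG_SPACE(sizeof(struct dmabuf_cmsg)) * 16];
	struct iovec iov = { .iov_base = linear, .iov_len = sizeof(linear) };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= ctrl,
		.msg_controllen	= sizeof(ctrl),
	};
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_SOCK_DEVMEM) < 0)
		return -1;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		struct dmabuf_cmsg dc;
		struct dmabuf_token tok;

		/* SCM_DEVMEM_LINEAR frags land in @linear; only dma-buf
		 * frags carry a token that must be returned to the kernel.
		 */
		if (cm->cmsg_level != SOL_SOCKET ||
		    cm->cmsg_type != SCM_DEVMEM_DMABUF)
			continue;

		memcpy(&dc, CMSG_DATA(cm), sizeof(dc));

		/* dc.frag_offset/dc.frag_size locate the payload inside the
		 * dma-buf identified by dc.dmabuf_id.
		 */
		tok.token_start = dc.frag_token;
		tok.token_count = 1;
		setsockopt(fd, SOL_SOCKET, SO_DEVMEM_DONTNEED, &tok,
			   sizeof(tok));
	}

	return 0;
}
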
diff --git a/include/uapi/sound/sof/abi.h b/include/uapi/sound/sof/abi.h
index 937ed9408c23..c1b158ec5dab 100644
--- a/include/uapi/sound/sof/abi.h
+++ b/include/uapi/sound/sof/abi.h
@@ -29,7 +29,7 @@
/* SOF ABI version major, minor and patch numbers */
#define SOF_ABI_MAJOR 3
#define SOF_ABI_MINOR 23
-#define SOF_ABI_PATCH 0
+#define SOF_ABI_PATCH 1
/* SOF ABI version number. Format within 32bit word is MMmmmppp */
#define SOF_ABI_MAJOR_SHIFT 24
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 520f49f422fe..ba91be08763a 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -823,9 +823,11 @@ static bool btf_name_valid_section(const struct btf *btf, u32 offset)
const char *src = btf_str_by_offset(btf, offset);
const char *src_limit;
+ if (!*src)
+ return false;
+
/* set a limit on identifier length */
src_limit = src + KSYM_NAME_LEN;
- src++;
while (*src && src < src_limit) {
if (!isprint(*src))
return false;
@@ -6283,7 +6285,7 @@ static struct btf *btf_parse_module(const char *module_name, const void *data,
errout:
btf_verifier_env_free(env);
- if (base_btf != vmlinux_btf)
+ if (!IS_ERR(base_btf) && base_btf != vmlinux_btf)
btf_free(base_btf);
if (btf) {
kvfree(btf->data);
@@ -6523,6 +6525,9 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
if (prog_args_trusted(prog))
info->reg_type |= PTR_TRUSTED;
+ if (btf_param_match_suffix(btf, &args[arg], "__nullable"))
+ info->reg_type |= PTR_MAYBE_NULL;
+
if (tgt_prog) {
enum bpf_prog_type tgt_type;
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index fbdf5a1aabfe..a2f46785ac3b 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -354,12 +354,14 @@ static int cpu_map_kthread_run(void *data)
list_add_tail(&skb->list, &list);
}
- netif_receive_skb_list(&list);
- /* Feedback loop via tracepoint */
+ /* Feedback loop via tracepoint.
+ * NB: keep before recv to allow measuring enqueue/dequeue latency.
+ */
trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops,
sched, &stats);
+ netif_receive_skb_list(&list);
local_bh_enable(); /* resched point, may call do_softirq() */
}
__set_current_state(TASK_RUNNING);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d8520095ca03..39d5710c68ad 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -28,6 +28,8 @@
#include <linux/cpumask.h>
#include <linux/bpf_mem_alloc.h>
#include <net/xdp.h>
+#include <linux/trace_events.h>
+#include <linux/kallsyms.h>
#include "disasm.h"
@@ -21154,11 +21156,13 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
{
bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
bool prog_tracing = prog->type == BPF_PROG_TYPE_TRACING;
+ char trace_symbol[KSYM_SYMBOL_LEN];
const char prefix[] = "btf_trace_";
+ struct bpf_raw_event_map *btp;
int ret = 0, subprog = -1, i;
const struct btf_type *t;
bool conservative = true;
- const char *tname;
+ const char *tname, *fname;
struct btf *btf;
long addr = 0;
struct module *mod = NULL;
@@ -21289,10 +21293,34 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
return -EINVAL;
}
tname += sizeof(prefix) - 1;
- t = btf_type_by_id(btf, t->type);
- if (!btf_type_is_ptr(t))
- /* should never happen in valid vmlinux build */
+
+ /* The func_proto of "btf_trace_##tname" is generated from typedef without argument
+ * names. Thus using bpf_raw_event_map to get argument names.
+ */
+ btp = bpf_get_raw_tracepoint(tname);
+ if (!btp)
return -EINVAL;
+ fname = kallsyms_lookup((unsigned long)btp->bpf_func, NULL, NULL, NULL,
+ trace_symbol);
+ bpf_put_raw_tracepoint(btp);
+
+ if (fname)
+ ret = btf_find_by_name_kind(btf, fname, BTF_KIND_FUNC);
+
+ if (!fname || ret < 0) {
+ bpf_log(log, "Cannot find btf of tracepoint template, fall back to %s%s.\n",
+ prefix, tname);
+ t = btf_type_by_id(btf, t->type);
+ if (!btf_type_is_ptr(t))
+ /* should never happen in valid vmlinux build */
+ return -EINVAL;
+ } else {
+ t = btf_type_by_id(btf, ret);
+ if (!btf_type_is_func(t))
+ /* should never happen in valid vmlinux build */
+ return -EINVAL;
+ }
+
t = btf_type_by_id(btf, t->type);
if (!btf_type_is_func_proto(t))
/* should never happen in valid vmlinux build */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index c973e3c11e03..8a6c6bbcd658 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1255,8 +1255,9 @@ static void put_ctx(struct perf_event_context *ctx)
* perf_event_context::mutex
* perf_event::child_mutex;
* perf_event_context::lock
- * perf_event::mmap_mutex
* mmap_lock
+ * perf_event::mmap_mutex
+ * perf_buffer::aux_mutex
* perf_addr_filters_head::lock
*
* cpu_hotplug_lock
@@ -6373,12 +6374,11 @@ static void perf_mmap_close(struct vm_area_struct *vma)
event->pmu->event_unmapped(event, vma->vm_mm);
/*
- * rb->aux_mmap_count will always drop before rb->mmap_count and
- * event->mmap_count, so it is ok to use event->mmap_mutex to
- * serialize with perf_mmap here.
+ * The AUX buffer is strictly a sub-buffer, serialize using aux_mutex
+ * to avoid complications.
*/
if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
- atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
+ atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &rb->aux_mutex)) {
/*
* Stop all AUX events that are writing to this buffer,
* so that we can free its AUX pages and corresponding PMU
@@ -6395,7 +6395,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
rb_free_aux(rb);
WARN_ON_ONCE(refcount_read(&rb->aux_refcount));
- mutex_unlock(&event->mmap_mutex);
+ mutex_unlock(&rb->aux_mutex);
}
if (atomic_dec_and_test(&rb->mmap_count))
@@ -6483,6 +6483,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
struct perf_event *event = file->private_data;
unsigned long user_locked, user_lock_limit;
struct user_struct *user = current_user();
+ struct mutex *aux_mutex = NULL;
struct perf_buffer *rb = NULL;
unsigned long locked, lock_limit;
unsigned long vma_size;
@@ -6531,6 +6532,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
if (!rb)
goto aux_unlock;
+ aux_mutex = &rb->aux_mutex;
+ mutex_lock(aux_mutex);
+
aux_offset = READ_ONCE(rb->user_page->aux_offset);
aux_size = READ_ONCE(rb->user_page->aux_size);
@@ -6681,6 +6685,8 @@ unlock:
atomic_dec(&rb->mmap_count);
}
aux_unlock:
+ if (aux_mutex)
+ mutex_unlock(aux_mutex);
mutex_unlock(&event->mmap_mutex);
/*
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 451514442a1b..e072d995d670 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -40,6 +40,7 @@ struct perf_buffer {
struct user_struct *mmap_user;
/* AUX area */
+ struct mutex aux_mutex;
long aux_head;
unsigned int aux_nest;
long aux_wakeup; /* last aux_watermark boundary crossed by aux_head */
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 8cadf97bc290..4f46f688d0d4 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -337,6 +337,8 @@ ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
*/
if (!rb->nr_pages)
rb->paused = 1;
+
+ mutex_init(&rb->aux_mutex);
}
void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 73cc47708679..50d7949be2b1 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1489,7 +1489,7 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
struct xol_area *area;
void *insns;
- area = kmalloc(sizeof(*area), GFP_KERNEL);
+ area = kzalloc(sizeof(*area), GFP_KERNEL);
if (unlikely(!area))
goto out;
@@ -1499,7 +1499,6 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
goto free_area;
area->xol_mapping.name = "[uprobes]";
- area->xol_mapping.fault = NULL;
area->xol_mapping.pages = area->pages;
area->pages[0] = alloc_page(GFP_HIGHUSER);
if (!area->pages[0])
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index c22b07049c38..6ff8d47e145f 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2620,6 +2620,7 @@ int match_devname_and_update_preferred_console(const char *devname,
return -ENOENT;
}
+EXPORT_SYMBOL_GPL(match_devname_and_update_preferred_console);
bool console_suspend_enabled = true;
EXPORT_SYMBOL(console_suspend_enabled);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index edf6bc817aa1..c3b2c7dfadef 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2226,10 +2226,6 @@ static __init int init_trace_selftests(void)
}
core_initcall(init_trace_selftests);
#else
-static inline int run_tracer_selftest(struct tracer *type)
-{
- return 0;
-}
static inline int do_run_tracer_selftest(struct tracer *type)
{
return 0;
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
index bbe47781617e..7e75c1214b36 100644
--- a/kernel/trace/trace_osnoise.c
+++ b/kernel/trace/trace_osnoise.c
@@ -228,6 +228,11 @@ static inline struct osnoise_variables *this_cpu_osn_var(void)
return this_cpu_ptr(&per_cpu_osnoise_var);
}
+/*
+ * Protect the interface.
+ */
+static struct mutex interface_lock;
+
#ifdef CONFIG_TIMERLAT_TRACER
/*
* Runtime information for the timer mode.
@@ -253,11 +258,6 @@ static inline struct timerlat_variables *this_cpu_tmr_var(void)
}
/*
- * Protect the interface.
- */
-static struct mutex interface_lock;
-
-/*
* tlat_var_reset - Reset the values of the given timerlat_variables
*/
static inline void tlat_var_reset(void)
diff --git a/mm/memory.c b/mm/memory.c
index 3c01d68065be..ebfc9768f801 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2632,11 +2632,7 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
return 0;
}
-/*
- * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
- * must have pre-validated the caching bits of the pgprot_t.
- */
-int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t prot)
{
pgd_t *pgd;
@@ -2689,6 +2685,27 @@ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
return 0;
}
+/*
+ * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
+ * must have pre-validated the caching bits of the pgprot_t.
+ */
+int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+ int error = remap_pfn_range_internal(vma, addr, pfn, size, prot);
+
+ if (!error)
+ return 0;
+
+ /*
+ * A partial pfn range mapping is dangerous: it does not
+ * maintain page reference counts, and callers may free
+ * pages due to the error. So zap it early.
+ */
+ zap_page_range_single(vma, addr, size, NULL);
+ return error;
+}
+
/**
* remap_pfn_range - remap kernel memory to userspace
* @vma: user vma to map to
diff --git a/net/Kconfig b/net/Kconfig
index d27d0deac0bf..a629f92dc86b 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -66,6 +66,12 @@ config SKB_DECRYPTED
config SKB_EXTENSIONS
bool
+config NET_DEVMEM
+ def_bool y
+ depends on DMA_SHARED_BUFFER
+ depends on GENERIC_ALLOCATOR
+ depends on PAGE_POOL
+
menu "Networking options"
source "net/packet/Kconfig"
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 47901bd4def1..94ad09e36df2 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -47,7 +47,6 @@ struct chnl_net {
struct caif_connect_request conn_req;
struct list_head list_field;
struct net_device *netdev;
- char name[256];
wait_queue_head_t netmgmt_wq;
/* Flow status to remember and control the transmission. */
bool flowenabled;
@@ -347,7 +346,6 @@ static int chnl_net_init(struct net_device *dev)
struct chnl_net *priv;
ASSERT_RTNL();
priv = netdev_priv(dev);
- strncpy(priv->name, dev->name, sizeof(priv->name));
INIT_LIST_HEAD(&priv->list_field);
return 0;
}
diff --git a/net/core/Makefile b/net/core/Makefile
index 62be9aef2528..c3ebbaf9c81e 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_NETDEV_ADDR_LIST_TEST) += dev_addr_lists_test.o
obj-y += net-sysfs.o
obj-y += hotdata.o
+obj-y += netdev_rx_queue.o
obj-$(CONFIG_PAGE_POOL) += page_pool.o page_pool_user.o
obj-$(CONFIG_PROC_FS) += net-procfs.o
obj-$(CONFIG_NET_PKTGEN) += pktgen.o
@@ -43,3 +44,4 @@ obj-$(CONFIG_BPF_SYSCALL) += sock_map.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o
obj-$(CONFIG_OF) += of_net.o
obj-$(CONFIG_NET_TEST) += net_test.o
+obj-$(CONFIG_NET_DEVMEM) += devmem.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index a40f733b37d7..f0693707aece 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -407,6 +407,9 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
return 0;
}
+ if (!skb_frags_readable(skb))
+ goto short_copy;
+
/* Copy paged appendix. Hmm... why does this look so complicated? */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
@@ -623,6 +626,9 @@ int zerocopy_fill_skb_from_iter(struct sk_buff *skb,
{
int frag = skb_shinfo(skb)->nr_frags;
+ if (!skb_frags_readable(skb))
+ return -EFAULT;
+
while (length && iov_iter_count(from)) {
struct page *head, *last_head = NULL;
struct page *pages[MAX_SKB_FRAGS];
diff --git a/net/core/dev.c b/net/core/dev.c
index 8f4dead64284..1e740faf9e78 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -161,6 +161,7 @@
#include <linux/phy_link_topology.h>
#include "dev.h"
+#include "devmem.h"
#include "net-sysfs.h"
static DEFINE_SPINLOCK(ptype_lock);
@@ -3311,6 +3312,10 @@ int skb_checksum_help(struct sk_buff *skb)
return -EINVAL;
}
+ if (!skb_frags_readable(skb)) {
+ return -EFAULT;
+ }
+
/* Before computing a checksum, we should make sure no frag could
* be modified by an external entity : checksum could be wrong.
*/
@@ -3433,8 +3438,9 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
if (!(dev->features & NETIF_F_HIGHDMA)) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ struct page *page = skb_frag_page(frag);
- if (PageHighMem(skb_frag_page(frag)))
+ if (page && PageHighMem(page))
return 1;
}
}
@@ -9367,6 +9373,11 @@ int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf)
if (!dev->netdev_ops->ndo_bpf)
return -EOPNOTSUPP;
+ if (dev_get_min_mp_channel_count(dev)) {
+ NL_SET_ERR_MSG(bpf->extack, "unable to propagate XDP to device using memory provider");
+ return -EBUSY;
+ }
+
return dev->netdev_ops->ndo_bpf(dev, bpf);
}
EXPORT_SYMBOL_GPL(dev_xdp_propagate);
@@ -9399,6 +9410,11 @@ static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
struct netdev_bpf xdp;
int err;
+ if (dev_get_min_mp_channel_count(dev)) {
+ NL_SET_ERR_MSG(extack, "unable to install XDP to device using memory provider");
+ return -EBUSY;
+ }
+
memset(&xdp, 0, sizeof(xdp));
xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
xdp.extack = extack;
@@ -9823,6 +9839,20 @@ err_out:
return err;
}
+u32 dev_get_min_mp_channel_count(const struct net_device *dev)
+{
+ int i;
+
+ ASSERT_RTNL();
+
+ for (i = dev->real_num_rx_queues - 1; i >= 0; i--)
+ if (dev->_rx[i].mp_params.mp_priv)
+ /* The channel count is the idx plus 1. */
+ return i + 1;
+
+ return 0;
+}
+
/**
* dev_index_reserve() - allocate an ifindex in a namespace
* @net: the applicable net namespace
@@ -11359,6 +11389,7 @@ void unregister_netdevice_many_notify(struct list_head *head,
dev_tcx_uninstall(dev);
dev_xdp_uninstall(dev);
bpf_dev_bound_netdev_unregister(dev);
+ dev_dmabuf_uninstall(dev);
netdev_offload_xstats_disable_all(dev);
diff --git a/net/core/devmem.c b/net/core/devmem.c
new file mode 100644
index 000000000000..11b91c12ee11
--- /dev/null
+++ b/net/core/devmem.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Devmem TCP
+ *
+ * Authors: Mina Almasry <[email protected]>
+ * Willem de Bruijn <[email protected]>
+ * Kaiyuan Zhang <[email protected]>
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/genalloc.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
+#include <net/page_pool/helpers.h>
+#include <trace/events/page_pool.h>
+
+#include "devmem.h"
+#include "mp_dmabuf_devmem.h"
+#include "page_pool_priv.h"
+
+/* Device memory support */
+
+/* Protected by rtnl_lock() */
+static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);
+
+static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool,
+ struct gen_pool_chunk *chunk,
+ void *not_used)
+{
+ struct dmabuf_genpool_chunk_owner *owner = chunk->owner;
+
+ kvfree(owner->niovs);
+ kfree(owner);
+}
+
+static dma_addr_t net_devmem_get_dma_addr(const struct net_iov *niov)
+{
+ struct dmabuf_genpool_chunk_owner *owner = net_iov_owner(niov);
+
+ return owner->base_dma_addr +
+ ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
+}
+
+void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
+{
+ size_t size, avail;
+
+ gen_pool_for_each_chunk(binding->chunk_pool,
+ net_devmem_dmabuf_free_chunk_owner, NULL);
+
+ size = gen_pool_size(binding->chunk_pool);
+ avail = gen_pool_avail(binding->chunk_pool);
+
+ if (!WARN(size != avail, "can't destroy genpool. size=%zu, avail=%zu",
+ size, avail))
+ gen_pool_destroy(binding->chunk_pool);
+
+ dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
+ DMA_FROM_DEVICE);
+ dma_buf_detach(binding->dmabuf, binding->attachment);
+ dma_buf_put(binding->dmabuf);
+ xa_destroy(&binding->bound_rxqs);
+ kfree(binding);
+}
+
+struct net_iov *
+net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
+{
+ struct dmabuf_genpool_chunk_owner *owner;
+ unsigned long dma_addr;
+ struct net_iov *niov;
+ ssize_t offset;
+ ssize_t index;
+
+ dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE,
+ (void **)&owner);
+ if (!dma_addr)
+ return NULL;
+
+ offset = dma_addr - owner->base_dma_addr;
+ index = offset / PAGE_SIZE;
+ niov = &owner->niovs[index];
+
+ niov->pp_magic = 0;
+ niov->pp = NULL;
+ atomic_long_set(&niov->pp_ref_count, 0);
+
+ return niov;
+}
+
+void net_devmem_free_dmabuf(struct net_iov *niov)
+{
+ struct net_devmem_dmabuf_binding *binding = net_iov_binding(niov);
+ unsigned long dma_addr = net_devmem_get_dma_addr(niov);
+
+ if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr,
+ PAGE_SIZE)))
+ return;
+
+ gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE);
+}
+
+void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
+{
+ struct netdev_rx_queue *rxq;
+ unsigned long xa_idx;
+ unsigned int rxq_idx;
+
+ if (binding->list.next)
+ list_del(&binding->list);
+
+ xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
+ WARN_ON(rxq->mp_params.mp_priv != binding);
+
+ rxq->mp_params.mp_priv = NULL;
+
+ rxq_idx = get_netdev_rx_queue_index(rxq);
+
+ WARN_ON(netdev_rx_queue_restart(binding->dev, rxq_idx));
+ }
+
+ xa_erase(&net_devmem_dmabuf_bindings, binding->id);
+
+ net_devmem_dmabuf_binding_put(binding);
+}
+
+int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
+ struct net_devmem_dmabuf_binding *binding,
+ struct netlink_ext_ack *extack)
+{
+ struct netdev_rx_queue *rxq;
+ u32 xa_idx;
+ int err;
+
+ if (rxq_idx >= dev->real_num_rx_queues) {
+ NL_SET_ERR_MSG(extack, "rx queue index out of range");
+ return -ERANGE;
+ }
+
+ rxq = __netif_get_rx_queue(dev, rxq_idx);
+ if (rxq->mp_params.mp_priv) {
+ NL_SET_ERR_MSG(extack, "designated queue already memory provider bound");
+ return -EEXIST;
+ }
+
+#ifdef CONFIG_XDP_SOCKETS
+ if (rxq->pool) {
+ NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
+ return -EBUSY;
+ }
+#endif
+
+ err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
+ GFP_KERNEL);
+ if (err)
+ return err;
+
+ rxq->mp_params.mp_priv = binding;
+
+ err = netdev_rx_queue_restart(dev, rxq_idx);
+ if (err)
+ goto err_xa_erase;
+
+ return 0;
+
+err_xa_erase:
+ rxq->mp_params.mp_priv = NULL;
+ xa_erase(&binding->bound_rxqs, xa_idx);
+
+ return err;
+}
+
+struct net_devmem_dmabuf_binding *
+net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
+ struct netlink_ext_ack *extack)
+{
+ struct net_devmem_dmabuf_binding *binding;
+ static u32 id_alloc_next;
+ struct scatterlist *sg;
+ struct dma_buf *dmabuf;
+ unsigned int sg_idx, i;
+ unsigned long virtual;
+ int err;
+
+ dmabuf = dma_buf_get(dmabuf_fd);
+ if (IS_ERR(dmabuf))
+ return ERR_CAST(dmabuf);
+
+ binding = kzalloc_node(sizeof(*binding), GFP_KERNEL,
+ dev_to_node(&dev->dev));
+ if (!binding) {
+ err = -ENOMEM;
+ goto err_put_dmabuf;
+ }
+
+ binding->dev = dev;
+
+ err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
+ binding, xa_limit_32b, &id_alloc_next,
+ GFP_KERNEL);
+ if (err < 0)
+ goto err_free_binding;
+
+ xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC);
+
+ refcount_set(&binding->ref, 1);
+
+ binding->dmabuf = dmabuf;
+
+ binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);
+ if (IS_ERR(binding->attachment)) {
+ err = PTR_ERR(binding->attachment);
+ NL_SET_ERR_MSG(extack, "Failed to bind dmabuf to device");
+ goto err_free_id;
+ }
+
+ binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
+ DMA_FROM_DEVICE);
+ if (IS_ERR(binding->sgt)) {
+ err = PTR_ERR(binding->sgt);
+ NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment");
+ goto err_detach;
+ }
+
+ /* For simplicity we expect to make PAGE_SIZE allocations, but the
+ * binding can be much more flexible than that. We may be able to
+ * allocate MTU sized chunks here. Leave that for future work...
+ */
+ binding->chunk_pool =
+ gen_pool_create(PAGE_SHIFT, dev_to_node(&dev->dev));
+ if (!binding->chunk_pool) {
+ err = -ENOMEM;
+ goto err_unmap;
+ }
+
+ virtual = 0;
+ for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) {
+ dma_addr_t dma_addr = sg_dma_address(sg);
+ struct dmabuf_genpool_chunk_owner *owner;
+ size_t len = sg_dma_len(sg);
+ struct net_iov *niov;
+
+ owner = kzalloc_node(sizeof(*owner), GFP_KERNEL,
+ dev_to_node(&dev->dev));
+ if (!owner) {
+ err = -ENOMEM;
+ goto err_free_chunks;
+ }
+
+ owner->base_virtual = virtual;
+ owner->base_dma_addr = dma_addr;
+ owner->num_niovs = len / PAGE_SIZE;
+ owner->binding = binding;
+
+ err = gen_pool_add_owner(binding->chunk_pool, dma_addr,
+ dma_addr, len, dev_to_node(&dev->dev),
+ owner);
+ if (err) {
+ kfree(owner);
+ err = -EINVAL;
+ goto err_free_chunks;
+ }
+
+ owner->niovs = kvmalloc_array(owner->num_niovs,
+ sizeof(*owner->niovs),
+ GFP_KERNEL);
+ if (!owner->niovs) {
+ err = -ENOMEM;
+ goto err_free_chunks;
+ }
+
+ for (i = 0; i < owner->num_niovs; i++) {
+ niov = &owner->niovs[i];
+ niov->owner = owner;
+ page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov),
+ net_devmem_get_dma_addr(niov));
+ }
+
+ virtual += len;
+ }
+
+ return binding;
+
+err_free_chunks:
+ gen_pool_for_each_chunk(binding->chunk_pool,
+ net_devmem_dmabuf_free_chunk_owner, NULL);
+ gen_pool_destroy(binding->chunk_pool);
+err_unmap:
+ dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
+ DMA_FROM_DEVICE);
+err_detach:
+ dma_buf_detach(dmabuf, binding->attachment);
+err_free_id:
+ xa_erase(&net_devmem_dmabuf_bindings, binding->id);
+err_free_binding:
+ kfree(binding);
+err_put_dmabuf:
+ dma_buf_put(dmabuf);
+ return ERR_PTR(err);
+}
+
+void dev_dmabuf_uninstall(struct net_device *dev)
+{
+ struct net_devmem_dmabuf_binding *binding;
+ struct netdev_rx_queue *rxq;
+ unsigned long xa_idx;
+ unsigned int i;
+
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ binding = dev->_rx[i].mp_params.mp_priv;
+ if (!binding)
+ continue;
+
+ xa_for_each(&binding->bound_rxqs, xa_idx, rxq)
+ if (rxq == &dev->_rx[i]) {
+ xa_erase(&binding->bound_rxqs, xa_idx);
+ break;
+ }
+ }
+}
+
+/*** "Dmabuf devmem memory provider" ***/
+
+int mp_dmabuf_devmem_init(struct page_pool *pool)
+{
+ struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
+
+ if (!binding)
+ return -EINVAL;
+
+ if (!pool->dma_map)
+ return -EOPNOTSUPP;
+
+ if (pool->dma_sync)
+ return -EOPNOTSUPP;
+
+ if (pool->p.order != 0)
+ return -E2BIG;
+
+ net_devmem_dmabuf_binding_get(binding);
+ return 0;
+}
+
+netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp)
+{
+ struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
+ struct net_iov *niov;
+ netmem_ref netmem;
+
+ niov = net_devmem_alloc_dmabuf(binding);
+ if (!niov)
+ return 0;
+
+ netmem = net_iov_to_netmem(niov);
+
+ page_pool_set_pp_info(pool, netmem);
+
+ pool->pages_state_hold_cnt++;
+ trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
+ return netmem;
+}
+
+void mp_dmabuf_devmem_destroy(struct page_pool *pool)
+{
+ struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
+
+ net_devmem_dmabuf_binding_put(binding);
+}
+
+bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
+{
+ long refcount = atomic_long_read(netmem_get_pp_ref_count_ref(netmem));
+
+ if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
+ return false;
+
+ if (WARN_ON_ONCE(refcount != 1))
+ return false;
+
+ page_pool_clear_pp_info(netmem);
+
+ net_devmem_free_dmabuf(netmem_to_net_iov(netmem));
+
+ /* We don't want the page pool put_page()ing our net_iovs. */
+ return false;
+}
diff --git a/net/core/devmem.h b/net/core/devmem.h
new file mode 100644
index 000000000000..76099ef9c482
--- /dev/null
+++ b/net/core/devmem.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Device memory TCP support
+ *
+ * Authors: Mina Almasry <[email protected]>
+ * Willem de Bruijn <[email protected]>
+ * Kaiyuan Zhang <[email protected]>
+ *
+ */
+#ifndef _NET_DEVMEM_H
+#define _NET_DEVMEM_H
+
+struct netlink_ext_ack;
+
+struct net_devmem_dmabuf_binding {
+ struct dma_buf *dmabuf;
+ struct dma_buf_attachment *attachment;
+ struct sg_table *sgt;
+ struct net_device *dev;
+ struct gen_pool *chunk_pool;
+
+ /* The user holds a ref (via the netlink API) for as long as they want
+ * the binding to remain alive. Each page pool using this binding holds
+ * a ref to keep the binding alive. Each allocated net_iov holds a
+ * ref.
+ *
+ * The binding undoes itself and unmaps the underlying dmabuf once all
+ * those refs are dropped and the binding is no longer desired or in
+ * use.
+ */
+ refcount_t ref;
+
+ /* The list of bindings currently active. Used for netlink to notify us
+ * of the user dropping the bind.
+ */
+ struct list_head list;
+
+ /* rxq's this binding is active on. */
+ struct xarray bound_rxqs;
+
+ /* ID of this binding. Globally unique to all bindings currently
+ * active.
+ */
+ u32 id;
+};
+
+#if defined(CONFIG_NET_DEVMEM)
+/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
+ * entry from the dmabuf is inserted into the genpool as a chunk, and needs
+ * this owner struct to keep track of some metadata necessary to create
+ * allocations from this chunk.
+ */
+struct dmabuf_genpool_chunk_owner {
+ /* Offset into the dma-buf where this chunk starts. */
+ unsigned long base_virtual;
+
+ /* dma_addr of the start of the chunk. */
+ dma_addr_t base_dma_addr;
+
+ /* Array of net_iovs for this chunk. */
+ struct net_iov *niovs;
+ size_t num_niovs;
+
+ struct net_devmem_dmabuf_binding *binding;
+};
+
+void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding);
+struct net_devmem_dmabuf_binding *
+net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
+ struct netlink_ext_ack *extack);
+void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
+int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
+ struct net_devmem_dmabuf_binding *binding,
+ struct netlink_ext_ack *extack);
+void dev_dmabuf_uninstall(struct net_device *dev);
+
+static inline struct dmabuf_genpool_chunk_owner *
+net_iov_owner(const struct net_iov *niov)
+{
+ return niov->owner;
+}
+
+static inline unsigned int net_iov_idx(const struct net_iov *niov)
+{
+ return niov - net_iov_owner(niov)->niovs;
+}
+
+static inline struct net_devmem_dmabuf_binding *
+net_iov_binding(const struct net_iov *niov)
+{
+ return net_iov_owner(niov)->binding;
+}
+
+static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
+{
+ struct dmabuf_genpool_chunk_owner *owner = net_iov_owner(niov);
+
+ return owner->base_virtual +
+ ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
+}
+
+static inline u32 net_iov_binding_id(const struct net_iov *niov)
+{
+ return net_iov_owner(niov)->binding->id;
+}
+
+static inline void
+net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
+{
+ refcount_inc(&binding->ref);
+}
+
+static inline void
+net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
+{
+ if (!refcount_dec_and_test(&binding->ref))
+ return;
+
+ __net_devmem_dmabuf_binding_free(binding);
+}
+
+struct net_iov *
+net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
+void net_devmem_free_dmabuf(struct net_iov *ppiov);
+
+#else
+struct net_devmem_dmabuf_binding;
+
+static inline void
+__net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
+{
+}
+
+static inline struct net_devmem_dmabuf_binding *
+net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
+ struct netlink_ext_ack *extack)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void
+net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
+{
+}
+
+static inline int
+net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
+ struct net_devmem_dmabuf_binding *binding,
+ struct netlink_ext_ack *extack)
+
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void dev_dmabuf_uninstall(struct net_device *dev)
+{
+}
+
+static inline struct net_iov *
+net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
+{
+ return NULL;
+}
+
+static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
+{
+}
+
+static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
+{
+ return 0;
+}
+
+static inline u32 net_iov_binding_id(const struct net_iov *niov)
+{
+ return 0;
+}
+#endif
+
+#endif /* _NET_DEVMEM_H */
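The helpers above recover a net_iov's place in its dma-buf chunk purely by pointer arithmetic: net_iov_idx() is the distance from the owner's niovs array, and net_iov_virtual_addr() adds that index, shifted by PAGE_SHIFT, to the chunk's base offset. A minimal standalone sketch of the same arithmetic, using simplified stand-in structs and made-up values rather than the kernel types:

#include <stdio.h>

#define PAGE_SHIFT 12

/* Simplified stand-ins for dmabuf_genpool_chunk_owner and net_iov. */
struct owner { unsigned long base_virtual; struct niov *niovs; };
struct niov  { struct owner *owner; };

static unsigned long virtual_addr(const struct niov *n)
{
	unsigned long idx = n - n->owner->niovs;	/* net_iov_idx() */

	return n->owner->base_virtual + (idx << PAGE_SHIFT);
}

int main(void)
{
	struct niov niovs[4];
	struct owner chunk = { .base_virtual = 0x40000, .niovs = niovs };
	int i;

	for (i = 0; i < 4; i++)
		niovs[i].owner = &chunk;

	/* niov 2 sits two pages past the chunk's base: 0x40000 + 2 * 4096. */
	printf("0x%lx\n", virtual_addr(&niovs[2]));	/* 0x42000 */
	return 0;
}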
diff --git a/net/core/filter.c b/net/core/filter.c
index 8569cd2482ee..0f4d9f3b206e 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -12063,7 +12063,7 @@ int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
}
BTF_KFUNCS_START(bpf_kfunc_check_set_skb)
-BTF_ID_FLAGS(func, bpf_dynptr_from_skb)
+BTF_ID_FLAGS(func, bpf_dynptr_from_skb, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(bpf_kfunc_check_set_skb)
BTF_KFUNCS_START(bpf_kfunc_check_set_xdp)
@@ -12112,6 +12112,7 @@ static int __init bpf_kfunc_init(void)
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_XMIT, &bpf_kfunc_set_skb);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_SEG6LOCAL, &bpf_kfunc_set_skb);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_NETFILTER, &bpf_kfunc_set_skb);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_kfunc_set_skb);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &bpf_kfunc_set_xdp);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
&bpf_kfunc_set_sock_addr);
diff --git a/net/core/gro.c b/net/core/gro.c
index 3abad1b567dd..802b4a062400 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -408,7 +408,8 @@ static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
pinfo = skb_shinfo(skb);
frag0 = &pinfo->frags[0];
- if (pinfo->nr_frags && !PageHighMem(skb_frag_page(frag0)) &&
+ if (pinfo->nr_frags && skb_frag_page(frag0) &&
+ !PageHighMem(skb_frag_page(frag0)) &&
(!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
diff --git a/net/core/mp_dmabuf_devmem.h b/net/core/mp_dmabuf_devmem.h
new file mode 100644
index 000000000000..67cd0dd7319c
--- /dev/null
+++ b/net/core/mp_dmabuf_devmem.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Dmabuf device memory provider.
+ *
+ * Authors: Mina Almasry <[email protected]>
+ *
+ */
+#ifndef _NET_MP_DMABUF_DEVMEM_H
+#define _NET_MP_DMABUF_DEVMEM_H
+
+#include <net/netmem.h>
+
+#if defined(CONFIG_NET_DEVMEM)
+int mp_dmabuf_devmem_init(struct page_pool *pool);
+
+netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp);
+
+void mp_dmabuf_devmem_destroy(struct page_pool *pool);
+
+bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem);
+#else
+static inline int mp_dmabuf_devmem_init(struct page_pool *pool)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline netmem_ref
+mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp)
+{
+ return 0;
+}
+
+static inline void mp_dmabuf_devmem_destroy(struct page_pool *pool)
+{
+}
+
+static inline bool
+mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
+{
+ return false;
+}
+#endif
+
+#endif /* _NET_MP_DMABUF_DEVMEM_H */
diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c
index 8350a0afa9ec..b28424ae06d5 100644
--- a/net/core/netdev-genl-gen.c
+++ b/net/core/netdev-genl-gen.c
@@ -9,6 +9,7 @@
#include "netdev-genl-gen.h"
#include <uapi/linux/netdev.h>
+#include <linux/list.h>
/* Integer value ranges */
static const struct netlink_range_validation netdev_a_page_pool_id_range = {
@@ -27,6 +28,11 @@ const struct nla_policy netdev_page_pool_info_nl_policy[NETDEV_A_PAGE_POOL_IFIND
[NETDEV_A_PAGE_POOL_IFINDEX] = NLA_POLICY_FULL_RANGE(NLA_U32, &netdev_a_page_pool_ifindex_range),
};
+const struct nla_policy netdev_queue_id_nl_policy[NETDEV_A_QUEUE_TYPE + 1] = {
+ [NETDEV_A_QUEUE_ID] = { .type = NLA_U32, },
+ [NETDEV_A_QUEUE_TYPE] = NLA_POLICY_MAX(NLA_U32, 1),
+};
+
/* NETDEV_CMD_DEV_GET - do */
static const struct nla_policy netdev_dev_get_nl_policy[NETDEV_A_DEV_IFINDEX + 1] = {
[NETDEV_A_DEV_IFINDEX] = NLA_POLICY_MIN(NLA_U32, 1),
@@ -74,6 +80,13 @@ static const struct nla_policy netdev_qstats_get_nl_policy[NETDEV_A_QSTATS_SCOPE
[NETDEV_A_QSTATS_SCOPE] = NLA_POLICY_MASK(NLA_UINT, 0x1),
};
+/* NETDEV_CMD_BIND_RX - do */
+static const struct nla_policy netdev_bind_rx_nl_policy[NETDEV_A_DMABUF_FD + 1] = {
+ [NETDEV_A_DMABUF_IFINDEX] = NLA_POLICY_MIN(NLA_U32, 1),
+ [NETDEV_A_DMABUF_FD] = { .type = NLA_U32, },
+ [NETDEV_A_DMABUF_QUEUES] = NLA_POLICY_NESTED(netdev_queue_id_nl_policy),
+};
+
/* Ops table for netdev */
static const struct genl_split_ops netdev_nl_ops[] = {
{
@@ -151,6 +164,13 @@ static const struct genl_split_ops netdev_nl_ops[] = {
.maxattr = NETDEV_A_QSTATS_SCOPE,
.flags = GENL_CMD_CAP_DUMP,
},
+ {
+ .cmd = NETDEV_CMD_BIND_RX,
+ .doit = netdev_nl_bind_rx_doit,
+ .policy = netdev_bind_rx_nl_policy,
+ .maxattr = NETDEV_A_DMABUF_FD,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
};
static const struct genl_multicast_group netdev_nl_mcgrps[] = {
@@ -168,4 +188,7 @@ struct genl_family netdev_nl_family __ro_after_init = {
.n_split_ops = ARRAY_SIZE(netdev_nl_ops),
.mcgrps = netdev_nl_mcgrps,
.n_mcgrps = ARRAY_SIZE(netdev_nl_mcgrps),
+ .sock_priv_size = sizeof(struct list_head),
+ .sock_priv_init = (void *)netdev_nl_sock_priv_init,
+ .sock_priv_destroy = (void *)netdev_nl_sock_priv_destroy,
};
diff --git a/net/core/netdev-genl-gen.h b/net/core/netdev-genl-gen.h
index 4db40fd5b4a9..8cda334fd042 100644
--- a/net/core/netdev-genl-gen.h
+++ b/net/core/netdev-genl-gen.h
@@ -10,9 +10,11 @@
#include <net/genetlink.h>
#include <uapi/linux/netdev.h>
+#include <linux/list.h>
/* Common nested types */
extern const struct nla_policy netdev_page_pool_info_nl_policy[NETDEV_A_PAGE_POOL_IFINDEX + 1];
+extern const struct nla_policy netdev_queue_id_nl_policy[NETDEV_A_QUEUE_TYPE + 1];
int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info);
int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
@@ -30,6 +32,7 @@ int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info);
int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
+int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info);
enum {
NETDEV_NLGRP_MGMT,
@@ -38,4 +41,7 @@ enum {
extern struct genl_family netdev_nl_family;
+void netdev_nl_sock_priv_init(struct list_head *priv);
+void netdev_nl_sock_priv_destroy(struct list_head *priv);
+
#endif /* _LINUX_NETDEV_GEN_H */
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index a17d7eaeb001..1cb954f2d39e 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -3,16 +3,17 @@
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
+#include <net/busy_poll.h>
#include <net/net_namespace.h>
+#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/xdp_sock.h>
-#include <net/netdev_rx_queue.h>
-#include <net/netdev_queues.h>
-#include <net/busy_poll.h>
-#include "netdev-genl-gen.h"
#include "dev.h"
+#include "devmem.h"
+#include "netdev-genl-gen.h"
struct netdev_nl_dump_ctx {
unsigned long ifindex;
@@ -294,6 +295,7 @@ static int
netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
u32 q_idx, u32 q_type, const struct genl_info *info)
{
+ struct net_devmem_dmabuf_binding *binding;
struct netdev_rx_queue *rxq;
struct netdev_queue *txq;
void *hdr;
@@ -313,6 +315,12 @@ netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
rxq->napi->napi_id))
goto nla_put_failure;
+
+ binding = rxq->mp_params.mp_priv;
+ if (binding &&
+ nla_put_u32(rsp, NETDEV_A_QUEUE_DMABUF, binding->id))
+ goto nla_put_failure;
+
break;
case NETDEV_QUEUE_TYPE_TX:
txq = netdev_get_tx_queue(netdev, q_idx);
@@ -723,6 +731,129 @@ int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
return err;
}
+int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
+ struct net_devmem_dmabuf_binding *binding;
+ struct list_head *sock_binding_list;
+ u32 ifindex, dmabuf_fd, rxq_idx;
+ struct net_device *netdev;
+ struct sk_buff *rsp;
+ struct nlattr *attr;
+ int rem, err = 0;
+ void *hdr;
+
+ if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
+ GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD) ||
+ GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_QUEUES))
+ return -EINVAL;
+
+ ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
+ dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]);
+
+ sock_binding_list = genl_sk_priv_get(&netdev_nl_family,
+ NETLINK_CB(skb).sk);
+ if (IS_ERR(sock_binding_list))
+ return PTR_ERR(sock_binding_list);
+
+ rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!rsp)
+ return -ENOMEM;
+
+ hdr = genlmsg_iput(rsp, info);
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto err_genlmsg_free;
+ }
+
+ rtnl_lock();
+
+ netdev = __dev_get_by_index(genl_info_net(info), ifindex);
+ if (!netdev || !netif_device_present(netdev)) {
+ err = -ENODEV;
+ goto err_unlock;
+ }
+
+ if (dev_xdp_prog_count(netdev)) {
+ NL_SET_ERR_MSG(info->extack, "unable to bind dmabuf to device with XDP program attached");
+ err = -EEXIST;
+ goto err_unlock;
+ }
+
+ binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack);
+ if (IS_ERR(binding)) {
+ err = PTR_ERR(binding);
+ goto err_unlock;
+ }
+
+ nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
+ genlmsg_data(info->genlhdr),
+ genlmsg_len(info->genlhdr), rem) {
+ err = nla_parse_nested(
+ tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
+ netdev_queue_id_nl_policy, info->extack);
+ if (err < 0)
+ goto err_unbind;
+
+ if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
+ NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) {
+ err = -EINVAL;
+ goto err_unbind;
+ }
+
+ if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
+ NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
+ err = -EINVAL;
+ goto err_unbind;
+ }
+
+ rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);
+
+ err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding,
+ info->extack);
+ if (err)
+ goto err_unbind;
+ }
+
+ list_add(&binding->list, sock_binding_list);
+
+ nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id);
+ genlmsg_end(rsp, hdr);
+
+ err = genlmsg_reply(rsp, info);
+ if (err)
+ goto err_unbind;
+
+ rtnl_unlock();
+
+ return 0;
+
+err_unbind:
+ net_devmem_unbind_dmabuf(binding);
+err_unlock:
+ rtnl_unlock();
+err_genlmsg_free:
+ nlmsg_free(rsp);
+ return err;
+}
+
+void netdev_nl_sock_priv_init(struct list_head *priv)
+{
+ INIT_LIST_HEAD(priv);
+}
+
+void netdev_nl_sock_priv_destroy(struct list_head *priv)
+{
+ struct net_devmem_dmabuf_binding *binding;
+ struct net_devmem_dmabuf_binding *temp;
+
+ list_for_each_entry_safe(binding, temp, priv, list) {
+ rtnl_lock();
+ net_devmem_unbind_dmabuf(binding);
+ rtnl_unlock();
+ }
+}
+
static int netdev_genl_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c
new file mode 100644
index 000000000000..e217a5838c87
--- /dev/null
+++ b/net/core/netdev_rx_queue.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/netdevice.h>
+#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
+
+#include "page_pool_priv.h"
+
+int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
+{
+ struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx);
+ void *new_mem, *old_mem;
+ int err;
+
+ if (!dev->queue_mgmt_ops || !dev->queue_mgmt_ops->ndo_queue_stop ||
+ !dev->queue_mgmt_ops->ndo_queue_mem_free ||
+ !dev->queue_mgmt_ops->ndo_queue_mem_alloc ||
+ !dev->queue_mgmt_ops->ndo_queue_start)
+ return -EOPNOTSUPP;
+
+ ASSERT_RTNL();
+
+ new_mem = kvzalloc(dev->queue_mgmt_ops->ndo_queue_mem_size, GFP_KERNEL);
+ if (!new_mem)
+ return -ENOMEM;
+
+ old_mem = kvzalloc(dev->queue_mgmt_ops->ndo_queue_mem_size, GFP_KERNEL);
+ if (!old_mem) {
+ err = -ENOMEM;
+ goto err_free_new_mem;
+ }
+
+ err = dev->queue_mgmt_ops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx);
+ if (err)
+ goto err_free_old_mem;
+
+ err = page_pool_check_memory_provider(dev, rxq);
+ if (err)
+ goto err_free_new_queue_mem;
+
+ err = dev->queue_mgmt_ops->ndo_queue_stop(dev, old_mem, rxq_idx);
+ if (err)
+ goto err_free_new_queue_mem;
+
+ err = dev->queue_mgmt_ops->ndo_queue_start(dev, new_mem, rxq_idx);
+ if (err)
+ goto err_start_queue;
+
+ dev->queue_mgmt_ops->ndo_queue_mem_free(dev, old_mem);
+
+ kvfree(old_mem);
+ kvfree(new_mem);
+
+ return 0;
+
+err_start_queue:
+ /* Restarting the queue with old_mem should be successful as we haven't
+ * changed any of the queue configuration, and there is not much we can
+ * do to recover from a failure here.
+ *
+ * WARN if we fail to recover the old rx queue, and at least free
+ * old_mem so we don't also leak that.
+ */
+ if (dev->queue_mgmt_ops->ndo_queue_start(dev, old_mem, rxq_idx)) {
+ WARN(1,
+ "Failed to restart old queue in error path. RX queue %d may be unhealthy.",
+ rxq_idx);
+ dev->queue_mgmt_ops->ndo_queue_mem_free(dev, old_mem);
+ }
+
+err_free_new_queue_mem:
+ dev->queue_mgmt_ops->ndo_queue_mem_free(dev, new_mem);
+
+err_free_old_mem:
+ kvfree(old_mem);
+
+err_free_new_mem:
+ kvfree(new_mem);
+
+ return err;
+}
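netdev_rx_queue_restart() above only drives callbacks that the driver supplies through dev->queue_mgmt_ops; the shape of those callbacks can be read off the call sites. A minimal driver-side sketch, assuming a hypothetical foo driver and per-queue state struct, with the ndo_* signatures inferred from how they are invoked above rather than quoted from netdev_queues.h:

/* Hypothetical per-queue memory blob; real drivers keep rings, a
 * page_pool pointer, etc. in here.
 */
struct foo_rxq_mem { void *rings; };

static int foo_queue_mem_alloc(struct net_device *dev, void *mem, int idx)
{
	/* Allocate resources for RX queue @idx into @mem; no HW touched yet. */
	return 0;
}

static void foo_queue_mem_free(struct net_device *dev, void *mem)
{
	/* Release whatever foo_queue_mem_alloc() (or a stop) left in @mem. */
}

static int foo_queue_start(struct net_device *dev, void *mem, int idx)
{
	/* Attach @mem to HW queue @idx and (re)enable it. */
	return 0;
}

static int foo_queue_stop(struct net_device *dev, void *mem, int idx)
{
	/* Quiesce HW queue @idx and save its current state into @mem. */
	return 0;
}

static const struct netdev_queue_mgmt_ops foo_queue_mgmt_ops = {
	.ndo_queue_mem_size	= sizeof(struct foo_rxq_mem),
	.ndo_queue_mem_alloc	= foo_queue_mem_alloc,
	.ndo_queue_mem_free	= foo_queue_mem_free,
	.ndo_queue_start	= foo_queue_start,
	.ndo_queue_stop		= foo_queue_stop,
};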
diff --git a/net/core/netmem_priv.h b/net/core/netmem_priv.h
new file mode 100644
index 000000000000..7eadb8393e00
--- /dev/null
+++ b/net/core/netmem_priv.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __NETMEM_PRIV_H
+#define __NETMEM_PRIV_H
+
+static inline unsigned long netmem_get_pp_magic(netmem_ref netmem)
+{
+ return __netmem_clear_lsb(netmem)->pp_magic;
+}
+
+static inline void netmem_or_pp_magic(netmem_ref netmem, unsigned long pp_magic)
+{
+ __netmem_clear_lsb(netmem)->pp_magic |= pp_magic;
+}
+
+static inline void netmem_clear_pp_magic(netmem_ref netmem)
+{
+ __netmem_clear_lsb(netmem)->pp_magic = 0;
+}
+
+static inline void netmem_set_pp(netmem_ref netmem, struct page_pool *pool)
+{
+ __netmem_clear_lsb(netmem)->pp = pool;
+}
+
+static inline void netmem_set_dma_addr(netmem_ref netmem,
+ unsigned long dma_addr)
+{
+ __netmem_clear_lsb(netmem)->dma_addr = dma_addr;
+}
+#endif
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 2abe6e919224..a813d30d2135 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/device.h>
+#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>
@@ -24,8 +25,12 @@
#include <trace/events/page_pool.h>
+#include "mp_dmabuf_devmem.h"
+#include "netmem_priv.h"
#include "page_pool_priv.h"
+DEFINE_STATIC_KEY_FALSE(page_pool_mem_providers);
+
#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)
@@ -187,6 +192,8 @@ static int page_pool_init(struct page_pool *pool,
int cpuid)
{
unsigned int ring_qsize = 1024; /* Default */
+ struct netdev_rx_queue *rxq;
+ int err;
page_pool_struct_check();
@@ -268,7 +275,37 @@ static int page_pool_init(struct page_pool *pool,
if (pool->dma_map)
get_device(pool->p.dev);
+ if (pool->slow.flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM) {
+ /* We rely on rtnl_lock()ing to make sure netdev_rx_queue
+ * configuration doesn't change while we're initializing
+ * the page_pool.
+ */
+ ASSERT_RTNL();
+ rxq = __netif_get_rx_queue(pool->slow.netdev,
+ pool->slow.queue_idx);
+ pool->mp_priv = rxq->mp_params.mp_priv;
+ }
+
+ if (pool->mp_priv) {
+ err = mp_dmabuf_devmem_init(pool);
+ if (err) {
+ pr_warn("%s() mem-provider init failed %d\n", __func__,
+ err);
+ goto free_ptr_ring;
+ }
+
+ static_branch_inc(&page_pool_mem_providers);
+ }
+
return 0;
+
+free_ptr_ring:
+ ptr_ring_cleanup(&pool->ring, NULL);
+#ifdef CONFIG_PAGE_POOL_STATS
+ if (!pool->system)
+ free_percpu(pool->recycle_stats);
+#endif
+ return err;
}
static void page_pool_uninit(struct page_pool *pool)
@@ -358,7 +395,7 @@ static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool)
if (unlikely(!netmem))
break;
- if (likely(page_to_nid(netmem_to_page(netmem)) == pref_nid)) {
+ if (likely(netmem_is_pref_nid(netmem, pref_nid))) {
pool->alloc.cache[pool->alloc.count++] = netmem;
} else {
/* NUMA mismatch;
@@ -452,32 +489,6 @@ unmap_failed:
return false;
}
-static void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem)
-{
- struct page *page = netmem_to_page(netmem);
-
- page->pp = pool;
- page->pp_magic |= PP_SIGNATURE;
-
- /* Ensuring all pages have been split into one fragment initially:
- * page_pool_set_pp_info() is only called once for every page when it
- * is allocated from the page allocator and page_pool_fragment_page()
- * is dirtying the same cache line as the page->pp_magic above, so
- * the overhead is negligible.
- */
- page_pool_fragment_netmem(netmem, 1);
- if (pool->has_init_callback)
- pool->slow.init_callback(netmem, pool->slow.init_arg);
-}
-
-static void page_pool_clear_pp_info(netmem_ref netmem)
-{
- struct page *page = netmem_to_page(netmem);
-
- page->pp_magic = 0;
- page->pp = NULL;
-}
-
static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
gfp_t gfp)
{
@@ -573,7 +584,10 @@ netmem_ref page_pool_alloc_netmem(struct page_pool *pool, gfp_t gfp)
return netmem;
/* Slow-path: cache empty, do real allocation */
- netmem = __page_pool_alloc_pages_slow(pool, gfp);
+ if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_priv)
+ netmem = mp_dmabuf_devmem_alloc_netmems(pool, gfp);
+ else
+ netmem = __page_pool_alloc_pages_slow(pool, gfp);
return netmem;
}
EXPORT_SYMBOL(page_pool_alloc_netmem);
@@ -609,6 +623,28 @@ s32 page_pool_inflight(const struct page_pool *pool, bool strict)
return inflight;
}
+void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem)
+{
+ netmem_set_pp(netmem, pool);
+ netmem_or_pp_magic(netmem, PP_SIGNATURE);
+
+ /* Ensuring all pages have been split into one fragment initially:
+ * page_pool_set_pp_info() is only called once for every page when it
+ * is allocated from the page allocator and page_pool_fragment_page()
+ * is dirtying the same cache line as the page->pp_magic above, so
+ * the overhead is negligible.
+ */
+ page_pool_fragment_netmem(netmem, 1);
+ if (pool->has_init_callback)
+ pool->slow.init_callback(netmem, pool->slow.init_arg);
+}
+
+void page_pool_clear_pp_info(netmem_ref netmem)
+{
+ netmem_clear_pp_magic(netmem);
+ netmem_set_pp(netmem, NULL);
+}
+
static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
netmem_ref netmem)
{
@@ -637,8 +673,13 @@ static __always_inline void __page_pool_release_page_dma(struct page_pool *pool,
void page_pool_return_page(struct page_pool *pool, netmem_ref netmem)
{
int count;
+ bool put;
- __page_pool_release_page_dma(pool, netmem);
+ put = true;
+ if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_priv)
+ put = mp_dmabuf_devmem_release_page(pool, netmem);
+ else
+ __page_pool_release_page_dma(pool, netmem);
/* This may be the last page returned, releasing the pool, so
* it is not safe to reference pool afterwards.
@@ -646,8 +687,10 @@ void page_pool_return_page(struct page_pool *pool, netmem_ref netmem)
count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
trace_page_pool_state_release(pool, netmem, count);
- page_pool_clear_pp_info(netmem);
- put_page(netmem_to_page(netmem));
+ if (put) {
+ page_pool_clear_pp_info(netmem);
+ put_page(netmem_to_page(netmem));
+ }
/* An optimization would be to call __free_pages(page, pool->p.order)
* knowing page is not part of page-cache (thus avoiding a
* __page_cache_release() call).
@@ -692,8 +735,9 @@ static bool page_pool_recycle_in_cache(netmem_ref netmem,
static bool __page_pool_page_can_be_recycled(netmem_ref netmem)
{
- return page_ref_count(netmem_to_page(netmem)) == 1 &&
- !page_is_pfmemalloc(netmem_to_page(netmem));
+ return netmem_is_net_iov(netmem) ||
+ (page_ref_count(netmem_to_page(netmem)) == 1 &&
+ !page_is_pfmemalloc(netmem_to_page(netmem)));
}
/* If the page refcnt == 1, this will try to recycle the page.
@@ -728,6 +772,7 @@ __page_pool_put_page(struct page_pool *pool, netmem_ref netmem,
/* Page found as candidate for recycling */
return netmem;
}
+
/* Fallback/non-XDP mode: API user have elevated refcnt.
*
* Many drivers split up the page into fragments, and some
@@ -949,7 +994,7 @@ static void page_pool_empty_ring(struct page_pool *pool)
/* Empty recycle ring */
while ((netmem = (__force netmem_ref)ptr_ring_consume_bh(&pool->ring))) {
/* Verify the refcnt invariant of cached pages */
- if (!(page_ref_count(netmem_to_page(netmem)) == 1))
+ if (!(netmem_ref_count(netmem) == 1))
pr_crit("%s() page_pool refcnt %d violation\n",
__func__, netmem_ref_count(netmem));
@@ -964,6 +1009,12 @@ static void __page_pool_destroy(struct page_pool *pool)
page_pool_unlist(pool);
page_pool_uninit(pool);
+
+ if (pool->mp_priv) {
+ mp_dmabuf_devmem_destroy(pool);
+ static_branch_dec(&page_pool_mem_providers);
+ }
+
kfree(pool);
}
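For the memory-provider path above to kick in, the pool has to be created against a specific netdev RX queue with unreadable netmem explicitly allowed: page_pool_init() reads pool->slow.netdev, pool->slow.queue_idx and PP_FLAG_ALLOW_UNREADABLE_NETMEM from the creation parameters. A hedged driver-side sketch; the field names follow what page_pool_init() consumes above, the remaining values are illustrative:

/* Create a per-RX-queue pool that may hand out dmabuf net_iovs. */
static struct page_pool *foo_create_rx_pool(struct net_device *netdev,
					     struct device *dma_dev,
					     struct napi_struct *napi,
					     unsigned int rxq_idx)
{
	struct page_pool_params pp = {
		.order		= 0,
		.pool_size	= 1024,
		.nid		= NUMA_NO_NODE,
		.dev		= dma_dev,
		.napi		= napi,
		.dma_dir	= DMA_FROM_DEVICE,
		.netdev		= netdev,
		.queue_idx	= rxq_idx,
		/* Opt in to net_iov (unreadable) allocations for this queue. */
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_ALLOW_UNREADABLE_NETMEM,
	};

	return page_pool_create(&pp);
}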
diff --git a/net/core/page_pool_priv.h b/net/core/page_pool_priv.h
index 90665d40f1eb..57439787b9c2 100644
--- a/net/core/page_pool_priv.h
+++ b/net/core/page_pool_priv.h
@@ -3,10 +3,56 @@
#ifndef __PAGE_POOL_PRIV_H
#define __PAGE_POOL_PRIV_H
+#include <net/page_pool/helpers.h>
+
+#include "netmem_priv.h"
+
s32 page_pool_inflight(const struct page_pool *pool, bool strict);
int page_pool_list(struct page_pool *pool);
void page_pool_detached(struct page_pool *pool);
void page_pool_unlist(struct page_pool *pool);
+static inline bool
+page_pool_set_dma_addr_netmem(netmem_ref netmem, dma_addr_t addr)
+{
+ if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
+ netmem_set_dma_addr(netmem, addr >> PAGE_SHIFT);
+
+ /* We assume page alignment to shave off the bottom bits;
+ * if this "compression" doesn't work, we need to drop.
+ */
+ return addr != (dma_addr_t)netmem_get_dma_addr(netmem)
+ << PAGE_SHIFT;
+ }
+
+ netmem_set_dma_addr(netmem, addr);
+ return false;
+}
+
+static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+{
+ return page_pool_set_dma_addr_netmem(page_to_netmem(page), addr);
+}
+
+#if defined(CONFIG_PAGE_POOL)
+void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem);
+void page_pool_clear_pp_info(netmem_ref netmem);
+int page_pool_check_memory_provider(struct net_device *dev,
+ struct netdev_rx_queue *rxq);
+#else
+static inline void page_pool_set_pp_info(struct page_pool *pool,
+ netmem_ref netmem)
+{
+}
+static inline void page_pool_clear_pp_info(netmem_ref netmem)
+{
+}
+static inline int page_pool_check_memory_provider(struct net_device *dev,
+ struct netdev_rx_queue *rxq)
+{
+ return 0;
+}
+#endif
+
#endif
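page_pool_set_dma_addr_netmem() above handles 32-bit architectures with 64-bit DMA by storing the address right-shifted by PAGE_SHIFT and rejecting, via the round-trip comparison, any address that the shift cannot represent (unaligned or too large). A small standalone illustration of the same check with made-up addresses:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* Mimic storing a 64-bit DMA address in a 32-bit slot, page-shifted. */
static int set_dma_addr(uint32_t *slot, uint64_t addr)
{
	*slot = (uint32_t)(addr >> PAGE_SHIFT);

	/* Non-zero means the "compression" lost bits and must be rejected. */
	return addr != ((uint64_t)*slot << PAGE_SHIFT);
}

int main(void)
{
	uint32_t slot;

	printf("%d\n", set_dma_addr(&slot, 0x0000000123456000ULL)); /* 0: fits */
	printf("%d\n", set_dma_addr(&slot, 0x0001000000000000ULL)); /* 1: lost */
	return 0;
}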
diff --git a/net/core/page_pool_user.c b/net/core/page_pool_user.c
index 3a3277ba167b..48335766c1bf 100644
--- a/net/core/page_pool_user.c
+++ b/net/core/page_pool_user.c
@@ -4,10 +4,12 @@
#include <linux/netdevice.h>
#include <linux/xarray.h>
#include <net/net_debug.h>
-#include <net/page_pool/types.h>
+#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
+#include <net/page_pool/types.h>
#include <net/sock.h>
+#include "devmem.h"
#include "page_pool_priv.h"
#include "netdev-genl-gen.h"
@@ -212,6 +214,7 @@ static int
page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
const struct genl_info *info)
{
+ struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
size_t inflight, refsz;
void *hdr;
@@ -241,6 +244,9 @@ page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
pool->user.detach_time))
goto err_cancel;
+ if (binding && nla_put_u32(rsp, NETDEV_A_PAGE_POOL_DMABUF, binding->id))
+ goto err_cancel;
+
genlmsg_end(rsp, hdr);
return 0;
@@ -344,6 +350,30 @@ void page_pool_unlist(struct page_pool *pool)
mutex_unlock(&page_pools_lock);
}
+int page_pool_check_memory_provider(struct net_device *dev,
+ struct netdev_rx_queue *rxq)
+{
+ struct net_devmem_dmabuf_binding *binding = rxq->mp_params.mp_priv;
+ struct page_pool *pool;
+ struct hlist_node *n;
+
+ if (!binding)
+ return 0;
+
+ mutex_lock(&page_pools_lock);
+ hlist_for_each_entry_safe(pool, n, &dev->page_pools, user.list) {
+ if (pool->mp_priv != binding)
+ continue;
+
+ if (pool->slow.queue_idx == get_netdev_rx_queue_index(rxq)) {
+ mutex_unlock(&page_pools_lock);
+ return 0;
+ }
+ }
+ mutex_unlock(&page_pools_lock);
+ return -ENODATA;
+}
+
static void page_pool_unreg_netdev_wipe(struct net_device *netdev)
{
struct page_pool *pool;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a52638363ea5..74149dc4ee31 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -88,6 +88,7 @@
#include <linux/textsearch.h>
#include "dev.h"
+#include "netmem_priv.h"
#include "sock_destructor.h"
#ifdef CONFIG_SKB_EXTENSIONS
@@ -920,9 +921,9 @@ static void skb_clone_fraglist(struct sk_buff *skb)
skb_get(list);
}
-static bool is_pp_page(struct page *page)
+static bool is_pp_netmem(netmem_ref netmem)
{
- return (page->pp_magic & ~0x3UL) == PP_SIGNATURE;
+ return (netmem_get_pp_magic(netmem) & ~0x3UL) == PP_SIGNATURE;
}
int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
@@ -1020,9 +1021,7 @@ EXPORT_SYMBOL(skb_cow_data_for_xdp);
#if IS_ENABLED(CONFIG_PAGE_POOL)
bool napi_pp_put_page(netmem_ref netmem)
{
- struct page *page = netmem_to_page(netmem);
-
- page = compound_head(page);
+ netmem = netmem_compound_head(netmem);
/* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation
* in order to preserve any existing bits, such as bit 0 for the
@@ -1031,10 +1030,10 @@ bool napi_pp_put_page(netmem_ref netmem)
* and page_is_pfmemalloc() is checked in __page_pool_put_page()
* to avoid recycling the pfmemalloc page.
*/
- if (unlikely(!is_pp_page(page)))
+ if (unlikely(!is_pp_netmem(netmem)))
return false;
- page_pool_put_full_netmem(page->pp, page_to_netmem(page), false);
+ page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, false);
return true;
}
@@ -1061,7 +1060,7 @@ static bool skb_pp_recycle(struct sk_buff *skb, void *data)
static int skb_pp_frag_ref(struct sk_buff *skb)
{
struct skb_shared_info *shinfo;
- struct page *head_page;
+ netmem_ref head_netmem;
int i;
if (!skb->pp_recycle)
@@ -1070,11 +1069,11 @@ static int skb_pp_frag_ref(struct sk_buff *skb)
shinfo = skb_shinfo(skb);
for (i = 0; i < shinfo->nr_frags; i++) {
- head_page = compound_head(skb_frag_page(&shinfo->frags[i]));
- if (likely(is_pp_page(head_page)))
- page_pool_ref_page(head_page);
+ head_netmem = netmem_compound_head(shinfo->frags[i].netmem);
+ if (likely(is_pp_netmem(head_netmem)))
+ page_pool_ref_netmem(head_netmem);
else
- page_ref_inc(head_page);
+ page_ref_inc(netmem_to_page(head_netmem));
}
return 0;
}
@@ -1372,6 +1371,14 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
struct page *p;
u8 *vaddr;
+ if (skb_frag_is_net_iov(frag)) {
+ printk("%sskb frag %d: not readable\n", level, i);
+ len -= skb_frag_size(frag);
+ if (!len)
+ break;
+ continue;
+ }
+
skb_frag_foreach_page(frag, skb_frag_off(frag),
skb_frag_size(frag), p, p_off, p_len,
copied) {
@@ -1965,6 +1972,9 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
return -EINVAL;
+ if (!skb_frags_readable(skb))
+ return -EFAULT;
+
if (!num_frags)
goto release;
@@ -2138,6 +2148,9 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
unsigned int size;
int headerlen;
+ if (!skb_frags_readable(skb))
+ return NULL;
+
if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
return NULL;
@@ -2476,6 +2489,9 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
struct sk_buff *n;
int oldheadroom;
+ if (!skb_frags_readable(skb))
+ return NULL;
+
if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
return NULL;
@@ -2820,6 +2836,9 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
*/
int i, k, eat = (skb->tail + delta) - skb->end;
+ if (!skb_frags_readable(skb))
+ return NULL;
+
if (eat > 0 || skb_cloned(skb)) {
if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
GFP_ATOMIC))
@@ -2973,6 +2992,9 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
to += copy;
}
+ if (!skb_frags_readable(skb))
+ goto fault;
+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
@@ -3161,9 +3183,15 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
/*
* then map the fragments
*/
+ if (!skb_frags_readable(skb))
+ return false;
+
for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
+ if (WARN_ON_ONCE(!skb_frag_page(f)))
+ return false;
+
if (__splice_segment(skb_frag_page(f),
skb_frag_off(f), skb_frag_size(f),
offset, len, spd, false, sk, pipe))
@@ -3381,6 +3409,9 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
from += copy;
}
+ if (!skb_frags_readable(skb))
+ goto fault;
+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int end;
@@ -3460,6 +3491,9 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
pos = copy;
}
+ if (WARN_ON_ONCE(!skb_frags_readable(skb)))
+ return 0;
+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -3560,6 +3594,9 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
pos = copy;
}
+ if (!skb_frags_readable(skb))
+ return 0;
+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
@@ -4051,6 +4088,7 @@ static inline void skb_split_inside_header(struct sk_buff *skb,
skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
+ skb1->unreadable = skb->unreadable;
skb_shinfo(skb)->nr_frags = 0;
skb1->data_len = skb->data_len;
skb1->len += skb1->data_len;
@@ -4098,6 +4136,8 @@ static inline void skb_split_no_header(struct sk_buff *skb,
pos += size;
}
skb_shinfo(skb1)->nr_frags = k;
+
+ skb1->unreadable = skb->unreadable;
}
/**
@@ -4335,6 +4375,9 @@ next_skb:
return block_limit - abs_offset;
}
+ if (!skb_frags_readable(st->cur_skb))
+ return 0;
+
if (st->frag_idx == 0 && !st->frag_data)
st->stepped_offset += skb_headlen(st->cur_skb);
@@ -4411,6 +4454,41 @@ void skb_abort_seq_read(struct skb_seq_state *st)
}
EXPORT_SYMBOL(skb_abort_seq_read);
+/**
+ * skb_copy_seq_read() - copy from a skb_seq_state to a buffer
+ * @st: source skb_seq_state
+ * @offset: offset in source
+ * @to: destination buffer
+ * @len: number of bytes to copy
+ *
+ * Copy @len bytes starting at @offset in the source @st to the destination
+ * buffer @to. @offset should increase (or stay the same) with each subsequent
+ * call to this function; if @offset needs to decrease from the previous call,
+ * @st should be reset first.
+ *
+ * Return: 0 on success or -EINVAL if the copy ended early
+ */
+int skb_copy_seq_read(struct skb_seq_state *st, int offset, void *to, int len)
+{
+ const u8 *data;
+ u32 sqlen;
+
+ for (;;) {
+ sqlen = skb_seq_read(offset, &data, st);
+ if (sqlen == 0)
+ return -EINVAL;
+ if (sqlen >= len) {
+ memcpy(to, data, len);
+ return 0;
+ }
+ memcpy(to, data, sqlen);
+ to += sqlen;
+ offset += sqlen;
+ len -= sqlen;
+ }
+}
+EXPORT_SYMBOL(skb_copy_seq_read);
+
#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
@@ -5947,7 +6025,10 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
if (to->pp_recycle != from->pp_recycle)
return false;
- if (len <= skb_tailroom(to)) {
+ if (skb_frags_readable(from) != skb_frags_readable(to))
+ return false;
+
+ if (len <= skb_tailroom(to) && skb_frags_readable(from)) {
if (len)
BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
*delta_truesize = 0;
@@ -6124,6 +6205,9 @@ int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len)
if (!pskb_may_pull(skb, write_len))
return -ENOMEM;
+ if (!skb_frags_readable(skb))
+ return -EFAULT;
+
if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
return 0;
@@ -6803,7 +6887,7 @@ void skb_condense(struct sk_buff *skb)
{
if (skb->data_len) {
if (skb->data_len > skb->end - skb->tail ||
- skb_cloned(skb))
+ skb_cloned(skb) || !skb_frags_readable(skb))
return;
/* Nice, we can free page frag(s) right now */
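skb_copy_seq_read() above layers a length-checked copy on top of the existing skb_seq_read() iterator. A minimal usage sketch, assuming the skb_prepare_seq_read()/skb_abort_seq_read() helpers declared in linux/skbuff.h and a hypothetical caller:

/* Copy @len bytes starting at @offset out of @skb into @buf. */
static int foo_copy_from_skb(struct sk_buff *skb, int offset, void *buf, int len)
{
	struct skb_seq_state st;
	int err;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	err = skb_copy_seq_read(&st, offset, buf, len);
	skb_abort_seq_read(&st);

	return err;	/* 0 on success, -EINVAL if the copy ended early */
}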
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index bbf40b999713..b1dcbd3be89e 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -293,7 +293,7 @@ out:
/* If we trim data a full sg elem before curr pointer update
* copybreak and current so that any future copy operations
* start at new copy location.
- * However trimed data that has not yet been used in a copy op
+ * However trimmed data that has not yet been used in a copy op
* does not require an update.
*/
if (!msg->sg.size) {
diff --git a/net/core/sock.c b/net/core/sock.c
index 468b1239606c..bbb57b5af0b1 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -124,6 +124,7 @@
#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
+#include <linux/skbuff_ref.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
@@ -1049,6 +1050,69 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
return 0;
}
+#ifdef CONFIG_PAGE_POOL
+
+/* This is the maximum number of tokens that the user can pass to
+ * SO_DEVMEM_DONTNEED in one syscall. The limit bounds the amount of memory
+ * the kernel allocates to copy these tokens.
+ */
+#define MAX_DONTNEED_TOKENS 128
+
+static noinline_for_stack int
+sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen)
+{
+ unsigned int num_tokens, i, j, k, netmem_num = 0;
+ struct dmabuf_token *tokens;
+ netmem_ref netmems[16];
+ int ret = 0;
+
+ if (!sk_is_tcp(sk))
+ return -EBADF;
+
+ if (optlen % sizeof(struct dmabuf_token) ||
+ optlen > sizeof(*tokens) * MAX_DONTNEED_TOKENS)
+ return -EINVAL;
+
+ tokens = kvmalloc_array(optlen, sizeof(*tokens), GFP_KERNEL);
+ if (!tokens)
+ return -ENOMEM;
+
+ num_tokens = optlen / sizeof(struct dmabuf_token);
+ if (copy_from_sockptr(tokens, optval, optlen)) {
+ kvfree(tokens);
+ return -EFAULT;
+ }
+
+ xa_lock_bh(&sk->sk_user_frags);
+ for (i = 0; i < num_tokens; i++) {
+ for (j = 0; j < tokens[i].token_count; j++) {
+ netmem_ref netmem = (__force netmem_ref)__xa_erase(
+ &sk->sk_user_frags, tokens[i].token_start + j);
+
+ if (netmem &&
+ !WARN_ON_ONCE(!netmem_is_net_iov(netmem))) {
+ netmems[netmem_num++] = netmem;
+ if (netmem_num == ARRAY_SIZE(netmems)) {
+ xa_unlock_bh(&sk->sk_user_frags);
+ for (k = 0; k < netmem_num; k++)
+ WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));
+ netmem_num = 0;
+ xa_lock_bh(&sk->sk_user_frags);
+ }
+ ret++;
+ }
+ }
+ }
+
+ xa_unlock_bh(&sk->sk_user_frags);
+ for (k = 0; k < netmem_num; k++)
+ WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));
+
+ kvfree(tokens);
+ return ret;
+}
+#endif
+
void sockopt_lock_sock(struct sock *sk)
{
/* When current->bpf_ctx is set, the setsockopt is called from
@@ -1211,6 +1275,10 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
ret = -EOPNOTSUPP;
return ret;
}
+#ifdef CONFIG_PAGE_POOL
+ case SO_DEVMEM_DONTNEED:
+ return sock_devmem_dontneed(sk, optval, optlen);
+#endif
}
sockopt_lock_sock(sk);
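SO_DEVMEM_DONTNEED above is the userspace half of the token accounting: once the application is done with a received dmabuf frag, it hands the frag token back so the kernel can release the underlying net_iov. A hedged userspace sketch; struct dmabuf_token and the SO_DEVMEM_DONTNEED constant are assumed to come from the uapi headers added alongside this series (linux/uio.h and the updated socket headers):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/uio.h>		/* struct dmabuf_token (assumed location) */

/* Hand one received frag token back to the kernel. */
static int return_token(int fd, unsigned int token)
{
	struct dmabuf_token tok = {
		.token_start = token,
		.token_count = 1,
	};
	int ret;

	/* Per sock_devmem_dontneed() above: returns the number of frags
	 * freed on success, or a negative error.
	 */
	ret = setsockopt(fd, SOL_SOCKET, SO_DEVMEM_DONTNEED, &tok, sizeof(tok));
	if (ret < 0)
		perror("SO_DEVMEM_DONTNEED");
	return ret;
}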
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index d3dbb92153f2..724b6856fcc3 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -1183,6 +1183,7 @@ static void sock_hash_free(struct bpf_map *map)
sock_put(elem->sk);
sock_hash_free_elem(htab, elem);
}
+ cond_resched();
}
/* wait for psock readers accessing its map link */
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index 1f46de394f2e..281bbac5539d 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -178,8 +178,9 @@ MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_KSZ8795, KSZ8795_NAME);
#define KSZ9477_INGRESS_TAG_LEN 2
#define KSZ9477_PTP_TAG_LEN 4
-#define KSZ9477_PTP_TAG_INDICATION 0x80
+#define KSZ9477_PTP_TAG_INDICATION BIT(7)
+#define KSZ9477_TAIL_TAG_EG_PORT_M GENMASK(2, 0)
#define KSZ9477_TAIL_TAG_PRIO GENMASK(8, 7)
#define KSZ9477_TAIL_TAG_OVERRIDE BIT(9)
#define KSZ9477_TAIL_TAG_LOOKUP BIT(10)
@@ -312,7 +313,7 @@ static struct sk_buff *ksz9477_rcv(struct sk_buff *skb, struct net_device *dev)
{
/* Tag decoding */
u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
- unsigned int port = tag[0] & 7;
+ unsigned int port = tag[0] & KSZ9477_TAIL_TAG_EG_PORT_M;
unsigned int len = KSZ_EGRESS_TAG_LEN;
/* Extra 4-bytes PTP timestamp */
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index 781834ef57c3..dd345efa114b 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -427,6 +427,7 @@ const char sof_timestamping_names[][ETH_GSTRING_LEN] = {
[const_ilog2(SOF_TIMESTAMPING_OPT_TX_SWHW)] = "option-tx-swhw",
[const_ilog2(SOF_TIMESTAMPING_BIND_PHC)] = "bind-phc",
[const_ilog2(SOF_TIMESTAMPING_OPT_ID_TCP)] = "option-id-tcp",
+ [const_ilog2(SOF_TIMESTAMPING_OPT_RX_FILTER)] = "option-rx-filter",
};
static_assert(ARRAY_SIZE(sof_timestamping_names) == __SOF_TIMESTAMPING_CNT);
@@ -654,6 +655,7 @@ int ethtool_check_max_channel(struct net_device *dev,
{
u64 max_rxnfc_in_use;
u32 max_rxfh_in_use;
+ int max_mp_in_use;
/* ensure the new Rx count fits within the configured Rx flow
* indirection table/rxnfc settings
@@ -672,6 +674,13 @@ int ethtool_check_max_channel(struct net_device *dev,
return -EINVAL;
}
+ max_mp_in_use = dev_get_min_mp_channel_count(dev);
+ if (channels.combined_count + channels.rx_count <= max_mp_in_use) {
+ if (info)
+ GENL_SET_ERR_MSG_FMT(info, "requested channel counts are too low for existing memory provider setting (%d)", max_mp_in_use);
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/net/ethtool/phy.c b/net/ethtool/phy.c
index 560dd039c662..4ef7c6e32d10 100644
--- a/net/ethtool/phy.c
+++ b/net/ethtool/phy.c
@@ -164,7 +164,7 @@ int ethnl_phy_doit(struct sk_buff *skb, struct genl_info *info)
goto err_unlock_rtnl;
/* No PHY, return early */
- if (!req_info.pdn->phy)
+ if (!req_info.pdn)
goto err_unlock_rtnl;
ret = ethnl_phy_reply_size(&req_info.base, info->extack);
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index a06e790042e2..ebdfd5b64e17 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -427,6 +427,9 @@ static void hsr_proxy_announce(struct timer_list *t)
* of SAN nodes stored in ProxyNodeTable.
*/
interlink = hsr_port_get_hsr(hsr, HSR_PT_INTERLINK);
+ if (!interlink)
+ goto done;
+
list_for_each_entry_rcu(node, &hsr->proxy_node_db, mac_list) {
if (hsr_addr_is_redbox(hsr, node->macaddress_A))
continue;
@@ -441,6 +444,7 @@ static void hsr_proxy_announce(struct timer_list *t)
mod_timer(&hsr->announce_proxy_timer, jiffies + interval);
}
+done:
rcu_read_unlock();
}
@@ -625,7 +629,6 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
/* Overflow soon to find bugs easier: */
hsr->sequence_nr = HSR_SEQNR_START;
hsr->sup_sequence_nr = HSR_SUP_SEQNR_START;
- hsr->interlink_sequence_nr = HSR_SEQNR_START;
timer_setup(&hsr->announce_timer, hsr_announce, 0);
timer_setup(&hsr->prune_timer, hsr_prune_nodes, 0);
diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
index ab1f8d35d9dc..fcfeb79bb040 100644
--- a/net/hsr/hsr_main.h
+++ b/net/hsr/hsr_main.h
@@ -203,7 +203,6 @@ struct hsr_priv {
struct timer_list prune_proxy_timer;
int announce_count;
u16 sequence_nr;
- u16 interlink_sequence_nr; /* Interlink port seq_nr */
u16 sup_sequence_nr; /* For HSRv1 separate seq_nr for supervision */
enum hsr_version prot_version; /* Indicate if HSRv0, HSRv1 or PRPv1 */
spinlock_t seqnr_lock; /* locking for sequence_nr */
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
index af6cf64a00e0..464f683e016d 100644
--- a/net/hsr/hsr_slave.c
+++ b/net/hsr/hsr_slave.c
@@ -67,7 +67,16 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);
skb_reset_mac_len(skb);
- hsr_forward_skb(skb, port);
+ /* Only frames received over the interlink port are assigned a
+ * sequence number here and require synchronisation with the other sender.
+ */
+ if (port->type == HSR_PT_INTERLINK) {
+ spin_lock_bh(&hsr->seqnr_lock);
+ hsr_forward_skb(skb, port);
+ spin_unlock_bh(&hsr->seqnr_lock);
+ } else {
+ hsr_forward_skb(skb, port);
+ }
finish_consume:
return RX_HANDLER_CONSUMED;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 47378ca41904..f3281312eb5e 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -115,7 +115,8 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
*/
if (req->src != req->dst)
for (sg = sg_next(req->src); sg; sg = sg_next(sg))
- skb_page_unref(sg_page(sg), skb->pp_recycle);
+ skb_page_unref(page_to_netmem(sg_page(sg)),
+ skb->pp_recycle);
}
#ifdef CONFIG_INET_ESPINTCP
diff --git a/net/ipv4/fou_core.c b/net/ipv4/fou_core.c
index 78b869b31492..3e30745e2c09 100644
--- a/net/ipv4/fou_core.c
+++ b/net/ipv4/fou_core.c
@@ -336,11 +336,11 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
struct gro_remcsum grc;
u8 proto;
+ skb_gro_remcsum_init(&grc);
+
if (!fou)
goto out;
- skb_gro_remcsum_init(&grc);
-
off = skb_gro_offset(skb);
len = off + sizeof(*guehdr);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 8a5680b4e786..4f77bd862e95 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -285,6 +285,8 @@
#include <trace/events/tcp.h>
#include <net/rps.h>
+#include "../core/devmem.h"
+
/* Track pending CMSGs. */
enum {
TCP_CMSG_INQ = 1,
@@ -471,6 +473,7 @@ void tcp_init_sock(struct sock *sk)
set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
sk_sockets_allocated_inc(sk);
+ xa_init_flags(&sk->sk_user_frags, XA_FLAGS_ALLOC1);
}
EXPORT_SYMBOL(tcp_init_sock);
@@ -2160,6 +2163,9 @@ static int tcp_zerocopy_receive(struct sock *sk,
skb = tcp_recv_skb(sk, seq, &offset);
}
+ if (!skb_frags_readable(skb))
+ break;
+
if (TCP_SKB_CB(skb)->has_rxtstamp) {
tcp_update_recv_tstamps(skb, tss);
zc->msg_flags |= TCP_CMSG_TS;
@@ -2177,6 +2183,9 @@ static int tcp_zerocopy_receive(struct sock *sk,
break;
}
page = skb_frag_page(frags);
+ if (WARN_ON_ONCE(!page))
+ break;
+
prefetchw(page);
pages[pages_to_map++] = page;
length += PAGE_SIZE;
@@ -2235,6 +2244,7 @@ void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
struct scm_timestamping_internal *tss)
{
int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
+ u32 tsflags = READ_ONCE(sk->sk_tsflags);
bool has_timestamping = false;
if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) {
@@ -2274,14 +2284,18 @@ void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
}
}
- if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_SOFTWARE)
+ if (tsflags & SOF_TIMESTAMPING_SOFTWARE &&
+ (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE ||
+ !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER)))
has_timestamping = true;
else
tss->ts[0] = (struct timespec64) {0};
}
if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) {
- if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_RAW_HARDWARE)
+ if (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE &&
+ (tsflags & SOF_TIMESTAMPING_RX_HARDWARE ||
+ !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER)))
has_timestamping = true;
else
tss->ts[2] = (struct timespec64) {0};
@@ -2317,6 +2331,220 @@ static int tcp_inq_hint(struct sock *sk)
return inq;
}
+/* batch __xa_alloc() calls and reduce xa_lock()/xa_unlock() overhead. */
+struct tcp_xa_pool {
+ u8 max; /* max <= MAX_SKB_FRAGS */
+ u8 idx; /* idx <= max */
+ __u32 tokens[MAX_SKB_FRAGS];
+ netmem_ref netmems[MAX_SKB_FRAGS];
+};
+
+static void tcp_xa_pool_commit_locked(struct sock *sk, struct tcp_xa_pool *p)
+{
+ int i;
+
+ /* Commit part that has been copied to user space. */
+ for (i = 0; i < p->idx; i++)
+ __xa_cmpxchg(&sk->sk_user_frags, p->tokens[i], XA_ZERO_ENTRY,
+ (__force void *)p->netmems[i], GFP_KERNEL);
+ /* Rollback what has been pre-allocated and is no longer needed. */
+ for (; i < p->max; i++)
+ __xa_erase(&sk->sk_user_frags, p->tokens[i]);
+
+ p->max = 0;
+ p->idx = 0;
+}
+
+static void tcp_xa_pool_commit(struct sock *sk, struct tcp_xa_pool *p)
+{
+ if (!p->max)
+ return;
+
+ xa_lock_bh(&sk->sk_user_frags);
+
+ tcp_xa_pool_commit_locked(sk, p);
+
+ xa_unlock_bh(&sk->sk_user_frags);
+}
+
+static int tcp_xa_pool_refill(struct sock *sk, struct tcp_xa_pool *p,
+ unsigned int max_frags)
+{
+ int err, k;
+
+ if (p->idx < p->max)
+ return 0;
+
+ xa_lock_bh(&sk->sk_user_frags);
+
+ tcp_xa_pool_commit_locked(sk, p);
+
+ for (k = 0; k < max_frags; k++) {
+ err = __xa_alloc(&sk->sk_user_frags, &p->tokens[k],
+ XA_ZERO_ENTRY, xa_limit_31b, GFP_KERNEL);
+ if (err)
+ break;
+ }
+
+ xa_unlock_bh(&sk->sk_user_frags);
+
+ p->max = k;
+ p->idx = 0;
+ return k ? 0 : err;
+}
+
+/* On error, returns a negative errno. On success, returns the number of bytes
+ * sent to the user. May not consume all of @remaining_len.
+ */
+static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
+ unsigned int offset, struct msghdr *msg,
+ int remaining_len)
+{
+ struct dmabuf_cmsg dmabuf_cmsg = { 0 };
+ struct tcp_xa_pool tcp_xa_pool;
+ unsigned int start;
+ int i, copy, n;
+ int sent = 0;
+ int err = 0;
+
+ tcp_xa_pool.max = 0;
+ tcp_xa_pool.idx = 0;
+ do {
+ start = skb_headlen(skb);
+
+ if (skb_frags_readable(skb)) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ /* Copy header. */
+ copy = start - offset;
+ if (copy > 0) {
+ copy = min(copy, remaining_len);
+
+ n = copy_to_iter(skb->data + offset, copy,
+ &msg->msg_iter);
+ if (n != copy) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ offset += copy;
+ remaining_len -= copy;
+
+ /* First a dmabuf_cmsg for # bytes copied to user
+ * buffer.
+ */
+ memset(&dmabuf_cmsg, 0, sizeof(dmabuf_cmsg));
+ dmabuf_cmsg.frag_size = copy;
+ err = put_cmsg(msg, SOL_SOCKET, SO_DEVMEM_LINEAR,
+ sizeof(dmabuf_cmsg), &dmabuf_cmsg);
+ if (err || msg->msg_flags & MSG_CTRUNC) {
+ msg->msg_flags &= ~MSG_CTRUNC;
+ if (!err)
+ err = -ETOOSMALL;
+ goto out;
+ }
+
+ sent += copy;
+
+ if (remaining_len == 0)
+ goto out;
+ }
+
+ /* after that, send information of dmabuf pages through a
+ * sequence of cmsg
+ */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ struct net_iov *niov;
+ u64 frag_offset;
+ int end;
+
+ /* !skb_frags_readable() should indicate that ALL the
+ * frags in this skb are dmabuf net_iovs. We're checking
+ * for that flag above, but also check individual frags
+ * here. If the tcp stack is not setting
+ * skb_frags_readable() correctly, we still don't want
+ * to crash here.
+ */
+ if (!skb_frag_net_iov(frag)) {
+ net_err_ratelimited("Found non-dmabuf skb with net_iov");
+ err = -ENODEV;
+ goto out;
+ }
+
+ niov = skb_frag_net_iov(frag);
+ end = start + skb_frag_size(frag);
+ copy = end - offset;
+
+ if (copy > 0) {
+ copy = min(copy, remaining_len);
+
+ frag_offset = net_iov_virtual_addr(niov) +
+ skb_frag_off(frag) + offset -
+ start;
+ dmabuf_cmsg.frag_offset = frag_offset;
+ dmabuf_cmsg.frag_size = copy;
+ err = tcp_xa_pool_refill(sk, &tcp_xa_pool,
+ skb_shinfo(skb)->nr_frags - i);
+ if (err)
+ goto out;
+
+ /* Will perform the exchange later */
+ dmabuf_cmsg.frag_token = tcp_xa_pool.tokens[tcp_xa_pool.idx];
+ dmabuf_cmsg.dmabuf_id = net_iov_binding_id(niov);
+
+ offset += copy;
+ remaining_len -= copy;
+
+ err = put_cmsg(msg, SOL_SOCKET,
+ SO_DEVMEM_DMABUF,
+ sizeof(dmabuf_cmsg),
+ &dmabuf_cmsg);
+ if (err || msg->msg_flags & MSG_CTRUNC) {
+ msg->msg_flags &= ~MSG_CTRUNC;
+ if (!err)
+ err = -ETOOSMALL;
+ goto out;
+ }
+
+ atomic_long_inc(&niov->pp_ref_count);
+ tcp_xa_pool.netmems[tcp_xa_pool.idx++] = skb_frag_netmem(frag);
+
+ sent += copy;
+
+ if (remaining_len == 0)
+ goto out;
+ }
+ start = end;
+ }
+
+ tcp_xa_pool_commit(sk, &tcp_xa_pool);
+ if (!remaining_len)
+ goto out;
+
+ /* if remaining_len is not satisfied yet, we need to go to the
+ * next frag in the frag_list to satisfy remaining_len.
+ */
+ skb = skb_shinfo(skb)->frag_list ?: skb->next;
+
+ offset = offset - start;
+ } while (skb);
+
+ if (remaining_len) {
+ err = -EFAULT;
+ goto out;
+ }
+
+out:
+ tcp_xa_pool_commit(sk, &tcp_xa_pool);
+ if (!sent)
+ sent = err;
+
+ return sent;
+}
+
/*
* This routine copies from a sock struct into the user buffer.
*
@@ -2330,6 +2558,7 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
int *cmsg_flags)
{
struct tcp_sock *tp = tcp_sk(sk);
+ int last_copied_dmabuf = -1; /* uninitialized */
int copied = 0;
u32 peek_seq;
u32 *seq;
@@ -2509,15 +2738,44 @@ found_ok_skb:
}
if (!(flags & MSG_TRUNC)) {
- err = skb_copy_datagram_msg(skb, offset, msg, used);
- if (err) {
- /* Exception. Bailout! */
- if (!copied)
- copied = -EFAULT;
+ if (last_copied_dmabuf != -1 &&
+ last_copied_dmabuf != !skb_frags_readable(skb))
break;
+
+ if (skb_frags_readable(skb)) {
+ err = skb_copy_datagram_msg(skb, offset, msg,
+ used);
+ if (err) {
+ /* Exception. Bailout! */
+ if (!copied)
+ copied = -EFAULT;
+ break;
+ }
+ } else {
+ if (!(flags & MSG_SOCK_DEVMEM)) {
+ /* dmabuf skbs can only be received
+ * with the MSG_SOCK_DEVMEM flag.
+ */
+ if (!copied)
+ copied = -EFAULT;
+
+ break;
+ }
+
+ err = tcp_recvmsg_dmabuf(sk, skb, offset, msg,
+ used);
+ if (err <= 0) {
+ if (!copied)
+ copied = -EFAULT;
+
+ break;
+ }
+ used = err;
}
}
+ last_copied_dmabuf = !skb_frags_readable(skb);
+
WRITE_ONCE(*seq, *seq + used);
copied += used;
len -= used;
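On the receive side, tcp_recvmsg_dmabuf() above emits one cmsg per region: SO_DEVMEM_LINEAR describes bytes copied into the normal user buffer, while SO_DEVMEM_DMABUF describes a frag that stays in the dma-buf and is identified by frag_offset/frag_size/frag_token/dmabuf_id. A hedged userspace sketch of the consuming loop; MSG_SOCK_DEVMEM, the SO_DEVMEM_* constants and the struct dmabuf_cmsg layout are assumed to come from the uapi headers added with this series:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/uio.h>		/* struct dmabuf_cmsg (assumed location) */

/* Receive into @buf, passing each dmabuf frag descriptor to @on_frag(). */
static ssize_t recv_devmem(int fd, void *buf, size_t len,
			   void (*on_frag)(const struct dmabuf_cmsg *))
{
	char ctrl[CMSG_SPACE(sizeof(struct dmabuf_cmsg)) * 16];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= ctrl,
		.msg_controllen	= sizeof(ctrl),
	};
	struct cmsghdr *cm;
	ssize_t ret;

	ret = recvmsg(fd, &msg, MSG_SOCK_DEVMEM);
	if (ret < 0)
		return ret;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		struct dmabuf_cmsg dc;

		if (cm->cmsg_level != SOL_SOCKET ||
		    (cm->cmsg_type != SO_DEVMEM_LINEAR &&
		     cm->cmsg_type != SO_DEVMEM_DMABUF))
			continue;

		memcpy(&dc, CMSG_DATA(cm), sizeof(dc));
		if (cm->cmsg_type == SO_DEVMEM_DMABUF)
			on_frag(&dc);	/* frag stays in the dma-buf */
		/* SO_DEVMEM_LINEAR: dc.frag_size bytes already landed in buf. */
	}
	return ret;
}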
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index fe6178715ba0..e7658c5d6b79 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -30,7 +30,7 @@ void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
}
static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
- struct sk_msg *msg, u32 apply_bytes, int flags)
+ struct sk_msg *msg, u32 apply_bytes)
{
bool apply = apply_bytes;
struct scatterlist *sge;
@@ -167,7 +167,7 @@ int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
if (unlikely(!psock))
return -EPIPE;
- ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
+ ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes) :
tcp_bpf_push_locked(sk, msg, bytes, flags, false);
sk_psock_put(sk, psock);
return ret;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e37488d3453f..9f314dfa1490 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5391,6 +5391,9 @@ restart:
for (end_of_skbs = true; skb != NULL && skb != tail; skb = n) {
n = tcp_skb_next(skb, list);
+ if (!skb_frags_readable(skb))
+ goto skip_this;
+
/* No new bits? It is possible on ofo queue. */
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
skb = tcp_collapse_one(sk, skb, list, root);
@@ -5411,17 +5414,20 @@ restart:
break;
}
- if (n && n != tail && tcp_skb_can_collapse_rx(skb, n) &&
+ if (n && n != tail && skb_frags_readable(n) &&
+ tcp_skb_can_collapse_rx(skb, n) &&
TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
end_of_skbs = false;
break;
}
+skip_this:
/* Decided to skip this, advance start seq. */
start = TCP_SKB_CB(skb)->end_seq;
}
if (end_of_skbs ||
- (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
+ (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) ||
+ !skb_frags_readable(skb))
return;
__skb_queue_head_init(&tmp);
@@ -5463,7 +5469,8 @@ restart:
if (!skb ||
skb == tail ||
!tcp_skb_can_collapse_rx(nskb, skb) ||
- (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
+ (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) ||
+ !skb_frags_readable(skb))
goto end;
}
}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index eb631e66ee03..5afe5e57c89b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -79,6 +79,7 @@
#include <linux/seq_file.h>
#include <linux/inetdevice.h>
#include <linux/btf_ids.h>
+#include <linux/skbuff_ref.h>
#include <crypto/hash.h>
#include <linux/scatterlist.h>
@@ -2512,10 +2513,25 @@ static void tcp_md5sig_info_free_rcu(struct rcu_head *head)
}
#endif
+static void tcp_release_user_frags(struct sock *sk)
+{
+#ifdef CONFIG_PAGE_POOL
+ unsigned long index;
+ void *netmem;
+
+ xa_for_each(&sk->sk_user_frags, index, netmem)
+ WARN_ON_ONCE(!napi_pp_put_page((__force netmem_ref)netmem));
+#endif
+}
+
void tcp_v4_destroy_sock(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
+ tcp_release_user_frags(sk);
+
+ xa_destroy(&sk->sk_user_frags);
+
trace_tcp_destroy_sock(sk);
tcp_clear_xmit_timers(sk);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index ad562272db2e..bb1fe1ba867a 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -628,6 +628,8 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
+ xa_init_flags(&newsk->sk_user_frags, XA_FLAGS_ALLOC1);
+
return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index cdd0def14427..4fd746bd4d54 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2344,7 +2344,8 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
if (unlikely(TCP_SKB_CB(skb)->eor) ||
tcp_has_tx_tstamp(skb) ||
- !skb_pure_zcopy_same(skb, next))
+ !skb_pure_zcopy_same(skb, next) ||
+ skb_frags_readable(skb) != skb_frags_readable(next))
return false;
len -= skb->len;
@@ -3264,6 +3265,8 @@ static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
return false;
if (skb_cloned(skb))
return false;
+ if (!skb_frags_readable(skb))
+ return false;
/* Some heuristics for collapsing over SACK'd could be invented */
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
return false;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 86169127e4d1..79064580c8c0 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -282,6 +282,7 @@ static int tcp_write_timeout(struct sock *sk)
expired = retransmits_timed_out(sk, retry_until,
READ_ONCE(icsk->icsk_user_timeout));
tcp_fastopen_active_detect_blackhole(sk, expired);
+ mptcp_active_detect_blackhole(sk, expired);
if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 3920e8aa1031..b2400c226a32 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -132,7 +132,8 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
*/
if (req->src != req->dst)
for (sg = sg_next(req->src); sg; sg = sg_next(sg))
- skb_page_unref(sg_page(sg), skb->pp_recycle);
+ skb_page_unref(page_to_netmem(sg_page(sg)),
+ skb->pp_recycle);
}
#ifdef CONFIG_INET6_ESPINTCP
diff --git a/net/mac80211/airtime.c b/net/mac80211/airtime.c
index fdf8b658fede..c61df637232a 100644
--- a/net/mac80211/airtime.c
+++ b/net/mac80211/airtime.c
@@ -55,10 +55,21 @@
#define HE_DURATION_S(shift, streams, gi, bps) \
(HE_DURATION(streams, gi, bps) >> shift)
+/* gi in HE/EHT is identical. It matches enum nl80211_eht_gi as well */
+#define EHT_GI_08 HE_GI_08
+#define EHT_GI_16 HE_GI_16
+#define EHT_GI_32 HE_GI_32
+
+#define EHT_DURATION(streams, gi, bps) \
+ HE_DURATION(streams, gi, bps)
+#define EHT_DURATION_S(shift, streams, gi, bps) \
+ HE_DURATION_S(shift, streams, gi, bps)
+
#define BW_20 0
#define BW_40 1
#define BW_80 2
#define BW_160 3
+#define BW_320 4
/*
* Define group sort order: HT40 -> SGI -> #streams
@@ -68,17 +79,26 @@
#define IEEE80211_VHT_STREAM_GROUPS 8 /* BW(=4) * SGI(=2) */
#define IEEE80211_HE_MAX_STREAMS 8
+#define IEEE80211_HE_STREAM_GROUPS 12 /* BW(=4) * GI(=3) */
+
+#define IEEE80211_EHT_MAX_STREAMS 8
+#define IEEE80211_EHT_STREAM_GROUPS 15 /* BW(=5) * GI(=3) */
#define IEEE80211_HT_GROUPS_NB (IEEE80211_MAX_STREAMS * \
IEEE80211_HT_STREAM_GROUPS)
#define IEEE80211_VHT_GROUPS_NB (IEEE80211_MAX_STREAMS * \
IEEE80211_VHT_STREAM_GROUPS)
+#define IEEE80211_HE_GROUPS_NB (IEEE80211_HE_MAX_STREAMS * \
+ IEEE80211_HE_STREAM_GROUPS)
+#define IEEE80211_EHT_GROUPS_NB (IEEE80211_EHT_MAX_STREAMS * \
+ IEEE80211_EHT_STREAM_GROUPS)
#define IEEE80211_HT_GROUP_0 0
#define IEEE80211_VHT_GROUP_0 (IEEE80211_HT_GROUP_0 + IEEE80211_HT_GROUPS_NB)
#define IEEE80211_HE_GROUP_0 (IEEE80211_VHT_GROUP_0 + IEEE80211_VHT_GROUPS_NB)
+#define IEEE80211_EHT_GROUP_0 (IEEE80211_HE_GROUP_0 + IEEE80211_HE_GROUPS_NB)
-#define MCS_GROUP_RATES 12
+#define MCS_GROUP_RATES 14
#define HT_GROUP_IDX(_streams, _sgi, _ht40) \
IEEE80211_HT_GROUP_0 + \
@@ -203,6 +223,69 @@
#define HE_GROUP(_streams, _gi, _bw) \
__HE_GROUP(_streams, _gi, _bw, \
HE_GROUP_SHIFT(_streams, _gi, _bw))
+
+#define EHT_BW2VBPS(_bw, r5, r4, r3, r2, r1) \
+ ((_bw) == BW_320 ? r5 : BW2VBPS(_bw, r4, r3, r2, r1))
+
+#define EHT_GROUP_IDX(_streams, _gi, _bw) \
+ (IEEE80211_EHT_GROUP_0 + \
+ IEEE80211_EHT_MAX_STREAMS * 3 * (_bw) + \
+ IEEE80211_EHT_MAX_STREAMS * (_gi) + \
+ (_streams) - 1)
+
+#define __EHT_GROUP(_streams, _gi, _bw, _s) \
+ [EHT_GROUP_IDX(_streams, _gi, _bw)] = { \
+ .shift = _s, \
+ .duration = { \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 1960, 980, 490, 234, 117)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 3920, 1960, 980, 468, 234)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 5880, 2937, 1470, 702, 351)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 7840, 3920, 1960, 936, 468)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 11760, 5880, 2940, 1404, 702)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 15680, 7840, 3920, 1872, 936)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 17640, 8820, 4410, 2106, 1053)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 19600, 9800, 4900, 2340, 1170)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 23520, 11760, 5880, 2808, 1404)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 26133, 13066, 6533, 3120, 1560)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 29400, 14700, 7350, 3510, 1755)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 32666, 16333, 8166, 3900, 1950)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 35280, 17640, 8820, 4212, 2106)), \
+ EHT_DURATION_S(_s, _streams, _gi, \
+ EHT_BW2VBPS(_bw, 39200, 19600, 9800, 4680, 2340)) \
+ } \
+}
+
+#define EHT_GROUP_SHIFT(_streams, _gi, _bw) \
+ GROUP_SHIFT(EHT_DURATION(_streams, _gi, \
+ EHT_BW2VBPS(_bw, 1960, 980, 490, 234, 117)))
+
+#define EHT_GROUP(_streams, _gi, _bw) \
+ __EHT_GROUP(_streams, _gi, _bw, \
+ EHT_GROUP_SHIFT(_streams, _gi, _bw))
+
+#define EHT_GROUP_RANGE(_gi, _bw) \
+ EHT_GROUP(1, _gi, _bw), \
+ EHT_GROUP(2, _gi, _bw), \
+ EHT_GROUP(3, _gi, _bw), \
+ EHT_GROUP(4, _gi, _bw), \
+ EHT_GROUP(5, _gi, _bw), \
+ EHT_GROUP(6, _gi, _bw), \
+ EHT_GROUP(7, _gi, _bw), \
+ EHT_GROUP(8, _gi, _bw)
+
struct mcs_group {
u8 shift;
u16 duration[MCS_GROUP_RATES];
@@ -376,6 +459,26 @@ static const struct mcs_group airtime_mcs_groups[] = {
HE_GROUP(6, HE_GI_32, BW_160),
HE_GROUP(7, HE_GI_32, BW_160),
HE_GROUP(8, HE_GI_32, BW_160),
+
+ EHT_GROUP_RANGE(EHT_GI_08, BW_20),
+ EHT_GROUP_RANGE(EHT_GI_16, BW_20),
+ EHT_GROUP_RANGE(EHT_GI_32, BW_20),
+
+ EHT_GROUP_RANGE(EHT_GI_08, BW_40),
+ EHT_GROUP_RANGE(EHT_GI_16, BW_40),
+ EHT_GROUP_RANGE(EHT_GI_32, BW_40),
+
+ EHT_GROUP_RANGE(EHT_GI_08, BW_80),
+ EHT_GROUP_RANGE(EHT_GI_16, BW_80),
+ EHT_GROUP_RANGE(EHT_GI_32, BW_80),
+
+ EHT_GROUP_RANGE(EHT_GI_08, BW_160),
+ EHT_GROUP_RANGE(EHT_GI_16, BW_160),
+ EHT_GROUP_RANGE(EHT_GI_32, BW_160),
+
+ EHT_GROUP_RANGE(EHT_GI_08, BW_320),
+ EHT_GROUP_RANGE(EHT_GI_16, BW_320),
+ EHT_GROUP_RANGE(EHT_GI_32, BW_320),
};
static u32
@@ -422,6 +525,9 @@ static u32 ieee80211_get_rate_duration(struct ieee80211_hw *hw,
case RATE_INFO_BW_160:
bw = BW_160;
break;
+ case RATE_INFO_BW_320:
+ bw = BW_320;
+ break;
default:
WARN_ON_ONCE(1);
return 0;
@@ -443,14 +549,27 @@ static u32 ieee80211_get_rate_duration(struct ieee80211_hw *hw,
idx = status->rate_idx;
group = HE_GROUP_IDX(streams, status->he_gi, bw);
break;
+ case RX_ENC_EHT:
+ streams = status->nss;
+ idx = status->rate_idx;
+ group = EHT_GROUP_IDX(streams, status->eht.gi, bw);
+ break;
default:
WARN_ON_ONCE(1);
return 0;
}
- if (WARN_ON_ONCE((status->encoding != RX_ENC_HE && streams > 4) ||
- (status->encoding == RX_ENC_HE && streams > 8)))
- return 0;
+ switch (status->encoding) {
+ case RX_ENC_EHT:
+ case RX_ENC_HE:
+ if (WARN_ON_ONCE(streams > 8))
+ return 0;
+ break;
+ default:
+ if (WARN_ON_ONCE(streams > 4))
+ return 0;
+ break;
+ }
if (idx >= MCS_GROUP_RATES)
return 0;
@@ -517,7 +636,9 @@ static bool ieee80211_fill_rate_info(struct ieee80211_hw *hw,
stat->nss = ri->nss;
stat->rate_idx = ri->mcs;
- if (ri->flags & RATE_INFO_FLAGS_HE_MCS)
+ if (ri->flags & RATE_INFO_FLAGS_EHT_MCS)
+ stat->encoding = RX_ENC_EHT;
+ else if (ri->flags & RATE_INFO_FLAGS_HE_MCS)
stat->encoding = RX_ENC_HE;
else if (ri->flags & RATE_INFO_FLAGS_VHT_MCS)
stat->encoding = RX_ENC_VHT;
@@ -529,7 +650,14 @@ static bool ieee80211_fill_rate_info(struct ieee80211_hw *hw,
if (ri->flags & RATE_INFO_FLAGS_SHORT_GI)
stat->enc_flags |= RX_ENC_FLAG_SHORT_GI;
- stat->he_gi = ri->he_gi;
+ switch (stat->encoding) {
+ case RX_ENC_EHT:
+ stat->eht.gi = ri->eht_gi;
+ break;
+ default:
+ stat->he_gi = ri->he_gi;
+ break;
+ }
if (stat->encoding != RX_ENC_LEGACY)
return true;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index b02b84ce2130..847304a3a29a 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1662,12 +1662,12 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
ieee80211_link_info_change_notify(sdata, link,
BSS_CHANGED_BEACON_ENABLED);
- if (sdata->wdev.cac_started) {
+ if (sdata->wdev.links[link_id].cac_started) {
chandef = link_conf->chanreq.oper;
- wiphy_delayed_work_cancel(wiphy, &sdata->dfs_cac_timer_work);
+ wiphy_delayed_work_cancel(wiphy, &link->dfs_cac_timer_work);
cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_ABORTED,
- GFP_KERNEL);
+ GFP_KERNEL, link_id);
}
drv_stop_ap(sdata->local, sdata, link_conf);
@@ -3462,51 +3462,58 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
static int ieee80211_start_radar_detection(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms)
+ u32 cac_time_ms, int link_id)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_chan_req chanreq = { .oper = *chandef };
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_link_data *link_data;
int err;
lockdep_assert_wiphy(local->hw.wiphy);
- if (!list_empty(&local->roc_list) || local->scanning) {
- err = -EBUSY;
- goto out_unlock;
- }
+ if (!list_empty(&local->roc_list) || local->scanning)
+ return -EBUSY;
+
+ link_data = sdata_dereference(sdata->link[link_id], sdata);
+ if (!link_data)
+ return -ENOLINK;
/* whatever, but channel contexts should not complain about that one */
- sdata->deflink.smps_mode = IEEE80211_SMPS_OFF;
- sdata->deflink.needed_rx_chains = local->rx_chains;
+ link_data->smps_mode = IEEE80211_SMPS_OFF;
+ link_data->needed_rx_chains = local->rx_chains;
- err = ieee80211_link_use_channel(&sdata->deflink, &chanreq,
+ err = ieee80211_link_use_channel(link_data, &chanreq,
IEEE80211_CHANCTX_SHARED);
if (err)
- goto out_unlock;
+ return err;
- wiphy_delayed_work_queue(wiphy, &sdata->dfs_cac_timer_work,
+ wiphy_delayed_work_queue(wiphy, &link_data->dfs_cac_timer_work,
msecs_to_jiffies(cac_time_ms));
- out_unlock:
- return err;
+ return 0;
}
static void ieee80211_end_cac(struct wiphy *wiphy,
- struct net_device *dev)
+ struct net_device *dev, unsigned int link_id)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_link_data *link_data;
lockdep_assert_wiphy(local->hw.wiphy);
list_for_each_entry(sdata, &local->interfaces, list) {
+ link_data = sdata_dereference(sdata->link[link_id], sdata);
+ if (!link_data)
+ continue;
+
wiphy_delayed_work_cancel(wiphy,
- &sdata->dfs_cac_timer_work);
+ &link_data->dfs_cac_timer_work);
- if (sdata->wdev.cac_started) {
- ieee80211_link_release_channel(&sdata->deflink);
- sdata->wdev.cac_started = false;
+ if (sdata->wdev.links[link_id].cac_started) {
+ ieee80211_link_release_channel(link_data);
+ sdata->wdev.links[link_id].cac_started = false;
}
}
}
@@ -3961,7 +3968,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
if (!list_empty(&local->roc_list) || local->scanning)
return -EBUSY;
- if (sdata->wdev.cac_started)
+ if (sdata->wdev.links[link_id].cac_started)
return -EBUSY;
if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index b72e4036526b..cca6d14084d2 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -683,6 +683,7 @@ ieee80211_alloc_chanctx(struct ieee80211_local *local,
ctx->mode = mode;
ctx->conf.radar_enabled = false;
ctx->conf.radio_idx = radio_idx;
+ ctx->radar_detected = false;
_ieee80211_recalc_chanctx_min_def(local, ctx, NULL, false);
return ctx;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 6305c4e9cca1..4f0390918b60 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -893,6 +893,8 @@ struct ieee80211_chanctx {
struct ieee80211_chan_req req;
struct ieee80211_chanctx_conf conf;
+
+ bool radar_detected;
};
struct mac80211_qos_map {
@@ -1067,6 +1069,7 @@ struct ieee80211_link_data {
int ap_power_level; /* in dBm */
bool radar_required;
+ struct wiphy_delayed_work dfs_cac_timer_work;
union {
struct ieee80211_link_data_managed mgd;
@@ -1165,8 +1168,6 @@ struct ieee80211_sub_if_data {
struct ieee80211_link_data deflink;
struct ieee80211_link_data __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
- struct wiphy_delayed_work dfs_cac_timer_work;
-
/* for ieee80211_set_active_links_async() */
struct wiphy_work activate_links_work;
u16 desired_active_links;
@@ -2650,7 +2651,8 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
bool ieee80211_is_radar_required(struct ieee80211_local *local);
void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work);
-void ieee80211_dfs_cac_cancel(struct ieee80211_local *local);
+void ieee80211_dfs_cac_cancel(struct ieee80211_local *local,
+ struct ieee80211_chanctx *chanctx);
void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
struct wiphy_work *work);
int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index b4ad66af3af3..6ef0990d3d29 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -462,6 +462,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
{
struct ieee80211_local *local = sdata->local;
unsigned long flags;
+ struct sk_buff_head freeq;
struct sk_buff *skb, *tmp;
u32 hw_reconf_flags = 0;
int i, flushed;
@@ -550,15 +551,15 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
wiphy_work_cancel(local->hw.wiphy,
&sdata->deflink.color_change_finalize_work);
wiphy_delayed_work_cancel(local->hw.wiphy,
- &sdata->dfs_cac_timer_work);
+ &sdata->deflink.dfs_cac_timer_work);
- if (sdata->wdev.cac_started) {
+ if (sdata->wdev.links[0].cac_started) {
chandef = sdata->vif.bss_conf.chanreq.oper;
WARN_ON(local->suspended);
ieee80211_link_release_channel(&sdata->deflink);
cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_ABORTED,
- GFP_KERNEL);
+ GFP_KERNEL, 0);
}
if (sdata->vif.type == NL80211_IFTYPE_AP) {
@@ -637,18 +638,32 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
skb_queue_purge(&sdata->status_queue);
}
+ /*
+	 * Since ieee80211_free_txskb() may issue __dev_queue_xmit(),
+	 * which should be called with interrupts enabled, reclamation
+ * is done in two phases:
+ */
+ __skb_queue_head_init(&freeq);
+
+ /* unlink from local queues... */
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
skb_queue_walk_safe(&local->pending[i], skb, tmp) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (info->control.vif == &sdata->vif) {
__skb_unlink(skb, &local->pending[i]);
- ieee80211_free_txskb(&local->hw, skb);
+ __skb_queue_tail(&freeq, skb);
}
}
}
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ /* ... and perform actual reclamation with interrupts enabled. */
+ skb_queue_walk_safe(&freeq, skb, tmp) {
+ __skb_unlink(skb, &freeq);
+ ieee80211_free_txskb(&local->hw, skb);
+ }
+
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
ieee80211_txq_remove_vlan(local, sdata);
@@ -1729,8 +1744,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
wiphy_work_init(&sdata->work, ieee80211_iface_work);
wiphy_work_init(&sdata->activate_links_work,
ieee80211_activate_links_work);
- wiphy_delayed_work_init(&sdata->dfs_cac_timer_work,
- ieee80211_dfs_cac_timer_work);
switch (type) {
case NL80211_IFTYPE_P2P_GO:
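
The two-phase free added to ieee80211_do_stop() above is a generic pattern: entries are unlinked while the queue lock is held with IRQs disabled, and the actual freeing happens afterwards with interrupts enabled. A minimal standalone sketch of the same idea (names hypothetical):

static void drain_and_free(struct sk_buff_head *pending, spinlock_t *lock)
{
	struct sk_buff_head freeq;
	struct sk_buff *skb, *tmp;
	unsigned long flags;

	__skb_queue_head_init(&freeq);

	/* phase 1: unlink under the lock, interrupts disabled */
	spin_lock_irqsave(lock, flags);
	skb_queue_splice_init(pending, &freeq);
	spin_unlock_irqrestore(lock, flags);

	/* phase 2: free with interrupts enabled */
	skb_queue_walk_safe(&freeq, skb, tmp) {
		__skb_unlink(skb, &freeq);
		kfree_skb(skb);
	}
}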
diff --git a/net/mac80211/link.c b/net/mac80211/link.c
index 1a211b8d4057..0bbac64d5fa0 100644
--- a/net/mac80211/link.c
+++ b/net/mac80211/link.c
@@ -45,6 +45,8 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata,
ieee80211_color_collision_detection_work);
INIT_LIST_HEAD(&link->assigned_chanctx_list);
INIT_LIST_HEAD(&link->reserved_chanctx_list);
+ wiphy_delayed_work_init(&link->dfs_cac_timer_work,
+ ieee80211_dfs_cac_timer_work);
if (!deflink) {
switch (sdata->vif.type) {
@@ -75,6 +77,16 @@ void ieee80211_link_stop(struct ieee80211_link_data *link)
&link->color_change_finalize_work);
wiphy_work_cancel(link->sdata->local->hw.wiphy,
&link->csa.finalize_work);
+
+ if (link->sdata->wdev.links[link->link_id].cac_started) {
+ wiphy_delayed_work_cancel(link->sdata->local->hw.wiphy,
+ &link->dfs_cac_timer_work);
+ cfg80211_cac_event(link->sdata->dev,
+ &link->conf->chanreq.oper,
+ NL80211_RADAR_CAC_ABORTED,
+ GFP_KERNEL, link->link_id);
+ }
+
ieee80211_link_release_channel(link);
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 746f51ac0306..735e78adb0db 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3031,18 +3031,19 @@ void ieee80211_dynamic_ps_timer(struct timer_list *t)
void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work)
{
- struct ieee80211_sub_if_data *sdata =
- container_of(work, struct ieee80211_sub_if_data,
+ struct ieee80211_link_data *link =
+ container_of(work, struct ieee80211_link_data,
dfs_cac_timer_work.work);
- struct cfg80211_chan_def chandef = sdata->vif.bss_conf.chanreq.oper;
+ struct cfg80211_chan_def chandef = link->conf->chanreq.oper;
+ struct ieee80211_sub_if_data *sdata = link->sdata;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
- if (sdata->wdev.cac_started) {
- ieee80211_link_release_channel(&sdata->deflink);
+ if (sdata->wdev.links[link->link_id].cac_started) {
+ ieee80211_link_release_channel(link);
cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_FINISHED,
- GFP_KERNEL);
+ GFP_KERNEL, link->link_id);
}
}
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index d823d58303e8..7be52345f218 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -32,7 +32,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
ieee80211_scan_cancel(local);
- ieee80211_dfs_cac_cancel(local);
+ ieee80211_dfs_cac_cancel(local, NULL);
ieee80211_roc_purge(local, NULL);
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 14e18fd9e919..adb88c06b598 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -575,6 +575,7 @@ static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_sub_if_data *sdata_iter;
+ unsigned int link_id;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -585,8 +586,9 @@ static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata)
return false;
list_for_each_entry(sdata_iter, &local->interfaces, list) {
- if (sdata_iter->wdev.cac_started)
- return false;
+ for_each_valid_link(&sdata_iter->wdev, link_id)
+ if (sdata_iter->wdev.links[link_id].cac_started)
+ return false;
}
return true;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 3b9468dc3029..f94faa86ba8a 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -3467,24 +3467,44 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
return ts;
}
-void ieee80211_dfs_cac_cancel(struct ieee80211_local *local)
+/* Cancel CAC for all interfaces under the specified @local. If @ctx is
+ * also provided, only cancel CAC on the interfaces using that context.
+ */
+void ieee80211_dfs_cac_cancel(struct ieee80211_local *local,
+ struct ieee80211_chanctx *ctx)
{
struct ieee80211_sub_if_data *sdata;
struct cfg80211_chan_def chandef;
+ struct ieee80211_link_data *link;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ unsigned int link_id;
lockdep_assert_wiphy(local->hw.wiphy);
list_for_each_entry(sdata, &local->interfaces, list) {
- wiphy_delayed_work_cancel(local->hw.wiphy,
- &sdata->dfs_cac_timer_work);
-
- if (sdata->wdev.cac_started) {
- chandef = sdata->vif.bss_conf.chanreq.oper;
- ieee80211_link_release_channel(&sdata->deflink);
- cfg80211_cac_event(sdata->dev,
- &chandef,
+ for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS;
+ link_id++) {
+ link = sdata_dereference(sdata->link[link_id],
+ sdata);
+ if (!link)
+ continue;
+
+ chanctx_conf = sdata_dereference(link->conf->chanctx_conf,
+ sdata);
+ if (ctx && &ctx->conf != chanctx_conf)
+ continue;
+
+ wiphy_delayed_work_cancel(local->hw.wiphy,
+ &link->dfs_cac_timer_work);
+
+ if (!sdata->wdev.links[link_id].cac_started)
+ continue;
+
+ chandef = link->conf->chanreq.oper;
+ ieee80211_link_release_channel(link);
+ cfg80211_cac_event(sdata->dev, &chandef,
NL80211_RADAR_CAC_ABORTED,
- GFP_KERNEL);
+ GFP_KERNEL, link_id);
}
}
}
@@ -3494,9 +3514,8 @@ void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, radar_detected_work);
- struct cfg80211_chan_def chandef = local->hw.conf.chandef;
+ struct cfg80211_chan_def chandef;
struct ieee80211_chanctx *ctx;
- int num_chanctx = 0;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -3504,25 +3523,46 @@ void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
if (ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER)
continue;
- num_chanctx++;
+ if (!ctx->radar_detected)
+ continue;
+
+ ctx->radar_detected = false;
+
chandef = ctx->conf.def;
+
+ ieee80211_dfs_cac_cancel(local, ctx);
+ cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL);
}
+}
- ieee80211_dfs_cac_cancel(local);
+static void
+ieee80211_radar_mark_chan_ctx_iterator(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf,
+ void *data)
+{
+ struct ieee80211_chanctx *ctx =
+ container_of(chanctx_conf, struct ieee80211_chanctx,
+ conf);
- if (num_chanctx > 1)
- /* XXX: multi-channel is not supported yet */
- WARN_ON(1);
- else
- cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL);
+ if (ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER)
+ return;
+
+ if (data && data != chanctx_conf)
+ return;
+
+ ctx->radar_detected = true;
}
-void ieee80211_radar_detected(struct ieee80211_hw *hw)
+void ieee80211_radar_detected(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf)
{
struct ieee80211_local *local = hw_to_local(hw);
trace_api_radar_detected(local);
+ ieee80211_iter_chan_contexts_atomic(hw, ieee80211_radar_mark_chan_ctx_iterator,
+ chanctx_conf);
+
wiphy_work_queue(hw->wiphy, &local->radar_detected_work);
}
EXPORT_SYMBOL(ieee80211_radar_detected);
diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c
index 99382c317ebb..38d8121331d4 100644
--- a/net/mptcp/ctrl.c
+++ b/net/mptcp/ctrl.c
@@ -12,6 +12,7 @@
#include <net/netns/generic.h>
#include "protocol.h"
+#include "mib.h"
#define MPTCP_SYSCTL_PATH "net/mptcp"
@@ -27,8 +28,11 @@ struct mptcp_pernet {
#endif
unsigned int add_addr_timeout;
+ unsigned int blackhole_timeout;
unsigned int close_timeout;
unsigned int stale_loss_cnt;
+ atomic_t active_disable_times;
+ unsigned long active_disable_stamp;
u8 mptcp_enabled;
u8 checksum_enabled;
u8 allow_join_initial_addr_port;
@@ -87,6 +91,8 @@ static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet)
{
pernet->mptcp_enabled = 1;
pernet->add_addr_timeout = TCP_RTO_MAX;
+ pernet->blackhole_timeout = 3600;
+ atomic_set(&pernet->active_disable_times, 0);
pernet->close_timeout = TCP_TIMEWAIT_LEN;
pernet->checksum_enabled = 0;
pernet->allow_join_initial_addr_port = 1;
@@ -151,6 +157,20 @@ static int proc_available_schedulers(const struct ctl_table *ctl,
return ret;
}
+static int proc_blackhole_detect_timeout(const struct ctl_table *table,
+ int write, void *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ struct mptcp_pernet *pernet = mptcp_get_pernet(current->nsproxy->net_ns);
+ int ret;
+
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ if (write && ret == 0)
+ atomic_set(&pernet->active_disable_times, 0);
+
+ return ret;
+}
+
static struct ctl_table mptcp_sysctl_table[] = {
{
.procname = "enabled",
@@ -217,6 +237,13 @@ static struct ctl_table mptcp_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
+ {
+ .procname = "blackhole_timeout",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_blackhole_detect_timeout,
+ .extra1 = SYSCTL_ZERO,
+ },
};
static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
@@ -240,6 +267,7 @@ static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
table[6].data = &pernet->scheduler;
/* table[7] is for available_schedulers which is read-only info */
table[8].data = &pernet->close_timeout;
+ table[9].data = &pernet->blackhole_timeout;
hdr = register_net_sysctl_sz(net, MPTCP_SYSCTL_PATH, table,
ARRAY_SIZE(mptcp_sysctl_table));
@@ -277,6 +305,111 @@ static void mptcp_pernet_del_table(struct mptcp_pernet *pernet) {}
#endif /* CONFIG_SYSCTL */
+/* The following code block deals with middlebox issues with MPTCP, similar
+ * to what is done with TFO.
+ * The approach is to disable active MPTCP globally when SYNs carrying MPC are
+ * dropped while SYNs without MPC are not. In this case, active-side MPTCP is
+ * first disabled globally for 1h. Then, if it happens again, it is disabled
+ * for 2h, then 4h, 8h, ...
+ * The timeout is reset back to 1h when an active MPTCP connection is
+ * successfully fully established.
+ */
+
+/* Disable active MPTCP and record current jiffies and active_disable_times */
+void mptcp_active_disable(struct sock *sk)
+{
+ struct net *net = sock_net(sk);
+ struct mptcp_pernet *pernet;
+
+ pernet = mptcp_get_pernet(net);
+
+ if (!READ_ONCE(pernet->blackhole_timeout))
+ return;
+
+ /* Paired with READ_ONCE() in mptcp_active_should_disable() */
+ WRITE_ONCE(pernet->active_disable_stamp, jiffies);
+
+ /* Paired with smp_rmb() in mptcp_active_should_disable().
+ * We want pernet->active_disable_stamp to be updated first.
+ */
+ smp_mb__before_atomic();
+ atomic_inc(&pernet->active_disable_times);
+
+ MPTCP_INC_STATS(net, MPTCP_MIB_BLACKHOLE);
+}
+
+/* Calculate timeout for MPTCP active disable
+ * Return true if we are still in the active MPTCP disable period
+ * Return false if timeout already expired and we should use active MPTCP
+ */
+bool mptcp_active_should_disable(struct sock *ssk)
+{
+ struct net *net = sock_net(ssk);
+ unsigned int blackhole_timeout;
+ struct mptcp_pernet *pernet;
+ unsigned long timeout;
+ int disable_times;
+ int multiplier;
+
+ pernet = mptcp_get_pernet(net);
+ blackhole_timeout = READ_ONCE(pernet->blackhole_timeout);
+
+ if (!blackhole_timeout)
+ return false;
+
+ disable_times = atomic_read(&pernet->active_disable_times);
+ if (!disable_times)
+ return false;
+
+ /* Paired with smp_mb__before_atomic() in mptcp_active_disable() */
+ smp_rmb();
+
+ /* Limit timeout to max: 2^6 * initial timeout */
+ multiplier = 1 << min(disable_times - 1, 6);
+
+ /* Paired with the WRITE_ONCE() in mptcp_active_disable(). */
+ timeout = READ_ONCE(pernet->active_disable_stamp) +
+ multiplier * blackhole_timeout * HZ;
+
+ return time_before(jiffies, timeout);
+}
+
+/* Enable active MPTCP and reset active_disable_times if needed */
+void mptcp_active_enable(struct sock *sk)
+{
+ struct mptcp_pernet *pernet = mptcp_get_pernet(sock_net(sk));
+
+ if (atomic_read(&pernet->active_disable_times)) {
+ struct dst_entry *dst = sk_dst_get(sk);
+
+ if (dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK))
+ atomic_set(&pernet->active_disable_times, 0);
+ }
+}
+
+/* Check the number of retransmissions, and fall back to TCP if needed */
+void mptcp_active_detect_blackhole(struct sock *ssk, bool expired)
+{
+ struct mptcp_subflow_context *subflow;
+ u32 timeouts;
+
+ if (!sk_is_mptcp(ssk))
+ return;
+
+ timeouts = inet_csk(ssk)->icsk_retransmits;
+ subflow = mptcp_subflow_ctx(ssk);
+
+ if (subflow->request_mptcp && ssk->sk_state == TCP_SYN_SENT) {
+ if (timeouts == 2 || (timeouts < 2 && expired)) {
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEACTIVEDROP);
+ subflow->mpc_drop = 1;
+ mptcp_subflow_early_fallback(mptcp_sk(subflow->conn), subflow);
+ } else {
+ subflow->mpc_drop = 0;
+ }
+ }
+}
+
static int __net_init mptcp_net_init(struct net *net)
{
struct mptcp_pernet *pernet = mptcp_get_pernet(net);
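
To make the backoff implemented by mptcp_active_should_disable() concrete (a worked example with the default blackhole_timeout of 3600 seconds, not part of the patch): the n-th consecutive blackhole detection keeps active MPTCP disabled for min(2^(n-1), 64) * blackhole_timeout, i.e. 1h, 2h, 4h, ... capped at 64h:

/* Length of the disable window after the n-th detection (illustrative) */
static unsigned long mptcp_disable_window_example(int disable_times,
						  unsigned int timeout_s)
{
	int multiplier = 1 << min(disable_times - 1, 6);

	return (unsigned long)multiplier * timeout_s * HZ;
}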
diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
index ec0d461cb921..38c2efc82b94 100644
--- a/net/mptcp/mib.c
+++ b/net/mptcp/mib.c
@@ -15,6 +15,8 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("MPCapableACKRX", MPTCP_MIB_MPCAPABLEPASSIVEACK),
SNMP_MIB_ITEM("MPCapableFallbackACK", MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK),
SNMP_MIB_ITEM("MPCapableFallbackSYNACK", MPTCP_MIB_MPCAPABLEACTIVEFALLBACK),
+ SNMP_MIB_ITEM("MPCapableSYNTXDrop", MPTCP_MIB_MPCAPABLEACTIVEDROP),
+ SNMP_MIB_ITEM("MPCapableSYNTXDisabled", MPTCP_MIB_MPCAPABLEACTIVEDISABLED),
SNMP_MIB_ITEM("MPFallbackTokenInit", MPTCP_MIB_TOKENFALLBACKINIT),
SNMP_MIB_ITEM("MPTCPRetrans", MPTCP_MIB_RETRANSSEGS),
SNMP_MIB_ITEM("MPJoinNoTokenFound", MPTCP_MIB_JOINNOTOKEN),
@@ -73,6 +75,7 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("RcvWndConflictUpdate", MPTCP_MIB_RCVWNDCONFLICTUPDATE),
SNMP_MIB_ITEM("RcvWndConflict", MPTCP_MIB_RCVWNDCONFLICT),
SNMP_MIB_ITEM("MPCurrEstab", MPTCP_MIB_CURRESTAB),
+ SNMP_MIB_ITEM("Blackhole", MPTCP_MIB_BLACKHOLE),
SNMP_MIB_SENTINEL
};
diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
index d68136f93dac..c8ffe18a8722 100644
--- a/net/mptcp/mib.h
+++ b/net/mptcp/mib.h
@@ -10,6 +10,8 @@ enum linux_mptcp_mib_field {
MPTCP_MIB_MPCAPABLEPASSIVEACK, /* Received third ACK with MP_CAPABLE */
MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK,/* Server-side fallback during 3-way handshake */
MPTCP_MIB_MPCAPABLEACTIVEFALLBACK, /* Client-side fallback during 3-way handshake */
+ MPTCP_MIB_MPCAPABLEACTIVEDROP, /* Client-side fallback due to a MPC drop */
+ MPTCP_MIB_MPCAPABLEACTIVEDISABLED, /* Client-side disabled due to past issues */
MPTCP_MIB_TOKENFALLBACKINIT, /* Could not init/allocate token */
MPTCP_MIB_RETRANSSEGS, /* Segments retransmitted at the MPTCP-level */
MPTCP_MIB_JOINNOTOKEN, /* Received MP_JOIN but the token was not found */
@@ -74,6 +76,7 @@ enum linux_mptcp_mib_field {
*/
MPTCP_MIB_RCVWNDCONFLICT, /* Conflict with while updating msk rcv wnd */
MPTCP_MIB_CURRESTAB, /* Current established MPTCP connections */
+ MPTCP_MIB_BLACKHOLE, /* A blackhole has been detected */
__MPTCP_MIB_MAX
};
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 62a42f7ee7cb..64fe0e7d87d7 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -338,15 +338,21 @@ mptcp_pm_del_add_timer(struct mptcp_sock *msk,
{
struct mptcp_pm_add_entry *entry;
struct sock *sk = (struct sock *)msk;
+ struct timer_list *add_timer = NULL;
spin_lock_bh(&msk->pm.lock);
entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
- if (entry && (!check_id || entry->addr.id == addr->id))
+ if (entry && (!check_id || entry->addr.id == addr->id)) {
entry->retrans_times = ADD_ADDR_RETRANS_MAX;
+ add_timer = &entry->add_timer;
+ }
+ if (!check_id && entry)
+ list_del(&entry->list);
spin_unlock_bh(&msk->pm.lock);
- if (entry && (!check_id || entry->addr.id == addr->id))
- sk_stop_timer_sync(sk, &entry->add_timer);
+	/* no lock needed, as sk_stop_timer_sync() calls del_timer_sync() */
+ if (add_timer)
+ sk_stop_timer_sync(sk, add_timer);
return entry;
}
@@ -1444,7 +1450,6 @@ static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
entry = mptcp_pm_del_add_timer(msk, addr, false);
if (entry) {
- list_del(&entry->list);
kfree(entry);
return true;
}
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 37ebcb7640eb..c2317919fc14 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -3717,13 +3717,6 @@ static int mptcp_ioctl(struct sock *sk, int cmd, int *karg)
return 0;
}
-static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
- struct mptcp_subflow_context *subflow)
-{
- subflow->request_mptcp = 0;
- __mptcp_do_fallback(msk);
-}
-
static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct mptcp_subflow_context *subflow;
@@ -3744,9 +3737,14 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (rcu_access_pointer(tcp_sk(ssk)->md5sig_info))
mptcp_subflow_early_fallback(msk, subflow);
#endif
- if (subflow->request_mptcp && mptcp_token_new_connect(ssk)) {
- MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_TOKENFALLBACKINIT);
- mptcp_subflow_early_fallback(msk, subflow);
+ if (subflow->request_mptcp) {
+ if (mptcp_active_should_disable(sk)) {
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEACTIVEDISABLED);
+ mptcp_subflow_early_fallback(msk, subflow);
+ } else if (mptcp_token_new_connect(ssk) < 0) {
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_TOKENFALLBACKINIT);
+ mptcp_subflow_early_fallback(msk, subflow);
+ }
}
WRITE_ONCE(msk->write_seq, subflow->idsn);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index bf03bff9ac44..74417aae08d0 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -531,7 +531,8 @@ struct mptcp_subflow_context {
valid_csum_seen : 1, /* at least one csum validated */
is_mptfo : 1, /* subflow is doing TFO */
close_event_done : 1, /* has done the post-closed part */
- __unused : 9;
+ mpc_drop : 1, /* the MPC option has been dropped in a rtx */
+ __unused : 8;
bool data_avail;
bool scheduled;
u32 remote_nonce;
@@ -697,6 +698,11 @@ unsigned int mptcp_stale_loss_cnt(const struct net *net);
unsigned int mptcp_close_timeout(const struct sock *sk);
int mptcp_get_pm_type(const struct net *net);
const char *mptcp_get_scheduler(const struct net *net);
+
+void mptcp_active_disable(struct sock *sk);
+bool mptcp_active_should_disable(struct sock *ssk);
+void mptcp_active_enable(struct sock *sk);
+
void mptcp_get_available_schedulers(char *buf, size_t maxlen);
void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
struct mptcp_subflow_context *subflow,
@@ -1215,6 +1221,14 @@ static inline void mptcp_do_fallback(struct sock *ssk)
#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)\n", __func__, a)
+static inline void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
+ struct mptcp_subflow_context *subflow)
+{
+ pr_fallback(msk);
+ subflow->request_mptcp = 0;
+ __mptcp_do_fallback(msk);
+}
+
static inline bool mptcp_check_infinite_map(struct sk_buff *skb)
{
struct mptcp_ext *mpext;
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index b9b14e75e8c2..1040b3b9696b 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -546,6 +546,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->mp_capable = 1;
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
mptcp_finish_connect(sk);
+ mptcp_active_enable(parent);
mptcp_propagate_state(parent, sk, subflow, &mp_opt);
} else if (subflow->request_join) {
u8 hmac[SHA256_DIGEST_SIZE];
@@ -591,6 +592,9 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
}
} else if (mptcp_check_fallback(sk)) {
+ /* It looks like MPTCP is blocked, while TCP is not */
+ if (subflow->mpc_drop)
+ mptcp_active_disable(parent);
fallback:
mptcp_propagate_state(parent, sk, subflow, NULL);
}
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 5c1ff07eaee0..df72b0376970 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -670,8 +670,14 @@ static int __init nf_flow_table_module_init(void)
if (ret)
goto out_offload;
+ ret = nf_flow_register_bpf();
+ if (ret)
+ goto out_bpf;
+
return 0;
+out_bpf:
+ nf_flow_table_offload_exit();
out_offload:
unregister_pernet_subsys(&nf_flow_table_net_ops);
return ret;
diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c
index 8b541a080342..b0f199171932 100644
--- a/net/netfilter/nf_flow_table_inet.c
+++ b/net/netfilter/nf_flow_table_inet.c
@@ -101,7 +101,7 @@ static int __init nf_flow_inet_module_init(void)
nft_register_flowtable_type(&flowtable_ipv6);
nft_register_flowtable_type(&flowtable_inet);
- return nf_flow_register_bpf();
+ return 0;
}
static void __exit nf_flow_inet_module_exit(void)
diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
index 947566dba1ea..ac3c9e9cf0f3 100644
--- a/net/netfilter/nft_socket.c
+++ b/net/netfilter/nft_socket.c
@@ -9,7 +9,8 @@
struct nft_socket {
enum nft_socket_keys key:8;
- u8 level;
+ u8 level; /* cgroupv2 level to extract */
+ u8 level_user; /* cgroupv2 level provided by userspace */
u8 len;
union {
u8 dreg;
@@ -53,6 +54,28 @@ nft_sock_get_eval_cgroupv2(u32 *dest, struct sock *sk, const struct nft_pktinfo
memcpy(dest, &cgid, sizeof(u64));
return true;
}
+
+/* process context only, uses current->nsproxy. */
+static noinline int nft_socket_cgroup_subtree_level(void)
+{
+ struct cgroup *cgrp = cgroup_get_from_path("/");
+ int level;
+
+ if (!cgrp)
+ return -ENOENT;
+
+ level = cgrp->level;
+
+ cgroup_put(cgrp);
+
+ if (WARN_ON_ONCE(level > 255))
+ return -ERANGE;
+
+ if (WARN_ON_ONCE(level < 0))
+ return -EINVAL;
+
+ return level;
+}
#endif
static struct sock *nft_socket_do_lookup(const struct nft_pktinfo *pkt)
@@ -110,13 +133,13 @@ static void nft_socket_eval(const struct nft_expr *expr,
*dest = READ_ONCE(sk->sk_mark);
} else {
regs->verdict.code = NFT_BREAK;
- return;
+ goto out_put_sk;
}
break;
case NFT_SOCKET_WILDCARD:
if (!sk_fullsock(sk)) {
regs->verdict.code = NFT_BREAK;
- return;
+ goto out_put_sk;
}
nft_socket_wildcard(pkt, regs, sk, dest);
break;
@@ -124,7 +147,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
case NFT_SOCKET_CGROUPV2:
if (!nft_sock_get_eval_cgroupv2(dest, sk, pkt, priv->level)) {
regs->verdict.code = NFT_BREAK;
- return;
+ goto out_put_sk;
}
break;
#endif
@@ -133,6 +156,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
regs->verdict.code = NFT_BREAK;
}
+out_put_sk:
if (sk != skb->sk)
sock_gen_put(sk);
}
@@ -173,9 +197,10 @@ static int nft_socket_init(const struct nft_ctx *ctx,
case NFT_SOCKET_MARK:
len = sizeof(u32);
break;
-#ifdef CONFIG_CGROUPS
+#ifdef CONFIG_SOCK_CGROUP_DATA
case NFT_SOCKET_CGROUPV2: {
unsigned int level;
+ int err;
if (!tb[NFTA_SOCKET_LEVEL])
return -EINVAL;
@@ -184,6 +209,17 @@ static int nft_socket_init(const struct nft_ctx *ctx,
if (level > 255)
return -EOPNOTSUPP;
+ err = nft_socket_cgroup_subtree_level();
+ if (err < 0)
+ return err;
+
+ priv->level_user = level;
+
+ level += err;
+ /* Implies a giant cgroup tree */
+ if (WARN_ON_ONCE(level > 255))
+ return -EOPNOTSUPP;
+
priv->level = level;
len = sizeof(u64);
break;
@@ -208,7 +244,7 @@ static int nft_socket_dump(struct sk_buff *skb,
if (nft_dump_register(skb, NFTA_SOCKET_DREG, priv->dreg))
return -1;
if (priv->key == NFT_SOCKET_CGROUPV2 &&
- nla_put_be32(skb, NFTA_SOCKET_LEVEL, htonl(priv->level)))
+ nla_put_be32(skb, NFTA_SOCKET_LEVEL, htonl(priv->level_user)))
return -1;
return 0;
}
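
The level/level_user split above accounts for cgroup namespaces: the ancestor level requested by user space is relative to the current namespace's root, so it is shifted by the level of "/" as seen when the rule is loaded, while the dump path reports the original value back. A rough sketch of the adjustment (illustrative only, built on the nft_socket_cgroup_subtree_level() helper added above):

static int nft_socket_effective_level_example(u8 level_user)
{
	/* e.g. inside a container whose root sits at level 2 of the host
	 * hierarchy, a requested level of 1 must match host level 3
	 */
	int root_level = nft_socket_cgroup_subtree_level();

	if (root_level < 0)
		return root_level;

	return level_user + root_level;	/* must still fit in u8 (<= 255) */
}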
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 4a364cdd445e..a705ec214254 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2216,7 +2216,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
}
}
- snaplen = skb->len;
+ snaplen = skb_frags_readable(skb) ? skb->len : skb_headlen(skb);
res = run_filter(skb, sk, snaplen);
if (!res)
@@ -2336,7 +2336,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
}
}
- snaplen = skb->len;
+ snaplen = skb_frags_readable(skb) ? skb->len : skb_headlen(skb);
res = run_filter(skb, sk, snaplen);
if (!res)
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index d2f49db70523..f2f9b75008bb 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -361,8 +361,24 @@ static const u8 besteffort[] = {
static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
static const u8 bulk_order[] = {1, 0, 2, 3};
+/* There is a big difference in timing between the accurate values placed in the
+ * cache and the approximations given by a single Newton step for small count
+ * values, particularly when stepping from count 1 to 2 or vice versa. Hence,
+ * these values are calculated using eight Newton steps of the implementation
+ * below. From count 16 upwards, a single Newton step gives sufficient
+ * accuracy in either direction, given the precision stored.
+ *
+ * The magnitude of the error when stepping up to count 2 is such as to give the
+ * value that *should* have been produced at count 4.
+ */
+
#define REC_INV_SQRT_CACHE (16)
-static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};
+static const u32 inv_sqrt_cache[REC_INV_SQRT_CACHE] = {
+ ~0, ~0, 3037000500, 2479700525,
+ 2147483647, 1920767767, 1753413056, 1623345051,
+ 1518500250, 1431655765, 1358187914, 1294981364,
+ 1239850263, 1191209601, 1147878294, 1108955788
+};
/* http://en.wikipedia.org/wiki/Methods_of_computing_square_roots
* new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
@@ -388,47 +404,14 @@ static void cobalt_newton_step(struct cobalt_vars *vars)
static void cobalt_invsqrt(struct cobalt_vars *vars)
{
if (vars->count < REC_INV_SQRT_CACHE)
- vars->rec_inv_sqrt = cobalt_rec_inv_sqrt_cache[vars->count];
+ vars->rec_inv_sqrt = inv_sqrt_cache[vars->count];
else
cobalt_newton_step(vars);
}
-/* There is a big difference in timing between the accurate values placed in
- * the cache and the approximations given by a single Newton step for small
- * count values, particularly when stepping from count 1 to 2 or vice versa.
- * Above 16, a single Newton step gives sufficient accuracy in either
- * direction, given the precision stored.
- *
- * The magnitude of the error when stepping up to count 2 is such as to give
- * the value that *should* have been produced at count 4.
- */
-
-static void cobalt_cache_init(void)
-{
- struct cobalt_vars v;
-
- memset(&v, 0, sizeof(v));
- v.rec_inv_sqrt = ~0U;
- cobalt_rec_inv_sqrt_cache[0] = v.rec_inv_sqrt;
-
- for (v.count = 1; v.count < REC_INV_SQRT_CACHE; v.count++) {
- cobalt_newton_step(&v);
- cobalt_newton_step(&v);
- cobalt_newton_step(&v);
- cobalt_newton_step(&v);
-
- cobalt_rec_inv_sqrt_cache[v.count] = v.rec_inv_sqrt;
- }
-}
-
static void cobalt_vars_init(struct cobalt_vars *vars)
{
memset(vars, 0, sizeof(*vars));
-
- if (!cobalt_rec_inv_sqrt_cache[0]) {
- cobalt_cache_init();
- cobalt_rec_inv_sqrt_cache[0] = ~0;
- }
}
/* CoDel control_law is t + interval/sqrt(count)
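
The constified inv_sqrt_cache[] entries above are 1/sqrt(count) in Q0.32 fixed point. A small user-space generator along these lines reproduces them to within one LSB of the in-kernel Newton iteration (a sketch for reference, not part of the patch; counts 0 and 1 saturate at ~0):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	for (int count = 0; count < 16; count++) {
		uint32_t v = count < 2 ? UINT32_MAX :
			     (uint32_t)llround(4294967296.0 / sqrt(count));
		printf("%u,%c", v, count == 15 ? '\n' : ' ');
	}
	return 0;
}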
diff --git a/net/socket.c b/net/socket.c
index 0a2bd22ec105..8d8b84fa404a 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -946,11 +946,17 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
memset(&tss, 0, sizeof(tss));
tsflags = READ_ONCE(sk->sk_tsflags);
- if ((tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
+ if ((tsflags & SOF_TIMESTAMPING_SOFTWARE &&
+ (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE ||
+ skb_is_err_queue(skb) ||
+ !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER))) &&
ktime_to_timespec64_cond(skb->tstamp, tss.ts + 0))
empty = 0;
if (shhwtstamps &&
- (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
+ (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE &&
+ (tsflags & SOF_TIMESTAMPING_RX_HARDWARE ||
+ skb_is_err_queue(skb) ||
+ !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER))) &&
!skb_is_swtx_tstamp(skb, false_tstamp)) {
if_index = 0;
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_NETDEV)
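
With the change above, an RX timestamp is only reported if the matching RX flag was requested, the skb comes from the error queue, or SOF_TIMESTAMPING_OPT_RX_FILTER is not set at all. A minimal user-space sketch of opting into the filtered behaviour (assuming the new flag is exposed through the usual SO_TIMESTAMPING socket option):

#include <sys/socket.h>
#include <linux/net_tstamp.h>

static int enable_filtered_rx_tstamps(int fd)
{
	/* deliver software RX timestamps only; hardware ones are filtered */
	unsigned int flags = SOF_TIMESTAMPING_SOFTWARE |
			     SOF_TIMESTAMPING_RX_SOFTWARE |
			     SOF_TIMESTAMPING_OPT_RX_FILTER;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}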
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 41c8c0e3ba2e..3b3e3cd7027a 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -170,6 +170,12 @@ static inline int for_each_rdev_check_rtnl(void)
if (for_each_rdev_check_rtnl()) {} else \
list_for_each_entry(rdev, &cfg80211_rdev_list, list)
+enum bss_source_type {
+ BSS_SOURCE_DIRECT = 0,
+ BSS_SOURCE_MBSSID,
+ BSS_SOURCE_STA_PROFILE,
+};
+
struct cfg80211_internal_bss {
struct list_head list;
struct list_head hidden_list;
@@ -191,6 +197,8 @@ struct cfg80211_internal_bss {
*/
u8 parent_bssid[ETH_ALEN] __aligned(2);
+ enum bss_source_type bss_source;
+
/* must be last because of priv member */
struct cfg80211_bss pub;
};
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 34e5acff3935..1e3ed29f7cfc 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -94,7 +94,7 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
lockdep_assert_held(&rdev->wiphy.mtx);
- if (wdev->cac_started)
+ if (wdev->links[0].cac_started)
return -EBUSY;
if (wdev->u.ibss.ssid_len)
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index aaca65b66af4..2c6654075ca9 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -127,7 +127,7 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
if (!rdev->ops->join_mesh)
return -EOPNOTSUPP;
- if (wdev->cac_started)
+ if (wdev->links[0].cac_started)
return -EBUSY;
if (!setup->chandef.chan) {
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 4052041a19ea..4dac81854721 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -1110,26 +1110,28 @@ EXPORT_SYMBOL(__cfg80211_radar_event);
void cfg80211_cac_event(struct net_device *netdev,
const struct cfg80211_chan_def *chandef,
- enum nl80211_radar_event event, gfp_t gfp)
+ enum nl80211_radar_event event, gfp_t gfp,
+ unsigned int link_id)
{
struct wireless_dev *wdev = netdev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
unsigned long timeout;
- /* not yet supported */
- if (wdev->valid_links)
+ if (WARN_ON(wdev->valid_links &&
+ !(wdev->valid_links & BIT(link_id))))
return;
- trace_cfg80211_cac_event(netdev, event);
+ trace_cfg80211_cac_event(netdev, event, link_id);
- if (WARN_ON(!wdev->cac_started && event != NL80211_RADAR_CAC_STARTED))
+ if (WARN_ON(!wdev->links[link_id].cac_started &&
+ event != NL80211_RADAR_CAC_STARTED))
return;
switch (event) {
case NL80211_RADAR_CAC_FINISHED:
- timeout = wdev->cac_start_time +
- msecs_to_jiffies(wdev->cac_time_ms);
+ timeout = wdev->links[link_id].cac_start_time +
+ msecs_to_jiffies(wdev->links[link_id].cac_time_ms);
WARN_ON(!time_after_eq(jiffies, timeout));
cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_AVAILABLE);
memcpy(&rdev->cac_done_chandef, chandef,
@@ -1138,10 +1140,10 @@ void cfg80211_cac_event(struct net_device *netdev,
cfg80211_sched_dfs_chan_update(rdev);
fallthrough;
case NL80211_RADAR_CAC_ABORTED:
- wdev->cac_started = false;
+ wdev->links[link_id].cac_started = false;
break;
case NL80211_RADAR_CAC_STARTED:
- wdev->cac_started = true;
+ wdev->links[link_id].cac_started = true;
break;
default:
WARN_ON(1);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index b368e23847dd..9ab777e0bd4d 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -6066,7 +6066,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->start_ap)
return -EOPNOTSUPP;
- if (wdev->cac_started)
+ if (wdev->links[link_id].cac_started)
return -EBUSY;
if (wdev->links[link_id].ap.beacon_interval)
@@ -9778,7 +9778,8 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
return ERR_PTR(-ENOMEM);
if (n_ssids)
- request->ssids = (void *)&request->channels[n_channels];
+ request->ssids = (void *)request +
+ struct_size(request, channels, n_channels);
request->n_ssids = n_ssids;
if (ie_len) {
if (n_ssids)
@@ -10072,6 +10073,7 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int link_id = nl80211_link_id(info->attrs);
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_chan_def chandef;
enum nl80211_dfs_regions dfs_region;
@@ -10121,7 +10123,20 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
goto unlock;
}
- if (cfg80211_beaconing_iface_active(wdev) || wdev->cac_started) {
+ if (cfg80211_beaconing_iface_active(wdev)) {
+		/* With MLO, other link(s) may already be beaconing; only the
+		 * current link must not be beaconing yet
+ */
+ if (wdev->valid_links &&
+ !wdev->links[link_id].ap.beacon_interval) {
+ /* nothing */
+ } else {
+ err = -EBUSY;
+ goto unlock;
+ }
+ }
+
+ if (wdev->links[link_id].cac_started) {
err = -EBUSY;
goto unlock;
}
@@ -10141,7 +10156,8 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
if (WARN_ON(!cac_time_ms))
cac_time_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
- err = rdev_start_radar_detection(rdev, dev, &chandef, cac_time_ms);
+ err = rdev_start_radar_detection(rdev, dev, &chandef, cac_time_ms,
+ link_id);
if (!err) {
switch (wdev->iftype) {
case NL80211_IFTYPE_AP:
@@ -10157,9 +10173,9 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
default:
break;
}
- wdev->cac_started = true;
- wdev->cac_start_time = jiffies;
- wdev->cac_time_ms = cac_time_ms;
+ wdev->links[link_id].cac_started = true;
+ wdev->links[link_id].cac_start_time = jiffies;
+ wdev->links[link_id].cac_time_ms = cac_time_ms;
}
unlock:
wiphy_unlock(wiphy);
@@ -10507,17 +10523,21 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
NL80211_BSS_CHAIN_SIGNAL))
goto nla_put_failure;
- switch (rdev->wiphy.signal_type) {
- case CFG80211_SIGNAL_TYPE_MBM:
- if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal))
- goto nla_put_failure;
- break;
- case CFG80211_SIGNAL_TYPE_UNSPEC:
- if (nla_put_u8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal))
- goto nla_put_failure;
- break;
- default:
- break;
+ if (intbss->bss_source != BSS_SOURCE_STA_PROFILE) {
+ switch (rdev->wiphy.signal_type) {
+ case CFG80211_SIGNAL_TYPE_MBM:
+ if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM,
+ res->signal))
+ goto nla_put_failure;
+ break;
+ case CFG80211_SIGNAL_TYPE_UNSPEC:
+ if (nla_put_u8(msg, NL80211_BSS_SIGNAL_UNSPEC,
+ res->signal))
+ goto nla_put_failure;
+ break;
+ default:
+ break;
+ }
}
switch (wdev->iftype) {
@@ -16511,10 +16531,10 @@ nl80211_set_ttlm(struct sk_buff *skb, struct genl_info *info)
SELECTOR(__sel, NETDEV_UP_NOTMX, \
NL80211_FLAG_NEED_NETDEV_UP | \
NL80211_FLAG_NO_WIPHY_MTX) \
- SELECTOR(__sel, NETDEV_UP_NOTMX_NOMLO, \
+ SELECTOR(__sel, NETDEV_UP_NOTMX_MLO, \
NL80211_FLAG_NEED_NETDEV_UP | \
NL80211_FLAG_NO_WIPHY_MTX | \
- NL80211_FLAG_MLO_UNSUPPORTED) \
+ NL80211_FLAG_MLO_VALID_LINK_ID) \
SELECTOR(__sel, NETDEV_UP_CLEAR, \
NL80211_FLAG_NEED_NETDEV_UP | \
NL80211_FLAG_CLEAR_SKB) \
@@ -17409,7 +17429,7 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.flags = GENL_UNS_ADMIN_PERM,
.internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NO_WIPHY_MTX |
- NL80211_FLAG_MLO_UNSUPPORTED),
+ NL80211_FLAG_MLO_VALID_LINK_ID),
},
{
.cmd = NL80211_CMD_GET_PROTOCOL_FEATURES,
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index ec3f4aa1c807..f5adbf6b5c84 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -1200,26 +1200,27 @@ static inline int
rdev_start_radar_detection(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms)
+ u32 cac_time_ms, int link_id)
{
int ret = -EOPNOTSUPP;
trace_rdev_start_radar_detection(&rdev->wiphy, dev, chandef,
- cac_time_ms);
+ cac_time_ms, link_id);
if (rdev->ops->start_radar_detection)
ret = rdev->ops->start_radar_detection(&rdev->wiphy, dev,
- chandef, cac_time_ms);
+ chandef, cac_time_ms,
+ link_id);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
static inline void
rdev_end_cac(struct cfg80211_registered_device *rdev,
- struct net_device *dev)
+ struct net_device *dev, unsigned int link_id)
{
- trace_rdev_end_cac(&rdev->wiphy, dev);
+ trace_rdev_end_cac(&rdev->wiphy, dev, link_id);
if (rdev->ops->end_cac)
- rdev->ops->end_cac(&rdev->wiphy, dev);
+ rdev->ops->end_cac(&rdev->wiphy, dev, link_id);
trace_rdev_return_void(&rdev->wiphy);
}
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 4a27f3823e25..6489ba943a63 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -4229,6 +4229,8 @@ EXPORT_SYMBOL(regulatory_pre_cac_allowed);
static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev)
{
struct wireless_dev *wdev;
+ unsigned int link_id;
+
/* If we finished CAC or received radar, we should end any
* CAC running on the same channels.
* the check !cfg80211_chandef_dfs_usable contain 2 options:
@@ -4241,16 +4243,17 @@ static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev)
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
struct cfg80211_chan_def *chandef;
- if (!wdev->cac_started)
- continue;
+ for_each_valid_link(wdev, link_id) {
+ if (!wdev->links[link_id].cac_started)
+ continue;
- /* FIXME: radar detection is tied to link 0 for now */
- chandef = wdev_chandef(wdev, 0);
- if (!chandef)
- continue;
+ chandef = wdev_chandef(wdev, link_id);
+ if (!chandef)
+ continue;
- if (!cfg80211_chandef_dfs_usable(&rdev->wiphy, chandef))
- rdev_end_cac(rdev, wdev->netdev);
+ if (!cfg80211_chandef_dfs_usable(&rdev->wiphy, chandef))
+ rdev_end_cac(rdev, wdev->netdev, link_id);
+ }
}
}
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 64eeed82d43d..59a90bf3c0d6 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1910,6 +1910,7 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev,
known->pub.bssid_index = new->pub.bssid_index;
known->pub.use_for &= new->pub.use_for;
known->pub.cannot_use_reasons = new->pub.cannot_use_reasons;
+ known->bss_source = new->bss_source;
return true;
}
@@ -2008,10 +2009,10 @@ __cfg80211_bss_update(struct cfg80211_registered_device *rdev,
return found;
free_ies:
- ies = (void *)rcu_dereference(tmp->pub.beacon_ies);
+ ies = (void *)rcu_access_pointer(tmp->pub.beacon_ies);
if (ies)
kfree_rcu(ies, rcu_head);
- ies = (void *)rcu_dereference(tmp->pub.proberesp_ies);
+ ies = (void *)rcu_access_pointer(tmp->pub.proberesp_ies);
if (ies)
kfree_rcu(ies, rcu_head);
@@ -2149,11 +2150,7 @@ struct cfg80211_inform_single_bss_data {
const u8 *ie;
size_t ielen;
- enum {
- BSS_SOURCE_DIRECT = 0,
- BSS_SOURCE_MBSSID,
- BSS_SOURCE_STA_PROFILE,
- } bss_source;
+ enum bss_source_type bss_source;
/* Set if reporting bss_source != BSS_SOURCE_DIRECT */
struct cfg80211_bss *source_bss;
u8 max_bssid_indicator;
@@ -2268,6 +2265,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
IEEE80211_MAX_CHAINS);
tmp.pub.use_for = data->use_for;
tmp.pub.cannot_use_reasons = data->cannot_use_reasons;
+ tmp.bss_source = data->bss_source;
switch (data->bss_source) {
case BSS_SOURCE_MBSSID:
@@ -2907,6 +2905,9 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
struct element *reporter_rnr = NULL;
struct ieee80211_multi_link_elem *ml_elem;
struct cfg80211_mle *mle;
+ const struct element *ssid_elem;
+ const u8 *ssid = NULL;
+ size_t ssid_len = 0;
u16 control;
u8 ml_common_len;
u8 *new_ie = NULL;
@@ -2961,6 +2962,13 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
bss_change_count,
gfp);
+ ssid_elem = cfg80211_find_elem(WLAN_EID_SSID, tx_data->ie,
+ tx_data->ielen);
+ if (ssid_elem) {
+ ssid = ssid_elem->data;
+ ssid_len = ssid_elem->datalen;
+ }
+
for (i = 0; i < ARRAY_SIZE(mle->sta_prof) && mle->sta_prof[i]; i++) {
const struct ieee80211_neighbor_ap_info *ap_info;
enum nl80211_band band;
@@ -3042,6 +3050,23 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
freq = ieee80211_channel_to_freq_khz(ap_info->channel, band);
data.channel = ieee80211_get_channel_khz(wiphy, freq);
+		/* Skip if a BSS entry generated from an MBSSID or DIRECT
+		 * source frame is already available.
+ */
+ bss = cfg80211_get_bss(wiphy, data.channel, data.bssid, ssid,
+ ssid_len, IEEE80211_BSS_TYPE_ANY,
+ IEEE80211_PRIVACY_ANY);
+ if (bss) {
+ struct cfg80211_internal_bss *ibss = bss_from_pub(bss);
+
+ if (data.capability == bss->capability &&
+ ibss->bss_source != BSS_SOURCE_STA_PROFILE) {
+ cfg80211_put_bss(wiphy, bss);
+ continue;
+ }
+ cfg80211_put_bss(wiphy, bss);
+ }
+
if (use_for == NL80211_BSS_USE_FOR_MLD_LINK &&
!(wiphy->flags & WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY)) {
use_for = 0;
@@ -3467,8 +3492,8 @@ int cfg80211_wext_siwscan(struct net_device *dev,
n_channels = ieee80211_get_num_supported_channels(wiphy);
}
- creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
- n_channels * sizeof(void *),
+ creq = kzalloc(struct_size(creq, channels, n_channels) +
+ sizeof(struct cfg80211_ssid),
GFP_ATOMIC);
if (!creq)
return -ENOMEM;
@@ -3476,7 +3501,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
creq->wiphy = wiphy;
creq->wdev = dev->ieee80211_ptr;
/* SSIDs come after channels */
- creq->ssids = (void *)&creq->channels[n_channels];
+ creq->ssids = (void *)creq + struct_size(creq, channels, n_channels);
creq->n_channels = n_channels;
creq->n_ssids = 1;
creq->scan_start = jiffies;
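
The struct_size() conversions in this file keep the placement of the trailing SSID array expressed in terms of the flexible channels[] array (an array of channel pointers) rather than indexing one element past its declared bounds. A self-contained sketch of the resulting layout (helper name illustrative):

static struct cfg80211_scan_request *
alloc_scan_request_example(int n_channels, int n_ssids, gfp_t gfp)
{
	struct cfg80211_scan_request *req;
	size_t chan_sz = struct_size(req, channels, n_channels);

	req = kzalloc(chan_sz + n_ssids * sizeof(struct cfg80211_ssid), gfp);
	if (!req)
		return NULL;

	/* SSIDs live directly after the flexible channels[] array */
	req->ssids = (void *)req + chan_sz;
	req->n_ssids = n_ssids;
	req->n_channels = n_channels;
	return req;
}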
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index d9d7bf8bb5c1..431da30817a6 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -115,7 +115,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
n_channels = i;
}
request->n_channels = n_channels;
- request->ssids = (void *)&request->channels[n_channels];
+ request->ssids = (void *)request +
+ struct_size(request, channels, n_channels);
request->n_ssids = 1;
memcpy(request->ssids[0].ssid, wdev->conn->params.ssid,
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 5c26f065bd68..97c21b627791 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -805,9 +805,22 @@ DEFINE_EVENT(wiphy_netdev_evt, rdev_flush_pmksa,
TP_ARGS(wiphy, netdev)
);
-DEFINE_EVENT(wiphy_netdev_evt, rdev_end_cac,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
- TP_ARGS(wiphy, netdev)
+TRACE_EVENT(rdev_end_cac,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ unsigned int link_id),
+ TP_ARGS(wiphy, netdev, link_id),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __field(unsigned int, link_id)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ __entry->link_id = link_id;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id)
);
DECLARE_EVENT_CLASS(station_add_change,
@@ -2652,24 +2665,26 @@ TRACE_EVENT(rdev_external_auth,
TRACE_EVENT(rdev_start_radar_detection,
TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms),
- TP_ARGS(wiphy, netdev, chandef, cac_time_ms),
+ u32 cac_time_ms, int link_id),
+ TP_ARGS(wiphy, netdev, chandef, cac_time_ms, link_id),
TP_STRUCT__entry(
WIPHY_ENTRY
NETDEV_ENTRY
CHAN_DEF_ENTRY
__field(u32, cac_time_ms)
+ __field(int, link_id)
),
TP_fast_assign(
WIPHY_ASSIGN;
NETDEV_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
__entry->cac_time_ms = cac_time_ms;
+ __entry->link_id = link_id;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT
- ", cac_time_ms=%u",
+ ", cac_time_ms=%u, link_id=%d",
WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG,
- __entry->cac_time_ms)
+ __entry->cac_time_ms, __entry->link_id)
);
TRACE_EVENT(rdev_set_mcast_rate,
@@ -3483,18 +3498,21 @@ TRACE_EVENT(cfg80211_radar_event,
);
TRACE_EVENT(cfg80211_cac_event,
- TP_PROTO(struct net_device *netdev, enum nl80211_radar_event evt),
- TP_ARGS(netdev, evt),
+ TP_PROTO(struct net_device *netdev, enum nl80211_radar_event evt,
+ unsigned int link_id),
+ TP_ARGS(netdev, evt, link_id),
TP_STRUCT__entry(
NETDEV_ENTRY
__field(enum nl80211_radar_event, evt)
+ __field(unsigned int, link_id)
),
TP_fast_assign(
NETDEV_ASSIGN;
__entry->evt = evt;
+ __entry->link_id = link_id;
),
- TP_printk(NETDEV_PR_FMT ", event: %d",
- NETDEV_PR_ARG, __entry->evt)
+ TP_printk(NETDEV_PR_FMT ", event: %d, link_id=%u",
+ NETDEV_PR_ARG, __entry->evt, __entry->link_id)
);
DECLARE_EVENT_CLASS(cfg80211_rx_evt,
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index c0e0204b9630..56edb98e5b47 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -211,6 +211,11 @@ int xp_assign_dev(struct xsk_buff_pool *pool,
goto err_unreg_pool;
}
+ if (dev_get_min_mp_channel_count(netdev)) {
+ err = -EBUSY;
+ goto err_unreg_pool;
+ }
+
bpf.command = XDP_SETUP_XSK_POOL;
bpf.xsk.pool = pool;
bpf.xsk.queue_id = queue_id;
@@ -656,9 +661,17 @@ EXPORT_SYMBOL(xp_alloc_batch);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
+ u32 req_count, avail_count;
+
if (pool->free_list_cnt >= count)
return true;
- return xskq_cons_has_entries(pool->fq, count - pool->free_list_cnt);
+
+ req_count = count - pool->free_list_cnt;
+ avail_count = xskq_cons_nb_entries(pool->fq, req_count);
+ if (!avail_count)
+ pool->fq->queue_empty_descs++;
+
+ return avail_count >= req_count;
}
EXPORT_SYMBOL(xp_can_alloc);
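
The open-coding in xp_can_alloc() above exists so that a fill queue which comes up empty bumps the pool's queue_empty_descs statistic instead of silently returning false. A standalone sketch of that accounting follows; the globals are simplified stand-ins for the pool's free list, fill queue and statistics counter.

	#include <stdio.h>

	static unsigned int free_list_cnt = 3;     /* stand-in for pool->free_list_cnt */
	static unsigned int fill_queue_entries;    /* stand-in for entries in pool->fq */
	static unsigned long queue_empty_descs;    /* stand-in for the statistic */

	static int can_alloc(unsigned int count)
	{
		unsigned int req, avail;

		if (free_list_cnt >= count)
			return 1;

		req = count - free_list_cnt;
		avail = fill_queue_entries < req ? fill_queue_entries : req;
		if (!avail)
			queue_empty_descs++;       /* the reason the helper was open-coded */

		return avail >= req;
	}

	int main(void)
	{
		printf("can_alloc(2)=%d can_alloc(8)=%d empty_events=%lu\n",
		       can_alloc(2), can_alloc(8), queue_empty_descs);
		return 0;
	}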
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 6f2d1621c992..406b20dfee8d 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -306,11 +306,6 @@ static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
return entries >= max ? max : entries;
}
-static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
-{
- return xskq_cons_nb_entries(q, cnt) >= cnt;
-}
-
static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
if (q->cached_prod == q->cached_cons)
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 9a44d363ba62..f123b7c9ec82 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -328,12 +328,8 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
/* User explicitly requested packet offload mode and configured
* policy in addition to the XFRM state. So be civil to users,
* and return an error instead of taking fallback path.
- *
- * This WARN_ON() can be seen as a documentation for driver
- * authors to do not return -EOPNOTSUPP in packet offload mode.
*/
- WARN_ON(err == -EOPNOTSUPP && is_packet_offload);
- if (err != -EOPNOTSUPP || is_packet_offload) {
+ if ((err != -EOPNOTSUPP && !is_packet_offload) || is_packet_offload) {
NL_SET_ERR_MSG_WEAK(extack, "Device failed to offload this state");
return err;
}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b22767c0c078..914bac03b52a 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -110,7 +110,11 @@ struct xfrm_pol_inexact_node {
* 4. saddr:any list from saddr tree
*
* This result set then needs to be searched for the policy with
- * the lowest priority. If two results have same prio, youngest one wins.
+ * the lowest priority. If two candidates have the same priority, the
+ * candidate whose struct xfrm_policy pos member holds the lower number wins.
+ *
+ * This replicates the previous single-list-search algorithm, which would
+ * return the first matching policy in the (ordered-by-priority) list.
*/
struct xfrm_pol_inexact_key {
@@ -197,8 +201,6 @@ xfrm_policy_inexact_lookup_rcu(struct net *net,
static struct xfrm_policy *
xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
bool excl);
-static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
- struct xfrm_policy *policy);
static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
@@ -411,7 +413,6 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
if (policy) {
write_pnet(&policy->xp_net, net);
INIT_LIST_HEAD(&policy->walk.all);
- INIT_HLIST_NODE(&policy->bydst_inexact_list);
INIT_HLIST_NODE(&policy->bydst);
INIT_HLIST_NODE(&policy->byidx);
rwlock_init(&policy->lock);
@@ -1229,26 +1230,31 @@ xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
return ERR_PTR(-EEXIST);
}
- chain = &net->xfrm.policy_inexact[dir];
- xfrm_policy_insert_inexact_list(chain, policy);
-
if (delpol)
__xfrm_policy_inexact_prune_bin(bin, false);
return delpol;
}
+static bool xfrm_policy_is_dead_or_sk(const struct xfrm_policy *policy)
+{
+ int dir;
+
+ if (policy->walk.dead)
+ return true;
+
+ dir = xfrm_policy_id2dir(policy->index);
+ return dir >= XFRM_POLICY_MAX;
+}
+
static void xfrm_hash_rebuild(struct work_struct *work)
{
struct net *net = container_of(work, struct net,
xfrm.policy_hthresh.work);
- unsigned int hmask;
struct xfrm_policy *pol;
struct xfrm_policy *policy;
struct hlist_head *chain;
- struct hlist_head *odst;
struct hlist_node *newpos;
- int i;
int dir;
unsigned seq;
u8 lbits4, rbits4, lbits6, rbits6;
@@ -1275,13 +1281,10 @@ static void xfrm_hash_rebuild(struct work_struct *work)
struct xfrm_pol_inexact_bin *bin;
u8 dbits, sbits;
- if (policy->walk.dead)
+ if (xfrm_policy_is_dead_or_sk(policy))
continue;
dir = xfrm_policy_id2dir(policy->index);
- if (dir >= XFRM_POLICY_MAX)
- continue;
-
if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
if (policy->family == AF_INET) {
dbits = rbits4;
@@ -1312,23 +1315,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
goto out_unlock;
}
- /* reset the bydst and inexact table in all directions */
for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
- struct hlist_node *n;
-
- hlist_for_each_entry_safe(policy, n,
- &net->xfrm.policy_inexact[dir],
- bydst_inexact_list) {
- hlist_del_rcu(&policy->bydst);
- hlist_del_init(&policy->bydst_inexact_list);
- }
-
- hmask = net->xfrm.policy_bydst[dir].hmask;
- odst = net->xfrm.policy_bydst[dir].table;
- for (i = hmask; i >= 0; i--) {
- hlist_for_each_entry_safe(policy, n, odst + i, bydst)
- hlist_del_rcu(&policy->bydst);
- }
if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
/* dir out => dst = remote, src = local */
net->xfrm.policy_bydst[dir].dbits4 = rbits4;
@@ -1346,14 +1333,13 @@ static void xfrm_hash_rebuild(struct work_struct *work)
/* re-insert all policies by order of creation */
list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
- if (policy->walk.dead)
+ if (xfrm_policy_is_dead_or_sk(policy))
continue;
- dir = xfrm_policy_id2dir(policy->index);
- if (dir >= XFRM_POLICY_MAX) {
- /* skip socket policies */
- continue;
- }
+
+ hlist_del_rcu(&policy->bydst);
+
newpos = NULL;
+ dir = xfrm_policy_id2dir(policy->index);
chain = policy_hash_bysel(net, &policy->selector,
policy->family, dir);
@@ -1520,42 +1506,6 @@ static const struct rhashtable_params xfrm_pol_inexact_params = {
.automatic_shrinking = true,
};
-static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
- struct xfrm_policy *policy)
-{
- struct xfrm_policy *pol, *delpol = NULL;
- struct hlist_node *newpos = NULL;
- int i = 0;
-
- hlist_for_each_entry(pol, chain, bydst_inexact_list) {
- if (pol->type == policy->type &&
- pol->if_id == policy->if_id &&
- !selector_cmp(&pol->selector, &policy->selector) &&
- xfrm_policy_mark_match(&policy->mark, pol) &&
- xfrm_sec_ctx_match(pol->security, policy->security) &&
- !WARN_ON(delpol)) {
- delpol = pol;
- if (policy->priority > pol->priority)
- continue;
- } else if (policy->priority >= pol->priority) {
- newpos = &pol->bydst_inexact_list;
- continue;
- }
- if (delpol)
- break;
- }
-
- if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
- hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
- else
- hlist_add_head_rcu(&policy->bydst_inexact_list, chain);
-
- hlist_for_each_entry(pol, chain, bydst_inexact_list) {
- pol->pos = i;
- i++;
- }
-}
-
static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
struct xfrm_policy *policy,
bool excl)
@@ -2295,10 +2245,52 @@ out:
return pol;
}
+static u32 xfrm_gen_pos_slow(struct net *net)
+{
+ struct xfrm_policy *policy;
+ u32 i = 0;
+
+ /* oldest entry is last in list */
+ list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
+ if (!xfrm_policy_is_dead_or_sk(policy))
+ policy->pos = ++i;
+ }
+
+ return i;
+}
+
+static u32 xfrm_gen_pos(struct net *net)
+{
+ const struct xfrm_policy *policy;
+ u32 i = 0;
+
+ /* most recently added policy is at the head of the list */
+ list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
+ if (xfrm_policy_is_dead_or_sk(policy))
+ continue;
+
+ if (policy->pos == UINT_MAX)
+ return xfrm_gen_pos_slow(net);
+
+ i = policy->pos + 1;
+ break;
+ }
+
+ return i;
+}
+
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
struct net *net = xp_net(pol);
+ switch (dir) {
+ case XFRM_POLICY_IN:
+ case XFRM_POLICY_FWD:
+ case XFRM_POLICY_OUT:
+ pol->pos = xfrm_gen_pos(net);
+ break;
+ }
+
list_add(&pol->walk.all, &net->xfrm.policy_all);
net->xfrm.policy_count[dir]++;
xfrm_pol_hold(pol);
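
The two helpers above give every non-socket policy a creation-order number (pos) at link time, so that equal-priority candidates can be ordered without the removed inexact list. A simplified, self-contained sketch of the numbering scheme follows; the array stands in for net->xfrm.policy_all (newest entry first), and the wraparound handling is illustrative rather than the kernel's exact arithmetic.

	#include <stdio.h>
	#include <limits.h>

	static unsigned int pos[8];   /* pos[0] is the newest entry, like the list head */
	static int nr;

	static unsigned int renumber_all(void)   /* analogous to xfrm_gen_pos_slow() */
	{
		unsigned int i = 0;

		for (int k = nr - 1; k >= 0; k--)   /* oldest entries first */
			pos[k] = ++i;
		return i;
	}

	static unsigned int next_pos(void)       /* analogous to xfrm_gen_pos() */
	{
		if (!nr)
			return 0;
		if (pos[0] == UINT_MAX)             /* counter would wrap: renumber */
			return renumber_all() + 1;
		return pos[0] + 1;
	}

	int main(void)
	{
		for (nr = 0; nr < 4; nr++) {
			unsigned int p = next_pos();

			for (int k = nr; k > 0; k--)    /* new entries go to the head */
				pos[k] = pos[k - 1];
			pos[0] = p;
		}
		for (int k = 0; k < 4; k++)
			printf("entry %d: pos=%u\n", k, pos[k]);
		return 0;
	}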
@@ -2315,7 +2307,6 @@ static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
/* Socket policies are not hashed. */
if (!hlist_unhashed(&pol->bydst)) {
hlist_del_rcu(&pol->bydst);
- hlist_del_init(&pol->bydst_inexact_list);
hlist_del(&pol->byidx);
}
@@ -4438,63 +4429,50 @@ EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif
#ifdef CONFIG_XFRM_MIGRATE
-static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
- const struct xfrm_selector *sel_tgt)
-{
- if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
- if (sel_tgt->family == sel_cmp->family &&
- xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
- sel_cmp->family) &&
- xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
- sel_cmp->family) &&
- sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
- sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
- return true;
- }
- } else {
- if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
- return true;
- }
- }
- return false;
-}
-
static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
u8 dir, u8 type, struct net *net, u32 if_id)
{
- struct xfrm_policy *pol, *ret = NULL;
- struct hlist_head *chain;
- u32 priority = ~0U;
+ struct xfrm_policy *pol;
+ struct flowi fl;
- spin_lock_bh(&net->xfrm.xfrm_policy_lock);
- chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
- hlist_for_each_entry(pol, chain, bydst) {
- if ((if_id == 0 || pol->if_id == if_id) &&
- xfrm_migrate_selector_match(sel, &pol->selector) &&
- pol->type == type) {
- ret = pol;
- priority = ret->priority;
- break;
- }
- }
- chain = &net->xfrm.policy_inexact[dir];
- hlist_for_each_entry(pol, chain, bydst_inexact_list) {
- if ((pol->priority >= priority) && ret)
- break;
+ memset(&fl, 0, sizeof(fl));
- if ((if_id == 0 || pol->if_id == if_id) &&
- xfrm_migrate_selector_match(sel, &pol->selector) &&
- pol->type == type) {
- ret = pol;
+ fl.flowi_proto = sel->proto;
+
+ switch (sel->family) {
+ case AF_INET:
+ fl.u.ip4.saddr = sel->saddr.a4;
+ fl.u.ip4.daddr = sel->daddr.a4;
+ if (sel->proto == IPSEC_ULPROTO_ANY)
break;
- }
+ fl.u.flowi4_oif = sel->ifindex;
+ fl.u.ip4.fl4_sport = sel->sport;
+ fl.u.ip4.fl4_dport = sel->dport;
+ break;
+ case AF_INET6:
+ fl.u.ip6.saddr = sel->saddr.in6;
+ fl.u.ip6.daddr = sel->daddr.in6;
+ if (sel->proto == IPSEC_ULPROTO_ANY)
+ break;
+ fl.u.flowi6_oif = sel->ifindex;
+ fl.u.ip6.fl4_sport = sel->sport;
+ fl.u.ip6.fl4_dport = sel->dport;
+ break;
+ default:
+ return ERR_PTR(-EAFNOSUPPORT);
}
- xfrm_pol_hold(ret);
+ rcu_read_lock();
- spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ pol = xfrm_policy_lookup_bytype(net, type, &fl, sel->family, dir, if_id);
+ if (IS_ERR_OR_NULL(pol))
+ goto out_unlock;
- return ret;
+ if (!xfrm_pol_hold_rcu(pol))
+ pol = NULL;
+out_unlock:
+ rcu_read_unlock();
+ return pol;
}
static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
@@ -4631,9 +4609,9 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
/* Stage 1 - find policy */
pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id);
- if (!pol) {
+ if (IS_ERR_OR_NULL(pol)) {
NL_SET_ERR_MSG(extack, "Target policy not found");
- err = -ENOENT;
+ err = IS_ERR(pol) ? PTR_ERR(pol) : -ENOENT;
goto out;
}
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index f030669243f9..e851785ff058 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -307,6 +307,7 @@ enum {
CXT_FIXUP_HEADSET_MIC,
CXT_FIXUP_HP_MIC_NO_PRESENCE,
CXT_PINCFG_SWS_JS201D,
+ CXT_PINCFG_TOP_SPEAKER,
};
/* for hda_fixup_thinkpad_acpi() */
@@ -974,6 +975,13 @@ static const struct hda_fixup cxt_fixups[] = {
.type = HDA_FIXUP_PINS,
.v.pins = cxt_pincfg_sws_js201d,
},
+ [CXT_PINCFG_TOP_SPEAKER] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1d, 0x82170111 },
+ { }
+ },
+ },
};
static const struct snd_pci_quirk cxt5045_fixups[] = {
@@ -1070,6 +1078,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
SND_PCI_QUIRK(0x1c06, 0x2012, "Lemote A1205", CXT_PINCFG_LEMOTE_A1205),
+ SND_PCI_QUIRK(0x2782, 0x12c3, "Sirius Gen1", CXT_PINCFG_TOP_SPEAKER),
+ SND_PCI_QUIRK(0x2782, 0x12c5, "Sirius Gen2", CXT_PINCFG_TOP_SPEAKER),
{}
};
@@ -1089,6 +1099,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
{ .id = CXT_FIXUP_HP_MIC_NO_PRESENCE, .name = "hp-mic-fix" },
{ .id = CXT_PINCFG_LENOVO_NOTEBOOK, .name = "lenovo-20149" },
{ .id = CXT_PINCFG_SWS_JS201D, .name = "sws-js201d" },
+ { .id = CXT_PINCFG_TOP_SPEAKER, .name = "sirius-top-speaker" },
{}
};
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 78042ac2b71f..643e0496b093 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -4639,6 +4639,7 @@ HDA_CODEC_ENTRY(0x8086281d, "Meteor Lake HDMI", patch_i915_adlp_hdmi),
HDA_CODEC_ENTRY(0x8086281e, "Battlemage HDMI", patch_i915_adlp_hdmi),
HDA_CODEC_ENTRY(0x8086281f, "Raptor Lake P HDMI", patch_i915_adlp_hdmi),
HDA_CODEC_ENTRY(0x80862820, "Lunar Lake HDMI", patch_i915_adlp_hdmi),
+HDA_CODEC_ENTRY(0x80862822, "Panther Lake HDMI", patch_i915_adlp_hdmi),
HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi),
HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 588738ce7380..452c6e7c20e2 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -7538,6 +7538,7 @@ enum {
ALC236_FIXUP_HP_GPIO_LED,
ALC236_FIXUP_HP_MUTE_LED,
ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
+ ALC236_FIXUP_LENOVO_INV_DMIC,
ALC298_FIXUP_SAMSUNG_AMP,
ALC298_FIXUP_SAMSUNG_AMP2,
ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
@@ -7637,6 +7638,7 @@ enum {
ALC287_FIXUP_LENOVO_14ARP8_LEGION_IAH7,
ALC287_FIXUP_LENOVO_SSID_17AA3820,
ALCXXX_FIXUP_CS35LXX,
+ ALC245_FIXUP_CLEVO_NOISY_MIC,
};
/* A special fixup for Lenovo C940 and Yoga Duet 7;
@@ -9161,6 +9163,12 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc236_fixup_hp_mute_led_micmute_vref,
},
+ [ALC236_FIXUP_LENOVO_INV_DMIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_inv_dmic,
+ .chained = true,
+ .chain_id = ALC283_FIXUP_INT_MIC,
+ },
[ALC298_FIXUP_SAMSUNG_AMP] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc298_fixup_samsung_amp,
@@ -9970,6 +9978,12 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = cs35lxx_autodet_fixup,
},
+ [ALC245_FIXUP_CLEVO_NOISY_MIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_limit_int_mic_boost,
+ .chained = true,
+ .chain_id = ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -10218,6 +10232,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x87f6, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+ SND_PCI_QUIRK(0x103c, 0x87fd, "HP Laptop 14-dq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x87fe, "HP Laptop 15s-fq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
@@ -10342,6 +10357,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8c16, "HP Spectre 16", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8c17, "HP Spectre 16", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8c21, "HP Pavilion Plus Laptop 14-ey0XXX", ALC245_FIXUP_HP_X360_MUTE_LEDS),
+ SND_PCI_QUIRK(0x103c, 0x8c30, "HP Victus 15-fb1xxx", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
SND_PCI_QUIRK(0x103c, 0x8c46, "HP EliteBook 830 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c47, "HP EliteBook 840 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c48, "HP EliteBook 860 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
@@ -10479,6 +10495,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1e1f, "ASUS Vivobook 15 X1504VAP", ALC2XX_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
SND_PCI_QUIRK(0x1043, 0x1e63, "ASUS H7606W", ALC285_FIXUP_CS35L56_I2C_2),
@@ -10619,7 +10636,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL50NU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xa650, "Clevo NP[567]0SN[CD]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xa671, "Clevo NP70SN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1558, 0xa763, "Clevo V54x_6x_TU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xa741, "Clevo V54x_6x_TNE", ALC245_FIXUP_CLEVO_NOISY_MIC),
+ SND_PCI_QUIRK(0x1558, 0xa763, "Clevo V54x_6x_TU", ALC245_FIXUP_CLEVO_NOISY_MIC),
SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xb022, "Clevo NH77D[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -10742,6 +10760,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x38f9, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ SND_PCI_QUIRK(0x17aa, 0x3913, "Lenovo 145", ALC236_FIXUP_LENOVO_INV_DMIC),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
@@ -10994,6 +11013,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
{.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
{.id = ALC285_FIXUP_HP_GPIO_AMP_INIT, .name = "alc285-hp-amp-init"},
+ {.id = ALC236_FIXUP_LENOVO_INV_DMIC, .name = "alc236-fixup-lenovo-inv-mic"},
{}
};
#define ALC225_STANDARD_PINS \
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index 0523c16305db..06349bf0b658 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -356,6 +356,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
{
.driver_data = &acp6x_card,
.matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 17 D7VEK"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Alienware"),
DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m17 R5 AMD"),
}
diff --git a/sound/soc/codecs/chv3-codec.c b/sound/soc/codecs/chv3-codec.c
index ab99effa6874..40020500b1fe 100644
--- a/sound/soc/codecs/chv3-codec.c
+++ b/sound/soc/codecs/chv3-codec.c
@@ -26,6 +26,7 @@ static const struct of_device_id chv3_codec_of_match[] = {
{ .compatible = "google,chv3-codec", },
{ }
};
+MODULE_DEVICE_TABLE(of, chv3_codec_of_match);
static struct platform_driver chv3_codec_platform_driver = {
.driver = {
diff --git a/sound/soc/codecs/lpass-va-macro.c b/sound/soc/codecs/lpass-va-macro.c
index 8454193ed22a..e95d1f29ef18 100644
--- a/sound/soc/codecs/lpass-va-macro.c
+++ b/sound/soc/codecs/lpass-va-macro.c
@@ -228,11 +228,13 @@ struct va_macro {
struct va_macro_data {
bool has_swr_master;
bool has_npl_clk;
+ int version;
};
static const struct va_macro_data sm8250_va_data = {
.has_swr_master = false,
.has_npl_clk = false,
+ .version = LPASS_CODEC_VERSION_1_0,
};
static const struct va_macro_data sm8450_va_data = {
@@ -1587,7 +1589,14 @@ static int va_macro_probe(struct platform_device *pdev)
goto err_npl;
}
- va_macro_set_lpass_codec_version(va);
+ /**
+ * Older codec versions do not have a reliable way to determine the
+ * version from registers; get it from the SoC-specific data.
+ */
+ if (data->version)
+ lpass_macro_set_codec_version(data->version);
+ else /* read version from register */
+ va_macro_set_lpass_codec_version(va);
if (va->has_swr_master) {
/* Set default CLK div to 1 */
diff --git a/sound/soc/codecs/tda7419.c b/sound/soc/codecs/tda7419.c
index 386b99c8023b..7d6fcba9986e 100644
--- a/sound/soc/codecs/tda7419.c
+++ b/sound/soc/codecs/tda7419.c
@@ -623,6 +623,7 @@ static const struct of_device_id tda7419_of_match[] = {
{ .compatible = "st,tda7419" },
{ },
};
+MODULE_DEVICE_TABLE(of, tda7419_of_match);
static struct i2c_driver tda7419_driver = {
.driver = {
diff --git a/sound/soc/google/chv3-i2s.c b/sound/soc/google/chv3-i2s.c
index 08e558f24af8..0ff24653d49f 100644
--- a/sound/soc/google/chv3-i2s.c
+++ b/sound/soc/google/chv3-i2s.c
@@ -322,6 +322,7 @@ static const struct of_device_id chv3_i2s_of_match[] = {
{ .compatible = "google,chv3-i2s" },
{},
};
+MODULE_DEVICE_TABLE(of, chv3_i2s_of_match);
static struct platform_driver chv3_i2s_driver = {
.probe = chv3_i2s_probe,
diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
index dce6a2086f2a..6da1517c53c6 100644
--- a/sound/soc/intel/boards/bxt_rt298.c
+++ b/sound/soc/intel/boards/bxt_rt298.c
@@ -605,7 +605,7 @@ static int broxton_audio_probe(struct platform_device *pdev)
int i;
for (i = 0; i < ARRAY_SIZE(broxton_rt298_dais); i++) {
- if (card->dai_link[i].codecs->name &&
+ if (card->dai_link[i].num_codecs &&
!strncmp(card->dai_link[i].codecs->name, "i2c-INT343A:00",
I2C_NAME_SIZE)) {
if (!strncmp(card->name, "broxton-rt298",
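
The same change is repeated across the Intel board drivers below: a dai_link in these tables may now carry an empty codec array, so the codec count is checked before the first codecs[] entry is dereferenced. A minimal sketch of why the order of those checks matters; the struct definitions are illustrative, not the ASoC ones.

	#include <stdio.h>
	#include <stddef.h>

	struct codec { const char *name; };
	struct dai_link { int num_codecs; struct codec *codecs; };

	int main(void)
	{
		struct dai_link empty = { .num_codecs = 0, .codecs = NULL };

		/* checking the count first avoids dereferencing a NULL codecs array */
		if (empty.num_codecs && empty.codecs->name)
			printf("codec: %s\n", empty.codecs->name);
		else
			printf("no codec attached to this link\n");
		return 0;
	}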
diff --git a/sound/soc/intel/boards/bytcht_cx2072x.c b/sound/soc/intel/boards/bytcht_cx2072x.c
index c014d85a08b2..df3c2a7b64d2 100644
--- a/sound/soc/intel/boards/bytcht_cx2072x.c
+++ b/sound/soc/intel/boards/bytcht_cx2072x.c
@@ -241,7 +241,7 @@ static int snd_byt_cht_cx2072x_probe(struct platform_device *pdev)
/* fix index of codec dai */
for (i = 0; i < ARRAY_SIZE(byt_cht_cx2072x_dais); i++) {
- if (byt_cht_cx2072x_dais[i].codecs->name &&
+ if (byt_cht_cx2072x_dais[i].num_codecs &&
!strcmp(byt_cht_cx2072x_dais[i].codecs->name,
"i2c-14F10720:00")) {
dai_index = i;
diff --git a/sound/soc/intel/boards/bytcht_da7213.c b/sound/soc/intel/boards/bytcht_da7213.c
index f4ac3ddd148b..08c598b7e1ee 100644
--- a/sound/soc/intel/boards/bytcht_da7213.c
+++ b/sound/soc/intel/boards/bytcht_da7213.c
@@ -245,7 +245,7 @@ static int bytcht_da7213_probe(struct platform_device *pdev)
/* fix index of codec dai */
for (i = 0; i < ARRAY_SIZE(dailink); i++) {
- if (dailink[i].codecs->name &&
+ if (dailink[i].num_codecs &&
!strcmp(dailink[i].codecs->name, "i2c-DLGS7213:00")) {
dai_index = i;
break;
diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
index 2fcec2e02bb5..77b91ea4dc32 100644
--- a/sound/soc/intel/boards/bytcht_es8316.c
+++ b/sound/soc/intel/boards/bytcht_es8316.c
@@ -546,7 +546,7 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
/* fix index of codec dai */
for (i = 0; i < ARRAY_SIZE(byt_cht_es8316_dais); i++) {
- if (byt_cht_es8316_dais[i].codecs->name &&
+ if (byt_cht_es8316_dais[i].num_codecs &&
!strcmp(byt_cht_es8316_dais[i].codecs->name,
"i2c-ESSX8316:00")) {
dai_index = i;
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index a64d1989e28a..db4a33680d94 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -1677,7 +1677,7 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
/* fix index of codec dai */
for (i = 0; i < ARRAY_SIZE(byt_rt5640_dais); i++) {
- if (byt_rt5640_dais[i].codecs->name &&
+ if (byt_rt5640_dais[i].num_codecs &&
!strcmp(byt_rt5640_dais[i].codecs->name,
"i2c-10EC5640:00")) {
dai_index = i;
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index 80c841b000a3..8514b79f389b 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -910,7 +910,7 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
/* fix index of codec dai */
for (i = 0; i < ARRAY_SIZE(byt_rt5651_dais); i++) {
- if (byt_rt5651_dais[i].codecs->name &&
+ if (byt_rt5651_dais[i].num_codecs &&
!strcmp(byt_rt5651_dais[i].codecs->name,
"i2c-10EC5651:00")) {
dai_index = i;
diff --git a/sound/soc/intel/boards/bytcr_wm5102.c b/sound/soc/intel/boards/bytcr_wm5102.c
index cccb5e90c0fe..e5a7cc606aa9 100644
--- a/sound/soc/intel/boards/bytcr_wm5102.c
+++ b/sound/soc/intel/boards/bytcr_wm5102.c
@@ -605,7 +605,7 @@ static int snd_byt_wm5102_mc_probe(struct platform_device *pdev)
/* find index of codec dai */
for (i = 0; i < ARRAY_SIZE(byt_wm5102_dais); i++) {
- if (byt_wm5102_dais[i].codecs->name &&
+ if (byt_wm5102_dais[i].num_codecs &&
!strcmp(byt_wm5102_dais[i].codecs->name,
"wm5102-codec")) {
dai_index = i;
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index eb41b7115d01..1da9ceee4d59 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -569,7 +569,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
/* set correct codec name */
for (i = 0; i < ARRAY_SIZE(cht_dailink); i++)
- if (cht_dailink[i].codecs->name &&
+ if (cht_dailink[i].num_codecs &&
!strcmp(cht_dailink[i].codecs->name,
"i2c-10EC5645:00")) {
dai_index = i;
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index be2d1a8dbca8..d68e5bc755de 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -466,7 +466,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
/* find index of codec dai */
for (i = 0; i < ARRAY_SIZE(cht_dailink); i++) {
- if (cht_dailink[i].codecs->name &&
+ if (cht_dailink[i].num_codecs &&
!strcmp(cht_dailink[i].codecs->name, RT5672_I2C_DEFAULT)) {
dai_index = i;
break;
diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
index 5e2ec60e2954..e4c3492a0c28 100644
--- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
@@ -84,7 +84,6 @@ static const struct dmi_system_id lenovo_yoga_tab3_x90[] = {
/* Lenovo Yoga Tab 3 Pro YT3-X90, codec missing from DSDT */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
},
},
diff --git a/sound/soc/intel/keembay/kmb_platform.c b/sound/soc/intel/keembay/kmb_platform.c
index 37ea2e1d2e92..aa5de167e790 100644
--- a/sound/soc/intel/keembay/kmb_platform.c
+++ b/sound/soc/intel/keembay/kmb_platform.c
@@ -814,6 +814,7 @@ static const struct of_device_id kmb_plat_of_match[] = {
{ .compatible = "intel,keembay-tdm", .data = &intel_kmb_tdm_dai},
{}
};
+MODULE_DEVICE_TABLE(of, kmb_plat_of_match);
static int kmb_plat_dai_probe(struct platform_device *pdev)
{
diff --git a/sound/soc/mediatek/mt8188/mt8188-mt6359.c b/sound/soc/mediatek/mt8188/mt8188-mt6359.c
index eba6f4c445ff..08ae962afeb9 100644
--- a/sound/soc/mediatek/mt8188/mt8188-mt6359.c
+++ b/sound/soc/mediatek/mt8188/mt8188-mt6359.c
@@ -734,6 +734,7 @@ static int mt8188_headset_codec_init(struct snd_soc_pcm_runtime *rtd)
struct mtk_soc_card_data *soc_card_data = snd_soc_card_get_drvdata(rtd->card);
struct snd_soc_jack *jack = &soc_card_data->card_data->jacks[MT8188_JACK_HEADSET];
struct snd_soc_component *component = snd_soc_rtd_to_codec(rtd, 0)->component;
+ struct mtk_platform_card_data *card_data = soc_card_data->card_data;
int ret;
ret = snd_soc_dapm_new_controls(&card->dapm, mt8188_nau8825_widgets,
@@ -762,10 +763,18 @@ static int mt8188_headset_codec_init(struct snd_soc_pcm_runtime *rtd)
return ret;
}
- snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
+ if (card_data->flags & ES8326_HS_PRESENT) {
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
+ } else {
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
+ }
+
ret = snd_soc_component_set_jack(component, jack, NULL);
if (ret) {
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 37dccd9c1ba0..32c556c62557 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -4057,6 +4057,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
case SND_SOC_DAPM_POST_PMD:
kfree(substream->runtime);
+ substream->runtime = NULL;
break;
default:
diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
index b54382131991..496162df5270 100644
--- a/sound/soc/sof/topology.c
+++ b/sound/soc/sof/topology.c
@@ -2050,6 +2050,8 @@ static int sof_link_unload(struct snd_soc_component *scomp, struct snd_soc_dobj
if (!slink)
return 0;
+ slink->link->platforms->name = NULL;
+
kfree(slink->tuples);
list_del(&slink->list);
kfree(slink->hw_configs);
diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
index 5f8d979585b6..3af0b2aab291 100644
--- a/sound/soc/sunxi/sun4i-i2s.c
+++ b/sound/soc/sunxi/sun4i-i2s.c
@@ -100,8 +100,8 @@
#define SUN8I_I2S_CTRL_MODE_PCM (0 << 4)
#define SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK BIT(19)
-#define SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED (1 << 19)
-#define SUN8I_I2S_FMT0_LRCLK_POLARITY_NORMAL (0 << 19)
+#define SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH (1 << 19)
+#define SUN8I_I2S_FMT0_LRCLK_POLARITY_START_LOW (0 << 19)
#define SUN8I_I2S_FMT0_LRCK_PERIOD_MASK GENMASK(17, 8)
#define SUN8I_I2S_FMT0_LRCK_PERIOD(period) ((period - 1) << 8)
#define SUN8I_I2S_FMT0_BCLK_POLARITY_MASK BIT(7)
@@ -729,65 +729,37 @@ static int sun4i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
static int sun8i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
unsigned int fmt)
{
- u32 mode, val;
+ u32 mode, lrclk_pol, bclk_pol, val;
u8 offset;
- /*
- * DAI clock polarity
- *
- * The setup for LRCK contradicts the datasheet, but under a
- * scope it's clear that the LRCK polarity is reversed
- * compared to the expected polarity on the bus.
- */
- switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
- case SND_SOC_DAIFMT_IB_IF:
- /* Invert both clocks */
- val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
- break;
- case SND_SOC_DAIFMT_IB_NF:
- /* Invert bit clock */
- val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED |
- SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED;
- break;
- case SND_SOC_DAIFMT_NB_IF:
- /* Invert frame clock */
- val = 0;
- break;
- case SND_SOC_DAIFMT_NB_NF:
- val = SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED;
- break;
- default:
- return -EINVAL;
- }
-
- regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
- SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK |
- SUN8I_I2S_FMT0_BCLK_POLARITY_MASK,
- val);
-
/* DAI Mode */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_A:
+ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
mode = SUN8I_I2S_CTRL_MODE_PCM;
offset = 1;
break;
case SND_SOC_DAIFMT_DSP_B:
+ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
mode = SUN8I_I2S_CTRL_MODE_PCM;
offset = 0;
break;
case SND_SOC_DAIFMT_I2S:
+ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_LOW;
mode = SUN8I_I2S_CTRL_MODE_LEFT;
offset = 1;
break;
case SND_SOC_DAIFMT_LEFT_J:
+ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
mode = SUN8I_I2S_CTRL_MODE_LEFT;
offset = 0;
break;
case SND_SOC_DAIFMT_RIGHT_J:
+ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
mode = SUN8I_I2S_CTRL_MODE_RIGHT;
offset = 0;
break;
@@ -805,6 +777,35 @@ static int sun8i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
SUN8I_I2S_TX_CHAN_OFFSET_MASK,
SUN8I_I2S_TX_CHAN_OFFSET(offset));
+ /* DAI clock polarity */
+ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_NORMAL;
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_IB_IF:
+ /* Invert both clocks */
+ lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK;
+ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ /* Invert bit clock */
+ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ /* Invert frame clock */
+ lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK;
+ break;
+ case SND_SOC_DAIFMT_NB_NF:
+ /* No inversion */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
+ SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK |
+ SUN8I_I2S_FMT0_BCLK_POLARITY_MASK,
+ lrclk_pol | bclk_pol);
+
/* DAI clock master masks */
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
case SND_SOC_DAIFMT_BP_FP:
@@ -836,65 +837,37 @@ static int sun8i_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
static int sun50i_h6_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
unsigned int fmt)
{
- u32 mode, val;
+ u32 mode, lrclk_pol, bclk_pol, val;
u8 offset;
- /*
- * DAI clock polarity
- *
- * The setup for LRCK contradicts the datasheet, but under a
- * scope it's clear that the LRCK polarity is reversed
- * compared to the expected polarity on the bus.
- */
- switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
- case SND_SOC_DAIFMT_IB_IF:
- /* Invert both clocks */
- val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
- break;
- case SND_SOC_DAIFMT_IB_NF:
- /* Invert bit clock */
- val = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED |
- SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED;
- break;
- case SND_SOC_DAIFMT_NB_IF:
- /* Invert frame clock */
- val = 0;
- break;
- case SND_SOC_DAIFMT_NB_NF:
- val = SUN8I_I2S_FMT0_LRCLK_POLARITY_INVERTED;
- break;
- default:
- return -EINVAL;
- }
-
- regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
- SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK |
- SUN8I_I2S_FMT0_BCLK_POLARITY_MASK,
- val);
-
/* DAI Mode */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_A:
+ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
mode = SUN8I_I2S_CTRL_MODE_PCM;
offset = 1;
break;
case SND_SOC_DAIFMT_DSP_B:
+ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
mode = SUN8I_I2S_CTRL_MODE_PCM;
offset = 0;
break;
case SND_SOC_DAIFMT_I2S:
+ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_LOW;
mode = SUN8I_I2S_CTRL_MODE_LEFT;
offset = 1;
break;
case SND_SOC_DAIFMT_LEFT_J:
+ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
mode = SUN8I_I2S_CTRL_MODE_LEFT;
offset = 0;
break;
case SND_SOC_DAIFMT_RIGHT_J:
+ lrclk_pol = SUN8I_I2S_FMT0_LRCLK_POLARITY_START_HIGH;
mode = SUN8I_I2S_CTRL_MODE_RIGHT;
offset = 0;
break;
@@ -912,6 +885,36 @@ static int sun50i_h6_i2s_set_soc_fmt(const struct sun4i_i2s *i2s,
SUN50I_H6_I2S_TX_CHAN_SEL_OFFSET_MASK,
SUN50I_H6_I2S_TX_CHAN_SEL_OFFSET(offset));
+ /* DAI clock polarity */
+ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_NORMAL;
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_IB_IF:
+ /* Invert both clocks */
+ lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK;
+ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ /* Invert bit clock */
+ bclk_pol = SUN8I_I2S_FMT0_BCLK_POLARITY_INVERTED;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ /* Invert frame clock */
+ lrclk_pol ^= SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK;
+ break;
+ case SND_SOC_DAIFMT_NB_NF:
+ /* No inversion */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_FMT0_REG,
+ SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK |
+ SUN8I_I2S_FMT0_BCLK_POLARITY_MASK,
+ lrclk_pol | bclk_pol);
+
+
/* DAI clock master masks */
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
case SND_SOC_DAIFMT_BP_FP:
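
Both rewrites above follow the same pattern: the DAI format selects a base LRCLK start level, and an *_IF inversion request then XORs the single-bit field instead of hard-coding one of the two register values. A small sketch of that toggle; the macro values mirror the driver's bit 19 field but are used here purely for illustration.

	#include <stdio.h>
	#include <stdint.h>

	#define LRCLK_POLARITY_MASK        (1u << 19)  /* like SUN8I_I2S_FMT0_LRCLK_POLARITY_MASK */
	#define LRCLK_POLARITY_START_HIGH  (1u << 19)
	#define LRCLK_POLARITY_START_LOW   (0u << 19)

	int main(void)
	{
		/* the format picks the base level: I2S starts low, DSP_A/B start high */
		uint32_t lrclk_pol = LRCLK_POLARITY_START_LOW;
		int frame_clock_inverted = 1;              /* an *_IF inversion request */

		if (frame_clock_inverted)
			lrclk_pol ^= LRCLK_POLARITY_MASK;  /* flip whatever the format chose */

		printf("LRCLK %s\n",
		       (lrclk_pol & LRCLK_POLARITY_MASK) ? "starts high" : "starts low");
		return 0;
	}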
diff --git a/sound/soc/tegra/tegra210_ahub.c b/sound/soc/tegra/tegra210_ahub.c
index 3f114a2adfce..ab3c6b2544d2 100644
--- a/sound/soc/tegra/tegra210_ahub.c
+++ b/sound/soc/tegra/tegra210_ahub.c
@@ -2,7 +2,7 @@
//
// tegra210_ahub.c - Tegra210 AHUB driver
//
-// Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
+// Copyright (c) 2020-2024, NVIDIA CORPORATION. All rights reserved.
#include <linux/clk.h>
#include <linux/device.h>
@@ -1391,11 +1391,13 @@ static int tegra_ahub_probe(struct platform_device *pdev)
return err;
}
+ pm_runtime_enable(&pdev->dev);
+
err = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
- if (err)
+ if (err) {
+ pm_runtime_disable(&pdev->dev);
return err;
-
- pm_runtime_enable(&pdev->dev);
+ }
return 0;
}
diff --git a/tools/hv/Makefile b/tools/hv/Makefile
index 2e60e2c212cd..34ffcec264ab 100644
--- a/tools/hv/Makefile
+++ b/tools/hv/Makefile
@@ -52,7 +52,7 @@ $(OUTPUT)hv_fcopy_uio_daemon: $(HV_FCOPY_UIO_DAEMON_IN)
clean:
rm -f $(ALL_PROGRAMS)
- find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+ find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete -o -name '\.*.cmd' -delete
install: $(ALL_PROGRAMS)
install -d -m 755 $(DESTDIR)$(sbindir); \
diff --git a/tools/hv/lsvmbus b/tools/hv/lsvmbus
index 099f2c44dbed..f83698f14da2 100644..100755
--- a/tools/hv/lsvmbus
+++ b/tools/hv/lsvmbus
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
import os
diff --git a/tools/include/uapi/linux/netdev.h b/tools/include/uapi/linux/netdev.h
index 43742ac5b00d..7c308f04e7a0 100644
--- a/tools/include/uapi/linux/netdev.h
+++ b/tools/include/uapi/linux/netdev.h
@@ -93,6 +93,7 @@ enum {
NETDEV_A_PAGE_POOL_INFLIGHT,
NETDEV_A_PAGE_POOL_INFLIGHT_MEM,
NETDEV_A_PAGE_POOL_DETACH_TIME,
+ NETDEV_A_PAGE_POOL_DMABUF,
__NETDEV_A_PAGE_POOL_MAX,
NETDEV_A_PAGE_POOL_MAX = (__NETDEV_A_PAGE_POOL_MAX - 1)
@@ -131,6 +132,7 @@ enum {
NETDEV_A_QUEUE_IFINDEX,
NETDEV_A_QUEUE_TYPE,
NETDEV_A_QUEUE_NAPI_ID,
+ NETDEV_A_QUEUE_DMABUF,
__NETDEV_A_QUEUE_MAX,
NETDEV_A_QUEUE_MAX = (__NETDEV_A_QUEUE_MAX - 1)
@@ -174,6 +176,16 @@ enum {
};
enum {
+ NETDEV_A_DMABUF_IFINDEX = 1,
+ NETDEV_A_DMABUF_QUEUES,
+ NETDEV_A_DMABUF_FD,
+ NETDEV_A_DMABUF_ID,
+
+ __NETDEV_A_DMABUF_MAX,
+ NETDEV_A_DMABUF_MAX = (__NETDEV_A_DMABUF_MAX - 1)
+};
+
+enum {
NETDEV_CMD_DEV_GET = 1,
NETDEV_CMD_DEV_ADD_NTF,
NETDEV_CMD_DEV_DEL_NTF,
@@ -186,6 +198,7 @@ enum {
NETDEV_CMD_QUEUE_GET,
NETDEV_CMD_NAPI_GET,
NETDEV_CMD_QSTATS_GET,
+ NETDEV_CMD_BIND_RX,
__NETDEV_CMD_MAX,
NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
diff --git a/tools/net/ynl/lib/.gitignore b/tools/net/ynl/lib/.gitignore
index c18dd8d83cee..296c4035dbf2 100644
--- a/tools/net/ynl/lib/.gitignore
+++ b/tools/net/ynl/lib/.gitignore
@@ -1 +1,2 @@
__pycache__/
+*.d
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h
index 11ee801e75e7..6c3b4d4f173a 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h
@@ -34,6 +34,12 @@ DECLARE_TRACE(bpf_testmod_test_write_bare,
TP_ARGS(task, ctx)
);
+/* Used in bpf_testmod_test_read() to test __nullable suffix */
+DECLARE_TRACE(bpf_testmod_test_nullable_bare,
+ TP_PROTO(struct bpf_testmod_test_read_ctx *ctx__nullable),
+ TP_ARGS(ctx__nullable)
+);
+
#undef BPF_TESTMOD_DECLARE_TRACE
#ifdef DECLARE_TRACE_WRITABLE
#define BPF_TESTMOD_DECLARE_TRACE(call, proto, args, size) \
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index fd28c1157bd3..a32771da4293 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -356,6 +356,8 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
if (bpf_testmod_loop_test(101) > 100)
trace_bpf_testmod_test_read(current, &ctx);
+ trace_bpf_testmod_test_nullable_bare(NULL);
+
/* Magic number to enable writable tp */
if (len == 64) {
struct bpf_testmod_test_writable_ctx writable = {
diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index 00965a6e83bb..61de88cf4ad0 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -3551,6 +3551,40 @@ static struct btf_raw_test raw_tests[] = {
BTF_STR_SEC("\0x\0?.foo bar:buz"),
},
{
+ .descr = "datasec: name with non-printable first char not is ok",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* VAR x */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
+ BTF_VAR_STATIC,
+ /* DATASEC ?.data */ /* [3] */
+ BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(2, 0, 4),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0x\0\7foo"),
+ .err_str = "Invalid name",
+ .btf_load_err = true,
+},
+{
+ .descr = "datasec: name '\\0' is not ok",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* VAR x */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
+ BTF_VAR_STATIC,
+ /* DATASEC \0 */ /* [3] */
+ BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(2, 0, 4),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0x\0"),
+ .err_str = "Invalid name",
+ .btf_load_err = true,
+},
+{
.descr = "type name '?foo' is not ok",
.raw_types = {
/* union ?foo; */
diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c
index 7cfac53c0d58..b614a5272dfd 100644
--- a/tools/testing/selftests/bpf/prog_tests/dynptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c
@@ -9,6 +9,7 @@
enum test_setup_type {
SETUP_SYSCALL_SLEEP,
SETUP_SKB_PROG,
+ SETUP_SKB_PROG_TP,
};
static struct {
@@ -28,6 +29,7 @@ static struct {
{"test_dynptr_clone", SETUP_SKB_PROG},
{"test_dynptr_skb_no_buff", SETUP_SKB_PROG},
{"test_dynptr_skb_strcmp", SETUP_SKB_PROG},
+ {"test_dynptr_skb_tp_btf", SETUP_SKB_PROG_TP},
};
static void verify_success(const char *prog_name, enum test_setup_type setup_type)
@@ -35,7 +37,7 @@ static void verify_success(const char *prog_name, enum test_setup_type setup_typ
struct dynptr_success *skel;
struct bpf_program *prog;
struct bpf_link *link;
- int err;
+ int err;
skel = dynptr_success__open();
if (!ASSERT_OK_PTR(skel, "dynptr_success__open"))
@@ -47,7 +49,7 @@ static void verify_success(const char *prog_name, enum test_setup_type setup_typ
if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
goto cleanup;
- bpf_program__set_autoload(prog, true);
+ bpf_program__set_autoload(prog, true);
err = dynptr_success__load(skel);
if (!ASSERT_OK(err, "dynptr_success__load"))
@@ -87,6 +89,37 @@ static void verify_success(const char *prog_name, enum test_setup_type setup_typ
break;
}
+ case SETUP_SKB_PROG_TP:
+ {
+ struct __sk_buff skb = {};
+ struct bpf_object *obj;
+ int aux_prog_fd;
+
+ /* Just use its test_run to trigger kfree_skb tracepoint */
+ err = bpf_prog_test_load("./test_pkt_access.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
+ &obj, &aux_prog_fd);
+ if (!ASSERT_OK(err, "prog_load sched cls"))
+ goto cleanup;
+
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .ctx_in = &skb,
+ .ctx_size_in = sizeof(skb),
+ );
+
+ link = bpf_program__attach(prog);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach"))
+ goto cleanup;
+
+ err = bpf_prog_test_run_opts(aux_prog_fd, &topts);
+ bpf_link__destroy(link);
+
+ if (!ASSERT_OK(err, "test_run"))
+ goto cleanup;
+
+ break;
+ }
}
ASSERT_EQ(skel->bss->err, 0, "err");
diff --git a/tools/testing/selftests/bpf/prog_tests/tp_btf_nullable.c b/tools/testing/selftests/bpf/prog_tests/tp_btf_nullable.c
new file mode 100644
index 000000000000..accc42e01f8a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tp_btf_nullable.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "test_tp_btf_nullable.skel.h"
+
+void test_tp_btf_nullable(void)
+{
+ if (!env.has_testmod) {
+ test__skip();
+ return;
+ }
+
+ RUN_TESTS(test_tp_btf_nullable);
+}
diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
index e35bc1eac52a..c3bc186af21e 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -6,6 +6,7 @@
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
#include <linux/if_ether.h>
#include "bpf_misc.h"
#include "bpf_kfuncs.h"
@@ -1254,6 +1255,30 @@ int skb_invalid_ctx(void *ctx)
return 0;
}
+SEC("fentry/skb_tx_error")
+__failure __msg("must be referenced or trusted")
+int BPF_PROG(skb_invalid_ctx_fentry, void *skb)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ return 0;
+}
+
+SEC("fexit/skb_tx_error")
+__failure __msg("must be referenced or trusted")
+int BPF_PROG(skb_invalid_ctx_fexit, void *skb)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ return 0;
+}
+
/* Reject writes to dynptr slot for uninit arg */
SEC("?raw_tp")
__failure __msg("potential write to dynptr at off=-16")
diff --git a/tools/testing/selftests/bpf/progs/dynptr_success.c b/tools/testing/selftests/bpf/progs/dynptr_success.c
index 5985920d162e..bfcc85686cf0 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_success.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_success.c
@@ -5,6 +5,7 @@
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "bpf_kfuncs.h"
#include "errno.h"
@@ -544,3 +545,25 @@ int test_dynptr_skb_strcmp(struct __sk_buff *skb)
return 1;
}
+
+SEC("tp_btf/kfree_skb")
+int BPF_PROG(test_dynptr_skb_tp_btf, void *skb, void *location)
+{
+ __u8 write_data[2] = {1, 2};
+ struct bpf_dynptr ptr;
+ int ret;
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
+ err = 1;
+ return 1;
+ }
+
+ /* since tp_btf skbs are read only, writes should fail */
+ ret = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
+ if (ret != -EINVAL) {
+ err = 2;
+ return 1;
+ }
+
+ return 1;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c
new file mode 100644
index 000000000000..bba3e37f749b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../bpf_testmod/bpf_testmod.h"
+#include "bpf_misc.h"
+
+SEC("tp_btf/bpf_testmod_test_nullable_bare")
+__failure __msg("R1 invalid mem access 'trusted_ptr_or_null_'")
+int BPF_PROG(handle_tp_btf_nullable_bare1, struct bpf_testmod_test_read_ctx *nullable_ctx)
+{
+ return nullable_ctx->len;
+}
+
+SEC("tp_btf/bpf_testmod_test_nullable_bare")
+int BPF_PROG(handle_tp_btf_nullable_bare2, struct bpf_testmod_test_read_ctx *nullable_ctx)
+{
+ if (nullable_ctx)
+ return nullable_ctx->len;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index 8144fd145237..1ee0ef114f9d 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -324,6 +324,25 @@ out:
return zc_avail;
}
+#define MAX_SKB_FRAGS_PATH "/proc/sys/net/core/max_skb_frags"
+static unsigned int get_max_skb_frags(void)
+{
+ unsigned int max_skb_frags = 0;
+ FILE *file;
+
+ file = fopen(MAX_SKB_FRAGS_PATH, "r");
+ if (!file) {
+ ksft_print_msg("Error opening %s\n", MAX_SKB_FRAGS_PATH);
+ return 0;
+ }
+
+ if (fscanf(file, "%u", &max_skb_frags) != 1)
+ ksft_print_msg("Error reading %s\n", MAX_SKB_FRAGS_PATH);
+
+ fclose(file);
+ return max_skb_frags;
+}
+
static struct option long_options[] = {
{"interface", required_argument, 0, 'i'},
{"busy-poll", no_argument, 0, 'b'},
@@ -2244,13 +2263,24 @@ static int testapp_poll_rxq_tmout(struct test_spec *test)
static int testapp_too_many_frags(struct test_spec *test)
{
- struct pkt pkts[2 * XSK_DESC__MAX_SKB_FRAGS + 2] = {};
+ struct pkt *pkts;
u32 max_frags, i;
+ int ret;
- if (test->mode == TEST_MODE_ZC)
+ if (test->mode == TEST_MODE_ZC) {
max_frags = test->ifobj_tx->xdp_zc_max_segs;
- else
- max_frags = XSK_DESC__MAX_SKB_FRAGS;
+ } else {
+ max_frags = get_max_skb_frags();
+ if (!max_frags) {
+ ksft_print_msg("Couldn't retrieve MAX_SKB_FRAGS from system, using default (17) value\n");
+ max_frags = 17;
+ }
+ max_frags += 1;
+ }
+
+ pkts = calloc(2 * max_frags + 2, sizeof(struct pkt));
+ if (!pkts)
+ return TEST_FAILURE;
test->mtu = MAX_ETH_JUMBO_SIZE;
@@ -2280,7 +2310,10 @@ static int testapp_too_many_frags(struct test_spec *test)
pkts[2 * max_frags + 1].valid = true;
pkt_stream_generate_custom(test, pkts, 2 * max_frags + 2);
- return testapp_validate_traffic(test);
+ ret = testapp_validate_traffic(test);
+
+ free(pkts);
+ return ret;
}
static int xsk_load_xdp_programs(struct ifobject *ifobj)
diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
index 885c948c5d83..e46e823f6a1a 100644
--- a/tools/testing/selftests/bpf/xskxceiver.h
+++ b/tools/testing/selftests/bpf/xskxceiver.h
@@ -55,7 +55,6 @@
#define XSK_UMEM__LARGE_FRAME_SIZE (3 * 1024)
#define XSK_UMEM__MAX_FRAME_SIZE (4 * 1024)
#define XSK_DESC__INVALID_OPTION (0xffff)
-#define XSK_DESC__MAX_SKB_FRAGS 18
#define HUGEPAGE_SIZE (2 * 1024 * 1024)
#define PKT_DUMP_NB_TO_PRINT 16
#define RUN_ALL_TESTS UINT_MAX
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 923bf098e2eb..1c04c780db66 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -17,6 +17,7 @@ ipv6_flowlabel
ipv6_flowlabel_mgr
log.txt
msg_zerocopy
+ncdevmem
nettest
psock_fanout
psock_snd
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 27362e40eb37..649f1fe0dc46 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -56,7 +56,7 @@ TEST_PROGS += ip_local_port_range.sh
TEST_PROGS += rps_default_mask.sh
TEST_PROGS += big_tcp.sh
TEST_PROGS += netns-sysctl.sh
-TEST_PROGS_EXTENDED := toeplitz_client.sh toeplitz.sh
+TEST_PROGS_EXTENDED := toeplitz_client.sh toeplitz.sh xfrm_policy_add_speed.sh
TEST_GEN_FILES = socket nettest
TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd txring_overwrite
@@ -97,6 +97,11 @@ TEST_PROGS += fq_band_pktlimit.sh
TEST_PROGS += vlan_hw_filter.sh
TEST_PROGS += bpf_offload.py
+# YNL files, must be before "include ..lib.mk"
+EXTRA_CLEAN += $(OUTPUT)/libynl.a
+YNL_GEN_FILES := ncdevmem
+TEST_GEN_FILES += $(YNL_GEN_FILES)
+
TEST_FILES := settings
TEST_FILES += in_netns.sh lib.sh net_helper.sh setup_loopback.sh setup_veth.sh
@@ -106,6 +111,10 @@ TEST_INCLUDES := forwarding/lib.sh
include ../lib.mk
+# YNL build
+YNL_GENS := netdev
+include ynl.mk
+
$(OUTPUT)/epoll_busy_poll: LDLIBS += -lcap
$(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
$(OUTPUT)/tcp_mmap: LDLIBS += -lpthread -lcrypto
diff --git a/tools/testing/selftests/net/lib/csum.c b/tools/testing/selftests/net/lib/csum.c
index b9f3fc3c3426..e0a34e5e8dd5 100644
--- a/tools/testing/selftests/net/lib/csum.c
+++ b/tools/testing/selftests/net/lib/csum.c
@@ -654,10 +654,16 @@ static int recv_verify_packet_ipv4(void *nh, int len)
{
struct iphdr *iph = nh;
uint16_t proto = cfg_encap ? IPPROTO_UDP : cfg_proto;
+ uint16_t ip_len;
if (len < sizeof(*iph) || iph->protocol != proto)
return -1;
+ ip_len = ntohs(iph->tot_len);
+ if (ip_len > len || ip_len < sizeof(*iph))
+ return -1;
+
+ len = ip_len;
iph_addr_p = &iph->saddr;
if (proto == IPPROTO_TCP)
return recv_verify_packet_tcp(iph + 1, len - sizeof(*iph));
@@ -669,16 +675,22 @@ static int recv_verify_packet_ipv6(void *nh, int len)
{
struct ipv6hdr *ip6h = nh;
uint16_t proto = cfg_encap ? IPPROTO_UDP : cfg_proto;
+ uint16_t ip_len;
if (len < sizeof(*ip6h) || ip6h->nexthdr != proto)
return -1;
+ ip_len = ntohs(ip6h->payload_len);
+ if (ip_len > len - sizeof(*ip6h))
+ return -1;
+
+ len = ip_len;
iph_addr_p = &ip6h->saddr;
if (proto == IPPROTO_TCP)
- return recv_verify_packet_tcp(ip6h + 1, len - sizeof(*ip6h));
+ return recv_verify_packet_tcp(ip6h + 1, len);
else
- return recv_verify_packet_udp(ip6h + 1, len - sizeof(*ip6h));
+ return recv_verify_packet_udp(ip6h + 1, len);
}
/* return whether auxdata includes TP_STATUS_CSUM_VALID */
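
The two hunks above clamp the parsed length to what the IP header itself claims, and they differ in one detail worth spelling out: IPv4's tot_len includes the header while IPv6's payload_len excludes the fixed 40-byte header. A standalone sketch of that bounds check follows; the header sizes are hard-coded here purely for illustration, where the real code uses sizeof(*iph) and sizeof(*ip6h).

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	int main(void)
	{
		int captured = 1500;                  /* bytes handed to the parser */
		uint16_t v4_tot_len = htons(60);      /* IPv4: header + payload */
		uint16_t v6_payload_len = htons(40);  /* IPv6: payload only */

		/* IPv4: the claimed total must fit in the capture and cover the header */
		if (ntohs(v4_tot_len) > captured || ntohs(v4_tot_len) < 20)
			return 1;

		/* IPv6: the claimed payload must fit after the fixed 40-byte header */
		if (ntohs(v6_payload_len) > captured - 40)
			return 1;

		printf("v4 payload=%u, v6 payload=%u\n",
		       ntohs(v4_tot_len) - 20, ntohs(v6_payload_len));
		return 0;
	}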
diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile
index 7b936a926859..5d796622e730 100644
--- a/tools/testing/selftests/net/mptcp/Makefile
+++ b/tools/testing/selftests/net/mptcp/Makefile
@@ -11,6 +11,8 @@ TEST_GEN_FILES = mptcp_connect pm_nl_ctl mptcp_sockopt mptcp_inq
TEST_FILES := mptcp_lib.sh settings
+TEST_INCLUDES := ../lib.sh ../net_helper.sh
+
EXTRA_CLEAN := *.pcap
include ../../lib.mk
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 3564cd06643c..e8d0a01b4144 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -3130,7 +3130,9 @@ fullmesh_tests()
pm_nl_set_limits $ns1 1 3
pm_nl_set_limits $ns2 1 3
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
- pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,fullmesh
+ if mptcp_lib_kallsyms_has "mptcp_pm_subflow_check_next$"; then
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,fullmesh
+ fi
fullmesh=1 speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 3 3 3
diff --git a/tools/testing/selftests/net/ncdevmem.c b/tools/testing/selftests/net/ncdevmem.c
new file mode 100644
index 000000000000..64d6805381c5
--- /dev/null
+++ b/tools/testing/selftests/net/ncdevmem.c
@@ -0,0 +1,570 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#define __EXPORTED_HEADERS__
+
+#include <linux/uio.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdbool.h>
+#include <string.h>
+#include <errno.h>
+#define __iovec_defined
+#include <fcntl.h>
+#include <malloc.h>
+#include <error.h>
+
+#include <arpa/inet.h>
+#include <sys/socket.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include <sys/syscall.h>
+
+#include <linux/memfd.h>
+#include <linux/dma-buf.h>
+#include <linux/udmabuf.h>
+#include <libmnl/libmnl.h>
+#include <linux/types.h>
+#include <linux/netlink.h>
+#include <linux/genetlink.h>
+#include <linux/netdev.h>
+#include <time.h>
+#include <net/if.h>
+
+#include "netdev-user.h"
+#include <ynl.h>
+
+#define PAGE_SHIFT 12
+#define TEST_PREFIX "ncdevmem"
+#define NUM_PAGES 16000
+
+#ifndef MSG_SOCK_DEVMEM
+#define MSG_SOCK_DEVMEM 0x2000000
+#endif
+
+/*
+ * tcpdevmem netcat. Works similarly to netcat but does device memory TCP
+ * instead of regular TCP. Uses udmabuf to mock a dmabuf provider.
+ *
+ * Usage:
+ *
+ * On server:
+ * ncdevmem -s <server IP> -c <client IP> -f eth1 -l -p 5201 -v 7
+ *
+ * On client:
+ * yes $(echo -e \\x01\\x02\\x03\\x04\\x05\\x06) | \
+ * tr \\n \\0 | \
+ * head -c 5G | \
+ * nc <server IP> 5201 -p 5201
+ *
+ * Note this is compatible with regular netcat. i.e. the sender or receiver can
+ * be replaced with regular netcat to test the RX or TX path in isolation.
+ */
+
+static char *server_ip = "192.168.1.4";
+static char *client_ip = "192.168.1.2";
+static char *port = "5201";
+static size_t do_validation;
+static int start_queue = 8;
+static int num_queues = 8;
+static char *ifname = "eth1";
+static unsigned int ifindex;
+static unsigned int dmabuf_id;
+
+void print_bytes(void *ptr, size_t size)
+{
+ unsigned char *p = ptr;
+ int i;
+
+ for (i = 0; i < size; i++)
+ printf("%02hhX ", p[i]);
+ printf("\n");
+}
+
+void print_nonzero_bytes(void *ptr, size_t size)
+{
+ unsigned char *p = ptr;
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ putchar(p[i]);
+ printf("\n");
+}
+
+void validate_buffer(void *line, size_t size)
+{
+ static unsigned char seed = 1;
+ unsigned char *ptr = line;
+ int errors = 0;
+ size_t i;
+
+ for (i = 0; i < size; i++) {
+ if (ptr[i] != seed) {
+ fprintf(stderr,
+ "Failed validation: expected=%u, actual=%u, index=%lu\n",
+ seed, ptr[i], i);
+ errors++;
+ if (errors > 20)
+ error(1, 0, "validation failed.");
+ }
+ seed++;
+ if (seed == do_validation)
+ seed = 0;
+ }
+
+ fprintf(stdout, "Validated buffer\n");
+}
+
+#define run_command(cmd, ...) \
+ ({ \
+ char command[256]; \
+ memset(command, 0, sizeof(command)); \
+ snprintf(command, sizeof(command), cmd, ##__VA_ARGS__); \
+ printf("Running: %s\n", command); \
+ system(command); \
+ })
+
+static int reset_flow_steering(void)
+{
+ int ret = 0;
+
+ ret = run_command("sudo ethtool -K %s ntuple off", ifname);
+ if (ret)
+ return ret;
+
+ return run_command("sudo ethtool -K %s ntuple on", ifname);
+}
+
+static int configure_headersplit(bool on)
+{
+ return run_command("sudo ethtool -G %s tcp-data-split %s", ifname,
+ on ? "on" : "off");
+}
+
+static int configure_rss(void)
+{
+ return run_command("sudo ethtool -X %s equal %d", ifname, start_queue);
+}
+
+static int configure_channels(unsigned int rx, unsigned int tx)
+{
+ return run_command("sudo ethtool -L %s rx %u tx %u", ifname, rx, tx);
+}
+
+static int configure_flow_steering(void)
+{
+ return run_command("sudo ethtool -N %s flow-type tcp4 src-ip %s dst-ip %s src-port %s dst-port %s queue %d",
+ ifname, client_ip, server_ip, port, port, start_queue);
+}
+
+static int bind_rx_queue(unsigned int ifindex, unsigned int dmabuf_fd,
+ struct netdev_queue_id *queues,
+ unsigned int n_queue_index, struct ynl_sock **ys)
+{
+ struct netdev_bind_rx_req *req = NULL;
+ struct netdev_bind_rx_rsp *rsp = NULL;
+ struct ynl_error yerr;
+
+ *ys = ynl_sock_create(&ynl_netdev_family, &yerr);
+ if (!*ys) {
+ fprintf(stderr, "YNL: %s\n", yerr.msg);
+ return -1;
+ }
+
+ req = netdev_bind_rx_req_alloc();
+ netdev_bind_rx_req_set_ifindex(req, ifindex);
+ netdev_bind_rx_req_set_fd(req, dmabuf_fd);
+ __netdev_bind_rx_req_set_queues(req, queues, n_queue_index);
+
+ rsp = netdev_bind_rx(*ys, req);
+ if (!rsp) {
+ perror("netdev_bind_rx");
+ goto err_close;
+ }
+
+ if (!rsp->_present.id) {
+ perror("id not present");
+ goto err_close;
+ }
+
+ printf("got dmabuf id=%d\n", rsp->id);
+ dmabuf_id = rsp->id;
+
+ netdev_bind_rx_req_free(req);
+ netdev_bind_rx_rsp_free(rsp);
+
+ return 0;
+
+err_close:
+ fprintf(stderr, "YNL failed: %s\n", (*ys)->err.msg);
+ netdev_bind_rx_req_free(req);
+ ynl_sock_destroy(*ys);
+ return -1;
+}
+
+static void create_udmabuf(int *devfd, int *memfd, int *buf, size_t dmabuf_size)
+{
+ struct udmabuf_create create;
+ int ret;
+
+ *devfd = open("/dev/udmabuf", O_RDWR);
+ if (*devfd < 0) {
+ error(70, 0,
+ "%s: [skip,no-udmabuf: Unable to access DMA buffer device file]\n",
+ TEST_PREFIX);
+ }
+
+ *memfd = memfd_create("udmabuf-test", MFD_ALLOW_SEALING);
+ if (*memfd < 0)
+ error(70, 0, "%s: [skip,no-memfd]\n", TEST_PREFIX);
+
+ /* Required for udmabuf */
+ ret = fcntl(*memfd, F_ADD_SEALS, F_SEAL_SHRINK);
+ if (ret < 0)
+ error(73, 0, "%s: [skip,fcntl-add-seals]\n", TEST_PREFIX);
+
+ ret = ftruncate(*memfd, dmabuf_size);
+ if (ret == -1)
+ error(74, 0, "%s: [FAIL,memfd-truncate]\n", TEST_PREFIX);
+
+ memset(&create, 0, sizeof(create));
+
+ create.memfd = *memfd;
+ create.offset = 0;
+ create.size = dmabuf_size;
+ *buf = ioctl(*devfd, UDMABUF_CREATE, &create);
+ if (*buf < 0)
+ error(75, 0, "%s: [FAIL, create udmabuf]\n", TEST_PREFIX);
+}
+
+int do_server(void)
+{
+ char ctrl_data[sizeof(int) * 20000];
+ struct netdev_queue_id *queues;
+ size_t non_page_aligned_frags = 0;
+ struct sockaddr_in client_addr;
+ struct sockaddr_in server_sin;
+ size_t page_aligned_frags = 0;
+ int devfd, memfd, buf, ret;
+ size_t total_received = 0;
+ socklen_t client_addr_len;
+ bool is_devmem = false;
+ char *buf_mem = NULL;
+ struct ynl_sock *ys;
+ size_t dmabuf_size;
+ char iobuf[819200];
+ char buffer[256];
+ int socket_fd;
+ int client_fd;
+ size_t i = 0;
+ int opt = 1;
+
+ dmabuf_size = getpagesize() * NUM_PAGES;
+
+ create_udmabuf(&devfd, &memfd, &buf, dmabuf_size);
+
+ if (reset_flow_steering())
+ error(1, 0, "Failed to reset flow steering\n");
+
+	/* Configure RSS to steer all traffic away from our devmem queues */
+ if (configure_rss())
+ error(1, 0, "Failed to configure rss\n");
+
+ /* Flow steer our devmem flows to start_queue */
+ if (configure_flow_steering())
+ error(1, 0, "Failed to configure flow steering\n");
+
+ sleep(1);
+
+ queues = malloc(sizeof(*queues) * num_queues);
+
+ for (i = 0; i < num_queues; i++) {
+ queues[i]._present.type = 1;
+ queues[i]._present.id = 1;
+ queues[i].type = NETDEV_QUEUE_TYPE_RX;
+ queues[i].id = start_queue + i;
+ }
+
+ if (bind_rx_queue(ifindex, buf, queues, num_queues, &ys))
+ error(1, 0, "Failed to bind\n");
+
+ buf_mem = mmap(NULL, dmabuf_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ buf, 0);
+ if (buf_mem == MAP_FAILED)
+ error(1, 0, "mmap()");
+
+ server_sin.sin_family = AF_INET;
+ server_sin.sin_port = htons(atoi(port));
+
+ ret = inet_pton(server_sin.sin_family, server_ip, &server_sin.sin_addr);
+	if (ret != 1)
+		error(79, 0, "%s: [FAIL, parse server address]\n", TEST_PREFIX);
+
+ socket_fd = socket(server_sin.sin_family, SOCK_STREAM, 0);
+	if (socket_fd < 0)
+ error(errno, errno, "%s: [FAIL, create socket]\n", TEST_PREFIX);
+
+ ret = setsockopt(socket_fd, SOL_SOCKET, SO_REUSEPORT, &opt,
+ sizeof(opt));
+ if (ret)
+ error(errno, errno, "%s: [FAIL, set sock opt]\n", TEST_PREFIX);
+
+ ret = setsockopt(socket_fd, SOL_SOCKET, SO_REUSEADDR, &opt,
+ sizeof(opt));
+ if (ret)
+ error(errno, errno, "%s: [FAIL, set sock opt]\n", TEST_PREFIX);
+
+ printf("binding to address %s:%d\n", server_ip,
+ ntohs(server_sin.sin_port));
+
+ ret = bind(socket_fd, &server_sin, sizeof(server_sin));
+ if (ret)
+ error(errno, errno, "%s: [FAIL, bind]\n", TEST_PREFIX);
+
+ ret = listen(socket_fd, 1);
+ if (ret)
+ error(errno, errno, "%s: [FAIL, listen]\n", TEST_PREFIX);
+
+ client_addr_len = sizeof(client_addr);
+
+ inet_ntop(server_sin.sin_family, &server_sin.sin_addr, buffer,
+ sizeof(buffer));
+	printf("Waiting for connection on %s:%d\n", buffer,
+ ntohs(server_sin.sin_port));
+ client_fd = accept(socket_fd, &client_addr, &client_addr_len);
+
+ inet_ntop(client_addr.sin_family, &client_addr.sin_addr, buffer,
+ sizeof(buffer));
+ printf("Got connection from %s:%d\n", buffer,
+ ntohs(client_addr.sin_port));
+
+ while (1) {
+ struct iovec iov = { .iov_base = iobuf,
+ .iov_len = sizeof(iobuf) };
+ struct dmabuf_cmsg *dmabuf_cmsg = NULL;
+ struct dma_buf_sync sync = { 0 };
+ struct cmsghdr *cm = NULL;
+ struct msghdr msg = { 0 };
+ struct dmabuf_token token;
+ ssize_t ret;
+
+ is_devmem = false;
+ printf("\n\n");
+
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = ctrl_data;
+ msg.msg_controllen = sizeof(ctrl_data);
+ ret = recvmsg(client_fd, &msg, MSG_SOCK_DEVMEM);
+ printf("recvmsg ret=%ld\n", ret);
+ if (ret < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
+ continue;
+ if (ret < 0) {
+ perror("recvmsg");
+ continue;
+ }
+ if (ret == 0) {
+ printf("client exited\n");
+ goto cleanup;
+ }
+
+ i++;
+ for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
+ if (cm->cmsg_level != SOL_SOCKET ||
+ (cm->cmsg_type != SCM_DEVMEM_DMABUF &&
+ cm->cmsg_type != SCM_DEVMEM_LINEAR)) {
+ fprintf(stdout, "skipping non-devmem cmsg\n");
+ continue;
+ }
+
+ dmabuf_cmsg = (struct dmabuf_cmsg *)CMSG_DATA(cm);
+ is_devmem = true;
+
+ if (cm->cmsg_type == SCM_DEVMEM_LINEAR) {
+ /* TODO: process data copied from skb's linear
+ * buffer.
+ */
+ fprintf(stdout,
+ "SCM_DEVMEM_LINEAR. dmabuf_cmsg->frag_size=%u\n",
+ dmabuf_cmsg->frag_size);
+
+ continue;
+ }
+
+ token.token_start = dmabuf_cmsg->frag_token;
+ token.token_count = 1;
+
+ total_received += dmabuf_cmsg->frag_size;
+ printf("received frag_page=%llu, in_page_offset=%llu, frag_offset=%llu, frag_size=%u, token=%u, total_received=%lu, dmabuf_id=%u\n",
+ dmabuf_cmsg->frag_offset >> PAGE_SHIFT,
+ dmabuf_cmsg->frag_offset % getpagesize(),
+ dmabuf_cmsg->frag_offset, dmabuf_cmsg->frag_size,
+ dmabuf_cmsg->frag_token, total_received,
+ dmabuf_cmsg->dmabuf_id);
+
+ if (dmabuf_cmsg->dmabuf_id != dmabuf_id)
+ error(1, 0,
+ "received on wrong dmabuf_id: flow steering error\n");
+
+ if (dmabuf_cmsg->frag_size % getpagesize())
+ non_page_aligned_frags++;
+ else
+ page_aligned_frags++;
+
+ sync.flags = DMA_BUF_SYNC_READ | DMA_BUF_SYNC_START;
+ ioctl(buf, DMA_BUF_IOCTL_SYNC, &sync);
+
+ if (do_validation)
+ validate_buffer(
+ ((unsigned char *)buf_mem) +
+ dmabuf_cmsg->frag_offset,
+ dmabuf_cmsg->frag_size);
+ else
+ print_nonzero_bytes(
+ ((unsigned char *)buf_mem) +
+ dmabuf_cmsg->frag_offset,
+ dmabuf_cmsg->frag_size);
+
+ sync.flags = DMA_BUF_SYNC_READ | DMA_BUF_SYNC_END;
+ ioctl(buf, DMA_BUF_IOCTL_SYNC, &sync);
+
+ ret = setsockopt(client_fd, SOL_SOCKET,
+ SO_DEVMEM_DONTNEED, &token,
+ sizeof(token));
+ if (ret != 1)
+ error(1, 0,
+ "SO_DEVMEM_DONTNEED not enough tokens");
+ }
+ if (!is_devmem)
+ error(1, 0, "flow steering error\n");
+
+ printf("total_received=%lu\n", total_received);
+ }
+
+ fprintf(stdout, "%s: ok\n", TEST_PREFIX);
+
+ fprintf(stdout, "page_aligned_frags=%lu, non_page_aligned_frags=%lu\n",
+ page_aligned_frags, non_page_aligned_frags);
+
+cleanup:
+
+ munmap(buf_mem, dmabuf_size);
+ close(client_fd);
+ close(socket_fd);
+ close(buf);
+ close(memfd);
+ close(devfd);
+ ynl_sock_destroy(ys);
+
+ return 0;
+}
+
+void run_devmem_tests(void)
+{
+ struct netdev_queue_id *queues;
+ int devfd, memfd, buf;
+ struct ynl_sock *ys;
+ size_t dmabuf_size;
+ size_t i = 0;
+
+ dmabuf_size = getpagesize() * NUM_PAGES;
+
+ create_udmabuf(&devfd, &memfd, &buf, dmabuf_size);
+
+	/* Configure RSS to steer all traffic away from our devmem queues */
+ if (configure_rss())
+ error(1, 0, "rss error\n");
+
+ queues = calloc(num_queues, sizeof(*queues));
+
+ if (configure_headersplit(1))
+ error(1, 0, "Failed to configure header split\n");
+
+ if (!bind_rx_queue(ifindex, buf, queues, num_queues, &ys))
+ error(1, 0, "Binding empty queues array should have failed\n");
+
+ for (i = 0; i < num_queues; i++) {
+ queues[i]._present.type = 1;
+ queues[i]._present.id = 1;
+ queues[i].type = NETDEV_QUEUE_TYPE_RX;
+ queues[i].id = start_queue + i;
+ }
+
+ if (configure_headersplit(0))
+ error(1, 0, "Failed to configure header split\n");
+
+ if (!bind_rx_queue(ifindex, buf, queues, num_queues, &ys))
+ error(1, 0, "Configure dmabuf with header split off should have failed\n");
+
+ if (configure_headersplit(1))
+ error(1, 0, "Failed to configure header split\n");
+
+ for (i = 0; i < num_queues; i++) {
+ queues[i]._present.type = 1;
+ queues[i]._present.id = 1;
+ queues[i].type = NETDEV_QUEUE_TYPE_RX;
+ queues[i].id = start_queue + i;
+ }
+
+ if (bind_rx_queue(ifindex, buf, queues, num_queues, &ys))
+ error(1, 0, "Failed to bind\n");
+
+ /* Deactivating a bound queue should not be legal */
+ if (!configure_channels(num_queues, num_queues - 1))
+ error(1, 0, "Deactivating a bound queue should be illegal.\n");
+
+ /* Closing the netlink socket does an implicit unbind */
+ ynl_sock_destroy(ys);
+}
+
+int main(int argc, char *argv[])
+{
+ int is_server = 0, opt;
+
+ while ((opt = getopt(argc, argv, "ls:c:p:v:q:t:f:")) != -1) {
+ switch (opt) {
+ case 'l':
+ is_server = 1;
+ break;
+ case 's':
+ server_ip = optarg;
+ break;
+ case 'c':
+ client_ip = optarg;
+ break;
+ case 'p':
+ port = optarg;
+ break;
+ case 'v':
+ do_validation = atoll(optarg);
+ break;
+ case 'q':
+ num_queues = atoi(optarg);
+ break;
+ case 't':
+ start_queue = atoi(optarg);
+ break;
+ case 'f':
+ ifname = optarg;
+ break;
+ case '?':
+ printf("unknown option: %c\n", optopt);
+ break;
+ }
+ }
+
+ ifindex = if_nametoindex(ifname);
+
+ for (; optind < argc; optind++)
+ printf("extra arguments: %s\n", argv[optind]);
+
+ run_devmem_tests();
+
+ if (is_server)
+ return do_server();
+
+ return 0;
+}
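The usage comment at the top of ncdevmem.c feeds the server with a yes/tr/head/nc pipeline. For reference only, and not part of this patch, the standalone sketch below produces the same byte sequence that validate_buffer() checks: a seed starting at 1 that wraps to 0 at the -v value. The server address, port, wrap default, and the file name pattern_send.c are assumptions mirroring the defaults above.

/* pattern_send.c - hypothetical companion sender, not part of the patch.
 * Streams the byte pattern expected by ncdevmem's validate_buffer():
 * 1, 2, ..., wrap-1, 0, 1, ... over a plain TCP connection.
 */
#include <arpa/inet.h>
#include <errno.h>
#include <error.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
	const char *ip = argc > 1 ? argv[1] : "192.168.1.4";
	int port = argc > 2 ? atoi(argv[2]) : 5201;
	unsigned char wrap = argc > 3 ? atoi(argv[3]) : 7;
	struct sockaddr_in sin = { .sin_family = AF_INET };
	unsigned char seed = 1;
	unsigned char buf[4096];
	size_t i;
	int fd;

	sin.sin_port = htons(port);
	if (inet_pton(AF_INET, ip, &sin.sin_addr) != 1)
		error(1, 0, "bad address: %s", ip);

	fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd < 0)
		error(1, errno, "socket");
	if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)))
		error(1, errno, "connect");

	/* Stream until interrupted or until the receiver closes. */
	while (1) {
		for (i = 0; i < sizeof(buf); i++) {
			buf[i] = seed++;
			if (seed == wrap)
				seed = 0;
		}
		if (write(fd, buf, sizeof(buf)) < 0)
			error(1, errno, "write");
	}
}

Built with a plain cc invocation, such a sender could replace the netcat pipeline while ncdevmem -l validates with a matching -v value.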
diff --git a/tools/testing/selftests/net/packetdrill/Makefile b/tools/testing/selftests/net/packetdrill/Makefile
index 870f7258dc8d..31cfb666ba8b 100644
--- a/tools/testing/selftests/net/packetdrill/Makefile
+++ b/tools/testing/selftests/net/packetdrill/Makefile
@@ -2,6 +2,7 @@
TEST_INCLUDES := ksft_runner.sh \
defaults.sh \
+ set_sysctls.py \
../../kselftest/ktap_helpers.sh
TEST_PROGS := $(wildcard *.pkt)
diff --git a/tools/testing/selftests/net/packetdrill/config b/tools/testing/selftests/net/packetdrill/config
index 0d402830f18d..0237ed98f3c0 100644
--- a/tools/testing/selftests/net/packetdrill/config
+++ b/tools/testing/selftests/net/packetdrill/config
@@ -1,5 +1,11 @@
CONFIG_IPV6=y
+CONFIG_HZ_1000=y
+CONFIG_HZ=1000
+CONFIG_NET_NS=y
CONFIG_NET_SCH_FIFO=y
+CONFIG_NET_SCH_FQ=y
CONFIG_PROC_SYSCTL=y
+CONFIG_SYN_COOKIES=y
+CONFIG_TCP_CONG_CUBIC=y
CONFIG_TCP_MD5SIG=y
CONFIG_TUN=y
diff --git a/tools/testing/selftests/net/packetdrill/ksft_runner.sh b/tools/testing/selftests/net/packetdrill/ksft_runner.sh
index 2f62caccbbbc..7478c0c0c9aa 100755
--- a/tools/testing/selftests/net/packetdrill/ksft_runner.sh
+++ b/tools/testing/selftests/net/packetdrill/ksft_runner.sh
@@ -33,9 +33,9 @@ fi
ktap_print_header
ktap_set_plan 2
-packetdrill ${ipv4_args[@]} $(basename $script) > /dev/null \
+unshare -n packetdrill ${ipv4_args[@]} $(basename $script) > /dev/null \
&& ktap_test_pass "ipv4" || ktap_test_fail "ipv4"
-packetdrill ${ipv6_args[@]} $(basename $script) > /dev/null \
+unshare -n packetdrill ${ipv6_args[@]} $(basename $script) > /dev/null \
&& ktap_test_pass "ipv6" || ktap_test_fail "ipv6"
ktap_finished
diff --git a/tools/testing/selftests/net/packetdrill/set_sysctls.py b/tools/testing/selftests/net/packetdrill/set_sysctls.py
new file mode 100755
index 000000000000..5ddf456ae973
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/set_sysctls.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+"""Sets sysctl values and writes a file that restores them.
+
+The arguments are of the form "<proc-file>=<val>" separated by spaces.
+The program first reads the current value of the proc-file and creates
+a shell script named "/tmp/sysctl_restore_${PACKETDRILL_PID}.sh" which
+restores the values when executed. It then sets the new values.
+
+PACKETDRILL_PID is set by packetdrill to the pid of itself, so a .pkt
+file could restore sysctls by running `/tmp/sysctl_restore_${PPID}.sh`
+at the end.
+"""
+
+import os
+import subprocess
+import sys
+
+filename = '/tmp/sysctl_restore_%s.sh' % os.environ['PACKETDRILL_PID']
+
+# Open file for restoring sysctl values
+restore_file = open(filename, 'w')
+print('#!/bin/bash', file=restore_file)
+
+for a in sys.argv[1:]:
+ sysctl = a.split('=')
+ # sysctl[0] contains the proc-file name, sysctl[1] the new value
+
+ # read current value and add restore command to file
+ cur_val = subprocess.check_output(['cat', sysctl[0]], universal_newlines=True)
+ print('echo "%s" > %s' % (cur_val.strip(), sysctl[0]), file=restore_file)
+
+ # set new value
+ cmd = 'echo "%s" > %s' % (sysctl[1], sysctl[0])
+ os.system(cmd)
+
+os.system('chmod u+x %s' % filename)
diff --git a/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-1pkt.pkt b/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-1pkt.pkt
new file mode 100644
index 000000000000..795c476d222d
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-1pkt.pkt
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+// Test of slow start when not application-limited, so that
+// the cwnd continues to grow.
+// In this variant, the receiver ACKs every packet.
+
+// Set up config. To keep things simple, disable the
+// mechanism that defers sending in order to send bigger TSO packets.
+`./defaults.sh
+sysctl -q net.ipv4.tcp_tso_win_divisor=100`
+
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +.1 < S 0:0(0) win 32792 <mss 1000,sackOK,nop,nop,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+ +.1 < . 1:1(0) ack 1 win 257
+ +0 accept(3, ..., ...) = 4
+ +0 setsockopt(4, SOL_SOCKET, SO_SNDBUF, [200000], 4) = 0
+
+ +0 write(4, ..., 30000) = 30000
+ +0 > P. 1:10001(10000) ack 1
+ +0 %{ assert tcpi_snd_cwnd == 10, tcpi_snd_cwnd }%
+
++.105 < . 1:1(0) ack 1001 win 257
+ +0 > P. 10001:12001(2000) ack 1
+
+ +0 < . 1:1(0) ack 2001 win 257
+ +0 > P. 12001:14001(2000) ack 1
+
++.005 < . 1:1(0) ack 3001 win 257
+ +0 > P. 14001:16001(2000) ack 1
+
+ +0 < . 1:1(0) ack 4001 win 257
+ +0 > P. 16001:18001(2000) ack 1
+
++.005 < . 1:1(0) ack 5001 win 257
+ +0 > P. 18001:20001(2000) ack 1
+
+ +0 < . 1:1(0) ack 6001 win 257
+ +0 > P. 20001:22001(2000) ack 1
+
++.005 < . 1:1(0) ack 7001 win 257
+ +0 > P. 22001:24001(2000) ack 1
+
+ +0 < . 1:1(0) ack 8001 win 257
+ +0 > P. 24001:26001(2000) ack 1
+
++.005 < . 1:1(0) ack 9001 win 257
+ +0 > P. 26001:28001(2000) ack 1
+
+ +0 < . 1:1(0) ack 10001 win 257
+ +0 > P. 28001:30001(2000) ack 1
+
+ +0 %{ assert tcpi_snd_cwnd == 20, tcpi_snd_cwnd }%
diff --git a/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-2pkt-send-5pkt.pkt b/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-2pkt-send-5pkt.pkt
new file mode 100644
index 000000000000..9212ae1fd0f2
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-2pkt-send-5pkt.pkt
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+// Test of slow start when an outstanding flight of packets is
+// less than the current cwnd, and not big enough to bump up cwnd.
+//
+// In this variant, the receiver ACKs every other packet,
+// approximating standard delayed ACKs.
+
+// Set up config.
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:0(0) win 32792 <mss 1000,sackOK,nop,nop,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+ +0 < . 1:1(0) ack 1 win 257
+ +0 accept(3, ..., ...) = 4
+
+// Only send 5 packets.
+ +0 write(4, ..., 5000) = 5000
+ +0 > P. 1:5001(5000) ack 1
+ +0 %{ assert tcpi_snd_cwnd == 10, tcpi_snd_cwnd }%
+
+ +0 < . 1:1(0) ack 2001 win 257
+ +0 %{ assert tcpi_snd_cwnd == 10, 'cwnd=%d' % tcpi_snd_cwnd }%
+
+ +0 < . 1:1(0) ack 4001 win 257
+ +0 %{ assert tcpi_snd_cwnd == 10, 'cwnd=%d' % tcpi_snd_cwnd }%
+
+ +0 < . 1:1(0) ack 5001 win 257
+ +0 %{ assert tcpi_snd_cwnd == 10, 'cwnd=%d' % tcpi_snd_cwnd }%
diff --git a/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-2pkt-send-6pkt.pkt b/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-2pkt-send-6pkt.pkt
new file mode 100644
index 000000000000..416c901ddf51
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-2pkt-send-6pkt.pkt
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+// Test of slow start when an outstanding flight of packets is
+// less than the current cwnd, but still big enough that in slow
+// start we want to increase our cwnd a little.
+//
+// In this variant, the receiver ACKs every other packet,
+// approximating standard delayed ACKs.
+
+// Set up config.
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:0(0) win 32792 <mss 1000,sackOK,nop,nop,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+ +0 < . 1:1(0) ack 1 win 257
+ +0 accept(3, ..., ...) = 4
+
+// Only send 6 packets.
+ +0 write(4, ..., 6000) = 6000
+ +0 > P. 1:6001(6000) ack 1
+ +0 %{ assert tcpi_snd_cwnd == 10, tcpi_snd_cwnd }%
+
+ +0 < . 1:1(0) ack 2001 win 257
+ +0 %{ assert tcpi_snd_cwnd == 12, 'cwnd=%d' % tcpi_snd_cwnd }%
+
+ +0 < . 1:1(0) ack 4001 win 257
+ +0 %{ assert tcpi_snd_cwnd == 12, 'cwnd=%d' % tcpi_snd_cwnd }%
+
+ +0 < . 1:1(0) ack 6001 win 257
+ +0 %{ assert tcpi_snd_cwnd == 12, 'cwnd=%d' % tcpi_snd_cwnd }%
diff --git a/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-2pkt.pkt b/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-2pkt.pkt
new file mode 100644
index 000000000000..a894b7d4559c
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-2pkt.pkt
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+// Test of slow start when not application-limited, so that
+// the cwnd continues to grow.
+// In this variant, the receiver ACKs every other packet,
+// approximating standard delayed ACKs.
+
+// Set up config. To keep things simple, disable the
+// mechanism that defers sending in order to send bigger TSO packets.
+`./defaults.sh
+sysctl -q net.ipv4.tcp_tso_win_divisor=100`
+
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +.1 < S 0:0(0) win 32792 <mss 1000,sackOK,nop,nop,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+ +.1 < . 1:1(0) ack 1 win 257
+ +0 accept(3, ..., ...) = 4
+ +0 setsockopt(4, SOL_SOCKET, SO_SNDBUF, [200000], 4) = 0
+
+ +0 write(4, ..., 30000) = 30000
+ +0 > P. 1:10001(10000) ack 1
+ +0 %{ assert tcpi_snd_cwnd == 10, tcpi_snd_cwnd }%
+
++.105 < . 1:1(0) ack 2001 win 257
+ +0 > P. 10001:14001(4000) ack 1
+
++.005 < . 1:1(0) ack 4001 win 257
+ +0 > P. 14001:18001(4000) ack 1
+
++.005 < . 1:1(0) ack 6001 win 257
+ +0 > P. 18001:22001(4000) ack 1
+
++.005 < . 1:1(0) ack 8001 win 257
+ +0 > P. 22001:26001(4000) ack 1
+
++.005 < . 1:1(0) ack 10001 win 257
+ +0 > P. 26001:30001(4000) ack 1
+
+ +0 %{ assert tcpi_snd_cwnd == 20, tcpi_snd_cwnd }%
diff --git a/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-4pkt.pkt b/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-4pkt.pkt
new file mode 100644
index 000000000000..065fae9e9abd
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-ack-per-4pkt.pkt
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+// Test of slow start when not application-limited, so that
+// the cwnd continues to grow.
+// In this variant, the receiver sends one ACK per 4 packets.
+
+// Set up config. To keep things simple, disable the
+// mechanism that defers sending in order to send bigger TSO packets.
+`./defaults.sh
+sysctl -q net.ipv4.tcp_tso_win_divisor=100`
+
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +.1 < S 0:0(0) win 32792 <mss 1000,sackOK,nop,nop,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+ +.1 < . 1:1(0) ack 1 win 257
+ +0 accept(3, ..., ...) = 4
+ +0 setsockopt(4, SOL_SOCKET, SO_SNDBUF, [200000], 4) = 0
+
+ +0 write(4, ..., 30000) = 30000
+ +0 > P. 1:10001(10000) ack 1
+ +0 %{ assert tcpi_snd_cwnd == 10, tcpi_snd_cwnd }%
+
+ +.11 < . 1:1(0) ack 4001 win 257
+ +0 > P. 10001:18001(8000) ack 1
+
+ +.01 < . 1:1(0) ack 8001 win 257
+ +0 > P. 18001:26001(8000) ack 1
+
++.005 < . 1:1(0) ack 10001 win 257
+ +0 > P. 26001:30001(4000) ack 1
+
+ +0 %{ assert tcpi_snd_cwnd == 20, tcpi_snd_cwnd }%
diff --git a/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-after-idle.pkt b/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-after-idle.pkt
new file mode 100644
index 000000000000..11b213be1138
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-after-idle.pkt
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+// Test of slow start after idle
+// This test expects tso size to be at least initial cwnd * mss
+
+`./defaults.sh
+./set_sysctls.py /proc/sys/net/ipv4/tcp_slow_start_after_idle=1 \
+ /proc/sys/net/ipv4/tcp_min_tso_segs=10`
+
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:0(0) win 65535 <mss 1000,sackOK,nop,nop,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+ +.1 < . 1:1(0) ack 1 win 511
+ +0 accept(3, ..., ...) = 4
+ +0 setsockopt(4, SOL_SOCKET, SO_SNDBUF, [200000], 4) = 0
+
+ +0 write(4, ..., 26000) = 26000
+ +0 > P. 1:5001(5000) ack 1
+ +0 > P. 5001:10001(5000) ack 1
+ +0 %{ assert tcpi_snd_cwnd == 10, tcpi_snd_cwnd }%
+
+ +.1 < . 1:1(0) ack 10001 win 511
+ +0 %{ assert tcpi_snd_cwnd == 20, tcpi_snd_cwnd }%
+ +0 > P. 10001:20001(10000) ack 1
+ +0 > P. 20001:26001(6000) ack 1
+
+ +.1 < . 1:1(0) ack 26001 win 511
+ +0 %{ assert tcpi_snd_cwnd == 36, tcpi_snd_cwnd }%
+
+ +2 write(4, ..., 20000) = 20000
+// If slow start after idle works properly, we should send 5 MSS here (cwnd/2)
+ +0 > P. 26001:31001(5000) ack 1
+ +0 %{ assert tcpi_snd_cwnd == 10, tcpi_snd_cwnd }%
+
+// Reset sysctls
+`/tmp/sysctl_restore_${PPID}.sh`
diff --git a/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-after-win-update.pkt b/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-after-win-update.pkt
new file mode 100644
index 000000000000..577ed8c8852c
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-after-win-update.pkt
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+// Test of slow start after window update
+// This test expects tso size to be at least initial cwnd * mss
+
+`./defaults.sh
+./set_sysctls.py /proc/sys/net/ipv4/tcp_slow_start_after_idle=1 \
+ /proc/sys/net/ipv4/tcp_min_tso_segs=10`
+
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:0(0) win 65535 <mss 1000,sackOK,nop,nop,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+ +.1 < . 1:1(0) ack 1 win 511
+ +0 accept(3, ..., ...) = 4
+ +0 setsockopt(4, SOL_SOCKET, SO_SNDBUF, [200000], 4) = 0
+
+ +0 write(4, ..., 26000) = 26000
+ +0 > P. 1:5001(5000) ack 1
+ +0 > P. 5001:10001(5000) ack 1
+ +0 %{ assert tcpi_snd_cwnd == 10, tcpi_snd_cwnd }%
+
+ +.1 < . 1:1(0) ack 10001 win 511
+ +0 %{ assert tcpi_snd_cwnd == 20, tcpi_snd_cwnd }%
+ +0 > P. 10001:20001(10000) ack 1
+ +0 > P. 20001:26001(6000) ack 1
+
+ +.1 < . 1:1(0) ack 26001 win 0
+ +0 %{ assert tcpi_snd_cwnd == 36, tcpi_snd_cwnd }%
+
+ +0 write(4, ..., 20000) = 20000
+// 1st win0 probe
++.3~+.310 > . 26000:26000(0) ack 1
+ +0 %{ assert tcpi_snd_cwnd == 36, tcpi_snd_cwnd }%
+
+// 2nd win0 probe
++.6~+.620 > . 26000:26000(0) ack 1
+ +0 %{ assert tcpi_snd_cwnd == 36, tcpi_snd_cwnd }%
+
+// 3rd win0 probe
++1.2~+1.240 > . 26000:26000(0) ack 1
+ +0 %{ assert tcpi_snd_cwnd == 36, tcpi_snd_cwnd }%
+
+ +.9 < . 1:1(0) ack 26001 win 511
+ +0 > P. 26001:31001(5000) ack 1
+
+// Reset sysctls
+`/tmp/sysctl_restore_${PPID}.sh`
diff --git a/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-app-limited-9-packets-out.pkt b/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-app-limited-9-packets-out.pkt
new file mode 100644
index 000000000000..869f32c35a2a
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-app-limited-9-packets-out.pkt
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+// Test of slow start when application-limited: in this case,
+// with IW10, if we don't fully use our cwnd but instead
+// send just 9 packets, then cwnd should grow to twice that
+// value, or 18 packets.
+
+// Set up config.
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +.1 < S 0:0(0) win 32792 <mss 1000,sackOK,nop,nop,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+ +.1 < . 1:1(0) ack 1 win 257
+ +0 accept(3, ..., ...) = 4
+ +0 setsockopt(4, SOL_SOCKET, SO_SNDBUF, [200000], 4) = 0
+
+ +0 write(4, ..., 9000) = 9000
+ +0 > P. 1:9001(9000) ack 1
+ +0 %{ assert tcpi_snd_cwnd == 10, tcpi_snd_cwnd }%
+
++.105 < . 1:1(0) ack 2001 win 257
+ +0 %{ assert tcpi_snd_cwnd == 12, tcpi_snd_cwnd }%
+
++.005 < . 1:1(0) ack 4001 win 257
+ +0 %{ assert tcpi_snd_cwnd == 14, tcpi_snd_cwnd }%
+
++.005 < . 1:1(0) ack 6001 win 257
+ +0 %{ assert tcpi_snd_cwnd == 16, tcpi_snd_cwnd }%
+
++.005 < . 1:1(0) ack 8001 win 257
+ +0 %{ assert tcpi_snd_cwnd == 18, tcpi_snd_cwnd }%
+
++.005 < . 1:1(0) ack 9001 win 257
+ +0 %{ assert tcpi_snd_cwnd == 18, tcpi_snd_cwnd }%
diff --git a/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-app-limited.pkt b/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-app-limited.pkt
new file mode 100644
index 000000000000..0f77b7955db6
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-app-limited.pkt
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+// Test of slow start when application-limited: in this case,
+// with IW10, if we send exactly 10 packets then cwnd should grow to 20.
+
+// Set up config.
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +.1 < S 0:0(0) win 32792 <mss 1000,sackOK,nop,nop,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+ +.1 < . 1:1(0) ack 1 win 257
+ +0 accept(3, ..., ...) = 4
+ +0 setsockopt(4, SOL_SOCKET, SO_SNDBUF, [200000], 4) = 0
+
+ +0 write(4, ..., 10000) = 10000
+ +0 > P. 1:10001(10000) ack 1
+ +0 %{ assert tcpi_snd_cwnd == 10, tcpi_snd_cwnd }%
+
++.105 < . 1:1(0) ack 2001 win 257
+ +0 %{ assert tcpi_snd_cwnd == 12, tcpi_snd_cwnd }%
+
++.005 < . 1:1(0) ack 4001 win 257
+ +0 %{ assert tcpi_snd_cwnd == 14, tcpi_snd_cwnd }%
+
++.005 < . 1:1(0) ack 6001 win 257
+ +0 %{ assert tcpi_snd_cwnd == 16, tcpi_snd_cwnd }%
+
++.005 < . 1:1(0) ack 8001 win 257
+ +0 %{ assert tcpi_snd_cwnd == 18, tcpi_snd_cwnd }%
+
++.005 < . 1:1(0) ack 10001 win 257
+ +0 %{ assert tcpi_snd_cwnd == 20, tcpi_snd_cwnd }%
diff --git a/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-fq-ack-per-2pkt.pkt b/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-fq-ack-per-2pkt.pkt
new file mode 100644
index 000000000000..7e9c83d617c2
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_slow_start_slow-start-fq-ack-per-2pkt.pkt
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+// Test of slow start when not application-limited, so that
+// the cwnd continues to grow, even if TSQ triggers.
+// In this variant, the receiver ACKs every other packet,
+// approximating standard delayed ACKs.
+
+// Note we use FQ/pacing to check that TCP Small Queues does not hurt here
+
+`./defaults.sh
+tc qdisc replace dev tun0 root fq
+sysctl -q net/ipv4/tcp_pacing_ss_ratio=200
+sysctl -e -q net.ipv4.tcp_min_tso_segs=2`
+
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +.1 < S 0:0(0) win 32792 <mss 1460,sackOK,nop,nop,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+ +.1 < . 1:1(0) ack 1 win 500
+ +0 accept(3, ..., ...) = 4
+ +0 setsockopt(4, SOL_SOCKET, SO_SNDBUF, [200000], 4) = 0
+
+ +0 write(4, ..., 40000) = 40000
+// This might change if we cook the initial packet with 10 MSS.
+ +0 > P. 1:2921(2920) ack 1
+ +0 > P. 2921:5841(2920) ack 1
+ +0 > P. 5841:8761(2920) ack 1
+ +0 > P. 8761:11681(2920) ack 1
+ +0 > P. 11681:14601(2920) ack 1
+ +0 %{ assert tcpi_snd_cwnd == 10, tcpi_snd_cwnd }%
+
++.105 < . 1:1(0) ack 2921 win 500
+ +0 %{ assert tcpi_snd_cwnd == 12, tcpi_snd_cwnd }%
+
+// Note: after commit "net_sched: sch_fq: account for schedule/timers drifts",
+// FQ notices that this packet missed the 'time to send next packet' computed
+// when the prior packet (11681:14601(2920)) was sent.
+// So FQ allows the following packet to be sent a bit earlier (quantum/2);
+// that commit grants an application/cwnd-limited flow at most quantum/2 extra credit.
+ +0 > P. 14601:17521(2920) ack 1
+
++.003 < . 1:1(0) ack 5841 win 500
+ +0 %{ assert tcpi_snd_cwnd == 14, tcpi_snd_cwnd }%
+
++.001 > P. 17521:20441(2920) ack 1
+
++.001 < . 1:1(0) ack 8761 win 500
+ +0 %{ assert tcpi_snd_cwnd == 16, tcpi_snd_cwnd }%
+
+// remaining packets are delivered at a constant rate.
++.007 > P. 20441:23361(2920) ack 1
+
++.002 < . 1:1(0) ack 11681 win 500
+ +0 %{ assert tcpi_snd_cwnd == 18, tcpi_snd_cwnd }%
++.001 < . 1:1(0) ack 14601 win 500
+
++.004 > P. 23361:26281(2920) ack 1
+
++.007 > P. 26281:29201(2920) ack 1
+
+ +0 %{ assert tcpi_snd_cwnd == 20, 'cwnd=%d' % tcpi_snd_cwnd }%
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_basic.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_basic.pkt
new file mode 100644
index 000000000000..a82c8899d36b
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_basic.pkt
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+// basic zerocopy test:
+//
+// send a packet with MSG_ZEROCOPY and receive the notification ID
+// repeat and verify IDs are consecutive
+
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 setsockopt(3, SOL_SOCKET, SO_ZEROCOPY, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:0(0) win 32792 <mss 1000,sackOK,nop,nop,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+ +0 < . 1:1(0) ack 1 win 257
+
+ +0 accept(3, ..., ...) = 4
+
+ +0 send(4, ..., 4000, MSG_ZEROCOPY) = 4000
+ +0 > P. 1:4001(4000) ack 1
+ +0 < . 1:1(0) ack 4001 win 257
+
+ +0 recvmsg(4, {msg_name(...)=...,
+ msg_iov(1)=[{...,0}],
+ msg_flags=MSG_ERRQUEUE,
+ msg_control=[
+ {cmsg_level=CMSG_LEVEL_IP,
+ cmsg_type=CMSG_TYPE_RECVERR,
+ cmsg_data={ee_errno=0,
+ ee_origin=SO_EE_ORIGIN_ZEROCOPY,
+ ee_type=0,
+ ee_code=SO_EE_CODE_ZEROCOPY_COPIED,
+ ee_info=0,
+ ee_data=0}}
+ ]}, MSG_ERRQUEUE) = 0
+
+ +0 send(4, ..., 4000, MSG_ZEROCOPY) = 4000
+ +0 > P. 4001:8001(4000) ack 1
+ +0 < . 1:1(0) ack 8001 win 257
+
+ +0 recvmsg(4, {msg_name(...)=...,
+ msg_iov(1)=[{...,0}],
+ msg_flags=MSG_ERRQUEUE,
+ msg_control=[
+ {cmsg_level=CMSG_LEVEL_IP,
+ cmsg_type=CMSG_TYPE_RECVERR,
+ cmsg_data={ee_errno=0,
+ ee_origin=SO_EE_ORIGIN_ZEROCOPY,
+ ee_type=0,
+ ee_code=SO_EE_CODE_ZEROCOPY_COPIED,
+ ee_info=1,
+ ee_data=1}}
+ ]}, MSG_ERRQUEUE) = 0
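The packetdrill scripts above model the kernel side of the MSG_ZEROCOPY handshake. As a non-authoritative sketch of the application side, not part of this patch, the helper below sends one buffer with MSG_ZEROCOPY and then reads the matching SO_EE_ORIGIN_ZEROCOPY record from the error queue; fd is assumed to be a connected TCP socket.

/* Sketch only: one zerocopy send followed by its completion notification. */
#include <errno.h>
#include <linux/errqueue.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>

#ifndef SO_ZEROCOPY
#define SO_ZEROCOPY 60
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY 0x4000000
#endif

static int send_zerocopy_and_wait(int fd, const void *buf, size_t len)
{
	char control[128] = {};
	struct msghdr msg = {
		.msg_control = control,
		.msg_controllen = sizeof(control),
	};
	struct sock_extended_err *serr;
	struct cmsghdr *cm;
	int one = 1;

	if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
		return -1;
	if (send(fd, buf, len, MSG_ZEROCOPY) != (ssize_t)len)
		return -1;

	/* The completion lands on the error queue once the data is no longer
	 * referenced (ee_code carries SO_EE_CODE_ZEROCOPY_COPIED if the kernel
	 * fell back to copying). A real program would poll() for POLLERR
	 * rather than spin here.
	 */
	while (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0) {
		if (errno != EAGAIN && errno != EWOULDBLOCK)
			return -1;
	}

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		serr = (struct sock_extended_err *)CMSG_DATA(cm);
		if (serr->ee_origin != SO_EE_ORIGIN_ZEROCOPY)
			continue;
		/* ee_info..ee_data is the range of completed zerocopy sends. */
		return (int)serr->ee_data;
	}
	return -1;
}

Against the basic script above, the first completion reports ee_info == ee_data == 0, and each further send extends the range by one.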
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_batch.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_batch.pkt
new file mode 100644
index 000000000000..c01915e7f4a1
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_batch.pkt
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+// batch zerocopy test:
+//
+// send multiple packets, then read one range of all notifications.
+
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 setsockopt(3, SOL_SOCKET, SO_ZEROCOPY, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:0(0) win 32792 <mss 1000,sackOK,nop,nop,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+ +0 < . 1:1(0) ack 1 win 257
+
+ +0 accept(3, ..., ...) = 4
+ +0 setsockopt(4, SOL_SOCKET, SO_MARK, [666], 4) = 0
+
+ +0 send(4, ..., 4000, MSG_ZEROCOPY) = 4000
+ +0 > P. 1:4001(4000) ack 1
+ +0 < . 1:1(0) ack 4001 win 257
+
+ +0 send(4, ..., 4000, MSG_ZEROCOPY) = 4000
+ +0 > P. 4001:8001(4000) ack 1
+ +0 < . 1:1(0) ack 8001 win 257
+
+ +0 recvmsg(4, {msg_name(...)=...,
+ msg_iov(1)=[{...,0}],
+ msg_flags=MSG_ERRQUEUE,
+ msg_control=[
+ {cmsg_level=CMSG_LEVEL_IP,
+ cmsg_type=CMSG_TYPE_RECVERR,
+ cmsg_data={ee_errno=0,
+ ee_origin=SO_EE_ORIGIN_ZEROCOPY,
+ ee_type=0,
+ ee_code=SO_EE_CODE_ZEROCOPY_COPIED,
+ ee_info=0,
+ ee_data=1}}
+ ]}, MSG_ERRQUEUE) = 0
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_client.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_client.pkt
new file mode 100644
index 000000000000..6509882932e9
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_client.pkt
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+// Minimal client-side zerocopy test
+
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 4
+ +0 setsockopt(4, SOL_SOCKET, SO_ZEROCOPY, [1], 4) = 0
+ +0...0 connect(4, ..., ...) = 0
+
+ +0 > S 0:0(0) <mss 1460,sackOK,TS val 0 ecr 0,nop,wscale 8>
+ +0 < S. 0:0(0) ack 1 win 32792 <mss 1000,sackOK,nop,nop,nop,wscale 7>
+ +0 > . 1:1(0) ack 1
+
+ +0 send(4, ..., 4000, MSG_ZEROCOPY) = 4000
+ +0 > P. 1:4001(4000) ack 1
+ +0 < . 1:1(0) ack 4001 win 257
+
+ +0 recvmsg(4, {msg_name(...)=...,
+ msg_iov(1)=[{...,0}],
+ msg_flags=MSG_ERRQUEUE,
+ msg_control=[
+ {cmsg_level=CMSG_LEVEL_IP,
+ cmsg_type=CMSG_TYPE_RECVERR,
+ cmsg_data={ee_errno=0,
+ ee_origin=SO_EE_ORIGIN_ZEROCOPY,
+ ee_type=0,
+ ee_code=SO_EE_CODE_ZEROCOPY_COPIED,
+ ee_info=0,
+ ee_data=0}}
+ ]}, MSG_ERRQUEUE) = 0
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_closed.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_closed.pkt
new file mode 100644
index 000000000000..2cd78755cb2a
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_closed.pkt
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+// send with MSG_ZEROCOPY on a non-established socket
+//
+// verify that a send in state TCP_CLOSE correctly aborts the zerocopy
+// operation, specifically it does not increment the zerocopy counter.
+//
+// First send on a closed socket and wait for (absent) notification.
+// Then connect and send and verify that notification nr. is zero.
+
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 4
+ +0 setsockopt(4, SOL_SOCKET, SO_ZEROCOPY, [1], 4) = 0
+
+ +0 send(4, ..., 4000, MSG_ZEROCOPY) = -1 EPIPE (Broken pipe)
+
+ +0.1 recvmsg(4, {msg_name(...)=...,
+ msg_iov(1)=[{...,0}],
+ msg_flags=MSG_ERRQUEUE,
+ msg_control=[]}, MSG_ERRQUEUE) = -1 EAGAIN (Resource temporarily unavailable)
+
+ +0...0 connect(4, ..., ...) = 0
+
+ +0 > S 0:0(0) <mss 1460,sackOK,TS val 0 ecr 0,nop,wscale 8>
+ +0 < S. 0:0(0) ack 1 win 32792 <mss 1000,sackOK,nop,nop,nop,wscale 7>
+ +0 > . 1:1(0) ack 1
+
+ +0 send(4, ..., 4000, MSG_ZEROCOPY) = 4000
+ +0 > P. 1:4001(4000) ack 1
+ +0 < . 1:1(0) ack 4001 win 257
+
+ +0 recvmsg(4, {msg_name(...)=...,
+ msg_iov(1)=[{...,0}],
+ msg_flags=MSG_ERRQUEUE,
+ msg_control=[
+ {cmsg_level=CMSG_LEVEL_IP,
+ cmsg_type=CMSG_TYPE_RECVERR,
+ cmsg_data={ee_errno=0,
+ ee_origin=SO_EE_ORIGIN_ZEROCOPY,
+ ee_type=0,
+ ee_code=SO_EE_CODE_ZEROCOPY_COPIED,
+ ee_info=0,
+ ee_data=0}}
+ ]}, MSG_ERRQUEUE) = 0
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_edge.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_edge.pkt
new file mode 100644
index 000000000000..7671c20e01cf
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_edge.pkt
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+// epoll zerocopy test:
+//
+// EPOLLERR is known not to be edge-triggered, unlike EPOLLIN and EPOLLOUT,
+// but it is not level-triggered either.
+//
+// fire two sends with MSG_ZEROCOPY and receive the acks. confirm that EPOLLERR
+// is correctly fired only once, when EPOLLET is set. send another packet with
+// MSG_ZEROCOPY. confirm that EPOLLERR is correctly fired again only once.
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 setsockopt(3, SOL_SOCKET, SO_ZEROCOPY, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:0(0) win 32792 <mss 1000,sackOK,nop,nop,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+ +0 < . 1:1(0) ack 1 win 257
+
+ +0 accept(3, ..., ...) = 4
+
+ +0 fcntl(4, F_SETFL, O_RDWR|O_NONBLOCK) = 0
+ +0 epoll_create(1) = 5
+ +0 epoll_ctl(5, EPOLL_CTL_ADD, 4, {events=EPOLLOUT|EPOLLET, fd=4}) = 0
+ +0 epoll_wait(5, {events=EPOLLOUT, fd=4}, 1, 0) = 1
+
+ +0 send(4, ..., 4000, MSG_ZEROCOPY) = 4000
+ +0 > P. 1:4001(4000) ack 1
+ +0 < . 1:1(0) ack 4001 win 257
+
+ +0 send(4, ..., 4000, MSG_ZEROCOPY) = 4000
+ +0 > P. 4001:8001(4000) ack 1
+ +0 < . 1:1(0) ack 8001 win 257
+
+// receive only one EPOLLERR for the two sends above.
+ +0 epoll_wait(5, {events=EPOLLERR|EPOLLOUT, fd=4}, 1, 0) = 1
+ +0 epoll_wait(5, {events=0, ptr=0}, 1, 0) = 0
+
+ +0 send(4, ..., 4000, MSG_ZEROCOPY) = 4000
+ +0 > P. 8001:12001(4000) ack 1
+ +0 < . 1:1(0) ack 12001 win 257
+
+// receive only one EPOLLERR for the third send above.
+ +0 epoll_wait(5, {events=EPOLLERR|EPOLLOUT, fd=4}, 1, 0) = 1
+ +0 epoll_wait(5, {events=0, ptr=0}, 1, 0) = 0
+
+ +0 recvmsg(4, {msg_name(...)=...,
+ msg_iov(1)=[{...,0}],
+ msg_flags=MSG_ERRQUEUE,
+ msg_control=[
+ {cmsg_level=CMSG_LEVEL_IP,
+ cmsg_type=CMSG_TYPE_RECVERR,
+ cmsg_data={ee_errno=0,
+ ee_origin=SO_EE_ORIGIN_ZEROCOPY,
+ ee_type=0,
+ ee_code=SO_EE_CODE_ZEROCOPY_COPIED,
+ ee_info=0,
+ ee_data=2}}
+ ]}, MSG_ERRQUEUE) = 0
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_exclusive.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_exclusive.pkt
new file mode 100644
index 000000000000..fadc480fdb7f
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_exclusive.pkt
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+// epoll zerocopy test:
+//
+// EPOLLERR is known not to be edge-triggered, unlike EPOLLIN and EPOLLOUT,
+// but it is not level-triggered either. This test verifies that the same
+// behavior is maintained when we have EPOLLEXCLUSIVE.
+//
+// fire two sends with MSG_ZEROCOPY and receive the acks. confirm that EPOLLERR
+// is correctly fired only once, when EPOLLET is set. send another packet with
+// MSG_ZEROCOPY. confirm that EPOLLERR is correctly fired again only once.
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 setsockopt(3, SOL_SOCKET, SO_ZEROCOPY, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:0(0) win 32792 <mss 1000,sackOK,nop,nop,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+ +0 < . 1:1(0) ack 1 win 257
+
+ +0 accept(3, ..., ...) = 4
+
+ +0 fcntl(4, F_SETFL, O_RDWR|O_NONBLOCK) = 0
+ +0 epoll_create(1) = 5
+ +0 epoll_ctl(5, EPOLL_CTL_ADD, 4,
+ {events=EPOLLOUT|EPOLLET|EPOLLEXCLUSIVE, fd=4}) = 0
+ +0 epoll_wait(5, {events=EPOLLOUT, fd=4}, 1, 0) = 1
+
+ +0 send(4, ..., 4000, MSG_ZEROCOPY) = 4000
+ +0 > P. 1:4001(4000) ack 1
+ +0 < . 1:1(0) ack 4001 win 257
+
+ +0 send(4, ..., 4000, MSG_ZEROCOPY) = 4000
+ +0 > P. 4001:8001(4000) ack 1
+ +0 < . 1:1(0) ack 8001 win 257
+
+// receive only one EPOLLERR for the two sends above.
+ +0 epoll_wait(5, {events=EPOLLERR|EPOLLOUT, fd=4}, 1, 0) = 1
+ +0 epoll_wait(5, {events=0, ptr=0}, 1, 0) = 0
+
+ +0 send(4, ..., 4000, MSG_ZEROCOPY) = 4000
+ +0 > P. 8001:12001(4000) ack 1
+ +0 < . 1:1(0) ack 12001 win 257
+
+// receive only one EPOLLERR for the third send above.
+ +0 epoll_wait(5, {events=EPOLLERR|EPOLLOUT, fd=4}, 1, 0) = 1
+ +0 epoll_wait(5, {events=0, ptr=0}, 1, 0) = 0
+
+ +0 recvmsg(4, {msg_name(...)=...,
+ msg_iov(1)=[{...,0}],
+ msg_flags=MSG_ERRQUEUE,
+ msg_control=[
+ {cmsg_level=CMSG_LEVEL_IP,
+ cmsg_type=CMSG_TYPE_RECVERR,
+ cmsg_data={ee_errno=0,
+ ee_origin=SO_EE_ORIGIN_ZEROCOPY,
+ ee_type=0,
+ ee_code=SO_EE_CODE_ZEROCOPY_COPIED,
+ ee_info=0,
+ ee_data=2}}
+ ]}, MSG_ERRQUEUE) = 0
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_oneshot.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_oneshot.pkt
new file mode 100644
index 000000000000..5bfa0d1d2f4a
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_epoll_oneshot.pkt
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+// epoll zerocopy test:
+//
+// This is a test to confirm that EPOLLERR is only fired once for an FD when
+// EPOLLONESHOT is set.
+//
+// fire two sends with MSG_ZEROCOPY and receive the acks. confirm that EPOLLERR
+// is correctly fired only once, when EPOLLONESHOT is set. send another packet
+// with MSG_ZEROCOPY. confirm that EPOLLERR is not fired. Rearm the FD and
+// confirm that EPOLLERR is correctly set.
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 setsockopt(3, SOL_SOCKET, SO_ZEROCOPY, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:0(0) win 32792 <mss 1000,sackOK,nop,nop,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+ +0 < . 1:1(0) ack 1 win 257
+
+ +0 accept(3, ..., ...) = 4
+
+ +0 fcntl(4, F_SETFL, O_RDWR|O_NONBLOCK) = 0
+ +0 epoll_create(1) = 5
+ +0 epoll_ctl(5, EPOLL_CTL_ADD, 4,
+ {events=EPOLLOUT|EPOLLET|EPOLLONESHOT, fd=4}) = 0
+
+ +0 send(4, ..., 4000, MSG_ZEROCOPY) = 4000
+ +0 > P. 1:4001(4000) ack 1
+ +0 < . 1:1(0) ack 4001 win 257
+
+ +0 send(4, ..., 4000, MSG_ZEROCOPY) = 4000
+ +0 > P. 4001:8001(4000) ack 1
+ +0 < . 1:1(0) ack 8001 win 257
+
+// receive only one EPOLLERR for the two sends above.
+ +0 epoll_wait(5, {events=EPOLLERR|EPOLLOUT, fd=4}, 1, 0) = 1
+ +0 epoll_wait(5, {events=0, ptr=0}, 1, 0) = 0
+
+ +0 send(4, ..., 4000, MSG_ZEROCOPY) = 4000
+ +0 > P. 8001:12001(4000) ack 1
+ +0 < . 1:1(0) ack 12001 win 257
+
+// receive no EPOLLERR for the third send above.
+ +0 epoll_wait(5, {events=0, ptr=0}, 1, 0) = 0
+
+// rearm the FD and verify the EPOLLERR is fired again.
+ +0 epoll_ctl(5, EPOLL_CTL_MOD, 4, {events=EPOLLOUT|EPOLLONESHOT, fd=4}) = 0
+ +0 epoll_wait(5, {events=EPOLLERR|EPOLLOUT, fd=4}, 1, 0) = 1
+ +0 epoll_wait(5, {events=0, ptr=0}, 1, 0) = 0
+
+ +0 recvmsg(4, {msg_name(...)=...,
+ msg_iov(1)=[{...,0}],
+ msg_flags=MSG_ERRQUEUE,
+ msg_control=[
+ {cmsg_level=CMSG_LEVEL_IP,
+ cmsg_type=CMSG_TYPE_RECVERR,
+ cmsg_data={ee_errno=0,
+ ee_origin=SO_EE_ORIGIN_ZEROCOPY,
+ ee_type=0,
+ ee_code=SO_EE_CODE_ZEROCOPY_COPIED,
+ ee_info=0,
+ ee_data=2}}
+ ]}, MSG_ERRQUEUE) = 0
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-client.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-client.pkt
new file mode 100644
index 000000000000..4a73bbf46961
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-client.pkt
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+// Fastopen client zerocopy test:
+//
+// send data with MSG_FASTOPEN | MSG_ZEROCOPY and verify that the
+// kernel returns the notification ID.
+//
+// Fastopen requires a stored cookie. Create two sockets. The first
+// one carries no data in the initial send, so the zerocopy notification
+// counter is not incremented. Verify this too.
+
+`./defaults.sh`
+
+// Send a FastOpen request, no cookie yet so no data in SYN
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 fcntl(3, F_SETFL, O_RDWR|O_NONBLOCK) = 0
+ +0 setsockopt(3, SOL_SOCKET, SO_ZEROCOPY, [1], 4) = 0
+ +0 sendto(3, ..., 500, MSG_FASTOPEN|MSG_ZEROCOPY, ..., ...) = -1 EINPROGRESS (Operation now in progress)
+ +0 > S 0:0(0) <mss 1460,sackOK,TS val 1000 ecr 0,nop,wscale 8,FO,nop,nop>
+ +.01 < S. 123:123(0) ack 1 win 14600 <mss 940,TS val 2000 ecr 1000,sackOK,nop,wscale 6, FO abcd1234,nop,nop>
+ +0 > . 1:1(0) ack 1 <nop,nop,TS val 1001 ecr 2000>
+
+// Read from error queue: no zerocopy notification
+ +1 recvmsg(3, {msg_name(...)=...,
+ msg_iov(1)=[{...,0}],
+ msg_flags=MSG_ERRQUEUE,
+ msg_control=[]}, MSG_ERRQUEUE) = -1 EAGAIN (Resource temporarily unavailable)
+
+ +.01 close(3) = 0
+ +0 > F. 1:1(0) ack 1 <nop,nop,TS val 1002 ecr 2000>
+ +.01 < F. 1:1(0) ack 2 win 92 <nop,nop,TS val 2001 ecr 1002>
+ +0 > . 2:2(0) ack 2 <nop,nop,TS val 1003 ecr 2001>
+
+// Send another Fastopen request, now SYN will have data
+ +.07 `sysctl -q net.ipv4.tcp_timestamps=0`
+ +.1 socket(..., SOCK_STREAM, IPPROTO_TCP) = 5
+ +0 fcntl(5, F_SETFL, O_RDWR|O_NONBLOCK) = 0
+ +0 setsockopt(5, SOL_SOCKET, SO_ZEROCOPY, [1], 4) = 0
+ +0 sendto(5, ..., 500, MSG_FASTOPEN|MSG_ZEROCOPY, ..., ...) = 500
+ +0 > S 0:500(500) <mss 1460,nop,nop,sackOK,nop,wscale 8,FO abcd1234,nop,nop>
+ +.05 < S. 5678:5678(0) ack 501 win 14600 <mss 1460,nop,nop,sackOK,nop,wscale 6>
+ +0 > . 501:501(0) ack 1
+
+// Read from error queue: now has first zerocopy notification
+ +0.5 recvmsg(5, {msg_name(...)=...,
+ msg_iov(1)=[{...,0}],
+ msg_flags=MSG_ERRQUEUE,
+ msg_control=[
+ {cmsg_level=CMSG_LEVEL_IP,
+ cmsg_type=CMSG_TYPE_RECVERR,
+ cmsg_data={ee_errno=0,
+ ee_origin=SO_EE_ORIGIN_ZEROCOPY,
+ ee_type=0,
+ ee_code=SO_EE_CODE_ZEROCOPY_COPIED,
+ ee_info=0,
+ ee_data=0}}
+ ]}, MSG_ERRQUEUE) = 0
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-server.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-server.pkt
new file mode 100644
index 000000000000..36086c5877ce
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_fastopen-server.pkt
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+// Fastopen server zerocopy test:
+//
+// send data with MSG_FASTOPEN | MSG_ZEROCOPY and verify that the
+// kernel returns the notification ID.
+
+`./defaults.sh
+ ./set_sysctls.py /proc/sys/net/ipv4/tcp_fastopen=0x207`
+
+// Set up a TFO server listening socket.
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +.1 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+ +0 setsockopt(3, SOL_TCP, TCP_FASTOPEN, [2], 4) = 0
+ +0 setsockopt(3, SOL_SOCKET, SO_ZEROCOPY, [1], 4) = 0
+
+// Client sends a SYN with data.
+ +.1 < S 0:1000(1000) win 32792 <mss 1460,sackOK,nop,nop>
+ +0 > S. 0:0(0) ack 1001 <mss 1460,nop,nop,sackOK>
+
+// Server accepts and replies with data.
++.005 accept(3, ..., ...) = 4
+ +0 read(4, ..., 1024) = 1000
+ +0 sendto(4, ..., 1000, MSG_ZEROCOPY, ..., ...) = 1000
+ +0 > P. 1:1001(1000) ack 1001
+ +.05 < . 1001:1001(0) ack 1001 win 32792
+
+// Read from error queue: now has first zerocopy notification
+ +0.1 recvmsg(4, {msg_name(...)=...,
+ msg_iov(1)=[{...,0}],
+ msg_flags=MSG_ERRQUEUE,
+ msg_control=[
+ {cmsg_level=CMSG_LEVEL_IP,
+ cmsg_type=CMSG_TYPE_RECVERR,
+ cmsg_data={ee_errno=0,
+ ee_origin=SO_EE_ORIGIN_ZEROCOPY,
+ ee_type=0,
+ ee_code=SO_EE_CODE_ZEROCOPY_COPIED,
+ ee_info=0,
+ ee_data=0}}
+ ]}, MSG_ERRQUEUE) = 0
+
+`/tmp/sysctl_restore_${PPID}.sh`
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_maxfrags.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_maxfrags.pkt
new file mode 100644
index 000000000000..672f817faca0
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_maxfrags.pkt
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+// tcp_MAX_SKB_FRAGS test
+//
+// Verify that sending an iovec of tcp_MAX_SKB_FRAGS + 1 elements will
+// 1) fit in a single packet without zerocopy
+// 2) spill over into a second packet with zerocopy,
+// because each iovec element becomes a frag
+// 3) set the PSH bit on an skb when it runs out of fragments
+
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 setsockopt(3, SOL_SOCKET, SO_ZEROCOPY, [1], 4) = 0
+
+ // Each pinned zerocopy page is fully accounted to skb->truesize.
+ // This test generates a worst case packet with each frag storing
+ // one byte, but increasing truesize with a page (64KB on PPC).
+ +0 setsockopt(3, SOL_SOCKET, SO_SNDBUF, [2000000], 4) = 0
+
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:0(0) win 32792 <mss 1000,sackOK,nop,nop,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+ +0 < . 1:1(0) ack 1 win 257
+
+ +0 accept(3, ..., ...) = 4
+
+ // send an iov of 18 elements: just becomes a linear skb
+ +0 sendmsg(4, {msg_name(...)=...,
+ msg_iov(18)=[{..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}],
+ msg_flags=0}, 0) = 18
+
+ +0 > P. 1:19(18) ack 1
+ +0 < . 1:1(0) ack 19 win 257
+
+ // send a zerocopy iov of 18 elements:
+ +1 sendmsg(4, {msg_name(...)=...,
+ msg_iov(18)=[{..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}],
+ msg_flags=0}, MSG_ZEROCOPY) = 18
+
+ // verify that it is split in one skb of 17 frags + 1 of 1 frag
+ // verify that both have the PSH bit set
+ +0 > P. 19:36(17) ack 1
+ +0 < . 1:1(0) ack 36 win 257
+
+ +0 > P. 36:37(1) ack 1
+ +0 < . 1:1(0) ack 37 win 257
+
+ +1 recvmsg(4, {msg_name(...)=...,
+ msg_iov(1)=[{...,0}],
+ msg_flags=MSG_ERRQUEUE,
+ msg_control=[
+ {cmsg_level=CMSG_LEVEL_IP,
+ cmsg_type=CMSG_TYPE_RECVERR,
+ cmsg_data={ee_errno=0,
+ ee_origin=SO_EE_ORIGIN_ZEROCOPY,
+ ee_type=0,
+ ee_code=SO_EE_CODE_ZEROCOPY_COPIED,
+ ee_info=0,
+ ee_data=0}}
+ ]}, MSG_ERRQUEUE) = 0
+
+ // send a zerocopy iov of 64 elements:
+ +0 sendmsg(4, {msg_name(...)=...,
+ msg_iov(64)=[{..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}, {..., 1}, {..., 1},
+ {..., 1}, {..., 1}, {..., 1}, {..., 1}],
+ msg_flags=0}, MSG_ZEROCOPY) = 64
+
+ // verify that it is split in skbs with 17 frags
+ +0 > P. 37:54(17) ack 1
+ +0 < . 1:1(0) ack 54 win 257
+
+ +0 > P. 54:71(17) ack 1
+ +0 < . 1:1(0) ack 71 win 257
+
+ +0 > P. 71:88(17) ack 1
+ +0 < . 1:1(0) ack 88 win 257
+
+ +0 > P. 88:101(13) ack 1
+ +0 < . 1:1(0) ack 101 win 257
+
+ +1 recvmsg(4, {msg_name(...)=...,
+ msg_iov(1)=[{...,0}],
+ msg_flags=MSG_ERRQUEUE,
+ msg_control=[
+ {cmsg_level=CMSG_LEVEL_IP,
+ cmsg_type=CMSG_TYPE_RECVERR,
+ cmsg_data={ee_errno=0,
+ ee_origin=SO_EE_ORIGIN_ZEROCOPY,
+ ee_type=0,
+ ee_code=SO_EE_CODE_ZEROCOPY_COPIED,
+ ee_info=1,
+ ee_data=1}}
+ ]}, MSG_ERRQUEUE) = 0
diff --git a/tools/testing/selftests/net/packetdrill/tcp_zerocopy_small.pkt b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_small.pkt
new file mode 100644
index 000000000000..a9a1ac0aea4f
--- /dev/null
+++ b/tools/testing/selftests/net/packetdrill/tcp_zerocopy_small.pkt
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+// small packet zerocopy test:
+//
+// verify that SO_EE_CODE_ZEROCOPY_COPIED is set on zerocopy
+// packets of all sizes, including the smallest payload, 1B.
+
+`./defaults.sh`
+
+ 0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
+ +0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+ +0 setsockopt(3, SOL_SOCKET, SO_ZEROCOPY, [1], 4) = 0
+ +0 bind(3, ..., ...) = 0
+ +0 listen(3, 1) = 0
+
+ +0 < S 0:0(0) win 32792 <mss 1000,sackOK,nop,nop,nop,wscale 7>
+ +0 > S. 0:0(0) ack 1 <mss 1460,nop,nop,sackOK,nop,wscale 8>
+ +0 < . 1:1(0) ack 1 win 257
+
+ +0 accept(3, ..., ...) = 4
+
+ // send 1B
+ +0 send(4, ..., 1, MSG_ZEROCOPY) = 1
+ +0 > P. 1:2(1) ack 1
+ +0 < . 1:1(0) ack 2 win 257
+
+ +1 recvmsg(4, {msg_name(...)=...,
+ msg_iov(1)=[{...,0}],
+ msg_flags=MSG_ERRQUEUE,
+ msg_control=[
+ {cmsg_level=CMSG_LEVEL_IP,
+ cmsg_type=CMSG_TYPE_RECVERR,
+ cmsg_data={ee_errno=0,
+ ee_origin=SO_EE_ORIGIN_ZEROCOPY,
+ ee_type=0,
+ ee_code=SO_EE_CODE_ZEROCOPY_COPIED,
+ ee_info=0,
+ ee_data=0}}
+ ]}, MSG_ERRQUEUE) = 0
+
+ // send 1B again
+ +0 send(4, ..., 1, MSG_ZEROCOPY) = 1
+ +0 > P. 2:3(1) ack 1
+ +0 < . 1:1(0) ack 3 win 257
+
+ +1 recvmsg(4, {msg_name(...)=...,
+ msg_iov(1)=[{...,0}],
+ msg_flags=MSG_ERRQUEUE,
+ msg_control=[
+ {cmsg_level=CMSG_LEVEL_IP,
+ cmsg_type=CMSG_TYPE_RECVERR,
+ cmsg_data={ee_errno=0,
+ ee_origin=SO_EE_ORIGIN_ZEROCOPY,
+ ee_type=0,
+ ee_code=SO_EE_CODE_ZEROCOPY_COPIED,
+ ee_info=1,
+ ee_data=1}}
+ ]}, MSG_ERRQUEUE) = 0
diff --git a/tools/testing/selftests/net/rxtimestamp.c b/tools/testing/selftests/net/rxtimestamp.c
index 9eb42570294d..16ac4df55fdb 100644
--- a/tools/testing/selftests/net/rxtimestamp.c
+++ b/tools/testing/selftests/net/rxtimestamp.c
@@ -57,6 +57,8 @@ static struct sof_flag sof_flags[] = {
SOF_FLAG(SOF_TIMESTAMPING_SOFTWARE),
SOF_FLAG(SOF_TIMESTAMPING_RX_SOFTWARE),
SOF_FLAG(SOF_TIMESTAMPING_RX_HARDWARE),
+ SOF_FLAG(SOF_TIMESTAMPING_OPT_RX_FILTER),
+ SOF_FLAG(SOF_TIMESTAMPING_RAW_HARDWARE),
};

static struct socket_type socket_types[] = {
@@ -98,6 +100,22 @@ static struct test_case test_cases[] = {
{}
},
{
+ { .so_timestamping = SOF_TIMESTAMPING_RAW_HARDWARE
+ | SOF_TIMESTAMPING_OPT_RX_FILTER },
+ {}
+ },
+ {
+ { .so_timestamping = SOF_TIMESTAMPING_SOFTWARE
+ | SOF_TIMESTAMPING_OPT_RX_FILTER },
+ {}
+ },
+ {
+ { .so_timestamping = SOF_TIMESTAMPING_SOFTWARE
+ | SOF_TIMESTAMPING_RX_SOFTWARE
+ | SOF_TIMESTAMPING_OPT_RX_FILTER },
+ { .swtstamp = true }
+ },
+ {
{ .so_timestamping = SOF_TIMESTAMPING_SOFTWARE
| SOF_TIMESTAMPING_RX_SOFTWARE },
{ .swtstamp = true }
diff --git a/tools/testing/selftests/net/xfrm_policy_add_speed.sh b/tools/testing/selftests/net/xfrm_policy_add_speed.sh
new file mode 100755
index 000000000000..2fab29d3cb91
--- /dev/null
+++ b/tools/testing/selftests/net/xfrm_policy_add_speed.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+source lib.sh
+
+timeout=4m
+ret=0
+tmp=$(mktemp)
+cleanup() {
+ cleanup_all_ns
+ rm -f "$tmp"
+}
+
+trap cleanup EXIT
+
+maxpolicies=100000
+[ "$KSFT_MACHINE_SLOW" = "yes" ] && maxpolicies=10000
+
+do_dummies4() {
+ local dir="$1"
+ local max="$2"
+
+ local policies
+ local pfx
+ pfx=30
+ policies=0
+
+ ip netns exec "$ns" ip xfrm policy flush
+
+ for i in $(seq 1 100);do
+ local s
+ local d
+ for j in $(seq 1 255);do
+ s=$((i+0))
+ d=$((i+100))
+
+ for a in $(seq 1 8 255); do
+ policies=$((policies+1))
+ [ "$policies" -gt "$max" ] && return
+ echo xfrm policy add src 10.$s.$j.0/30 dst 10.$d.$j.$a/$pfx dir $dir action block
+ done
+ for a in $(seq 1 8 255); do
+ policies=$((policies+1))
+ [ "$policies" -gt "$max" ] && return
+ echo xfrm policy add src 10.$s.$j.$a/30 dst 10.$d.$j.0/$pfx dir $dir action block
+ done
+ done
+ done
+}
+
+setup_ns ns
+
+do_bench()
+{
+ local max="$1"
+
+ start=$(date +%s%3N)
+ do_dummies4 "out" "$max" > "$tmp"
+ if ! timeout "$timeout" ip netns exec "$ns" ip -batch "$tmp";then
+ echo "WARNING: policy insertion cancelled after $timeout"
+ ret=1
+ fi
+ stop=$(date +%s%3N)
+
+ result=$((stop-start))
+
+ policies=$(wc -l < "$tmp")
+ printf "Inserted %-06s policies in $result ms\n" $policies
+
+ have=$(ip netns exec "$ns" ip xfrm policy show | grep "action block" | wc -l)
+ if [ "$have" -ne "$policies" ]; then
+ echo "WARNING: mismatch, have $have policies, expected $policies"
+ ret=1
+ fi
+}
+
+p=100
+while [ $p -le "$maxpolicies" ]; do
+ do_bench "$p"
+ p="${p}0"
+done
+
+exit $ret
diff --git a/tools/testing/selftests/riscv/mm/mmap_bottomup.c b/tools/testing/selftests/riscv/mm/mmap_bottomup.c
index 7f7d3eb8b9c9..f9ccae50349b 100644
--- a/tools/testing/selftests/riscv/mm/mmap_bottomup.c
+++ b/tools/testing/selftests/riscv/mm/mmap_bottomup.c
@@ -7,8 +7,6 @@
TEST(infinite_rlimit)
{
EXPECT_EQ(BOTTOM_UP, memory_layout());
-
- TEST_MMAPS;
}

TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/riscv/mm/mmap_default.c b/tools/testing/selftests/riscv/mm/mmap_default.c
index 2ba3ec990006..3f53b6ecc326 100644
--- a/tools/testing/selftests/riscv/mm/mmap_default.c
+++ b/tools/testing/selftests/riscv/mm/mmap_default.c
@@ -7,8 +7,6 @@
TEST(default_rlimit)
{
EXPECT_EQ(TOP_DOWN, memory_layout());
-
- TEST_MMAPS;
}

TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/riscv/mm/mmap_test.h b/tools/testing/selftests/riscv/mm/mmap_test.h
index 3b29ca3bb3d4..75918d15919f 100644
--- a/tools/testing/selftests/riscv/mm/mmap_test.h
+++ b/tools/testing/selftests/riscv/mm/mmap_test.h
@@ -10,76 +10,9 @@
#define TOP_DOWN 0
#define BOTTOM_UP 1

-#if __riscv_xlen == 64
-uint64_t random_addresses[] = {
- 0x19764f0d73b3a9f0, 0x016049584cecef59, 0x3580bdd3562f4acd,
- 0x1164219f20b17da0, 0x07d97fcb40ff2373, 0x76ec528921272ee7,
- 0x4dd48c38a3de3f70, 0x2e11415055f6997d, 0x14b43334ac476c02,
- 0x375a60795aff19f6, 0x47f3051725b8ee1a, 0x4e697cf240494a9f,
- 0x456b59b5c2f9e9d1, 0x101724379d63cb96, 0x7fe9ad31619528c1,
- 0x2f417247c495c2ea, 0x329a5a5b82943a5e, 0x06d7a9d6adcd3827,
- 0x327b0b9ee37f62d5, 0x17c7b1851dfd9b76, 0x006ebb6456ec2cd9,
- 0x00836cd14146a134, 0x00e5c4dcde7126db, 0x004c29feadf75753,
- 0x00d8b20149ed930c, 0x00d71574c269387a, 0x0006ebe4a82acb7a,
- 0x0016135df51f471b, 0x00758bdb55455160, 0x00d0bdd949b13b32,
- 0x00ecea01e7c5f54b, 0x00e37b071b9948b1, 0x0011fdd00ff57ab3,
- 0x00e407294b52f5ea, 0x00567748c200ed20, 0x000d073084651046,
- 0x00ac896f4365463c, 0x00eb0d49a0b26216, 0x0066a2564a982a31,
- 0x002e0d20237784ae, 0x0000554ff8a77a76, 0x00006ce07a54c012,
- 0x000009570516d799, 0x00000954ca15b84d, 0x0000684f0d453379,
- 0x00002ae5816302b5, 0x0000042403fb54bf, 0x00004bad7392bf30,
- 0x00003e73bfa4b5e3, 0x00005442c29978e0, 0x00002803f11286b6,
- 0x000073875d745fc6, 0x00007cede9cb8240, 0x000027df84cc6a4f,
- 0x00006d7e0e74242a, 0x00004afd0b836e02, 0x000047d0e837cd82,
- 0x00003b42405efeda, 0x00001531bafa4c95, 0x00007172cae34ac4,
-};
-#else
-uint32_t random_addresses[] = {
- 0x8dc302e0, 0x929ab1e0, 0xb47683ba, 0xea519c73, 0xa19f1c90, 0xc49ba213,
- 0x8f57c625, 0xadfe5137, 0x874d4d95, 0xaa20f09d, 0xcf21ebfc, 0xda7737f1,
- 0xcedf392a, 0x83026c14, 0xccedca52, 0xc6ccf826, 0xe0cd9415, 0x997472ca,
- 0xa21a44c1, 0xe82196f5, 0xa23fd66b, 0xc28d5590, 0xd009cdce, 0xcf0be646,
- 0x8fc8c7ff, 0xe2a85984, 0xa3d3236b, 0x89a0619d, 0xc03db924, 0xb5d4cc1b,
- 0xb96ee04c, 0xd191da48, 0xb432a000, 0xaa2bebbc, 0xa2fcb289, 0xb0cca89b,
- 0xb0c18d6a, 0x88f58deb, 0xa4d42d1c, 0xe4d74e86, 0x99902b09, 0x8f786d31,
- 0xbec5e381, 0x9a727e65, 0xa9a65040, 0xa880d789, 0x8f1b335e, 0xfc821c1e,
- 0x97e34be4, 0xbbef84ed, 0xf447d197, 0xfd7ceee2, 0xe632348d, 0xee4590f4,
- 0x958992a5, 0xd57e05d6, 0xfd240970, 0xc5b0dcff, 0xd96da2c2, 0xa7ae041d,
-};
-#endif
-
-// Only works on 64 bit
-#if __riscv_xlen == 64
#define PROT (PROT_READ | PROT_WRITE)
#define FLAGS (MAP_PRIVATE | MAP_ANONYMOUS)

-/* mmap must return a value that doesn't use more bits than the hint address. */
-static inline unsigned long get_max_value(unsigned long input)
-{
- unsigned long max_bit = (1UL << (((sizeof(unsigned long) * 8) - 1 -
- __builtin_clzl(input))));
-
- return max_bit + (max_bit - 1);
-}
-
-#define TEST_MMAPS \
- ({ \
- void *mmap_addr; \
- for (int i = 0; i < ARRAY_SIZE(random_addresses); i++) { \
- mmap_addr = mmap((void *)random_addresses[i], \
- 5 * sizeof(int), PROT, FLAGS, 0, 0); \
- EXPECT_NE(MAP_FAILED, mmap_addr); \
- EXPECT_GE((void *)get_max_value(random_addresses[i]), \
- mmap_addr); \
- mmap_addr = mmap((void *)random_addresses[i], \
- 5 * sizeof(int), PROT, FLAGS, 0, 0); \
- EXPECT_NE(MAP_FAILED, mmap_addr); \
- EXPECT_GE((void *)get_max_value(random_addresses[i]), \
- mmap_addr); \
- } \
- })
-#endif /* __riscv_xlen == 64 */
-
static inline int memory_layout(void)
{
void *value1 = mmap(NULL, sizeof(int), PROT, FLAGS, 0, 0);