510 files changed, 22539 insertions, 7225 deletions
diff --git a/.mailmap b/.mailmap
@@ -384,6 +384,7 @@ Li Yang <[email protected]> <[email protected]>
 Li Yang <[email protected]> <[email protected]>
 Lior David <[email protected]> <[email protected]>
 Lorenzo Pieralisi <[email protected]> <[email protected]>
+Lorenzo Stoakes <[email protected]> <[email protected]>
 Luca Ceresoli <[email protected]> <[email protected]>
 Lukasz Luba <[email protected]> <[email protected]>
 Luo Jie <[email protected]> <[email protected]>
diff --git a/CREDITS b/CREDITS
@@ -3150,9 +3150,11 @@ S: Triftstra=DFe 55
 S: 13353 Berlin
 S: Germany
 
-N: Gustavo Pimental
+N: Gustavo Pimentel
 D: PCI driver for Synopsys DesignWare
+D: Synopsys DesignWare eDMA driver
+D: Synopsys DesignWare xData traffic generator
 
 N: Emanuel Pirker
diff --git a/Documentation/arch/riscv/cmodx.rst b/Documentation/arch/riscv/cmodx.rst
index 1c0ca06b6c97..8c48bcff3df9 100644
--- a/Documentation/arch/riscv/cmodx.rst
+++ b/Documentation/arch/riscv/cmodx.rst
@@ -62,10 +62,10 @@ cmodx.c::
 	printf("Value before cmodx: %d\n", value);
 
 	// Call prctl before first fence.i is called inside modify_instruction
-	prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX_ON, PR_RISCV_CTX_SW_FENCEI, PR_RISCV_SCOPE_PER_PROCESS);
+	prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX, PR_RISCV_CTX_SW_FENCEI_ON, PR_RISCV_SCOPE_PER_PROCESS);
 	modify_instruction();
 	// Call prctl after final fence.i is called in process
-	prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX_OFF, PR_RISCV_CTX_SW_FENCEI, PR_RISCV_SCOPE_PER_PROCESS);
+	prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX, PR_RISCV_CTX_SW_FENCEI_OFF, PR_RISCV_SCOPE_PER_PROCESS);
 	value = get_value();
 	printf("Value after cmodx: %d\n", value);
diff --git a/Documentation/devicetree/bindings/cache/qcom,llcc.yaml b/Documentation/devicetree/bindings/cache/qcom,llcc.yaml
index 07ccbda4a0ab..b9a9f2cf32a1 100644
--- a/Documentation/devicetree/bindings/cache/qcom,llcc.yaml
+++ b/Documentation/devicetree/bindings/cache/qcom,llcc.yaml
@@ -66,7 +66,6 @@ allOf:
         compatible:
           contains:
             enum:
-              - qcom,qdu1000-llcc
              - qcom,sc7180-llcc
              - qcom,sm6350-llcc
    then:
@@ -104,6 +103,7 @@ allOf:
         compatible:
           contains:
             enum:
+              - qcom,qdu1000-llcc
              - qcom,sc8180x-llcc
              - qcom,sc8280xp-llcc
              - qcom,x1e80100-llcc
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml
index 41d023797d7d..8675d7d0215c 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml
@@ -17,6 +17,7 @@ description: |
 properties:
   compatible:
     enum:
+      - pci17cb,1101 # QCA6390
      - pci17cb,1103 # WCN6855
 
   reg:
@@ -28,10 +29,55 @@ properties:
      string to uniquely identify variant of the calibration data for designs
      with colliding bus and device ids
 
+  vddrfacmn-supply:
+    description: VDD_RFA_CMN supply regulator handle
+
+  vddaon-supply:
+    description: VDD_AON supply regulator handle
+
+  vddwlcx-supply:
+    description: VDD_WL_CX supply regulator handle
+
+  vddwlmx-supply:
+    description: VDD_WL_MX supply regulator handle
+
+  vddrfa0p8-supply:
+    description: VDD_RFA_0P8 supply regulator handle
+
+  vddrfa1p2-supply:
+    description: VDD_RFA_1P2 supply regulator handle
+
+  vddrfa1p7-supply:
+    description: VDD_RFA_1P7 supply regulator handle
+
+  vddpcie0p9-supply:
+    description: VDD_PCIE_0P9 supply regulator handle
+
+  vddpcie1p8-supply:
+    description: VDD_PCIE_1P8 supply regulator handle
+
 required:
   - compatible
   - reg
 
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: pci17cb,1101
+    then:
+      required:
+        - vddrfacmn-supply
+        - vddaon-supply
+        - vddwlcx-supply
+        - vddwlmx-supply
+        - vddrfa0p8-supply
+        - vddrfa1p2-supply
+        - vddrfa1p7-supply
+        - vddpcie0p9-supply
+        - vddpcie1p8-supply
+
 additionalProperties: false
 
 examples:
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath12k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath12k.yaml
new file mode 100644
index 000000000000..1b5884015b15
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath12k.yaml
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (c) 2024 Linaro Limited
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/wireless/qcom,ath12k.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies ath12k wireless devices (PCIe)
+
+maintainers:
+  - Jeff Johnson <[email protected]>
+  - Kalle Valo <[email protected]>
+
+description:
+  Qualcomm Technologies IEEE 802.11be PCIe devices.
+
+properties:
+  compatible:
+    enum:
+      - pci17cb,1107 # WCN7850
+
+  reg:
+    maxItems: 1
+
+  vddaon-supply:
+    description: VDD_AON supply regulator handle
+
+  vddwlcx-supply:
+    description: VDD_WLCX supply regulator handle
+
+  vddwlmx-supply:
+    description: VDD_WLMX supply regulator handle
+
+  vddrfacmn-supply:
+    description: VDD_RFA_CMN supply regulator handle
+
+  vddrfa0p8-supply:
+    description: VDD_RFA_0P8 supply regulator handle
+
+  vddrfa1p2-supply:
+    description: VDD_RFA_1P2 supply regulator handle
+
+  vddrfa1p8-supply:
+    description: VDD_RFA_1P8 supply regulator handle
+
+  vddpcie0p9-supply:
+    description: VDD_PCIE_0P9 supply regulator handle
+
+  vddpcie1p8-supply:
+    description: VDD_PCIE_1P8 supply regulator handle
+
+required:
+  - compatible
+  - reg
+  - vddaon-supply
+  - vddwlcx-supply
+  - vddwlmx-supply
+  - vddrfacmn-supply
+  - vddrfa0p8-supply
+  - vddrfa1p2-supply
+  - vddrfa1p8-supply
+  - vddpcie0p9-supply
+  - vddpcie1p8-supply
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/qcom,rpmh.h>
+    #include <dt-bindings/gpio/gpio.h>
+
+    pcie {
+        #address-cells = <3>;
+        #size-cells = <2>;
+
+        pcie@0 {
+            device_type = "pci";
+            reg = <0x0 0x0 0x0 0x0 0x0>;
+            #address-cells = <3>;
+            #size-cells = <2>;
+            ranges;
+
+            bus-range = <0x01 0xff>;
+
+            wifi@0 {
+                compatible = "pci17cb,1107";
+                reg = <0x10000 0x0 0x0 0x0 0x0>;
+
+                vddaon-supply = <&vreg_pmu_aon_0p59>;
+                vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
+                vddwlmx-supply = <&vreg_pmu_wlmx_0p85>;
+                vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
+                vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
+                vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
+                vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>;
+                vddpcie0p9-supply = <&vreg_pmu_pcie_0p9>;
+                vddpcie1p8-supply = <&vreg_pmu_pcie_1p8>;
+            };
+        };
+    };
diff --git a/Documentation/networking/devlink/devlink-region.rst b/Documentation/networking/devlink/devlink-region.rst
index 9232cd7da301..5d0b68f752c0 100644
--- a/Documentation/networking/devlink/devlink-region.rst
+++ b/Documentation/networking/devlink/devlink-region.rst
@@ -49,7 +49,7 @@ example usage
     $ devlink region show [ DEV/REGION ]
     $ devlink region del DEV/REGION snapshot SNAPSHOT_ID
     $ devlink region dump DEV/REGION [ snapshot SNAPSHOT_ID ]
-    $ devlink region read DEV/REGION [ snapshot SNAPSHOT_ID ] address ADDRESS length length
+    $ devlink region read DEV/REGION [ snapshot SNAPSHOT_ID ] address ADDRESS length LENGTH
 
     # Show all of the exposed regions with region sizes:
     $ devlink region show
diff --git a/MAINTAINERS b/MAINTAINERS
index e0f28278e504..5adfbc01bd2e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2892,7 +2892,7 @@ F: drivers/edac/altera_edac.[ch]
 ARM/SPREADTRUM SoC SUPPORT
 M:	Orson Zhai <[email protected]>
 M:	Baolin Wang <[email protected]>
-M:	Chunyan Zhang <[email protected]>
+R:	Chunyan Zhang <[email protected]>
 S:	Maintained
 F:	arch/arm64/boot/dts/sprd
 N:	sprd
@@ -6239,9 +6239,8 @@ S: Maintained
 F:	drivers/usb/dwc3/
 
 DESIGNWARE XDATA IP DRIVER
-M:	Gustavo Pimentel <[email protected]>
-S:	Maintained
+S:	Orphan
 F:	Documentation/misc-devices/dw-xdata-pcie.rst
 F:	drivers/misc/dw-xdata-pcie.c
@@ -8834,6 +8833,7 @@ F: drivers/spi/spi-fsl-qspi.c
 
 FREESCALE QUICC ENGINE LIBRARY
 M:	Qiang Zhao <[email protected]>
+M:	Christophe Leroy <[email protected]>
 S:	Maintained
 F:	drivers/soc/fsl/qe/
@@ -8883,9 +8883,10 @@ S: Maintained
 F:	drivers/tty/serial/ucc_uart.c
 
 FREESCALE SOC DRIVERS
+M:	Christophe Leroy <[email protected]>
 L:	[email protected] (moderated for non-subscribers)
-S:	Orphan
+S:	Maintained
 F:	Documentation/devicetree/bindings/misc/fsl,dpaa2-console.yaml
 F:	Documentation/devicetree/bindings/soc/fsl/
 F:	drivers/soc/fsl/
@@ -14475,7 +14476,7 @@ MEMORY MAPPING
 M:	Andrew Morton <[email protected]>
 R:	Liam R. Howlett <[email protected]>
 R:	Vlastimil Babka <[email protected]>
-R:	Lorenzo Stoakes <[email protected]>
+R:	Lorenzo Stoakes <[email protected]>
 S:	Maintained
 W:	http://www.linux-mm.org
@@ -15768,8 +15769,13 @@ F: include/linux/tcp.h
 F:	include/net/tcp.h
 F:	include/trace/events/tcp.h
 F:	include/uapi/linux/tcp.h
+F:	net/ipv4/inet_connection_sock.c
+F:	net/ipv4/inet_hashtables.c
+F:	net/ipv4/inet_timewait_sock.c
 F:	net/ipv4/syncookies.c
 F:	net/ipv4/tcp*.c
+F:	net/ipv6/inet6_connection_sock.c
+F:	net/ipv6/inet6_hashtables.c
 F:	net/ipv6/syncookies.c
 F:	net/ipv6/tcp*.c
@@ -16448,7 +16454,7 @@ F: arch/arm/boot/dts/ti/omap/am335x-nano.dts
 OMAP1 SUPPORT
 M:	Aaro Koskinen <[email protected]>
 M:	Janusz Krzysztofik <[email protected]>
-M:	Tony Lindgren <[email protected]>
+R:	Tony Lindgren <[email protected]>
 S:	Maintained
 Q:	http://patchwork.kernel.org/project/linux-omap/list/
@@ -16460,10 +16466,13 @@ F: include/linux/platform_data/ams-delta-fiq.h
 F:	include/linux/platform_data/i2c-omap.h
 
 OMAP2+ SUPPORT
+M:	Aaro Koskinen <[email protected]>
+M:	Andreas Kemnade <[email protected]>
+M:	Kevin Hilman <[email protected]>
+M:	Roger Quadros <[email protected]>
 M:	Tony Lindgren <[email protected]>
 S:	Maintained
-W:	http://www.muru.com/linux/omap/
 W:	http://linux.omap.com/
 Q:	http://patchwork.kernel.org/project/linux-omap/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap.git
diff --git a/Makefile b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Baby Opossum Posse
 
 # *DOCUMENTATION*
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
index 8aa39db095d7..2c5155bd376b 100644
--- a/arch/arm/mach-davinci/pm.c
+++ b/arch/arm/mach-davinci/pm.c
@@ -61,7 +61,7 @@ static void davinci_pm_suspend(void)
 
 	/* Configure sleep count in deep sleep register */
 	val = __raw_readl(pm_config.deepsleep_reg);
-	val &= ~DEEPSLEEP_SLEEPCOUNT_MASK,
+	val &= ~DEEPSLEEP_SLEEPCOUNT_MASK;
 	val |= pm_config.sleepcount;
 	__raw_writel(val, pm_config.deepsleep_reg);
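A note on the mach-davinci fix above: the original statement ended in a comma rather than a semicolon, so the two assignments were glued into a single expression by the C comma operator. Both side effects still executed, which is why the bug was latent rather than visible. A minimal user-space sketch (the mask value below is illustrative, not the real register layout):

#include <stdio.h>

#define DEEPSLEEP_SLEEPCOUNT_MASK 0xffff	/* illustrative, not the real mask */

int main(void)
{
	unsigned int val = 0xdeadbeef, sleepcount = 0x42;

	/* Comma operator: one statement, but both side effects happen,
	 * so this prints the same result as the fixed form below. */
	val &= ~DEEPSLEEP_SLEEPCOUNT_MASK, val |= sleepcount;
	printf("with comma:     %#x\n", val);	/* 0xdead0042 */

	/* The intended two statements. The hazard with the comma form is
	 * contexts like an unbraced `if (cond)`, where converting the comma
	 * to a semicolon would silently unguard the second clause. */
	val = 0xdeadbeef;
	val &= ~DEEPSLEEP_SLEEPCOUNT_MASK;
	val |= sleepcount;
	printf("with semicolon: %#x\n", val);	/* 0xdead0042 */

	return 0;
}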
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h64-remix-mini-pc.dts b/arch/arm64/boot/dts/allwinner/sun50i-h64-remix-mini-pc.dts
index c204dd43c726..ce90327e1b2e 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h64-remix-mini-pc.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h64-remix-mini-pc.dts
@@ -191,7 +191,7 @@
 		compatible = "x-powers,axp803";
 		reg = <0x3a3>;
 		interrupt-parent = <&r_intc>;
-		interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_LOW>;
+		interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
 		x-powers,drive-vbus-en;
 
 		vin1-supply = <&reg_vcc5v>;
diff --git a/arch/arm64/boot/dts/qcom/qdu1000.dtsi b/arch/arm64/boot/dts/qcom/qdu1000.dtsi
index f2a5e2e40461..f90f03fa6a24 100644
--- a/arch/arm64/boot/dts/qcom/qdu1000.dtsi
+++ b/arch/arm64/boot/dts/qcom/qdu1000.dtsi
@@ -1459,9 +1459,23 @@
 		system-cache-controller@19200000 {
 			compatible = "qcom,qdu1000-llcc";
-			reg = <0 0x19200000 0 0xd80000>,
+			reg = <0 0x19200000 0 0x80000>,
+			      <0 0x19300000 0 0x80000>,
+			      <0 0x19600000 0 0x80000>,
+			      <0 0x19700000 0 0x80000>,
+			      <0 0x19a00000 0 0x80000>,
+			      <0 0x19b00000 0 0x80000>,
+			      <0 0x19e00000 0 0x80000>,
+			      <0 0x19f00000 0 0x80000>,
 			      <0 0x1a200000 0 0x80000>;
 			reg-names = "llcc0_base",
+				    "llcc1_base",
+				    "llcc2_base",
+				    "llcc3_base",
+				    "llcc4_base",
+				    "llcc5_base",
+				    "llcc6_base",
+				    "llcc7_base",
 				    "llcc_broadcast_base";
 			interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
 		};
diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
index 31de73594839..1b3dc0ece54d 100644
--- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
@@ -3605,7 +3605,7 @@
 		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
 			     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
 			     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
-			     <GIC_PPI 12 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+			     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
 	};
 
 	pcie0: pcie@1c00000 {
diff --git a/arch/arm64/boot/dts/qcom/sc8180x.dtsi b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
index 067712310560..581a70c34fd2 100644
--- a/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
@@ -2647,11 +2647,14 @@
 		system-cache-controller@9200000 {
 			compatible = "qcom,sc8180x-llcc";
-			reg = <0 0x09200000 0 0x50000>, <0 0x09280000 0 0x50000>,
-			      <0 0x09300000 0 0x50000>, <0 0x09380000 0 0x50000>,
-			      <0 0x09600000 0 0x50000>;
+			reg = <0 0x09200000 0 0x58000>, <0 0x09280000 0 0x58000>,
+			      <0 0x09300000 0 0x58000>, <0 0x09380000 0 0x58000>,
+			      <0 0x09400000 0 0x58000>, <0 0x09480000 0 0x58000>,
+			      <0 0x09500000 0 0x58000>, <0 0x09580000 0 0x58000>,
+			      <0 0x09600000 0 0x58000>;
 			reg-names = "llcc0_base", "llcc1_base", "llcc2_base",
-				    "llcc3_base", "llcc_broadcast_base";
+				    "llcc3_base", "llcc4_base", "llcc5_base",
+				    "llcc6_base", "llcc7_base", "llcc_broadcast_base";
 			interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
 		};
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
index 41215567b3ae..372b35fb844f 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-crd.dts
@@ -977,8 +977,7 @@
 	reset-n-pins {
 		pins = "gpio99";
 		function = "gpio";
-		output-high;
-		drive-strength = <16>;
+		bias-disable;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
index e937732abede..4bf99b6b6e5f 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
@@ -655,15 +655,16 @@
 	status = "okay";
 
-	/* FIXME: verify */
 	touchscreen@10 {
-		compatible = "hid-over-i2c";
+		compatible = "elan,ekth5015m", "elan,ekth6915";
 		reg = <0x10>;
 
-		hid-descr-addr = <0x1>;
 		interrupts-extended = <&tlmm 175 IRQ_TYPE_LEVEL_LOW>;
+		reset-gpios = <&tlmm 99 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+		no-reset-on-power-off;
 
-		vdd-supply = <&vreg_misc_3p3>;
-		vddl-supply = <&vreg_s10b>;
+		vcc33-supply = <&vreg_misc_3p3>;
+		vccio-supply = <&vreg_misc_3p3>;
 
 		pinctrl-names = "default";
 		pinctrl-0 = <&ts0_default>;
@@ -1496,8 +1497,8 @@
 	reset-n-pins {
 		pins = "gpio99";
 		function = "gpio";
-		output-high;
-		drive-strength = <16>;
+		drive-strength = <2>;
+		bias-disable;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
index 0549ba1fbeea..59f0a850671a 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
@@ -4623,6 +4623,8 @@
 		restart@c264000 {
 			compatible = "qcom,pshold";
 			reg = <0 0x0c264000 0 0x4>;
+			/* TZ seems to block access */
+			status = "reserved";
 		};
 
 		tsens1: thermal-sensor@c265000 {
diff --git a/arch/arm64/boot/dts/qcom/sm6115.dtsi b/arch/arm64/boot/dts/qcom/sm6115.dtsi
index aca0a87092e4..9ed062150aaf 100644
--- a/arch/arm64/boot/dts/qcom/sm6115.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6115.dtsi
@@ -1090,6 +1090,7 @@
 			power-domains = <&rpmpd SM6115_VDDCX>;
 			operating-points-v2 = <&sdhc1_opp_table>;
+			iommus = <&apps_smmu 0x00c0 0x0>;
 			interconnects = <&system_noc MASTER_SDCC_1 RPM_ALWAYS_TAG
 					 &bimc SLAVE_EBI_CH0 RPM_ALWAYS_TAG>,
 					<&bimc MASTER_AMPSS_M0 RPM_ALWAYS_TAG
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
index c5c2895b37c7..be6b1e7d07ce 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-crd.dts
@@ -49,6 +49,15 @@
 		stdout-path = "serial0:115200n8";
 	};
 
+	reserved-memory {
+		linux,cma {
+			compatible = "shared-dma-pool";
+			size = <0x0 0x8000000>;
+			reusable;
+			linux,cma-default;
+		};
+	};
+
 	sound {
 		compatible = "qcom,x1e80100-sndcard";
 		model = "X1E80100-CRD";
@@ -93,7 +102,7 @@
 		};
 
 		codec {
-			sound-dai = <&wcd938x 1>, <&swr2 0>, <&lpass_txmacro 0>;
+			sound-dai = <&wcd938x 1>, <&swr2 1>, <&lpass_txmacro 0>;
 		};
 
 		platform {
@@ -744,7 +753,7 @@
 		wcd_tx: codec@0,3 {
 			compatible = "sdw20217010d00";
 			reg = <0 3>;
-			qcom,tx-port-mapping = <1 1 2 3>;
+			qcom,tx-port-mapping = <2 2 3 4>;
 		};
 	};
diff --git a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
index 2061fbe7b75a..8f67c393b871 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
+++ b/arch/arm64/boot/dts/qcom/x1e80100-qcp.dts
@@ -23,6 +23,15 @@
 		stdout-path = "serial0:115200n8";
 	};
 
+	reserved-memory {
+		linux,cma {
+			compatible = "shared-dma-pool";
+			size = <0x0 0x8000000>;
+			reusable;
+			linux,cma-default;
+		};
+	};
+
 	vph_pwr: vph-pwr-regulator {
 		compatible = "regulator-fixed";
diff --git a/arch/arm64/boot/dts/qcom/x1e80100.dtsi b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
index 5f90a0b3c016..05e4d491ec18 100644
--- a/arch/arm64/boot/dts/qcom/x1e80100.dtsi
+++ b/arch/arm64/boot/dts/qcom/x1e80100.dtsi
@@ -2737,15 +2737,17 @@
 			device_type = "pci";
 			compatible = "qcom,pcie-x1e80100";
 			reg = <0 0x01bf8000 0 0x3000>,
-			      <0 0x70000000 0 0xf1d>,
-			      <0 0x70000f20 0 0xa8>,
+			      <0 0x70000000 0 0xf20>,
+			      <0 0x70000f40 0 0xa8>,
 			      <0 0x70001000 0 0x1000>,
-			      <0 0x70100000 0 0x100000>;
+			      <0 0x70100000 0 0x100000>,
+			      <0 0x01bfb000 0 0x1000>;
 			reg-names = "parf",
 				    "dbi",
 				    "elbi",
 				    "atu",
-				    "config";
+				    "config",
+				    "mhi";
 			#address-cells = <3>;
 			#size-cells = <2>;
 			ranges = <0x01000000 0 0x00000000 0 0x70200000 0 0x100000>,
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 57a9abe78ee4..2c7bf4da0b80 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1036,6 +1036,7 @@ CONFIG_SND_AUDIO_GRAPH_CARD2=m
 CONFIG_HID_MULTITOUCH=m
 CONFIG_I2C_HID_ACPI=m
 CONFIG_I2C_HID_OF=m
+CONFIG_I2C_HID_OF_ELAN=m
 CONFIG_USB=y
 CONFIG_USB_OTG=y
 CONFIG_USB_XHCI_HCD=y
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 751331f5ba90..dd0bb069df4b 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -2147,7 +2147,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
 	emit(A64_STR64I(A64_R(20), A64_SP, regs_off + 8), ctx);
 
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
-		emit_addr_mov_i64(A64_R(0), (const u64)im, ctx);
+		emit_a64_mov_i64(A64_R(0), (const u64)im, ctx);
 		emit_call((const u64)__bpf_tramp_enter, ctx);
 	}
@@ -2191,7 +2191,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
 
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
 		im->ip_epilogue = ctx->ro_image + ctx->idx;
-		emit_addr_mov_i64(A64_R(0), (const u64)im, ctx);
+		emit_a64_mov_i64(A64_R(0), (const u64)im, ctx);
 		emit_call((const u64)__bpf_tramp_exit, ctx);
 	}
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index d1030bc52564..d283d281d28e 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -849,6 +849,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
 {
 	struct eeh_dev *edev;
 	struct pci_dev *pdev;
+	struct pci_bus *bus = NULL;
 
 	if (pe->type & EEH_PE_PHB)
 		return pe->phb->bus;
@@ -859,9 +860,11 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
 
 	/* Retrieve the parent PCI bus of first (top) PCI device */
 	edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry);
+	pci_lock_rescan_remove();
 	pdev = eeh_dev_to_pci_dev(edev);
 	if (pdev)
-		return pdev->bus;
+		bus = pdev->bus;
+	pci_unlock_rescan_remove();
 
-	return NULL;
+	return bus;
 }
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 4690c219bfa4..63432a33ec49 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -647,8 +647,9 @@ __after_prom_start:
 * Note: This process overwrites the OF exception vectors.
 */
 	LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET)
-	mr.	r4,r26			/* In some cases the loader may  */
-	beq	9f			/* have already put us at zero */
+	mr	r4,r26			/* Load the virtual source address into r4 */
+	cmpld	r3,r4			/* Check if source == dest */
+	beq	9f			/* If so skip the copy */
 	li	r6,0x100		/* Start offset, the first 0x100 */
 					/* bytes were copied earlier.	 */
diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c
index 85050be08a23..72b12bc10f90 100644
--- a/arch/powerpc/kexec/core_64.c
+++ b/arch/powerpc/kexec/core_64.c
@@ -27,6 +27,7 @@
 #include <asm/paca.h>
 #include <asm/mmu.h>
 #include <asm/sections.h>	/* _end */
+#include <asm/setup.h>
 #include <asm/smp.h>
 #include <asm/hw_breakpoint.h>
 #include <asm/svm.h>
@@ -317,6 +318,16 @@ void default_machine_kexec(struct kimage *image)
 	if (!kdump_in_progress())
 		kexec_prepare_cpus();
 
+#ifdef CONFIG_PPC_PSERIES
+	/*
+	 * This must be done after other CPUs have shut down, otherwise they
+	 * could execute the 'scv' instruction, which is not supported with
+	 * reloc disabled (see configure_exceptions()).
+	 */
+	if (firmware_has_feature(FW_FEATURE_SET_MODE))
+		pseries_disable_reloc_on_exc();
+#endif
+
 	printk("kexec: Starting switchover sequence.\n");
 
 	/* switch to a staticly allocated stack. Based on irq stack code.
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 096d09ed89f6..431be156ca9b 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -61,11 +61,3 @@ void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
 	} else
 		xics_kexec_teardown_cpu(secondary);
 }
-
-void pseries_machine_kexec(struct kimage *image)
-{
-	if (firmware_has_feature(FW_FEATURE_SET_MODE))
-		pseries_disable_reloc_on_exc();
-
-	default_machine_kexec(image);
-}
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index bba4ad192b0f..3968a6970fa8 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -38,7 +38,6 @@ static inline void smp_init_pseries(void) { }
 #endif
 
 extern void pseries_kexec_cpu_down(int crash_shutdown, int secondary);
-void pseries_machine_kexec(struct kimage *image);
 
 extern void pSeries_final_fixup(void);
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 284a6fa04b0c..b10a25325238 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -343,8 +343,8 @@ static int alloc_dispatch_log_kmem_cache(void)
 {
 	void (*ctor)(void *) = get_dtl_cache_ctor();
 
-	dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
-					DISPATCH_LOG_BYTES, 0, ctor);
+	dtl_cache = kmem_cache_create_usercopy("dtl", DISPATCH_LOG_BYTES,
+						DISPATCH_LOG_BYTES, 0, 0, DISPATCH_LOG_BYTES, ctor);
 	if (!dtl_cache) {
 		pr_warn("Failed to create dispatch trace log buffer cache\n");
 		pr_warn("Stolen time statistics will be unreliable\n");
@@ -1159,7 +1159,6 @@ define_machine(pseries) {
 	.machine_check_exception = pSeries_machine_check_exception,
 	.machine_check_log_err	= pSeries_machine_check_log_err,
 #ifdef CONFIG_KEXEC_CORE
-	.machine_kexec          = pseries_machine_kexec,
 	.kexec_cpu_down         = pseries_kexec_cpu_down,
 #endif
 #ifdef CONFIG_MEMORY_HOTPLUG
diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c
index ed9cad20c039..3c830a6f7ef4 100644
--- a/arch/riscv/kernel/machine_kexec.c
+++ b/arch/riscv/kernel/machine_kexec.c
@@ -121,20 +121,12 @@ static void machine_kexec_mask_interrupts(void)
 
 	for_each_irq_desc(i, desc) {
 		struct irq_chip *chip;
-		int ret;
 
 		chip = irq_desc_get_chip(desc);
 		if (!chip)
 			continue;
 
-		/*
-		 * First try to remove the active state. If this
-		 * fails, try to EOI the interrupt.
-		 */
-		ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
-
-		if (ret && irqd_irq_inprogress(&desc->irq_data) &&
-		    chip->irq_eoi)
+		if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
 			chip->irq_eoi(&desc->irq_data);
 
 		if (chip->irq_mask)
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 0d3f00eb0bae..10e311b2759d 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -32,6 +32,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
 			     bool (*fn)(void *, unsigned long), void *arg)
 {
 	unsigned long fp, sp, pc;
+	int graph_idx = 0;
 	int level = 0;
 
 	if (regs) {
@@ -68,7 +69,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
 			pc = regs->ra;
 		} else {
 			fp = frame->fp;
-			pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
+			pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
 						   &frame->ra);
 			if (pc == (unsigned long)ret_from_exception) {
 				if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c
index 04db1f993c47..bcf41d6e0df0 100644
--- a/arch/riscv/kvm/vcpu_pmu.c
+++ b/arch/riscv/kvm/vcpu_pmu.c
@@ -327,7 +327,7 @@ static long kvm_pmu_create_perf_event(struct kvm_pmc *pmc, struct perf_event_att
 	event = perf_event_create_kernel_counter(attr, -1, current, kvm_riscv_pmu_overflow, pmc);
 	if (IS_ERR(event)) {
-		pr_err("kvm pmu event creation failed for eidx %lx: %ld\n", eidx, PTR_ERR(event));
+		pr_debug("kvm pmu event creation failed for eidx %lx: %ld\n", eidx, PTR_ERR(event));
 		return PTR_ERR(event);
 	}
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 95990461888f..9281063636a7 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -427,6 +427,7 @@ struct kvm_vcpu_stat {
 	u64 instruction_io_other;
 	u64 instruction_lpsw;
 	u64 instruction_lpswe;
+	u64 instruction_lpswey;
 	u64 instruction_pfmf;
 	u64 instruction_ptff;
 	u64 instruction_sck;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 82e9631cd9ef..54b5b2565df8 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -132,6 +132,7 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
 	STATS_DESC_COUNTER(VCPU, instruction_io_other),
 	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
 	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
+	STATS_DESC_COUNTER(VCPU, instruction_lpswey),
 	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
 	STATS_DESC_COUNTER(VCPU, instruction_ptff),
 	STATS_DESC_COUNTER(VCPU, instruction_sck),
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 111eb5c74784..bf8534218af3 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -138,6 +138,21 @@ static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
 	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
 }
 
+static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
+{
+	u32 base1 = vcpu->arch.sie_block->ipb >> 28;
+	s64 disp1;
+
+	/* The displacement is a 20bit _SIGNED_ value */
+	disp1 = sign_extend64(((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
+			      ((vcpu->arch.sie_block->ipb & 0xff00) << 4), 19);
+
+	if (ar)
+		*ar = base1;
+
+	return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
+}
+
 static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
 					      u64 *address1, u64 *address2,
 					      u8 *ar_b1, u8 *ar_b2)
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 1be19cc9d73c..1a49b89706f8 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -797,6 +797,36 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static int handle_lpswey(struct kvm_vcpu *vcpu)
+{
+	psw_t new_psw;
+	u64 addr;
+	int rc;
+	u8 ar;
+
+	vcpu->stat.instruction_lpswey++;
+
+	if (!test_kvm_facility(vcpu->kvm, 193))
+		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+	addr = kvm_s390_get_base_disp_siy(vcpu, &ar);
+	if (addr & 7)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
+	if (rc)
+		return kvm_s390_inject_prog_cond(vcpu, rc);
+
+	vcpu->arch.sie_block->gpsw = new_psw;
+	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+	return 0;
+}
+
 static int handle_stidp(struct kvm_vcpu *vcpu)
 {
 	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
@@ -1462,6 +1492,8 @@ int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
 	case 0x61:
 	case 0x62:
 		return handle_ri(vcpu);
+	case 0x71:
+		return handle_lpswey(vcpu);
 	default:
 		return -EOPNOTSUPP;
 	}
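A note on kvm_s390_get_base_disp_siy() above: SIY-format instructions carry a 20-bit signed displacement split into a low 12-bit DL field and a high 8-bit DH field, which the helper reassembles from the instruction image in the SIE block and sign-extends. A user-space sketch of the same arithmetic (the ipb value is a made-up encoding, and sign_extend64() is reimplemented here to match the kernel helper's semantics):

#include <stdint.h>
#include <stdio.h>

/* Equivalent of the kernel's sign_extend64(): `index` is the sign bit. */
static int64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;

	return (int64_t)(value << shift) >> shift;
}

int main(void)
{
	uint32_t ipb = 0x0fffff00;	/* made-up encoding: DL = 0xfff, DH = 0xff */
	int64_t disp = sign_extend64(((ipb & 0x0fff0000) >> 16) +
				     ((ipb & 0xff00) << 4), 19);

	/* DL | (DH << 12) = 0xfffff; sign-extended at bit 19 that is -1 */
	printf("disp = %lld\n", (long long)disp);
	return 0;
}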
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index abb629d7e131..7e3e767ab87d 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -55,6 +55,8 @@ unsigned long *crst_table_alloc(struct mm_struct *mm)
 
 void crst_table_free(struct mm_struct *mm, unsigned long *table)
 {
+	if (!table)
+		return;
 	pagetable_free(virt_to_ptdesc(table));
 }
@@ -262,6 +264,8 @@ static unsigned long *base_crst_alloc(unsigned long val)
 
 static void base_crst_free(unsigned long *table)
 {
+	if (!table)
+		return;
 	pagetable_free(virt_to_ptdesc(table));
 }
diff --git a/arch/xtensa/include/asm/current.h b/arch/xtensa/include/asm/current.h
index 08010dbf5e09..df275d554788 100644
--- a/arch/xtensa/include/asm/current.h
+++ b/arch/xtensa/include/asm/current.h
@@ -19,7 +19,7 @@
 
 struct task_struct;
 
-static inline struct task_struct *get_current(void)
+static __always_inline struct task_struct *get_current(void)
 {
 	return current_thread_info()->task;
 }
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index 326db1c1d5d8..e0dffcc43b9e 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -91,7 +91,7 @@ struct thread_info {
 }
 
 /* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
+static __always_inline struct thread_info *current_thread_info(void)
 {
 	struct thread_info *ti;
 	__asm__("extui %0, a1, 0, "__stringify(CURRENT_SHIFT)"\n\t"
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index bd6a7857ce05..831fa4a12159 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -16,7 +16,6 @@
 #include <linux/acpi.h>
 #include <linux/dmi.h>
 #include <linux/sched.h>	/* need_resched() */
-#include <linux/sort.h>
 #include <linux/tick.h>
 #include <linux/cpuidle.h>
 #include <linux/cpu.h>
@@ -386,25 +385,24 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
 }
 
-static int acpi_cst_latency_cmp(const void *a, const void *b)
+static void acpi_cst_latency_sort(struct acpi_processor_cx *states, size_t length)
 {
-	const struct acpi_processor_cx *x = a, *y = b;
+	int i, j, k;
 
-	if (!(x->valid && y->valid))
-		return 0;
-	if (x->latency > y->latency)
-		return 1;
-	if (x->latency < y->latency)
-		return -1;
-	return 0;
-}
-static void acpi_cst_latency_swap(void *a, void *b, int n)
-{
-	struct acpi_processor_cx *x = a, *y = b;
+	for (i = 1; i < length; i++) {
+		if (!states[i].valid)
+			continue;
 
-	if (!(x->valid && y->valid))
-		return;
-	swap(x->latency, y->latency);
+		for (j = i - 1, k = i; j >= 0; j--) {
+			if (!states[j].valid)
+				continue;
+
+			if (states[j].latency > states[k].latency)
+				swap(states[j].latency, states[k].latency);
+
+			k = j;
+		}
+	}
 }
 
 static int acpi_processor_power_verify(struct acpi_processor *pr)
@@ -449,10 +447,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 
 	if (buggy_latency) {
 		pr_notice("FW issue: working around C-state latencies out of order\n");
-		sort(&pr->power.states[1], max_cstate,
-		     sizeof(struct acpi_processor_cx),
-		     acpi_cst_latency_cmp,
-		     acpi_cst_latency_swap);
+		acpi_cst_latency_sort(&pr->power.states[1], max_cstate);
 	}
 
 	lapic_timer_propagate_broadcast(pr);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index d51fc8321d41..da32e8ed0830 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -269,8 +269,13 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
 	if (!devp->hd_ireqfreq)
 		return -EIO;
 
-	if (count < sizeof(unsigned long))
-		return -EINVAL;
+	if (in_compat_syscall()) {
+		if (count < sizeof(compat_ulong_t))
+			return -EINVAL;
+	} else {
+		if (count < sizeof(unsigned long))
+			return -EINVAL;
+	}
 
 	add_wait_queue(&devp->hd_waitqueue, &wait);
@@ -294,9 +299,16 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
 		schedule();
 	}
 
-	retval = put_user(data, (unsigned long __user *)buf);
-	if (!retval)
-		retval = sizeof(unsigned long);
+	if (in_compat_syscall()) {
+		retval = put_user(data, (compat_ulong_t __user *)buf);
+		if (!retval)
+			retval = sizeof(compat_ulong_t);
+	} else {
+		retval = put_user(data, (unsigned long __user *)buf);
+		if (!retval)
+			retval = sizeof(unsigned long);
+	}
+
 out:
 	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&devp->hd_waitqueue, &wait);
@@ -651,12 +663,24 @@ struct compat_hpet_info {
 	unsigned short hi_timer;
 };
 
+/* 32-bit types would lead to different command codes which should be
+ * translated into 64-bit ones before passed to hpet_ioctl_common
+ */
+#define COMPAT_HPET_INFO	_IOR('h', 0x03, struct compat_hpet_info)
+#define COMPAT_HPET_IRQFREQ	_IOW('h', 0x6, compat_ulong_t)
+
 static long
 hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct hpet_info info;
 	int err;
 
+	if (cmd == COMPAT_HPET_INFO)
+		cmd = HPET_INFO;
+
+	if (cmd == COMPAT_HPET_IRQFREQ)
+		cmd = HPET_IRQFREQ;
+
 	mutex_lock(&hpet_mutex);
 	err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
 	mutex_unlock(&hpet_mutex);
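A note on the hpet compat change above: an ioctl command number encodes the size of its argument type, so HPET_IRQFREQ built against a 4-byte compat_ulong_t is a different number than the native 8-byte version; that is why hpet_compat_ioctl() must translate the command before calling hpet_ioctl_common(). A user-space sketch demonstrating the mismatch (compat_ulong_t is typedef'd locally to stand in for the kernel type, and HPET_IRQFREQ is redefined to mirror the uapi macro so the example stays self-contained):

#include <stdio.h>
#include <sys/ioctl.h>

typedef unsigned int compat_ulong_t;	/* a 32-bit userland's unsigned long */

#define HPET_IRQFREQ		_IOW('h', 0x6, unsigned long)	/* native 64-bit */
#define COMPAT_HPET_IRQFREQ	_IOW('h', 0x6, compat_ulong_t)	/* 32-bit caller */

int main(void)
{
	/* Same type 'h' and nr 0x6, but different size fields: on x86-64
	 * this prints e.g. 0x40086806 (size 8) vs 0x40046806 (size 4). */
	printf("native: %#lx\ncompat: %#lx\n",
	       (unsigned long)HPET_IRQFREQ, (unsigned long)COMPAT_HPET_IRQFREQ);
	return 0;
}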
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 4c695b0388f3..9bb142c75243 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -16,8 +16,8 @@ tpm-y += eventlog/common.o
 tpm-y += eventlog/tpm1.o
 tpm-y += eventlog/tpm2.o
 tpm-y += tpm-buf.o
+tpm-y += tpm2-sessions.o
 
-tpm-$(CONFIG_TCG_TPM2_HMAC) += tpm2-sessions.o
 tpm-$(CONFIG_ACPI) += tpm_ppi.o eventlog/acpi.o
 tpm-$(CONFIG_EFI) += eventlog/efi.o
 tpm-$(CONFIG_OF) += eventlog/of.o
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
index 907ac9956a78..2281d55df545 100644
--- a/drivers/char/tpm/tpm2-sessions.c
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -83,9 +83,6 @@
 #define AES_KEY_BYTES	AES_KEYSIZE_128
 #define AES_KEY_BITS	(AES_KEY_BYTES*8)
 
-static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
-			       u32 *handle, u8 *name);
-
 /*
 * This is the structure that carries all the auth information (like
 * session handle, nonces, session key and auth) from use to use it is
@@ -148,6 +145,7 @@ struct tpm2_auth {
 	u8 name[AUTH_MAX_NAMES][2 + SHA512_DIGEST_SIZE];
 };
 
+#ifdef CONFIG_TCG_TPM2_HMAC
 /*
 * Name Size based on TPM algorithm (assumes no hash bigger than 255)
 */
@@ -163,6 +161,226 @@ static u8 name_size(const u8 *name)
 	return size_map[alg] + 2;
 }
 
+static int tpm2_parse_read_public(char *name, struct tpm_buf *buf)
+{
+	struct tpm_header *head = (struct tpm_header *)buf->data;
+	off_t offset = TPM_HEADER_SIZE;
+	u32 tot_len = be32_to_cpu(head->length);
+	u32 val;
+
+	/* we're starting after the header so adjust the length */
+	tot_len -= TPM_HEADER_SIZE;
+
+	/* skip public */
+	val = tpm_buf_read_u16(buf, &offset);
+	if (val > tot_len)
+		return -EINVAL;
+	offset += val;
+	/* name */
+	val = tpm_buf_read_u16(buf, &offset);
+	if (val != name_size(&buf->data[offset]))
+		return -EINVAL;
+	memcpy(name, &buf->data[offset], val);
+	/* forget the rest */
+	return 0;
+}
+
+static int tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name)
+{
+	struct tpm_buf buf;
+	int rc;
+
+	rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_READ_PUBLIC);
+	if (rc)
+		return rc;
+
+	tpm_buf_append_u32(&buf, handle);
+	rc = tpm_transmit_cmd(chip, &buf, 0, "read public");
+	if (rc == TPM2_RC_SUCCESS)
+		rc = tpm2_parse_read_public(name, &buf);
+
+	tpm_buf_destroy(&buf);
+
+	return rc;
+}
+#endif /* CONFIG_TCG_TPM2_HMAC */
+
+/**
+ * tpm_buf_append_name() - add a handle area to the buffer
+ * @chip: the TPM chip structure
+ * @buf: The buffer to be appended
+ * @handle: The handle to be appended
+ * @name: The name of the handle (may be NULL)
+ *
+ * In order to compute session HMACs, we need to know the names of the
+ * objects pointed to by the handles. For most objects, this is simply
+ * the actual 4 byte handle or an empty buf (in these cases @name
+ * should be NULL) but for volatile objects, permanent objects and NV
+ * areas, the name is defined as the hash (according to the name
+ * algorithm which should be set to sha256) of the public area to
+ * which the two byte algorithm id has been appended. For these
+ * objects, the @name pointer should point to this. If a name is
+ * required but @name is NULL, then TPM2_ReadPublic() will be called
+ * on the handle to obtain the name.
+ *
+ * As with most tpm_buf operations, success is assumed because failure
+ * will be caused by an incorrect programming model and indicated by a
+ * kernel message.
+ */
+void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
+			 u32 handle, u8 *name)
+{
+#ifdef CONFIG_TCG_TPM2_HMAC
+	enum tpm2_mso_type mso = tpm2_handle_mso(handle);
+	struct tpm2_auth *auth;
+	int slot;
+#endif
+
+	if (!tpm2_chip_auth(chip)) {
+		tpm_buf_append_u32(buf, handle);
+		/* count the number of handles in the upper bits of flags */
+		buf->handles++;
+		return;
+	}
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+	slot = (tpm_buf_length(buf) - TPM_HEADER_SIZE) / 4;
+	if (slot >= AUTH_MAX_NAMES) {
+		dev_err(&chip->dev, "TPM: too many handles\n");
+		return;
+	}
+	auth = chip->auth;
+	WARN(auth->session != tpm_buf_length(buf),
+	     "name added in wrong place\n");
+	tpm_buf_append_u32(buf, handle);
+	auth->session += 4;
+
+	if (mso == TPM2_MSO_PERSISTENT ||
+	    mso == TPM2_MSO_VOLATILE ||
+	    mso == TPM2_MSO_NVRAM) {
+		if (!name)
+			tpm2_read_public(chip, handle, auth->name[slot]);
+	} else {
+		if (name)
+			dev_err(&chip->dev, "TPM: Handle does not require name but one is specified\n");
+	}
+
+	auth->name_h[slot] = handle;
+	if (name)
+		memcpy(auth->name[slot], name, name_size(name));
+#endif
+}
+EXPORT_SYMBOL_GPL(tpm_buf_append_name);
+
+/**
+ * tpm_buf_append_hmac_session() - Append a TPM session element
+ * @chip: the TPM chip structure
+ * @buf: The buffer to be appended
+ * @attributes: The session attributes
+ * @passphrase: The session authority (NULL if none)
+ * @passphrase_len: The length of the session authority (0 if none)
+ *
+ * This fills in a session structure in the TPM command buffer, except
+ * for the HMAC which cannot be computed until the command buffer is
+ * complete. The type of session is controlled by the @attributes,
+ * the main ones of which are TPM2_SA_CONTINUE_SESSION which means the
+ * session won't terminate after tpm_buf_check_hmac_response(),
+ * TPM2_SA_DECRYPT which means this buffers first parameter should be
+ * encrypted with a session key and TPM2_SA_ENCRYPT, which means the
+ * response buffer's first parameter needs to be decrypted (confusing,
+ * but the defines are written from the point of view of the TPM).
+ *
+ * Any session appended by this command must be finalized by calling
+ * tpm_buf_fill_hmac_session() otherwise the HMAC will be incorrect
+ * and the TPM will reject the command.
+ *
+ * As with most tpm_buf operations, success is assumed because failure
+ * will be caused by an incorrect programming model and indicated by a
+ * kernel message.
+ */
+void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
+				 u8 attributes, u8 *passphrase,
+				 int passphrase_len)
+{
+#ifdef CONFIG_TCG_TPM2_HMAC
+	u8 nonce[SHA256_DIGEST_SIZE];
+	struct tpm2_auth *auth;
+	u32 len;
+#endif
+
+	if (!tpm2_chip_auth(chip)) {
+		/* offset tells us where the sessions area begins */
+		int offset = buf->handles * 4 + TPM_HEADER_SIZE;
+		u32 len = 9 + passphrase_len;
+
+		if (tpm_buf_length(buf) != offset) {
+			/* not the first session so update the existing length */
+			len += get_unaligned_be32(&buf->data[offset]);
+			put_unaligned_be32(len, &buf->data[offset]);
+		} else {
+			tpm_buf_append_u32(buf, len);
+		}
+		/* auth handle */
+		tpm_buf_append_u32(buf, TPM2_RS_PW);
+		/* nonce */
+		tpm_buf_append_u16(buf, 0);
+		/* attributes */
+		tpm_buf_append_u8(buf, 0);
+		/* passphrase */
+		tpm_buf_append_u16(buf, passphrase_len);
+		tpm_buf_append(buf, passphrase, passphrase_len);
+		return;
+	}
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+	/*
+	 * The Architecture Guide requires us to strip trailing zeros
+	 * before computing the HMAC
+	 */
+	while (passphrase && passphrase_len > 0 && passphrase[passphrase_len - 1] == '\0')
+		passphrase_len--;
+
+	auth = chip->auth;
+	auth->attrs = attributes;
+	auth->passphrase_len = passphrase_len;
+	if (passphrase_len)
+		memcpy(auth->passphrase, passphrase, passphrase_len);
+
+	if (auth->session != tpm_buf_length(buf)) {
+		/* we're not the first session */
+		len = get_unaligned_be32(&buf->data[auth->session]);
+		if (4 + len + auth->session != tpm_buf_length(buf)) {
+			WARN(1, "session length mismatch, cannot append");
+			return;
+		}
+
+		/* add our new session */
+		len += 9 + 2 * SHA256_DIGEST_SIZE;
+		put_unaligned_be32(len, &buf->data[auth->session]);
+	} else {
+		tpm_buf_append_u32(buf, 9 + 2 * SHA256_DIGEST_SIZE);
+	}
+
+	/* random number for our nonce */
+	get_random_bytes(nonce, sizeof(nonce));
+	memcpy(auth->our_nonce, nonce, sizeof(nonce));
+	tpm_buf_append_u32(buf, auth->handle);
+	/* our new nonce */
+	tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE);
+	tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE);
+	tpm_buf_append_u8(buf, auth->attrs);
+	/* and put a placeholder for the hmac */
+	tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE);
+	tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE);
+#endif
+}
+EXPORT_SYMBOL_GPL(tpm_buf_append_hmac_session);
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+
+static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
+			       u32 *handle, u8 *name);
+
 /*
 * It turns out the crypto hmac(sha256) is hard for us to consume
 * because it assumes a fixed key and the TPM seems to change the key
@@ -344,82 +562,6 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip)
 }
 
 /**
- * tpm_buf_append_hmac_session() - Append a TPM session element
- * @chip: the TPM chip structure
- * @buf: The buffer to be appended
- * @attributes: The session attributes
- * @passphrase: The session authority (NULL if none)
- * @passphrase_len: The length of the session authority (0 if none)
- *
- * This fills in a session structure in the TPM command buffer, except
- * for the HMAC which cannot be computed until the command buffer is
- * complete. The type of session is controlled by the @attributes,
- * the main ones of which are TPM2_SA_CONTINUE_SESSION which means the
- * session won't terminate after tpm_buf_check_hmac_response(),
- * TPM2_SA_DECRYPT which means this buffers first parameter should be
- * encrypted with a session key and TPM2_SA_ENCRYPT, which means the
- * response buffer's first parameter needs to be decrypted (confusing,
- * but the defines are written from the point of view of the TPM).
- *
- * Any session appended by this command must be finalized by calling
- * tpm_buf_fill_hmac_session() otherwise the HMAC will be incorrect
- * and the TPM will reject the command.
- *
- * As with most tpm_buf operations, success is assumed because failure
- * will be caused by an incorrect programming model and indicated by a
- * kernel message.
- */
-void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
-				 u8 attributes, u8 *passphrase,
-				 int passphrase_len)
-{
-	u8 nonce[SHA256_DIGEST_SIZE];
-	u32 len;
-	struct tpm2_auth *auth = chip->auth;
-
-	/*
-	 * The Architecture Guide requires us to strip trailing zeros
-	 * before computing the HMAC
-	 */
-	while (passphrase && passphrase_len > 0
-	       && passphrase[passphrase_len - 1] == '\0')
-		passphrase_len--;
-
-	auth->attrs = attributes;
-	auth->passphrase_len = passphrase_len;
-	if (passphrase_len)
-		memcpy(auth->passphrase, passphrase, passphrase_len);
-
-	if (auth->session != tpm_buf_length(buf)) {
-		/* we're not the first session */
-		len = get_unaligned_be32(&buf->data[auth->session]);
-		if (4 + len + auth->session != tpm_buf_length(buf)) {
-			WARN(1, "session length mismatch, cannot append");
-			return;
-		}
-
-		/* add our new session */
-		len += 9 + 2 * SHA256_DIGEST_SIZE;
-		put_unaligned_be32(len, &buf->data[auth->session]);
-	} else {
-		tpm_buf_append_u32(buf, 9 + 2 * SHA256_DIGEST_SIZE);
-	}
-
-	/* random number for our nonce */
-	get_random_bytes(nonce, sizeof(nonce));
-	memcpy(auth->our_nonce, nonce, sizeof(nonce));
-	tpm_buf_append_u32(buf, auth->handle);
-	/* our new nonce */
-	tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE);
-	tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE);
-	tpm_buf_append_u8(buf, auth->attrs);
-	/* and put a placeholder for the hmac */
-	tpm_buf_append_u16(buf, SHA256_DIGEST_SIZE);
-	tpm_buf_append(buf, nonce, SHA256_DIGEST_SIZE);
-}
-EXPORT_SYMBOL(tpm_buf_append_hmac_session);
-
-/**
 * tpm_buf_fill_hmac_session() - finalize the session HMAC
 * @chip: the TPM chip structure
 * @buf: The buffer to be appended
@@ -449,6 +591,9 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
 	u8 cphash[SHA256_DIGEST_SIZE];
 	struct sha256_state sctx;
 
+	if (!auth)
+		return;
+
 	/* save the command code in BE format */
 	auth->ordinal = head->ordinal;
@@ -567,104 +712,6 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
 }
 EXPORT_SYMBOL(tpm_buf_fill_hmac_session);
 
-static int tpm2_parse_read_public(char *name, struct tpm_buf *buf)
-{
-	struct tpm_header *head = (struct tpm_header *)buf->data;
-	off_t offset = TPM_HEADER_SIZE;
-	u32 tot_len = be32_to_cpu(head->length);
-	u32 val;
-
-	/* we're starting after the header so adjust the length */
-	tot_len -= TPM_HEADER_SIZE;
-
-	/* skip public */
-	val = tpm_buf_read_u16(buf, &offset);
-	if (val > tot_len)
-		return -EINVAL;
-	offset += val;
-	/* name */
-	val = tpm_buf_read_u16(buf, &offset);
-	if (val != name_size(&buf->data[offset]))
-		return -EINVAL;
-	memcpy(name, &buf->data[offset], val);
-	/* forget the rest */
-	return 0;
-}
-
-static int
-tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name)
-{
-	struct tpm_buf buf;
-	int rc;
-
-	rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_READ_PUBLIC);
-	if (rc)
-		return rc;
-
-	tpm_buf_append_u32(&buf, handle);
-	rc = tpm_transmit_cmd(chip, &buf, 0, "read public");
-	if (rc == TPM2_RC_SUCCESS)
-		rc = tpm2_parse_read_public(name, &buf);
-
-	tpm_buf_destroy(&buf);
-
-	return rc;
-}
-
-/**
- * tpm_buf_append_name() - add a handle area to the buffer
- * @chip: the TPM chip structure
- * @buf: The buffer to be appended
- * @handle: The handle to be appended
- * @name: The name of the handle (may be NULL)
- *
- * In order to compute session HMACs, we need to know the names of the
- * objects pointed to by the handles. For most objects, this is simply
- * the actual 4 byte handle or an empty buf (in these cases @name
- * should be NULL) but for volatile objects, permanent objects and NV
- * areas, the name is defined as the hash (according to the name
- * algorithm which should be set to sha256) of the public area to
- * which the two byte algorithm id has been appended. For these
- * objects, the @name pointer should point to this. If a name is
- * required but @name is NULL, then TPM2_ReadPublic() will be called
- * on the handle to obtain the name.
- *
- * As with most tpm_buf operations, success is assumed because failure
- * will be caused by an incorrect programming model and indicated by a
- * kernel message.
- */
-void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
-			 u32 handle, u8 *name)
-{
-	enum tpm2_mso_type mso = tpm2_handle_mso(handle);
-	struct tpm2_auth *auth = chip->auth;
-	int slot;
-
-	slot = (tpm_buf_length(buf) - TPM_HEADER_SIZE)/4;
-	if (slot >= AUTH_MAX_NAMES) {
-		dev_err(&chip->dev, "TPM: too many handles\n");
-		return;
-	}
-	WARN(auth->session != tpm_buf_length(buf),
-	     "name added in wrong place\n");
-	tpm_buf_append_u32(buf, handle);
-	auth->session += 4;
-
-	if (mso == TPM2_MSO_PERSISTENT ||
-	    mso == TPM2_MSO_VOLATILE ||
-	    mso == TPM2_MSO_NVRAM) {
-		if (!name)
-			tpm2_read_public(chip, handle, auth->name[slot]);
-	} else {
-		if (name)
-			dev_err(&chip->dev, "TPM: Handle does not require name but one is specified\n");
-	}
-
-	auth->name_h[slot] = handle;
-	if (name)
-		memcpy(auth->name[slot], name, name_size(name));
-}
-EXPORT_SYMBOL(tpm_buf_append_name);
-
 /**
 * tpm_buf_check_hmac_response() - check the TPM return HMAC for correctness
 * @chip: the TPM chip structure
@@ -705,6 +752,9 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
 	u32 cc = be32_to_cpu(auth->ordinal);
 	int parm_len, len, i, handles;
 
+	if (!auth)
+		return rc;
+
 	if (auth->session >= TPM_HEADER_SIZE) {
 		WARN(1, "tpm session not filled correctly\n");
 		goto out;
@@ -824,8 +874,13 @@ EXPORT_SYMBOL(tpm_buf_check_hmac_response);
 */
 void tpm2_end_auth_session(struct tpm_chip *chip)
 {
-	tpm2_flush_context(chip, chip->auth->handle);
-	memzero_explicit(chip->auth, sizeof(*chip->auth));
+	struct tpm2_auth *auth = chip->auth;
+
+	if (!auth)
+		return;
+
+	tpm2_flush_context(chip, auth->handle);
+	memzero_explicit(auth, sizeof(*auth));
 }
 EXPORT_SYMBOL(tpm2_end_auth_session);
@@ -907,6 +962,11 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
 	int rc;
 	u32 null_key;
 
+	if (!auth) {
+		dev_warn_once(&chip->dev, "auth session is not active\n");
+		return 0;
+	}
+
 	rc = tpm2_load_null(chip, &null_key);
 	if (rc)
 		goto out;
@@ -1301,3 +1361,4 @@ int tpm2_sessions_init(struct tpm_chip *chip)
 
 	return rc;
 }
+#endif /* CONFIG_TCG_TPM2_HMAC */
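A note on the tpm2-sessions rework above: the point of moving tpm_buf_append_name() and tpm_buf_append_hmac_session() outside the #ifdef is that callers use one call sequence whether or not CONFIG_TCG_TPM2_HMAC is set; without HMAC support the same calls emit a plain password (TPM2_RS_PW) session. A hedged sketch of the call pattern the kernel-doc describes, in kernel context (hypothetical command choice and handle, error handling trimmed):

#include <linux/tpm.h>

/* Sketch only: issue one authenticated command against `handle`. */
static int example_authed_command(struct tpm_chip *chip, u32 handle)
{
	struct tpm_buf buf;
	int rc;

	rc = tpm2_start_auth_session(chip);	/* returns 0 when !chip->auth */
	if (rc)
		return rc;

	rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_UNSEAL);
	if (rc)
		return rc;

	/* NULL name: tpm2_read_public() is called internally if required */
	tpm_buf_append_name(chip, &buf, handle, NULL);
	tpm_buf_append_hmac_session(chip, &buf, TPM2_SA_CONTINUE_SESSION,
				    NULL, 0);	/* no passphrase */
	/* ... command parameters would be appended here ... */
	tpm_buf_fill_hmac_session(chip, &buf);	/* no-op when !chip->auth */

	rc = tpm_transmit_cmd(chip, &buf, 0, "example command");
	rc = tpm_buf_check_hmac_response(chip, &buf, rc);

	tpm_buf_destroy(&buf);
	return rc;
}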
diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
index ba504e19d420..62d876e150e1 100644
--- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
+++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
@@ -29,6 +29,7 @@ static const struct mtk_gate mfg_clks[] = {
 static const struct mtk_clk_desc mfg_desc = {
 	.clks = mfg_clks,
 	.num_clks = ARRAY_SIZE(mfg_clks),
+	.need_runtime_pm = true,
 };
 
 static const struct of_device_id of_match_clk_mt8183_mfg[] = {
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index bd37ab4d1a9b..ba1d1c495bc2 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -496,14 +496,16 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
 	}
 
-	devm_pm_runtime_enable(&pdev->dev);
-	/*
-	 * Do a pm_runtime_resume_and_get() to workaround a possible
-	 * deadlock between clk_register() and the genpd framework.
-	 */
-	r = pm_runtime_resume_and_get(&pdev->dev);
-	if (r)
-		return r;
+	if (mcd->need_runtime_pm) {
+		devm_pm_runtime_enable(&pdev->dev);
+		/*
+		 * Do a pm_runtime_resume_and_get() to workaround a possible
+		 * deadlock between clk_register() and the genpd framework.
+		 */
+		r = pm_runtime_resume_and_get(&pdev->dev);
+		if (r)
+			return r;
+	}
 
 	/* Calculate how many clk_hw_onecell_data entries to allocate */
 	num_clks = mcd->num_clks + mcd->num_composite_clks;
@@ -585,7 +587,8 @@ static int __mtk_clk_simple_probe(struct platform_device *pdev,
 		goto unregister_clks;
 	}
 
-	pm_runtime_put(&pdev->dev);
+	if (mcd->need_runtime_pm)
+		pm_runtime_put(&pdev->dev);
 
 	return r;
 
@@ -618,7 +621,8 @@ free_base:
 	if (mcd->shared_io && base)
 		iounmap(base);
 
-	pm_runtime_put(&pdev->dev);
+	if (mcd->need_runtime_pm)
+		pm_runtime_put(&pdev->dev);
 	return r;
 }
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index 22096501a60a..c17fe1c2d732 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -237,6 +237,8 @@ struct mtk_clk_desc {
 	int (*clk_notifier_func)(struct device *dev, struct clk *clk);
 	unsigned int mfg_clk_idx;
+
+	bool need_runtime_pm;
 };
 
 int mtk_clk_pdev_probe(struct platform_device *pdev);
diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
index 5f7f537e4ecb..e8632db2c542 100644
--- a/drivers/clk/qcom/apss-ipq-pll.c
+++ b/drivers/clk/qcom/apss-ipq-pll.c
@@ -70,7 +70,6 @@ static struct clk_alpha_pll ipq_pll_stromer_plus = {
 static const struct alpha_pll_config ipq5018_pll_config = {
 	.l = 0x2a,
 	.config_ctl_val = 0x4001075b,
-	.config_ctl_hi_val = 0x304,
 	.main_output_mask = BIT(0),
 	.aux_output_mask = BIT(1),
 	.early_output_mask = BIT(3),
@@ -84,7 +83,6 @@ static const struct alpha_pll_config ipq5332_pll_config = {
 	.l = 0x2d,
 	.config_ctl_val = 0x4001075b,
-	.config_ctl_hi_val = 0x304,
 	.main_output_mask = BIT(0),
 	.aux_output_mask = BIT(1),
 	.early_output_mask = BIT(3),
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index d4227909d1fe..c51647e37df8 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -2574,6 +2574,9 @@ static int clk_alpha_pll_stromer_plus_set_rate(struct clk_hw *hw,
 	regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
 		     a >> ALPHA_BITWIDTH);
 
+	regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
+			   PLL_ALPHA_EN, PLL_ALPHA_EN);
+
 	regmap_write(pll->clkr.regmap, PLL_MODE(pll), PLL_BYPASSNL);
 
 	/* Wait five micro seconds or more */
diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c
index 0a3f846695b8..f8b9a1e93bef 100644
--- a/drivers/clk/qcom/gcc-ipq9574.c
+++ b/drivers/clk/qcom/gcc-ipq9574.c
@@ -2140,9 +2140,10 @@ static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
 
 static struct clk_branch gcc_crypto_axi_clk = {
 	.halt_reg = 0x16010,
+	.halt_check = BRANCH_HALT_VOTED,
 	.clkr = {
-		.enable_reg = 0x16010,
-		.enable_mask = BIT(0),
+		.enable_reg = 0xb004,
+		.enable_mask = BIT(15),
 		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_crypto_axi_clk",
 			.parent_hws = (const struct clk_hw *[]) {
@@ -2156,9 +2157,10 @@ static struct clk_branch gcc_crypto_axi_clk = {
 
 static struct clk_branch gcc_crypto_ahb_clk = {
 	.halt_reg = 0x16014,
+	.halt_check = BRANCH_HALT_VOTED,
 	.clkr = {
-		.enable_reg = 0x16014,
-		.enable_mask = BIT(0),
+		.enable_reg = 0xb004,
+		.enable_mask = BIT(16),
 		.hw.init = &(const struct clk_init_data) {
 			.name = "gcc_crypto_ahb_clk",
 			.parent_hws = (const struct clk_hw *[]) {
diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
index cf4a7b6e0b23..0559a33faf00 100644
--- a/drivers/clk/qcom/gcc-sm6350.c
+++ b/drivers/clk/qcom/gcc-sm6350.c
@@ -100,8 +100,8 @@ static struct clk_alpha_pll gpll6 = {
 		.enable_mask = BIT(6),
 		.hw.init = &(struct clk_init_data){
 			.name = "gpll6",
-			.parent_hws = (const struct clk_hw*[]){
-				&gpll0.clkr.hw,
+			.parent_data = &(const struct clk_parent_data){
+				.fw_name = "bi_tcxo",
 			},
 			.num_parents = 1,
 			.ops = &clk_alpha_pll_fixed_fabia_ops,
@@ -124,7 +124,7 @@ static struct clk_alpha_pll_postdiv gpll6_out_even = {
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gpll6_out_even",
 		.parent_hws = (const struct clk_hw*[]){
-			&gpll0.clkr.hw,
+			&gpll6.clkr.hw,
 		},
 		.num_parents = 1,
 		.ops = &clk_alpha_pll_postdiv_fabia_ops,
@@ -139,8 +139,8 @@ static struct clk_alpha_pll gpll7 = {
 		.enable_mask = BIT(7),
 		.hw.init = &(struct clk_init_data){
 			.name = "gpll7",
-			.parent_hws = (const struct clk_hw*[]){
-				&gpll0.clkr.hw,
+			.parent_data = &(const struct clk_parent_data){
+				.fw_name = "bi_tcxo",
 			},
 			.num_parents = 1,
 			.ops = &clk_alpha_pll_fixed_fabia_ops,
diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
index ac0091b4ce24..be375ce0149c 100644
--- a/drivers/clk/sunxi-ng/ccu_common.c
+++ b/drivers/clk/sunxi-ng/ccu_common.c
@@ -132,7 +132,6 @@ static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
 
 	for (i = 0; i < desc->hw_clks->num ; i++) {
 		struct clk_hw *hw = desc->hw_clks->hws[i];
-		struct ccu_common *common = hw_to_ccu_common(hw);
 		const char *name;
 
 		if (!hw)
@@ -147,14 +146,21 @@ static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
 			pr_err("Couldn't register clock %d - %s\n", i, name);
 			goto err_clk_unreg;
 		}
+	}
+
+	for (i = 0; i < desc->num_ccu_clks; i++) {
+		struct ccu_common *cclk = desc->ccu_clks[i];
+
+		if (!cclk)
+			continue;
 
-		if (common->max_rate)
-			clk_hw_set_rate_range(hw, common->min_rate,
-					      common->max_rate);
+		if (cclk->max_rate)
+			clk_hw_set_rate_range(&cclk->hw, cclk->min_rate,
+					      cclk->max_rate);
 		else
-			WARN(common->min_rate,
+			WARN(cclk->min_rate,
 			     "No max_rate, ignoring min_rate of clock %d - %s\n",
-			     i, name);
+			     i, clk_hw_get_name(&cclk->hw));
 	}
 
 	ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 37f1cdf46d29..4ac3a35dcd98 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -890,8 +890,10 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
 		pr_warn(FW_WARN "P-state 0 is not max freq\n");
 
-	if (acpi_cpufreq_driver.set_boost)
+	if (acpi_cpufreq_driver.set_boost) {
 		set_boost(policy, acpi_cpufreq_driver.boost_enabled);
+		policy->boost_enabled = acpi_cpufreq_driver.boost_enabled;
+	}
 
 	return result;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a45aac17c20f..9e5060b27864 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1431,7 +1431,8 @@ static int cpufreq_online(unsigned int cpu)
 	}
 
 	/* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
-	policy->boost_enabled = cpufreq_boost_enabled() && policy_has_boost_freq(policy);
+	if (cpufreq_boost_enabled() && policy_has_boost_freq(policy))
+		policy->boost_enabled = true;
 
 	/*
 	 * The initialization has succeeded and the policy is online.
-static inline void cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, const u8 **data, - struct cs_dsp_coeff_parsed_coeff *blk) +static int cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, + const struct wmfw_region *region, + unsigned int pos, + struct cs_dsp_coeff_parsed_coeff *blk) { const struct wmfw_adsp_coeff_data *raw; + unsigned int data_len = le32_to_cpu(region->len); + unsigned int blk_len, blk_end_pos; const u8 *tmp; - int length; + + raw = (const struct wmfw_adsp_coeff_data *)&region->data[pos]; + if (sizeof(raw->hdr) > (data_len - pos)) + return -EOVERFLOW; + + blk_len = le32_to_cpu(raw->hdr.size); + if (blk_len > S32_MAX) + return -EOVERFLOW; + + if (blk_len > (data_len - pos - sizeof(raw->hdr))) + return -EOVERFLOW; + + blk_end_pos = pos + sizeof(raw->hdr) + blk_len; + + blk->offset = le16_to_cpu(raw->hdr.offset); + blk->mem_type = le16_to_cpu(raw->hdr.type); switch (dsp->fw_ver) { case 0: case 1: - raw = (const struct wmfw_adsp_coeff_data *)*data; - *data = *data + sizeof(raw->hdr) + le32_to_cpu(raw->hdr.size); + if (sizeof(*raw) > (data_len - pos)) + return -EOVERFLOW; - blk->offset = le16_to_cpu(raw->hdr.offset); - blk->mem_type = le16_to_cpu(raw->hdr.type); blk->name = raw->name; - blk->name_len = strlen(raw->name); + blk->name_len = strnlen(raw->name, ARRAY_SIZE(raw->name)); blk->ctl_type = le16_to_cpu(raw->ctl_type); blk->flags = le16_to_cpu(raw->flags); blk->len = le32_to_cpu(raw->len); break; default: - tmp = *data; - blk->offset = cs_dsp_coeff_parse_int(sizeof(raw->hdr.offset), &tmp); - blk->mem_type = cs_dsp_coeff_parse_int(sizeof(raw->hdr.type), &tmp); - length = cs_dsp_coeff_parse_int(sizeof(raw->hdr.size), &tmp); - blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp, + pos += sizeof(raw->hdr); + tmp = &region->data[pos]; + blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos, &blk->name); - cs_dsp_coeff_parse_string(sizeof(u8), &tmp, NULL); - cs_dsp_coeff_parse_string(sizeof(u16), &tmp, NULL); + if (!tmp) + return -EOVERFLOW; + + pos = tmp - region->data; + cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos, NULL); + if (!tmp) + return -EOVERFLOW; + + pos = tmp - region->data; + cs_dsp_coeff_parse_string(sizeof(u16), &tmp, data_len - pos, NULL); + if (!tmp) + return -EOVERFLOW; + + pos = tmp - region->data; + if (sizeof(raw->ctl_type) + sizeof(raw->flags) + sizeof(raw->len) > + (data_len - pos)) + return -EOVERFLOW; + blk->ctl_type = cs_dsp_coeff_parse_int(sizeof(raw->ctl_type), &tmp); + pos += sizeof(raw->ctl_type); blk->flags = cs_dsp_coeff_parse_int(sizeof(raw->flags), &tmp); + pos += sizeof(raw->flags); blk->len = cs_dsp_coeff_parse_int(sizeof(raw->len), &tmp); - - *data = *data + sizeof(raw->hdr) + length; break; } @@ -1224,6 +1300,8 @@ static inline void cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, const u8 **data, cs_dsp_dbg(dsp, "\tCoefficient flags: %#x\n", blk->flags); cs_dsp_dbg(dsp, "\tALSA control type: %#x\n", blk->ctl_type); cs_dsp_dbg(dsp, "\tALSA control len: %#x\n", blk->len); + + return blk_end_pos; } static int cs_dsp_check_coeff_flags(struct cs_dsp *dsp, @@ -1247,12 +1325,16 @@ static int cs_dsp_parse_coeff(struct cs_dsp *dsp, struct cs_dsp_alg_region
alg_region = {}; struct cs_dsp_coeff_parsed_alg alg_blk; struct cs_dsp_coeff_parsed_coeff coeff_blk; - const u8 *data = region->data; - int i, ret; + int i, pos, ret; + + pos = cs_dsp_coeff_parse_alg(dsp, region, &alg_blk); + if (pos < 0) + return pos; - cs_dsp_coeff_parse_alg(dsp, &data, &alg_blk); for (i = 0; i < alg_blk.ncoeff; i++) { - cs_dsp_coeff_parse_coeff(dsp, &data, &coeff_blk); + pos = cs_dsp_coeff_parse_coeff(dsp, region, pos, &coeff_blk); + if (pos < 0) + return pos; switch (coeff_blk.ctl_type) { case WMFW_CTL_TYPE_BYTES: @@ -1321,6 +1403,10 @@ static unsigned int cs_dsp_adsp1_parse_sizes(struct cs_dsp *dsp, const struct wmfw_adsp1_sizes *adsp1_sizes; adsp1_sizes = (void *)&firmware->data[pos]; + if (sizeof(*adsp1_sizes) > firmware->size - pos) { + cs_dsp_err(dsp, "%s: file truncated\n", file); + return 0; + } cs_dsp_dbg(dsp, "%s: %d DM, %d PM, %d ZM\n", file, le32_to_cpu(adsp1_sizes->dm), le32_to_cpu(adsp1_sizes->pm), @@ -1337,6 +1423,10 @@ static unsigned int cs_dsp_adsp2_parse_sizes(struct cs_dsp *dsp, const struct wmfw_adsp2_sizes *adsp2_sizes; adsp2_sizes = (void *)&firmware->data[pos]; + if (sizeof(*adsp2_sizes) > firmware->size - pos) { + cs_dsp_err(dsp, "%s: file truncated\n", file); + return 0; + } cs_dsp_dbg(dsp, "%s: %d XM, %d YM %d PM, %d ZM\n", file, le32_to_cpu(adsp2_sizes->xm), le32_to_cpu(adsp2_sizes->ym), @@ -1376,7 +1466,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, struct regmap *regmap = dsp->regmap; unsigned int pos = 0; const struct wmfw_header *header; - const struct wmfw_adsp1_sizes *adsp1_sizes; const struct wmfw_footer *footer; const struct wmfw_region *region; const struct cs_dsp_region *mem; @@ -1392,10 +1481,8 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, ret = -EINVAL; - pos = sizeof(*header) + sizeof(*adsp1_sizes) + sizeof(*footer); - if (pos >= firmware->size) { - cs_dsp_err(dsp, "%s: file too short, %zu bytes\n", - file, firmware->size); + if (sizeof(*header) >= firmware->size) { + ret = -EOVERFLOW; goto out_fw; } @@ -1423,22 +1510,36 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, pos = sizeof(*header); pos = dsp->ops->parse_sizes(dsp, file, pos, firmware); + if ((pos == 0) || (sizeof(*footer) > firmware->size - pos)) { + ret = -EOVERFLOW; + goto out_fw; + } footer = (void *)&firmware->data[pos]; pos += sizeof(*footer); if (le32_to_cpu(header->len) != pos) { - cs_dsp_err(dsp, "%s: unexpected header length %d\n", - file, le32_to_cpu(header->len)); + ret = -EOVERFLOW; goto out_fw; } cs_dsp_dbg(dsp, "%s: timestamp %llu\n", file, le64_to_cpu(footer->timestamp)); - while (pos < firmware->size && - sizeof(*region) < firmware->size - pos) { + while (pos < firmware->size) { + /* Is there enough data for a complete block header? 
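This loop and its counterpart in cs_dsp_load_coeff() further down now share one defensive pattern: prove that a whole block header fits before dereferencing it, then prove that the payload length it declares fits in the bytes that remain. The skeleton, stripped of driver detail (types elided; a sketch of the pattern, not the driver code):

    while (pos < fw_size) {
            if (sizeof(*region) > fw_size - pos)
                    return -EOVERFLOW;      /* no room for a header */

            region = (const void *)&data[pos];

            if (le32_to_cpu(region->len) > fw_size - pos - sizeof(*region))
                    return -EOVERFLOW;      /* payload overruns the file */

            /* ... consume the block ... */
            pos += sizeof(*region) + le32_to_cpu(region->len);
    }

Because both subtractions happen under pos < fw_size, the unsigned arithmetic cannot wrap; the older loop condition of the form sizeof(*region) < firmware->size - pos was both easy to get wrong and silent about truncation, which is why the rework turns short files into an explicit -EOVERFLOW.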
*/ + if (sizeof(*region) > firmware->size - pos) { + ret = -EOVERFLOW; + goto out_fw; + } + region = (void *)&(firmware->data[pos]); + + if (le32_to_cpu(region->len) > firmware->size - pos - sizeof(*region)) { + ret = -EOVERFLOW; + goto out_fw; + } + region_name = "Unknown"; reg = 0; text = NULL; @@ -1495,16 +1596,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, regions, le32_to_cpu(region->len), offset, region_name); - if (le32_to_cpu(region->len) > - firmware->size - pos - sizeof(*region)) { - cs_dsp_err(dsp, - "%s.%d: %s region len %d bytes exceeds file length %zu\n", - file, regions, region_name, - le32_to_cpu(region->len), firmware->size); - ret = -EINVAL; - goto out_fw; - } - if (text) { memcpy(text, region->data, le32_to_cpu(region->len)); cs_dsp_info(dsp, "%s: %s\n", file, text); @@ -1555,6 +1646,9 @@ out_fw: cs_dsp_buf_free(&buf_list); kfree(text); + if (ret == -EOVERFLOW) + cs_dsp_err(dsp, "%s: file content overflows file data\n", file); + return ret; } @@ -2122,10 +2216,20 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware pos = le32_to_cpu(hdr->len); blocks = 0; - while (pos < firmware->size && - sizeof(*blk) < firmware->size - pos) { + while (pos < firmware->size) { + /* Is there enough data for a complete block header? */ + if (sizeof(*blk) > firmware->size - pos) { + ret = -EOVERFLOW; + goto out_fw; + } + blk = (void *)(&firmware->data[pos]); + if (le32_to_cpu(blk->len) > firmware->size - pos - sizeof(*blk)) { + ret = -EOVERFLOW; + goto out_fw; + } + type = le16_to_cpu(blk->type); offset = le16_to_cpu(blk->offset); version = le32_to_cpu(blk->ver) >> 8; @@ -2222,17 +2326,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware } if (reg) { - if (le32_to_cpu(blk->len) > - firmware->size - pos - sizeof(*blk)) { - cs_dsp_err(dsp, - "%s.%d: %s region len %d bytes exceeds file length %zu\n", - file, blocks, region_name, - le32_to_cpu(blk->len), - firmware->size); - ret = -EINVAL; - goto out_fw; - } - buf = cs_dsp_buf_alloc(blk->data, le32_to_cpu(blk->len), &buf_list); @@ -2272,6 +2365,10 @@ out_fw: regmap_async_complete(regmap); cs_dsp_buf_free(&buf_list); kfree(text); + + if (ret == -EOVERFLOW) + cs_dsp_err(dsp, "%s: file content overflows file data\n", file); + return ret; } diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c index 880ffcb50088..921f61507ae8 100644 --- a/drivers/firmware/sysfb.c +++ b/drivers/firmware/sysfb.c @@ -101,8 +101,10 @@ static __init struct device *sysfb_parent_dev(const struct screen_info *si) if (IS_ERR(pdev)) { return ERR_CAST(pdev); } else if (pdev) { - if (!sysfb_pci_dev_is_enabled(pdev)) + if (!sysfb_pci_dev_is_enabled(pdev)) { + pci_dev_put(pdev); return ERR_PTR(-ENODEV); + } return &pdev->dev; } @@ -137,7 +139,7 @@ static __init int sysfb_init(void) if (compatible) { pd = sysfb_create_simplefb(si, &mode, parent); if (!IS_ERR(pd)) - goto unlock_mutex; + goto put_device; } /* if the FB is incompatible, create a legacy framebuffer device */ @@ -155,7 +157,7 @@ static __init int sysfb_init(void) pd = platform_device_alloc(name, 0); if (!pd) { ret = -ENOMEM; - goto unlock_mutex; + goto put_device; } pd->dev.parent = parent; @@ -170,9 +172,11 @@ static __init int sysfb_init(void) if (ret) goto err; - goto unlock_mutex; + goto put_device; err: platform_device_put(pd); +put_device: + put_device(parent); unlock_mutex: mutex_unlock(&disable_lock); return ret; diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c index 
71e1af7c2184..d89e78f0ead3 100644 --- a/drivers/gpio/gpio-mmio.c +++ b/drivers/gpio/gpio-mmio.c @@ -619,8 +619,6 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev, ret = gpiochip_get_ngpios(gc, dev); if (ret) gc->ngpio = gc->bgpio_bits; - else - gc->bgpio_bits = roundup_pow_of_two(round_up(gc->ngpio, 8)); ret = bgpio_setup_io(gc, dat, set, clr, flags); if (ret) diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index d75f6ee37028..89d5e64cf68b 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -203,6 +203,24 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np, */ { "qi,lb60", "rb-gpios", true }, #endif +#if IS_ENABLED(CONFIG_PCI_LANTIQ) + /* + * According to the PCI specification, the RST# pin is an + * active-low signal. However, most of the device trees that + * have been widely used for a long time incorrectly describe + * reset GPIO as active-high, and were also using wrong name + * for the property. + */ + { "lantiq,pci-xway", "gpio-reset", false }, +#endif +#if IS_ENABLED(CONFIG_TOUCHSCREEN_TSC2005) + /* + * DTS for Nokia N900 incorrectly specified "active high" + * polarity for the reset line, while the chip actually + * treats it as "active low". + */ + { "ti,tsc2005", "reset-gpios", false }, +#endif }; unsigned int i; @@ -504,9 +522,9 @@ static struct gpio_desc *of_find_gpio_rename(struct device_node *np, { "reset", "reset-n-io", "marvell,nfc-uart" }, { "reset", "reset-n-io", "mrvl,nfc-uart" }, #endif -#if !IS_ENABLED(CONFIG_PCI_LANTIQ) +#if IS_ENABLED(CONFIG_PCI_LANTIQ) /* MIPS Lantiq PCI */ - { "reset", "gpios-reset", "lantiq,pci-xway" }, + { "reset", "gpio-reset", "lantiq,pci-xway" }, #endif /* diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index e9ac20bed0f2..3cdcadd41be1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -10048,6 +10048,7 @@ skip_modeset: } /* Update Freesync settings. 
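The FreeSync hunks that follow do two related things: they reset the per-CRTC config before recomputing it, and, for eDP panels whose base EDID lacks range limits, they recover min/max vertical refresh from a DisplayID extension block (tag 0x25 with a 9-byte payload, as matched by the parse_edid_displayid_vrr() helper added below). The byte layout that parser relies on, with illustrative values:

    /* j indexes into the DisplayID extension block (sketch): */
    min_vfreq = edid_ext[j + 9];                   /* e.g. 0x30 -> 48 Hz  */
    if (edid_ext[j + 1] & 7)                       /* extended max range  */
            max_vfreq = edid_ext[j + 10] + ((edid_ext[j + 11] & 3) << 8);
    else
            max_vfreq = edid_ext[j + 10];          /* e.g. 0x90 -> 144 Hz */

Separately, the continuous-frequency path now requires max_vfreq - min_vfreq > 10 before declaring the sink FreeSync-capable, so a nominally dynamic but nearly fixed-rate panel no longer advertises variable refresh.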
*/ + reset_freesync_config_for_crtc(dm_new_crtc_state); get_freesync_config_for_crtc(dm_new_crtc_state, dm_new_conn_state); @@ -11181,6 +11182,49 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, return ret; } +static void parse_edid_displayid_vrr(struct drm_connector *connector, + struct edid *edid) +{ + u8 *edid_ext = NULL; + int i; + int j = 0; + u16 min_vfreq; + u16 max_vfreq; + + if (edid == NULL || edid->extensions == 0) + return; + + /* Find DisplayID extension */ + for (i = 0; i < edid->extensions; i++) { + edid_ext = (void *)(edid + (i + 1)); + if (edid_ext[0] == DISPLAYID_EXT) + break; + } + + if (edid_ext == NULL) + return; + + while (j < EDID_LENGTH) { + /* Get dynamic video timing range from DisplayID if available */ + if (EDID_LENGTH - j > 13 && edid_ext[j] == 0x25 && + (edid_ext[j+1] & 0xFE) == 0 && (edid_ext[j+2] == 9)) { + min_vfreq = edid_ext[j+9]; + if (edid_ext[j+1] & 7) + max_vfreq = edid_ext[j+10] + ((edid_ext[j+11] & 3) << 8); + else + max_vfreq = edid_ext[j+10]; + + if (max_vfreq && min_vfreq) { + connector->display_info.monitor_range.max_vfreq = max_vfreq; + connector->display_info.monitor_range.min_vfreq = min_vfreq; + + return; + } + } + j++; + } +} + static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) { @@ -11302,6 +11346,11 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (!adev->dm.freesync_module) goto update; + /* Some eDP panels only have the refresh rate range info in DisplayID */ + if ((connector->display_info.monitor_range.min_vfreq == 0 || + connector->display_info.monitor_range.max_vfreq == 0)) + parse_edid_displayid_vrr(connector, edid); + if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || sink->sink_signal == SIGNAL_TYPE_EDP)) { bool edid_check_required = false; @@ -11309,9 +11358,11 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (is_dp_capable_without_timing_msa(adev->dm.dc, amdgpu_dm_connector)) { if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) { - freesync_capable = true; amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq; amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq; + if (amdgpu_dm_connector->max_vfreq - + amdgpu_dm_connector->min_vfreq > 10) + freesync_capable = true; } else { edid_check_required = edid->version > 1 || (edid->version == 1 && diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c index 6c84b0fa40f4..0782a34689a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c @@ -3364,6 +3364,9 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l &mode_lib->vba.UrgentBurstFactorLumaPre[k], &mode_lib->vba.UrgentBurstFactorChromaPre[k], &mode_lib->vba.NotUrgentLatencyHidingPre[k]); + + v->cursor_bw_pre[k] = mode_lib->vba.NumberOfCursors[k] * mode_lib->vba.CursorWidth[k][0] * mode_lib->vba.CursorBPP[k][0] / + 8.0 / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * v->VRatioPreY[i][j][k]; } { diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index a41812598ce8..8ecc972dbffd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ 
-234,6 +234,7 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s out->round_trip_ping_latency_dcfclk_cycles = 106; out->smn_latency_us = 2; out->dispclk_dppclk_vco_speed_mhz = 3600; + out->pct_ideal_dram_bw_after_urgent_pixel_only = 65.0; break; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c index 0f8b3336e26d..cbd1c1f26b7a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c @@ -294,7 +294,7 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = (unsigned int)in_ctx->v20.dml_core_ctx.mp.DCFCLKDeepSleep * 1000; context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; - if (in_ctx->v20.dml_core_ctx.ms.support.FCLKChangeSupport[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx] == dml_fclock_change_unsupported) + if (in_ctx->v20.dml_core_ctx.ms.support.FCLKChangeSupport[0] == dml_fclock_change_unsupported) context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = false; else context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = true; diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 571691837200..09cbc3afd6d8 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -734,7 +734,7 @@ struct atom_gpio_pin_lut_v2_1 { struct atom_common_table_header table_header; /*the real number of this included in the structure is calcualted by using the (whole structure size - the header size)/size of atom_gpio_pin_lut */ - struct atom_gpio_pin_assignment gpio_pin[8]; + struct atom_gpio_pin_assignment gpio_pin[]; }; diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c index 97e579c33d84..1e200d815e1a 100644 --- a/drivers/gpu/drm/drm_fbdev_generic.c +++ b/drivers/gpu/drm/drm_fbdev_generic.c @@ -84,7 +84,8 @@ static int drm_fbdev_generic_helper_fb_probe(struct drm_fb_helper *fb_helper, sizes->surface_width, sizes->surface_height, sizes->surface_bpp); - format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); + format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, + sizes->surface_depth); buffer = drm_client_framebuffer_create(client, sizes->surface_width, sizes->surface_height, format); if (IS_ERR(buffer)) diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 2166208a961d..3860a8ce1e2d 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -420,13 +420,20 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galaxy Book 10.6"), }, .driver_data = (void *)&lcd1280x1920_rightside_up, - }, { /* Valve Steam Deck */ + }, { /* Valve Steam Deck (Jupiter) */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jupiter"), DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* Valve Steam Deck (Galileo) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galileo"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* VIOS LTH17 */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"), diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c 
b/drivers/gpu/drm/i915/display/intel_ddi.c index 3c3fc53376ce..6bff169fa8d4 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2088,6 +2088,9 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port, u32 ln0, ln1, pin_assignment; u8 width; + if (DISPLAY_VER(dev_priv) >= 14) + return; + if (!intel_encoder_is_tc(&dig_port->base) || intel_tc_port_in_tbt_alt_mode(dig_port)) return; diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 856b3ef5edb8..0c71d761d378 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -1001,6 +1001,9 @@ nouveau_connector_get_modes(struct drm_connector *connector) struct drm_display_mode *mode; mode = drm_mode_duplicate(dev, nv_connector->native_mode); + if (!mode) + return 0; + drm_mode_probed_add(connector, mode); ret = 1; } diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c index b8a84f26b3ef..b5e7b919f241 100644 --- a/drivers/gpu/drm/panthor/panthor_drv.c +++ b/drivers/gpu/drm/panthor/panthor_drv.c @@ -86,15 +86,15 @@ panthor_get_uobj_array(const struct drm_panthor_obj_array *in, u32 min_stride, int ret = 0; void *out_alloc; + if (!in->count) + return NULL; + /* User stride must be at least the minimum object size, otherwise it might * lack useful information. */ if (in->stride < min_stride) return ERR_PTR(-EINVAL); - if (!in->count) - return NULL; - out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL); if (!out_alloc) return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c index 79ffcbc41d78..9a0ff48f7061 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -459,6 +459,16 @@ struct panthor_queue { atomic64_t seqno; /** + * @last_fence: Fence of the last submitted job. + * + * We return this fence when we get an empty command stream. + * This way, we are guaranteed that all earlier jobs have completed + * when drm_sched_job::s_fence::finished without having to feed + * the CS ring buffer with a dummy job that only signals the fence. + */ + struct dma_fence *last_fence; + + /** * @in_flight_jobs: List containing all in-flight jobs. * * Used to keep track and signal panthor_job::done_fence when the @@ -829,6 +839,9 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue * panthor_kernel_bo_destroy(queue->ringbuf); panthor_kernel_bo_destroy(queue->iface.mem); + /* Release the last_fence we were holding, if any. */ + dma_fence_put(queue->fence_ctx.last_fence); + kfree(queue); } @@ -2784,9 +2797,6 @@ static void group_sync_upd_work(struct work_struct *work) spin_lock(&queue->fence_ctx.lock); list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) { - if (!job->call_info.size) - continue; - if (syncobj->seqno < job->done_fence->seqno) break; @@ -2865,11 +2875,14 @@ queue_run_job(struct drm_sched_job *sched_job) static_assert(sizeof(call_instrs) % 64 == 0, "call_instrs is not aligned on a cacheline"); - /* Stream size is zero, nothing to do => return a NULL fence and let - * drm_sched signal the parent. + /* Stream size is zero, nothing to do except making sure all previously + * submitted jobs are done before we signal the + * drm_sched_job::s_fence::finished fence. 
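The last_fence cache referenced here is what lets an empty command stream complete in order without feeding a dummy job to the ring. Its lifecycle has three touch points, collected from this patch (dma_fence_get() and dma_fence_put() are NULL-safe, so the first submission needs no special case):

    /* Empty stream: hand back the newest fence, doubly referenced. */
    if (!job->call_info.size) {
            job->done_fence = dma_fence_get(queue->fence_ctx.last_fence);
            return dma_fence_get(job->done_fence);
    }

    /* Real job submitted: the cache tracks its fence from now on. */
    dma_fence_put(queue->fence_ctx.last_fence);
    queue->fence_ctx.last_fence = dma_fence_get(job->done_fence);

    /* Queue teardown: drop the reference the cache still holds. */
    dma_fence_put(queue->fence_ctx.last_fence);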
*/ - if (!job->call_info.size) - return NULL; + if (!job->call_info.size) { + job->done_fence = dma_fence_get(queue->fence_ctx.last_fence); + return dma_fence_get(job->done_fence); + } ret = pm_runtime_resume_and_get(ptdev->base.dev); if (drm_WARN_ON(&ptdev->base, ret)) @@ -2928,6 +2941,10 @@ queue_run_job(struct drm_sched_job *sched_job) } } + /* Update the last fence. */ + dma_fence_put(queue->fence_ctx.last_fence); + queue->fence_ctx.last_fence = dma_fence_get(job->done_fence); + done_fence = dma_fence_get(job->done_fence); out_unlock: @@ -3378,10 +3395,15 @@ panthor_job_create(struct panthor_file *pfile, goto err_put_job; } - job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL); - if (!job->done_fence) { - ret = -ENOMEM; - goto err_put_job; + /* Empty command streams don't need a fence, they'll pick the one from + * the previously submitted job. + */ + if (job->call_info.size) { + job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL); + if (!job->done_fence) { + ret = -ENOMEM; + goto err_put_job; + } } ret = drm_sched_job_init(&job->base, diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 2ef201a072f1..e66a230331ee 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -642,7 +642,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev, if (r) goto error_unlock; - if (bo_va->it.start) + if (bo_va->it.start && bo_va->bo) r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource); error_unlock: diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 6396dece0db1..2427be8bc97f 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -346,6 +346,7 @@ static void ttm_bo_release(struct kref *kref) if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP) || (want_init_on_free() && (bo->ttm != NULL)) || + bo->type == ttm_bo_type_sg || !dma_resv_trylock(bo->base.resv)) { /* The BO is not idle, resurrect it for delayed destroy */ ttm_bo_flush_all_fences(bo); diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c index 577bd7043740..0443e07880a0 100644 --- a/drivers/gpu/drm/xe/xe_gt_mcr.c +++ b/drivers/gpu/drm/xe/xe_gt_mcr.c @@ -342,7 +342,7 @@ static void init_steering_oaddrm(struct xe_gt *gt) else gt->steering[OADDRM].group_target = 1; - gt->steering[DSS].instance_target = 0; /* unused */ + gt->steering[OADDRM].instance_target = 0; /* unused */ } static void init_steering_sqidi_psmi(struct xe_gt *gt) @@ -357,8 +357,8 @@ static void init_steering_sqidi_psmi(struct xe_gt *gt) static void init_steering_inst0(struct xe_gt *gt) { - gt->steering[DSS].group_target = 0; /* unused */ - gt->steering[DSS].instance_target = 0; /* unused */ + gt->steering[INSTANCE0].group_target = 0; /* unused */ + gt->steering[INSTANCE0].instance_target = 0; /* unused */ } static const struct { diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 65e5a3f4c340..198f5c2189cb 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -1334,7 +1334,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m, GFP_KERNEL, true, 0); if (IS_ERR(sa_bo)) { err = PTR_ERR(sa_bo); - goto err; + goto err_bb; } ppgtt_ofs = NUM_KERNEL_PDE + @@ -1385,7 +1385,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m, update_idx); if (IS_ERR(job)) { err = PTR_ERR(job); - goto err_bb; + goto err_sa; } /* Wait on BO move */ @@ -1434,12 +1434,12 @@ xe_migrate_update_pgtables(struct xe_migrate *m, err_job: 
xe_sched_job_put(job); +err_sa: + drm_suballoc_free(sa_bo, NULL); err_bb: if (!q) mutex_unlock(&m->job_mutex); xe_bb_free(bb, NULL); -err: - drm_suballoc_free(sa_bo, NULL); return ERR_PTR(err); } diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index a12525b3186b..f448505d5468 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ -15,7 +15,6 @@ #include <linux/ioport.h> #include <linux/delay.h> #include <linux/i2c.h> -#include <linux/timer.h> #include <linux/completion.h> #include <linux/platform_device.h> #include <linux/io.h> @@ -32,7 +31,6 @@ struct i2c_pnx_mif { int ret; /* Return value */ int mode; /* Interface mode */ struct completion complete; /* I/O completion */ - struct timer_list timer; /* Timeout */ u8 * buf; /* Data buffer */ int len; /* Length of data buffer */ int order; /* RX Bytes to order via TX */ @@ -117,24 +115,6 @@ static inline int wait_reset(struct i2c_pnx_algo_data *data) return (timeout <= 0); } -static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data) -{ - struct timer_list *timer = &alg_data->mif.timer; - unsigned long expires = msecs_to_jiffies(alg_data->timeout); - - if (expires <= 1) - expires = 2; - - del_timer_sync(timer); - - dev_dbg(&alg_data->adapter.dev, "Timer armed at %lu plus %lu jiffies.\n", - jiffies, expires); - - timer->expires = jiffies + expires; - - add_timer(timer); -} - /** * i2c_pnx_start - start a device * @slave_addr: slave address @@ -259,8 +239,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data) ~(mcntrl_afie | mcntrl_naie | mcntrl_drmie), I2C_REG_CTL(alg_data)); - del_timer_sync(&alg_data->mif.timer); - dev_dbg(&alg_data->adapter.dev, "%s(): Waking up xfer routine.\n", __func__); @@ -276,8 +254,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data) ~(mcntrl_afie | mcntrl_naie | mcntrl_drmie), I2C_REG_CTL(alg_data)); - /* Stop timer. */ - del_timer_sync(&alg_data->mif.timer); dev_dbg(&alg_data->adapter.dev, "%s(): Waking up xfer routine after zero-xfer.\n", __func__); @@ -364,8 +340,6 @@ static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data) mcntrl_drmie | mcntrl_daie); iowrite32(ctl, I2C_REG_CTL(alg_data)); - /* Kill timer. */ - del_timer_sync(&alg_data->mif.timer); complete(&alg_data->mif.complete); } } @@ -400,8 +374,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id) mcntrl_drmie); iowrite32(ctl, I2C_REG_CTL(alg_data)); - /* Stop timer, to prevent timeout. */ - del_timer_sync(&alg_data->mif.timer); complete(&alg_data->mif.complete); } else if (stat & mstatus_nai) { /* Slave did not acknowledge, generate a STOP */ @@ -419,8 +391,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id) /* Our return value. */ alg_data->mif.ret = -EIO; - /* Stop timer, to prevent timeout. 
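Every del_timer_sync() removed from this driver follows from one change: the timeout is no longer an asynchronous timer racing the interrupt handler, because i2c_pnx_xfer() now bounds the wait itself and runs the recovery path synchronously. The converted wait, in outline (alg_data->timeout is converted to jiffies at probe time and clamped to at least 2):

    time_left = wait_for_completion_timeout(&alg_data->mif.complete,
                                            alg_data->timeout);
    if (time_left == 0)
            i2c_pnx_timeout(alg_data);  /* resets the bus, sets ret = -EIO */

    rc = alg_data->mif.ret;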
*/ - del_timer_sync(&alg_data->mif.timer); complete(&alg_data->mif.complete); } else { /* @@ -453,9 +423,8 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static void i2c_pnx_timeout(struct timer_list *t) +static void i2c_pnx_timeout(struct i2c_pnx_algo_data *alg_data) { - struct i2c_pnx_algo_data *alg_data = from_timer(alg_data, t, mif.timer); u32 ctl; dev_err(&alg_data->adapter.dev, @@ -472,7 +441,6 @@ static void i2c_pnx_timeout(struct timer_list *t) iowrite32(ctl, I2C_REG_CTL(alg_data)); wait_reset(alg_data); alg_data->mif.ret = -EIO; - complete(&alg_data->mif.complete); } static inline void bus_reset_if_active(struct i2c_pnx_algo_data *alg_data) @@ -514,6 +482,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) struct i2c_msg *pmsg; int rc = 0, completed = 0, i; struct i2c_pnx_algo_data *alg_data = adap->algo_data; + unsigned long time_left; u32 stat; dev_dbg(&alg_data->adapter.dev, @@ -548,7 +517,6 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) dev_dbg(&alg_data->adapter.dev, "%s(): mode %d, %d bytes\n", __func__, alg_data->mif.mode, alg_data->mif.len); - i2c_pnx_arm_timer(alg_data); /* initialize the completion var */ init_completion(&alg_data->mif.complete); @@ -564,7 +532,10 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) break; /* Wait for completion */ - wait_for_completion(&alg_data->mif.complete); + time_left = wait_for_completion_timeout(&alg_data->mif.complete, + alg_data->timeout); + if (time_left == 0) + i2c_pnx_timeout(alg_data); if (!(rc = alg_data->mif.ret)) completed++; @@ -653,7 +624,10 @@ static int i2c_pnx_probe(struct platform_device *pdev) alg_data->adapter.algo_data = alg_data; alg_data->adapter.nr = pdev->id; - alg_data->timeout = I2C_PNX_TIMEOUT_DEFAULT; + alg_data->timeout = msecs_to_jiffies(I2C_PNX_TIMEOUT_DEFAULT); + if (alg_data->timeout <= 1) + alg_data->timeout = 2; + #ifdef CONFIG_OF alg_data->adapter.dev.of_node = of_node_get(pdev->dev.of_node); if (pdev->dev.of_node) { @@ -673,8 +647,6 @@ static int i2c_pnx_probe(struct platform_device *pdev) if (IS_ERR(alg_data->clk)) return PTR_ERR(alg_data->clk); - timer_setup(&alg_data->mif.timer, i2c_pnx_timeout, 0); - snprintf(alg_data->adapter.name, sizeof(alg_data->adapter.name), "%s", pdev->name); diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c index 16de57846bd9..2e84776f4fbd 100644 --- a/drivers/iio/industrialio-trigger.c +++ b/drivers/iio/industrialio-trigger.c @@ -315,7 +315,7 @@ int iio_trigger_attach_poll_func(struct iio_trigger *trig, * this is the case if the IIO device and the trigger device share the * same parent device. 
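The iio_trigger fix just below is a polarity bug: iio_validate_own_trigger() follows the usual kernel convention of returning 0 on success (here, "the trigger belongs to this device") and a negative errno otherwise, so the flag must be set on the negated result:

    /* 0 == trigger and IIO device share a parent -> it is our own. */
    if (!iio_validate_own_trigger(pf->indio_dev, trig))
            trig->attached_own_device = true;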
*/ - if (iio_validate_own_trigger(pf->indio_dev, trig)) + if (!iio_validate_own_trigger(pf->indio_dev, trig)) trig->attached_own_device = true; return ret; diff --git a/drivers/iio/light/apds9306.c b/drivers/iio/light/apds9306.c index d6627b3e6000..66a063ea3db4 100644 --- a/drivers/iio/light/apds9306.c +++ b/drivers/iio/light/apds9306.c @@ -583,8 +583,8 @@ static int apds9306_intg_time_set(struct apds9306_data *data, int val2) return ret; intg_old = iio_gts_find_int_time_by_sel(&data->gts, intg_time_idx); - if (ret < 0) - return ret; + if (intg_old < 0) + return intg_old; if (intg_old == val2) return 0; diff --git a/drivers/md/dm-vdo/dm-vdo-target.c b/drivers/md/dm-vdo/dm-vdo-target.c index b423bec6458b..9d51f72a9d66 100644 --- a/drivers/md/dm-vdo/dm-vdo-target.c +++ b/drivers/md/dm-vdo/dm-vdo-target.c @@ -945,7 +945,7 @@ static void vdo_io_hints(struct dm_target *ti, struct queue_limits *limits) * The value is used by dm-thin to determine whether to pass down discards. The block layer * splits large discards on this boundary when this is set. */ - limits->max_discard_sectors = + limits->max_hw_discard_sectors = (vdo->device_config->max_discard_blocks * VDO_SECTORS_PER_BLOCK); /* diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 4c67e2c5a82e..a7a2bcedb37e 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -1238,6 +1238,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl, struct fastrpc_phy_page pages[1]; char *name; int err; + bool scm_done = false; struct { int pgid; u32 namelen; @@ -1289,6 +1290,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl, fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err); goto err_map; } + scm_done = true; } } @@ -1320,10 +1322,11 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl, goto err_invoke; kfree(args); + kfree(name); return 0; err_invoke: - if (fl->cctx->vmcount) { + if (fl->cctx->vmcount && scm_done) { u64 src_perms = 0; struct qcom_scm_vmperm dst_perms; u32 i; @@ -1693,16 +1696,20 @@ static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr { struct fastrpc_invoke_args args[2] = { 0 }; - /* Capability filled in userspace */ + /* + * Capability filled in userspace. This carries the information + * about the remoteproc support which is fetched from the remoteproc + * sysfs node by userspace. 
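The subtle part of the fastrpc hunk below is unit bookkeeping: dsp_attr_buf is an array of u32 capability slots whose element 0 is reserved for userspace, so the count handed around is in elements while the remote buffer descriptor needs bytes. Spelled out (FASTRPC_MAX_DSP_ATTRIBUTES is the element count the caller now passes):

    u32 dsp_attr_buf[FASTRPC_MAX_DSP_ATTRIBUTES];       /* u32 slots */
    u32 dsp_attr_buf_len = FASTRPC_MAX_DSP_ATTRIBUTES;  /* elements  */

    dsp_attr_buf[0] = 0;       /* slot 0 is filled in by userspace   */
    dsp_attr_buf_len -= 1;     /* the DSP only sees slots 1..N-1     */

    args[1].ptr    = (u64)(uintptr_t)&dsp_attr_buf[1];
    args[1].length = dsp_attr_buf_len * sizeof(u32);    /* bytes     */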
+ */ dsp_attr_buf[0] = 0; + dsp_attr_buf_len -= 1; args[0].ptr = (u64)(uintptr_t)&dsp_attr_buf_len; args[0].length = sizeof(dsp_attr_buf_len); args[0].fd = -1; args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1]; - args[1].length = dsp_attr_buf_len; + args[1].length = dsp_attr_buf_len * sizeof(u32); args[1].fd = -1; - fl->pd = USER_PD; return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE, FASTRPC_SCALARS(0, 1, 1), args); @@ -1730,7 +1737,7 @@ static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap, if (!dsp_attributes) return -ENOMEM; - err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN); + err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES); if (err == DSP_UNSUPPORTED_API) { dev_info(&cctx->rpdev->dev, "Warning: DSP capabilities not supported on domain: %d\n", domain); @@ -1783,7 +1790,7 @@ static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp) if (err) return err; - if (copy_to_user(argp, &cap.capability, sizeof(cap.capability))) + if (copy_to_user(argp, &cap, sizeof(cap))) return -EFAULT; return 0; @@ -2080,6 +2087,16 @@ err_invoke: return err; } +static int is_attach_rejected(struct fastrpc_user *fl) +{ + /* Check if the device node is non-secure */ + if (!fl->is_secure_dev) { + dev_dbg(&fl->cctx->rpdev->dev, "untrusted app trying to attach to privileged DSP PD\n"); + return -EACCES; + } + return 0; +} + static long fastrpc_device_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -2092,13 +2109,19 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd, err = fastrpc_invoke(fl, argp); break; case FASTRPC_IOCTL_INIT_ATTACH: - err = fastrpc_init_attach(fl, ROOT_PD); + err = is_attach_rejected(fl); + if (!err) + err = fastrpc_init_attach(fl, ROOT_PD); break; case FASTRPC_IOCTL_INIT_ATTACH_SNS: - err = fastrpc_init_attach(fl, SENSORS_PD); + err = is_attach_rejected(fl); + if (!err) + err = fastrpc_init_attach(fl, SENSORS_PD); break; case FASTRPC_IOCTL_INIT_CREATE_STATIC: - err = fastrpc_init_create_static_process(fl, argp); + err = is_attach_rejected(fl); + if (!err) + err = fastrpc_init_create_static_process(fl, argp); break; case FASTRPC_IOCTL_INIT_CREATE: err = fastrpc_init_create_process(fl, argp); diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c index 16695cb5e69c..7c3d8bedf90b 100644 --- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c +++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c @@ -153,7 +153,6 @@ static int pci1xxxx_eeprom_read(void *priv_t, unsigned int off, buf[byte] = readl(rb + MMAP_EEPROM_OFFSET(EEPROM_DATA_REG)); } - ret = byte; error: release_sys_lock(priv); return ret; @@ -197,7 +196,6 @@ static int pci1xxxx_eeprom_write(void *priv_t, unsigned int off, goto error; } } - ret = byte; error: release_sys_lock(priv); return ret; @@ -258,7 +256,6 @@ static int pci1xxxx_otp_read(void *priv_t, unsigned int off, buf[byte] = readl(rb + MMAP_OTP_OFFSET(OTP_RD_DATA_OFFSET)); } - ret = byte; error: release_sys_lock(priv); return ret; @@ -315,7 +312,6 @@ static int pci1xxxx_otp_write(void *priv_t, unsigned int off, goto error; } } - ret = byte; error: release_sys_lock(priv); return ret; diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c index 1ec65d87488a..d02f6e881139 100644 --- a/drivers/misc/mei/platform-vsc.c +++ b/drivers/misc/mei/platform-vsc.c @@ -28,8 +28,8 @@ #define MEI_VSC_MAX_MSG_SIZE 512 -#define MEI_VSC_POLL_DELAY_US (50 * 
USEC_PER_MSEC) -#define MEI_VSC_POLL_TIMEOUT_US (200 * USEC_PER_MSEC) +#define MEI_VSC_POLL_DELAY_US (100 * USEC_PER_MSEC) +#define MEI_VSC_POLL_TIMEOUT_US (400 * USEC_PER_MSEC) #define mei_dev_to_vsc_hw(dev) ((struct mei_vsc_hw *)((dev)->hw)) diff --git a/drivers/misc/mei/vsc-fw-loader.c b/drivers/misc/mei/vsc-fw-loader.c index 596a9d695dfc..084d0205f97d 100644 --- a/drivers/misc/mei/vsc-fw-loader.c +++ b/drivers/misc/mei/vsc-fw-loader.c @@ -204,7 +204,7 @@ struct vsc_img_frag { /** * struct vsc_fw_loader - represent vsc firmware loader - * @dev: device used to request fimware + * @dev: device used to request firmware * @tp: transport layer used with the firmware loader * @csi: CSI image * @ace: ACE image diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c index e6a98dba8a73..1618cca9a731 100644 --- a/drivers/misc/mei/vsc-tp.c +++ b/drivers/misc/mei/vsc-tp.c @@ -331,12 +331,12 @@ int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len) return ret; } - ret = vsc_tp_dev_xfer(tp, tp->tx_buf, tp->rx_buf, len); + ret = vsc_tp_dev_xfer(tp, tp->tx_buf, ibuf ? tp->rx_buf : NULL, len); if (ret) return ret; if (ibuf) - cpu_to_be32_array(ibuf, tp->rx_buf, words); + be32_to_cpu_array(ibuf, tp->rx_buf, words); return ret; } @@ -568,6 +568,19 @@ static void vsc_tp_remove(struct spi_device *spi) free_irq(spi->irq, tp); } +static void vsc_tp_shutdown(struct spi_device *spi) +{ + struct vsc_tp *tp = spi_get_drvdata(spi); + + platform_device_unregister(tp->pdev); + + mutex_destroy(&tp->mutex); + + vsc_tp_reset(tp); + + free_irq(spi->irq, tp); +} + static const struct acpi_device_id vsc_tp_acpi_ids[] = { { "INTC1009" }, /* Raptor Lake */ { "INTC1058" }, /* Tiger Lake */ @@ -580,6 +593,7 @@ MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids); static struct spi_driver vsc_tp_driver = { .probe = vsc_tp_probe, .remove = vsc_tp_remove, + .shutdown = vsc_tp_shutdown, .driver = { .name = "vsc-tp", .acpi_match_table = vsc_tp_acpi_ids, diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index d7427894e0bc..c302eb380e42 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -224,6 +224,9 @@ static void davinci_fifo_data_trans(struct mmc_davinci_host *host, } p = sgm->addr; + if (n > sgm->length) + n = sgm->length; + /* NOTE: we never transfer more than rw_threshold bytes * to/from the fifo here; there's no I/O overlap. * This also assumes that access width( i.e. ACCWD) is 4 bytes diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 112584aa0772..fbf7a91bed35 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -4727,6 +4727,21 @@ int sdhci_setup_host(struct sdhci_host *host) if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) { host->max_adma = 65532; /* 32-bit alignment */ mmc->max_seg_size = 65535; + /* + * sdhci_adma_table_pre() expects to define 1 DMA + * descriptor per segment, so the maximum segment size + * is set accordingly. SDHCI allows up to 64KiB per DMA + * descriptor (16-bit field), but some controllers do + * not support "zero means 65536" reducing the maximum + * for them to 65535. That is a problem if PAGE_SIZE is + * 64KiB because the block layer does not support + * max_seg_size < PAGE_SIZE, however + * sdhci_adma_table_pre() has a workaround to handle + * that case, and split the descriptor. Refer also + * comment in sdhci_adma_table_pre(). 
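The arithmetic behind that comment, made concrete: with the zero-length-descriptor quirk, one ADMA descriptor carries at most max_adma = 65532 bytes, while a 64KiB-page kernel forces max_seg_size up to PAGE_SIZE = 65536. A worst-case segment therefore needs two descriptors (for example 65532 + 4 bytes), a split sdhci_adma_table_pre() already knows how to perform:

    if (mmc->max_seg_size < PAGE_SIZE)     /* 65535 < 65536 on 64K pages */
            mmc->max_seg_size = PAGE_SIZE; /* let the pre-table split it */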
+ */ + if (mmc->max_seg_size < PAGE_SIZE) + mmc->max_seg_size = PAGE_SIZE; } else { mmc->max_seg_size = 65536; } diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 02f07b870f10..268949939636 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -1047,31 +1047,31 @@ static int lan9303_get_sset_count(struct dsa_switch *ds, int port, int sset) return ARRAY_SIZE(lan9303_mib); } -static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum) +static int lan9303_phy_read(struct dsa_switch *ds, int port, int regnum) { struct lan9303 *chip = ds->priv; int phy_base = chip->phy_addr_base; - if (phy == phy_base) + if (port == 0) return lan9303_virt_phy_reg_read(chip, regnum); - if (phy > phy_base + 2) + if (port > 2) return -ENODEV; - return chip->ops->phy_read(chip, phy, regnum); + return chip->ops->phy_read(chip, phy_base + port, regnum); } -static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum, +static int lan9303_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) { struct lan9303 *chip = ds->priv; int phy_base = chip->phy_addr_base; - if (phy == phy_base) + if (port == 0) return lan9303_virt_phy_reg_write(chip, regnum, val); - if (phy > phy_base + 2) + if (port > 2) return -ENODEV; - return chip->ops->phy_write(chip, phy, regnum, val); + return chip->ops->phy_write(chip, phy_base + port, regnum, val); } static int lan9303_port_enable(struct dsa_switch *ds, int port, @@ -1099,7 +1099,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port) vlan_vid_del(dsa_port_to_conduit(dp), htons(ETH_P_8021Q), port); lan9303_disable_processing_port(chip, port); - lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN); + lan9303_phy_write(ds, port, MII_BMCR, BMCR_PDOWN); } static int lan9303_port_bridge_join(struct dsa_switch *ds, int port, @@ -1374,8 +1374,6 @@ static const struct dsa_switch_ops lan9303_switch_ops = { static int lan9303_register_switch(struct lan9303 *chip) { - int base; - chip->ds = devm_kzalloc(chip->dev, sizeof(*chip->ds), GFP_KERNEL); if (!chip->ds) return -ENOMEM; @@ -1385,8 +1383,7 @@ static int lan9303_register_switch(struct lan9303 *chip) chip->ds->priv = chip; chip->ds->ops = &lan9303_switch_ops; chip->ds->phylink_mac_ops = &lan9303_phylink_mac_ops; - base = chip->phy_addr_base; - chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base); + chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1, 0); return dsa_register_switch(chip->ds); } diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c index a806dadc4196..20c6529ec135 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c @@ -1380,6 +1380,7 @@ static int bcmasp_probe(struct platform_device *pdev) dev_err(dev, "Cannot create eth interface %d\n", i); bcmasp_remove_intfs(priv); of_node_put(intf_node); + ret = -ENOMEM; goto of_put_exit; } list_add_tail(&intf->list, &priv->intfs); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 220d05e2f6fa..bb3be33c1bbd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5970,17 +5970,20 @@ bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp, struct hwrm_cfa_ntuple_filter_alloc_input *req, struct bnxt_ntuple_filter *fltr) { - struct bnxt_rss_ctx *rss_ctx, *tmp; u16 rxq = fltr->base.rxq; if (fltr->base.flags & BNXT_ACT_RSS_CTX) { - 
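From here on, the bnxt changes swap the driver's private RSS-context list (and its id bitmap) for the ethtool core's xarray, with per-context driver state carried as trailing private data sized by rxfh_priv_size. A context lookup collapses to the following idiom, as used by the rewritten bnxt_get_rss_ctx_from_index() (sketch):

    struct ethtool_rxfh_context *ctx;
    struct bnxt_rss_ctx *rss_ctx;

    ctx = xa_load(&bp->dev->ethtool->rss_ctx, index);
    if (!ctx)
            return NULL;                   /* no such context id */

    /* Driver-private data lives right behind the core structure. */
    rss_ctx = ethtool_rxfh_context_priv(ctx);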
list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) { - if (rss_ctx->index == fltr->base.fw_vnic_id) { - struct bnxt_vnic_info *vnic = &rss_ctx->vnic; + struct ethtool_rxfh_context *ctx; + struct bnxt_rss_ctx *rss_ctx; + struct bnxt_vnic_info *vnic; - req->dst_id = cpu_to_le16(vnic->fw_vnic_id); - break; - } + ctx = xa_load(&bp->dev->ethtool->rss_ctx, + fltr->base.fw_vnic_id); + if (ctx) { + rss_ctx = ethtool_rxfh_context_priv(ctx); + vnic = &rss_ctx->vnic; + + req->dst_id = cpu_to_le16(vnic->fw_vnic_id); } return; } @@ -6219,10 +6222,9 @@ static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); } -int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) +static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) { int entries; - u16 *tbl; if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; @@ -6230,22 +6232,19 @@ int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) entries = HW_HASH_INDEX_SIZE; bp->rss_indir_tbl_entries = entries; - tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL); - if (!tbl) + bp->rss_indir_tbl = + kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL); + if (!bp->rss_indir_tbl) return -ENOMEM; - if (rss_ctx) - rss_ctx->rss_indir_tbl = tbl; - else - bp->rss_indir_tbl = tbl; - return 0; } -void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) +void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, + struct ethtool_rxfh_context *rss_ctx) { u16 max_rings, max_entries, pad, i; - u16 *rss_indir_tbl; + u32 *rss_indir_tbl; if (!bp->rx_nr_rings) return; @@ -6257,7 +6256,7 @@ void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) max_entries = bnxt_get_rxfh_indir_size(bp->dev); if (rss_ctx) - rss_indir_tbl = &rss_ctx->rss_indir_tbl[0]; + rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx); else rss_indir_tbl = &bp->rss_indir_tbl[0]; @@ -6266,12 +6265,12 @@ void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) pad = bp->rss_indir_tbl_entries - max_entries; if (pad) - memset(&rss_indir_tbl[i], 0, pad * sizeof(u16)); + memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl)); } static u16 bnxt_get_max_rss_ring(struct bnxt *bp) { - u16 i, tbl_size, max_ring = 0; + u32 i, tbl_size, max_ring = 0; if (!bp->rss_indir_tbl) return 0; @@ -6323,7 +6322,7 @@ static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG) j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings); else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG) - j = vnic->rss_ctx->rss_indir_tbl[i]; + j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i]; else j = bp->rss_indir_tbl[i]; rxr = &bp->rx_ring[j]; @@ -10195,10 +10194,12 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, struct bnxt_ntuple_filter *ntp_fltr; int i; - bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic); - for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) { - if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID) - bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i); + if (netif_running(bp->dev)) { + bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic); + for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) { + if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID) + bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i); + } } if (!all) return; @@ -10219,19 +10220,17 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size, vnic->rss_table, 
vnic->rss_table_dma_addr); - kfree(rss_ctx->rss_indir_tbl); - list_del(&rss_ctx->list); bp->num_rss_ctx--; - clear_bit(rss_ctx->index, bp->rss_ctx_bmap); - kfree(rss_ctx); } static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp) { bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA); - struct bnxt_rss_ctx *rss_ctx, *tmp; + struct ethtool_rxfh_context *ctx; + unsigned long context; - list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) { + xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) { + struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx); struct bnxt_vnic_info *vnic = &rss_ctx->vnic; if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) || @@ -10240,42 +10239,20 @@ static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp) netdev_err(bp->dev, "Failed to restore RSS ctx %d\n", rss_ctx->index); bnxt_del_one_rss_ctx(bp, rss_ctx, true); + ethtool_rxfh_context_lost(bp->dev, rss_ctx->index); } } } -struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp) +void bnxt_clear_rss_ctxs(struct bnxt *bp) { - struct bnxt_rss_ctx *rss_ctx = NULL; + struct ethtool_rxfh_context *ctx; + unsigned long context; - rss_ctx = kzalloc(sizeof(*rss_ctx), GFP_KERNEL); - if (rss_ctx) { - rss_ctx->vnic.rss_ctx = rss_ctx; - list_add_tail(&rss_ctx->list, &bp->rss_ctx_list); - bp->num_rss_ctx++; - } - return rss_ctx; -} + xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) { + struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx); -void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all) -{ - struct bnxt_rss_ctx *rss_ctx, *tmp; - - list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) - bnxt_del_one_rss_ctx(bp, rss_ctx, all); - - if (all) - bitmap_free(bp->rss_ctx_bmap); -} - -static void bnxt_init_multi_rss_ctx(struct bnxt *bp) -{ - bp->rss_ctx_bmap = bitmap_zalloc(BNXT_RSS_CTX_BMAP_LEN, GFP_KERNEL); - if (bp->rss_ctx_bmap) { - /* burn index 0 since we cannot have context 0 */ - __set_bit(0, bp->rss_ctx_bmap); - INIT_LIST_HEAD(&bp->rss_ctx_list); - bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX; + bnxt_del_one_rss_ctx(bp, rss_ctx, false); } } @@ -12322,7 +12299,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, msleep(20); if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) - bnxt_clear_rss_ctxs(bp, false); + bnxt_clear_rss_ctxs(bp); /* Flush rings and disable interrupts */ bnxt_shutdown_nic(bp, irq_re_init); @@ -15237,8 +15214,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_free_l2_filters(bp, true); bnxt_free_ntp_fltrs(bp, true); - if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) - bnxt_clear_rss_ctxs(bp, true); + WARN_ON(bp->num_rss_ctx); clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); /* Flush any pending tasks */ cancel_work_sync(&bp->sp_task); @@ -15708,7 +15684,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bp->flags |= BNXT_FLAG_CHIP_P7; } - rc = bnxt_alloc_rss_indir_tbl(bp, NULL); + rc = bnxt_alloc_rss_indir_tbl(bp); if (rc) goto init_err_pci_clean; @@ -15865,8 +15841,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_LIST_HEAD(&bp->usr_fltr_list); if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) - bnxt_init_multi_rss_ctx(bp); - + bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX; rc = register_netdev(dev); if (rc) @@ -15889,8 +15864,6 @@ init_err_dl: bnxt_clear_int_mode(bp); init_err_pci_clean: - if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) - bnxt_clear_rss_ctxs(bp, true); bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_resources(bp); bnxt_hwmon_uninit(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h 
b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index e46bd11e52b0..6bbdc718c3a7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1286,19 +1286,16 @@ struct bnxt_vnic_info { #define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10 #define BNXT_VNIC_NTUPLE_FLAG 0x20 #define BNXT_VNIC_RSSCTX_FLAG 0x40 - struct bnxt_rss_ctx *rss_ctx; + struct ethtool_rxfh_context *rss_ctx; u32 vnic_id; }; struct bnxt_rss_ctx { - struct list_head list; struct bnxt_vnic_info vnic; - u16 *rss_indir_tbl; u8 index; }; #define BNXT_MAX_ETH_RSS_CTX 32 -#define BNXT_RSS_CTX_BMAP_LEN (BNXT_MAX_ETH_RSS_CTX + 1) #define BNXT_VNIC_ID_INVALID 0xffffffff struct bnxt_hw_rings { @@ -2331,11 +2328,9 @@ struct bnxt { /* grp_info indexed by completion ring index */ struct bnxt_ring_grp_info *grp_info; struct bnxt_vnic_info *vnic_info; - struct list_head rss_ctx_list; - unsigned long *rss_ctx_bmap; u32 num_rss_ctx; int nr_vnics; - u16 *rss_indir_tbl; + u32 *rss_indir_tbl; u16 rss_indir_tbl_entries; u32 rss_hash_cfg; u32 rss_hash_delta; @@ -2812,8 +2807,8 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic, u32 tpa_flags); void bnxt_fill_ipv6_mask(__be32 mask[4]); -int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx); -void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx); +void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, + struct ethtool_rxfh_context *rss_ctx); int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings); int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic); int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic, @@ -2847,8 +2842,7 @@ int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic); int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic); void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, bool all); -struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp); -void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all); +void bnxt_clear_rss_ctxs(struct bnxt *bp); int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_half_open_nic(struct bnxt *bp); void bnxt_half_close_nic(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index bf157f6cc042..f121a5e9691f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -970,7 +970,7 @@ static int bnxt_set_channels(struct net_device *dev, bnxt_clear_usr_fltrs(bp, true); if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) - bnxt_clear_rss_ctxs(bp, false); + bnxt_clear_rss_ctxs(bp); if (netif_running(dev)) { if (BNXT_PF(bp)) { /* TODO CHIMP_FW: Send message to all VF's @@ -1210,19 +1210,18 @@ fltr_err: static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp, u32 index) { - struct bnxt_rss_ctx *rss_ctx, *tmp; + struct ethtool_rxfh_context *ctx; - list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) - if (rss_ctx->index == index) - return rss_ctx; - return NULL; + ctx = xa_load(&bp->dev->ethtool->rss_ctx, index); + if (!ctx) + return NULL; + return ethtool_rxfh_context_priv(ctx); } -static int bnxt_alloc_rss_ctx_rss_table(struct bnxt *bp, - struct bnxt_rss_ctx *rss_ctx) +static int bnxt_alloc_vnic_rss_table(struct bnxt *bp, + struct bnxt_vnic_info *vnic) { int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); - struct bnxt_vnic_info *vnic = &rss_ctx->vnic; vnic->rss_table_size 
= size + HW_HASH_KEY_SIZE; vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev, @@ -1801,10 +1800,9 @@ static u32 bnxt_get_rxfh_key_size(struct net_device *dev) static int bnxt_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh) { - u32 rss_context = rxfh->rss_context; struct bnxt_rss_ctx *rss_ctx = NULL; struct bnxt *bp = netdev_priv(dev); - u16 *indir_tbl = bp->rss_indir_tbl; + u32 *indir_tbl = bp->rss_indir_tbl; struct bnxt_vnic_info *vnic; u32 i, tbl_size; @@ -1815,10 +1813,13 @@ static int bnxt_get_rxfh(struct net_device *dev, vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; if (rxfh->rss_context) { - rss_ctx = bnxt_get_rss_ctx_from_index(bp, rss_context); - if (!rss_ctx) + struct ethtool_rxfh_context *ctx; + + ctx = xa_load(&bp->dev->ethtool->rss_ctx, rxfh->rss_context); + if (!ctx) return -EINVAL; - indir_tbl = rss_ctx->rss_indir_tbl; + indir_tbl = ethtool_rxfh_context_indir(ctx); + rss_ctx = ethtool_rxfh_context_priv(ctx); vnic = &rss_ctx->vnic; } @@ -1834,8 +1835,9 @@ static int bnxt_get_rxfh(struct net_device *dev, return 0; } -static void bnxt_modify_rss(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, - struct ethtool_rxfh_param *rxfh) +static void bnxt_modify_rss(struct bnxt *bp, struct ethtool_rxfh_context *ctx, + struct bnxt_rss_ctx *rss_ctx, + const struct ethtool_rxfh_param *rxfh) { if (rxfh->key) { if (rss_ctx) { @@ -1848,29 +1850,21 @@ static void bnxt_modify_rss(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, } if (rxfh->indir) { u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev); - u16 *indir_tbl = bp->rss_indir_tbl; + u32 *indir_tbl = bp->rss_indir_tbl; if (rss_ctx) - indir_tbl = rss_ctx->rss_indir_tbl; + indir_tbl = ethtool_rxfh_context_indir(ctx); for (i = 0; i < tbl_size; i++) indir_tbl[i] = rxfh->indir[i]; pad = bp->rss_indir_tbl_entries - tbl_size; if (pad) - memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); + memset(&indir_tbl[i], 0, pad * sizeof(*indir_tbl)); } } -static int bnxt_set_rxfh_context(struct bnxt *bp, - struct ethtool_rxfh_param *rxfh, - struct netlink_ext_ack *extack) +static int bnxt_rxfh_context_check(struct bnxt *bp, + struct netlink_ext_ack *extack) { - u32 *rss_context = &rxfh->rss_context; - struct bnxt_rss_ctx *rss_ctx; - struct bnxt_vnic_info *vnic; - bool modify = false; - int bit_id; - int rc; - if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) { NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported"); return -EOPNOTSUPP; @@ -1881,21 +1875,22 @@ static int bnxt_set_rxfh_context(struct bnxt *bp, return -EAGAIN; } - if (*rss_context != ETH_RXFH_CONTEXT_ALLOC) { - rss_ctx = bnxt_get_rss_ctx_from_index(bp, *rss_context); - if (!rss_ctx) { - NL_SET_ERR_MSG_FMT_MOD(extack, "RSS context %u not found", - *rss_context); - return -EINVAL; - } - if (*rss_context && rxfh->rss_delete) { - bnxt_del_one_rss_ctx(bp, rss_ctx, true); - return 0; - } - modify = true; - vnic = &rss_ctx->vnic; - goto modify_context; - } + return 0; +} + +static int bnxt_create_rxfh_context(struct net_device *dev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_rss_ctx *rss_ctx; + struct bnxt_vnic_info *vnic; + int rc; + + rc = bnxt_rxfh_context_check(bp, extack); + if (rc) + return rc; if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) { NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u", @@ -1908,22 +1903,19 @@ static int bnxt_set_rxfh_context(struct bnxt *bp, return -ENOMEM; } - rss_ctx = bnxt_alloc_rss_ctx(bp); - if (!rss_ctx) - return -ENOMEM; + 
rss_ctx = ethtool_rxfh_context_priv(ctx); + + bp->num_rss_ctx++; vnic = &rss_ctx->vnic; + vnic->rss_ctx = ctx; vnic->flags |= BNXT_VNIC_RSSCTX_FLAG; vnic->vnic_id = BNXT_VNIC_ID_INVALID; - rc = bnxt_alloc_rss_ctx_rss_table(bp, rss_ctx); - if (rc) - goto out; - - rc = bnxt_alloc_rss_indir_tbl(bp, rss_ctx); + rc = bnxt_alloc_vnic_rss_table(bp, vnic); if (rc) goto out; - bnxt_set_dflt_rss_indir_tbl(bp, rss_ctx); + bnxt_set_dflt_rss_indir_tbl(bp, ctx); memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE); rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings); @@ -1937,11 +1929,7 @@ static int bnxt_set_rxfh_context(struct bnxt *bp, NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA"); goto out; } -modify_context: - bnxt_modify_rss(bp, rss_ctx, rxfh); - - if (modify) - return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic); + bnxt_modify_rss(bp, ctx, rss_ctx, rxfh); rc = __bnxt_setup_vnic_p5(bp, vnic); if (rc) { @@ -1949,21 +1937,47 @@ modify_context: goto out; } - bit_id = bitmap_find_free_region(bp->rss_ctx_bmap, - BNXT_RSS_CTX_BMAP_LEN, 0); - if (bit_id < 0) { - rc = -ENOMEM; - goto out; - } - rss_ctx->index = (u16)bit_id; - *rss_context = rss_ctx->index; - + rss_ctx->index = rxfh->rss_context; return 0; out: bnxt_del_one_rss_ctx(bp, rss_ctx, true); return rc; } +static int bnxt_modify_rxfh_context(struct net_device *dev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_rss_ctx *rss_ctx; + int rc; + + rc = bnxt_rxfh_context_check(bp, extack); + if (rc) + return rc; + + rss_ctx = ethtool_rxfh_context_priv(ctx); + + bnxt_modify_rss(bp, ctx, rss_ctx, rxfh); + + return bnxt_hwrm_vnic_rss_cfg_p5(bp, &rss_ctx->vnic); +} + +static int bnxt_remove_rxfh_context(struct net_device *dev, + struct ethtool_rxfh_context *ctx, + u32 rss_context, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_rss_ctx *rss_ctx; + + rss_ctx = ethtool_rxfh_context_priv(ctx); + + bnxt_del_one_rss_ctx(bp, rss_ctx, true); + return 0; +} + static int bnxt_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh, struct netlink_ext_ack *extack) @@ -1974,10 +1988,7 @@ static int bnxt_set_rxfh(struct net_device *dev, if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (rxfh->rss_context) - return bnxt_set_rxfh_context(bp, rxfh, extack); - - bnxt_modify_rss(bp, NULL, rxfh); + bnxt_modify_rss(bp, NULL, NULL, rxfh); bnxt_clear_usr_fltrs(bp, false); if (netif_running(bp->dev)) { @@ -5269,6 +5280,9 @@ void bnxt_ethtool_free(struct bnxt *bp) const struct ethtool_ops bnxt_ethtool_ops = { .cap_link_lanes_supported = 1, .cap_rss_ctx_supported = 1, + .rxfh_max_context_id = BNXT_MAX_ETH_RSS_CTX, + .rxfh_indir_space = BNXT_MAX_RSS_TABLE_ENTRIES_P5, + .rxfh_priv_size = sizeof(struct bnxt_rss_ctx), .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USECS_IRQ | @@ -5306,6 +5320,9 @@ const struct ethtool_ops bnxt_ethtool_ops = { .get_rxfh_key_size = bnxt_get_rxfh_key_size, .get_rxfh = bnxt_get_rxfh, .set_rxfh = bnxt_set_rxfh, + .create_rxfh_context = bnxt_create_rxfh_context, + .modify_rxfh_context = bnxt_modify_rxfh_context, + .remove_rxfh_context = bnxt_remove_rxfh_context, .flash_device = bnxt_flash_device, .get_eeprom_len = bnxt_get_eeprom_len, .get_eeprom = bnxt_get_eeprom, diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h index a5ebd7110e07..986f43d27711 
100644 --- a/drivers/net/ethernet/brocade/bna/bna_types.h +++ b/drivers/net/ethernet/brocade/bna/bna_types.h @@ -416,7 +416,7 @@ struct bna_ib { /* Tx object */ /* Tx datapath control structure */ -#define BNA_Q_NAME_SIZE 16 +#define BNA_Q_NAME_SIZE (IFNAMSIZ + 6) struct bna_tcb { /* Fast path */ void **sw_qpt; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index fe121d36112d..ece6f3b48327 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1534,8 +1534,9 @@ bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info, for (i = 0; i < num_txqs; i++) { vector_num = tx_info->tcb[i]->intr_vector; - sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name, - tx_id + tx_info->tcb[i]->id); + snprintf(tx_info->tcb[i]->name, BNA_Q_NAME_SIZE, "%s TXQ %d", + bnad->netdev->name, + tx_id + tx_info->tcb[i]->id); err = request_irq(bnad->msix_table[vector_num].vector, (irq_handler_t)bnad_msix_tx, 0, tx_info->tcb[i]->name, @@ -1585,9 +1586,9 @@ bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info, for (i = 0; i < num_rxps; i++) { vector_num = rx_info->rx_ctrl[i].ccb->intr_vector; - sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d", - bnad->netdev->name, - rx_id + rx_info->rx_ctrl[i].ccb->id); + snprintf(rx_info->rx_ctrl[i].ccb->name, BNA_Q_NAME_SIZE, + "%s CQ %d", bnad->netdev->name, + rx_id + rx_info->rx_ctrl[i].ccb->id); err = request_irq(bnad->msix_table[vector_num].vector, (irq_handler_t)bnad_msix_rx, 0, rx_info->rx_ctrl[i].ccb->name, diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c index 4fee74c024bd..aad470e9caea 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c @@ -35,7 +35,6 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev, u32 last_fqid = 0; ssize_t bytes = 0; char *str; - int i = 0; list_for_each_entry_safe(fq, tmp, &priv->dpaa_fq_list, list) { switch (fq->fq_type) { @@ -85,7 +84,6 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev, prev = fq; prevstr = str; - i++; } if (prev) { diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index e0287fbd501d..0375c7448a57 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -384,17 +384,6 @@ config IGC_LEDS Optional support for controlling the NIC LED's with the netdev LED trigger. -config IDPF - tristate "Intel(R) Infrastructure Data Path Function Support" - depends on PCI_MSI - select DIMLIB - select PAGE_POOL - select PAGE_POOL_STATS - help - This driver supports Intel(R) Infrastructure Data Path Function - devices. - - To compile this driver as a module, choose M here. The module - will be called idpf. +source "drivers/net/ethernet/intel/idpf/Kconfig" endif # NET_VENDOR_INTEL diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 2e98a2a0bead..ce227b56cf72 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1109,6 +1109,46 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) } /** + * e1000e_force_smbus - Force interfaces to transition to SMBUS mode. + * @hw: pointer to the HW structure + * + * Force the MAC and the PHY to SMBUS mode. Assumes semaphore already + * acquired. + * + * Return: 0 on success, negative errno on failure. 
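+ *
+ * Sketch of the calling pattern used later in this patch (not a new
+ * API; callers only log the error because ULP entry is expected to
+ * proceed regardless):
+ *
+ *	ret_val = e1000e_force_smbus(hw);
+ *	if (ret_val)
+ *		e_dbg("Failed to force SMBUS: %d\n", ret_val);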
+ **/ +static s32 e1000e_force_smbus(struct e1000_hw *hw) +{ + u16 smb_ctrl = 0; + u32 ctrl_ext; + s32 ret_val; + + /* Switching PHY interface always returns MDI error + * so disable retry mechanism to avoid wasting time + */ + e1000e_disable_phy_retry(hw); + + /* Force SMBus mode in the PHY */ + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &smb_ctrl); + if (ret_val) { + e1000e_enable_phy_retry(hw); + return ret_val; + } + + smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS; + e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, smb_ctrl); + + e1000e_enable_phy_retry(hw); + + /* Force SMBus mode in the MAC */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, ctrl_ext); + + return 0; +} + +/** * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP * @hw: pointer to the HW structure * @to_sx: boolean indicating a system power state transition to Sx @@ -1165,6 +1205,14 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) if (ret_val) goto out; + if (hw->mac.type != e1000_pch_mtp) { + ret_val = e1000e_force_smbus(hw); + if (ret_val) { + e_dbg("Failed to force SMBUS: %d\n", ret_val); + goto release; + } + } + /* Si workaround for ULP entry flow on i127/rev6 h/w. Enable * LPLU and disable Gig speed when entering ULP */ @@ -1225,27 +1273,12 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) } release: - /* Switching PHY interface always returns MDI error - * so disable retry mechanism to avoid wasting time - */ - e1000e_disable_phy_retry(hw); - - /* Force SMBus mode in PHY */ - ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); - if (ret_val) { - e1000e_enable_phy_retry(hw); - hw->phy.ops.release(hw); - goto out; + if (hw->mac.type == e1000_pch_mtp) { + ret_val = e1000e_force_smbus(hw); + if (ret_val) + e_dbg("Failed to force SMBUS over MTL system: %d\n", + ret_val); } - phy_reg |= CV_SMB_CTRL_FORCE_SMBUS; - e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); - - e1000e_enable_phy_retry(hw); - - /* Force SMBus mode in MAC */ - mac_reg = er32(CTRL_EXT); - mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; - ew32(CTRL_EXT, mac_reg); hw->phy.ops.release(hw); out: diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index ee86d2c53079..55b5bb884d73 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -109,10 +109,6 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) -EFBIG, /* I40E_AQ_RC_EFBIG */ }; - /* aq_rc is invalid if AQ timed out */ - if (aq_ret == -EIO) - return -EAGAIN; - if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0])))) return -ERANGE; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 8535fb5c4e46..cbcfada7b357 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -13292,6 +13292,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, bool need_reset; int i; + /* VSI shall be deleted in a moment, block loading new programs */ + if (prog && test_bit(__I40E_IN_REMOVE, pf->state)) + return -EINVAL; + /* Don't allow frames that span over multiple buffers */ if (vsi->netdev->mtu > frame_size - I40E_PACKET_HDR_PAD) { NL_SET_ERR_MSG_MOD(extack, "MTU too large for linear frames and XDP prog does not support frags"); @@ -13300,14 +13304,9 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, /* When turning XDP 
on->off/off->on we reset and rebuild the rings. */ need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog); - if (need_reset) i40e_prep_for_reset(pf); - /* VSI shall be deleted in a moment, just return EINVAL */ - if (test_bit(__I40E_IN_REMOVE, pf->state)) - return -EINVAL; - old_prog = xchg(&vsi->xdp_prog, prog); if (need_reset) { diff --git a/drivers/net/ethernet/intel/idpf/Kconfig b/drivers/net/ethernet/intel/idpf/Kconfig new file mode 100644 index 000000000000..1addd663acad --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/Kconfig @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 2024 Intel Corporation + +config IDPF + tristate "Intel(R) Infrastructure Data Path Function Support" + depends on PCI_MSI + select DIMLIB + select LIBETH + help + This driver supports Intel(R) Infrastructure Data Path Function + devices. + + To compile this driver as a module, choose M here. The module + will be called idpf. + +if IDPF + +config IDPF_SINGLEQ + bool "idpf singleq support" + help + This option enables support for legacy single Rx/Tx queues with no + completion and fill queues. Only enable if you have hardware which + wants to work in this mode as it increases the driver size and adds + runtime checks on the hotpath. + +endif # IDPF diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile index 6844ead2f3ac..2ce01a0b5898 100644 --- a/drivers/net/ethernet/intel/idpf/Makefile +++ b/drivers/net/ethernet/intel/idpf/Makefile @@ -12,7 +12,8 @@ idpf-y := \ idpf_ethtool.o \ idpf_lib.o \ idpf_main.o \ - idpf_singleq_txrx.o \ idpf_txrx.o \ idpf_virtchnl.o \ idpf_vf_dev.o + +idpf-$(CONFIG_IDPF_SINGLEQ) += idpf_singleq_txrx.o diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index e7a036538246..2c31ad87587a 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -17,10 +17,8 @@ struct idpf_vport_max_q; #include <linux/sctp.h> #include <linux/ethtool_netlink.h> #include <net/gro.h> -#include <linux/dim.h> #include "virtchnl2.h" -#include "idpf_lan_txrx.h" #include "idpf_txrx.h" #include "idpf_controlq.h" @@ -266,7 +264,6 @@ struct idpf_port_stats { * the worst case. * @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping * @bufq_desc_count: Buffer queue descriptor count - * @bufq_size: Size of buffers in ring (e.g. 2K, 4K, etc) * @num_rxq_grp: Number of RX queues in a group * @rxq_grps: Total number of RX groups. Number of groups * number of RX per * group will yield total number of RX queues.
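The new CONFIG_IDPF_SINGLEQ option only shrinks the driver because hot-path callers test it via IS_ENABLED(), which the compiler folds to a constant; the idpf_is_queue_model_split() change in the next hunk is exactly that pattern. A minimal self-contained sketch of the idea (illustrative names; only IS_ENABLED() and the Kconfig symbol come from this patch):

#include <linux/kconfig.h>
#include <linux/types.h>

#define EXAMPLE_QUEUE_MODEL_SPLIT	1

/* With CONFIG_IDPF_SINGLEQ=n, !IS_ENABLED(CONFIG_IDPF_SINGLEQ) is the
 * constant true, the || short-circuits, and every branch guarded by
 * "!example_is_split(...)" is provably dead, so the singleq paths drop
 * out of the object code together with the idpf_singleq_txrx.o line
 * removed from the Makefile above.
 */
static inline bool example_is_split(u16 q_model)
{
	return !IS_ENABLED(CONFIG_IDPF_SINGLEQ) ||
	       q_model == EXAMPLE_QUEUE_MODEL_SPLIT;
}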
@@ -302,7 +299,7 @@ struct idpf_vport { u16 num_txq_grp; struct idpf_txq_group *txq_grps; u32 txq_model; - struct idpf_queue **txqs; + struct idpf_tx_queue **txqs; bool crc_enable; u16 num_rxq; @@ -310,11 +307,10 @@ struct idpf_vport { u32 rxq_desc_count; u8 num_bufqs_per_qgrp; u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP]; - u32 bufq_size[IDPF_MAX_BUFQS_PER_RXQ_GRP]; u16 num_rxq_grp; struct idpf_rxq_group *rxq_grps; u32 rxq_model; - struct idpf_rx_ptype_decoded rx_ptype_lkup[IDPF_RX_MAX_PTYPE]; + struct libeth_rx_pt *rx_ptype_lkup; struct idpf_adapter *adapter; struct net_device *netdev; @@ -601,7 +597,8 @@ struct idpf_adapter { */ static inline int idpf_is_queue_model_split(u16 q_model) { - return q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT; + return !IS_ENABLED(CONFIG_IDPF_SINGLEQ) || + q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT; } #define idpf_is_cap_ena(adapter, field, flag) \ diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c index 1885ba618981..3806ddd3ce4a 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c @@ -437,22 +437,24 @@ struct idpf_stats { .stat_offset = offsetof(_type, _stat) \ } -/* Helper macro for defining some statistics related to queues */ -#define IDPF_QUEUE_STAT(_name, _stat) \ - IDPF_STAT(struct idpf_queue, _name, _stat) +/* Helper macros for defining some statistics related to queues */ +#define IDPF_RX_QUEUE_STAT(_name, _stat) \ + IDPF_STAT(struct idpf_rx_queue, _name, _stat) +#define IDPF_TX_QUEUE_STAT(_name, _stat) \ + IDPF_STAT(struct idpf_tx_queue, _name, _stat) /* Stats associated with a Tx queue */ static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = { - IDPF_QUEUE_STAT("pkts", q_stats.tx.packets), - IDPF_QUEUE_STAT("bytes", q_stats.tx.bytes), - IDPF_QUEUE_STAT("lso_pkts", q_stats.tx.lso_pkts), + IDPF_TX_QUEUE_STAT("pkts", q_stats.packets), + IDPF_TX_QUEUE_STAT("bytes", q_stats.bytes), + IDPF_TX_QUEUE_STAT("lso_pkts", q_stats.lso_pkts), }; /* Stats associated with an Rx queue */ static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = { - IDPF_QUEUE_STAT("pkts", q_stats.rx.packets), - IDPF_QUEUE_STAT("bytes", q_stats.rx.bytes), - IDPF_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rx.rsc_pkts), + IDPF_RX_QUEUE_STAT("pkts", q_stats.packets), + IDPF_RX_QUEUE_STAT("bytes", q_stats.bytes), + IDPF_RX_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rsc_pkts), }; #define IDPF_TX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_tx_queue_stats) @@ -563,8 +565,6 @@ static void idpf_get_stat_strings(struct net_device *netdev, u8 *data) for (i = 0; i < vport_config->max_q.max_rxq; i++) idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats, "rx", i); - - page_pool_ethtool_stats_get_strings(data); } /** @@ -598,7 +598,6 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset) struct idpf_netdev_priv *np = netdev_priv(netdev); struct idpf_vport_config *vport_config; u16 max_txq, max_rxq; - unsigned int size; if (sset != ETH_SS_STATS) return -EINVAL; @@ -617,11 +616,8 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset) max_txq = vport_config->max_q.max_txq; max_rxq = vport_config->max_q.max_rxq; - size = IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) + + return IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) + (IDPF_RX_QUEUE_STATS_LEN * max_rxq); - size += page_pool_ethtool_stats_get_count(); - - return size; } /** @@ -633,7 +629,7 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset) * Copies the 
stat data defined by the pointer and stat structure pair into * the memory supplied as data. If the pointer is null, data will be zero'd. */ -static void idpf_add_one_ethtool_stat(u64 *data, void *pstat, +static void idpf_add_one_ethtool_stat(u64 *data, const void *pstat, const struct idpf_stats *stat) { char *p; @@ -671,6 +667,7 @@ static void idpf_add_one_ethtool_stat(u64 *data, void *pstat, * idpf_add_queue_stats - copy queue statistics into supplied buffer * @data: ethtool stats buffer * @q: the queue to copy + * @type: type of the queue * * Queue statistics must be copied while protected by u64_stats_fetch_begin, * so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats @@ -681,19 +678,23 @@ static void idpf_add_one_ethtool_stat(u64 *data, void *pstat, * * This function expects to be called while under rcu_read_lock(). */ -static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q) +static void idpf_add_queue_stats(u64 **data, const void *q, + enum virtchnl2_queue_type type) { + const struct u64_stats_sync *stats_sync; const struct idpf_stats *stats; unsigned int start; unsigned int size; unsigned int i; - if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) { + if (type == VIRTCHNL2_QUEUE_TYPE_RX) { size = IDPF_RX_QUEUE_STATS_LEN; stats = idpf_gstrings_rx_queue_stats; + stats_sync = &((const struct idpf_rx_queue *)q)->stats_sync; } else { size = IDPF_TX_QUEUE_STATS_LEN; stats = idpf_gstrings_tx_queue_stats; + stats_sync = &((const struct idpf_tx_queue *)q)->stats_sync; } /* To avoid invalid statistics values, ensure that we keep retrying @@ -701,10 +702,10 @@ static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q) * u64_stats_fetch_retry. */ do { - start = u64_stats_fetch_begin(&q->stats_sync); + start = u64_stats_fetch_begin(stats_sync); for (i = 0; i < size; i++) idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]); - } while (u64_stats_fetch_retry(&q->stats_sync, start)); + } while (u64_stats_fetch_retry(stats_sync, start)); /* Once we successfully copy the stats in, update the data pointer */ *data += size; @@ -793,7 +794,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport) for (j = 0; j < num_rxq; j++) { u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs; struct idpf_rx_queue_stats *stats; - struct idpf_queue *rxq; + struct idpf_rx_queue *rxq; unsigned int start; if (idpf_is_queue_model_split(vport->rxq_model)) @@ -807,7 +808,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport) do { start = u64_stats_fetch_begin(&rxq->stats_sync); - stats = &rxq->q_stats.rx; + stats = &rxq->q_stats; hw_csum_err = u64_stats_read(&stats->hw_csum_err); hsplit = u64_stats_read(&stats->hsplit_pkts); hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf); @@ -828,7 +829,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport) for (j = 0; j < txq_grp->num_txq; j++) { u64 linearize, qbusy, skb_drops, dma_map_errs; - struct idpf_queue *txq = txq_grp->txqs[j]; + struct idpf_tx_queue *txq = txq_grp->txqs[j]; struct idpf_tx_queue_stats *stats; unsigned int start; @@ -838,7 +839,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport) do { start = u64_stats_fetch_begin(&txq->stats_sync); - stats = &txq->q_stats.tx; + stats = &txq->q_stats; linearize = u64_stats_read(&stats->linearize); qbusy = u64_stats_read(&stats->q_busy); skb_drops = u64_stats_read(&stats->skb_drops); @@ -869,7 +870,6 @@ static void idpf_get_ethtool_stats(struct net_device *netdev, { struct idpf_netdev_priv *np = netdev_priv(netdev); struct idpf_vport_config 
*vport_config; - struct page_pool_stats pp_stats = { }; struct idpf_vport *vport; unsigned int total = 0; unsigned int i, j; @@ -896,12 +896,12 @@ static void idpf_get_ethtool_stats(struct net_device *netdev, qtype = VIRTCHNL2_QUEUE_TYPE_TX; for (j = 0; j < txq_grp->num_txq; j++, total++) { - struct idpf_queue *txq = txq_grp->txqs[j]; + struct idpf_tx_queue *txq = txq_grp->txqs[j]; if (!txq) idpf_add_empty_queue_stats(&data, qtype); else - idpf_add_queue_stats(&data, txq); + idpf_add_queue_stats(&data, txq, qtype); } } @@ -929,7 +929,7 @@ static void idpf_get_ethtool_stats(struct net_device *netdev, num_rxq = rxq_grp->singleq.num_rxq; for (j = 0; j < num_rxq; j++, total++) { - struct idpf_queue *rxq; + struct idpf_rx_queue *rxq; if (is_splitq) rxq = &rxq_grp->splitq.rxq_sets[j]->rxq; @@ -938,93 +938,77 @@ static void idpf_get_ethtool_stats(struct net_device *netdev, if (!rxq) idpf_add_empty_queue_stats(&data, qtype); else - idpf_add_queue_stats(&data, rxq); - - /* In splitq mode, don't get page pool stats here since - * the pools are attached to the buffer queues - */ - if (is_splitq) - continue; - - if (rxq) - page_pool_get_stats(rxq->pp, &pp_stats); - } - } - - for (i = 0; i < vport->num_rxq_grp; i++) { - for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { - struct idpf_queue *rxbufq = - &vport->rxq_grps[i].splitq.bufq_sets[j].bufq; - - page_pool_get_stats(rxbufq->pp, &pp_stats); + idpf_add_queue_stats(&data, rxq, qtype); } } for (; total < vport_config->max_q.max_rxq; total++) idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX); - page_pool_ethtool_stats_get(data, &pp_stats); - rcu_read_unlock(); idpf_vport_ctrl_unlock(netdev); } /** - * idpf_find_rxq - find rxq from q index + * idpf_find_rxq_vec - find rxq vector from q index * @vport: virtual port associated to queue * @q_num: q index used to find queue * - * returns pointer to rx queue + * returns pointer to rx vector */ -static struct idpf_queue *idpf_find_rxq(struct idpf_vport *vport, int q_num) +static struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport, + int q_num) { int q_grp, q_idx; if (!idpf_is_queue_model_split(vport->rxq_model)) - return vport->rxq_grps->singleq.rxqs[q_num]; + return vport->rxq_grps->singleq.rxqs[q_num]->q_vector; q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; - return &vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq; + return vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector; } /** - * idpf_find_txq - find txq from q index + * idpf_find_txq_vec - find txq vector from q index * @vport: virtual port associated to queue * @q_num: q index used to find queue * - * returns pointer to tx queue + * returns pointer to tx vector */ -static struct idpf_queue *idpf_find_txq(struct idpf_vport *vport, int q_num) +static struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport, + int q_num) { int q_grp; if (!idpf_is_queue_model_split(vport->txq_model)) - return vport->txqs[q_num]; + return vport->txqs[q_num]->q_vector; q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP; - return vport->txq_grps[q_grp].complq; + return vport->txq_grps[q_grp].complq->q_vector; } /** * __idpf_get_q_coalesce - get ITR values for specific queue * @ec: ethtool structure to fill with driver's coalesce settings - * @q: quuee of Rx or Tx + * @q_vector: queue vector corresponding to this queue + * @type: queue type */ static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec, - struct idpf_queue *q) + const struct idpf_q_vector *q_vector, + enum 
virtchnl2_queue_type type) { - if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) { + if (type == VIRTCHNL2_QUEUE_TYPE_RX) { ec->use_adaptive_rx_coalesce = - IDPF_ITR_IS_DYNAMIC(q->q_vector->rx_intr_mode); - ec->rx_coalesce_usecs = q->q_vector->rx_itr_value; + IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode); + ec->rx_coalesce_usecs = q_vector->rx_itr_value; } else { ec->use_adaptive_tx_coalesce = - IDPF_ITR_IS_DYNAMIC(q->q_vector->tx_intr_mode); - ec->tx_coalesce_usecs = q->q_vector->tx_itr_value; + IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode); + ec->tx_coalesce_usecs = q_vector->tx_itr_value; } } @@ -1040,8 +1024,8 @@ static int idpf_get_q_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, u32 q_num) { - struct idpf_netdev_priv *np = netdev_priv(netdev); - struct idpf_vport *vport; + const struct idpf_netdev_priv *np = netdev_priv(netdev); + const struct idpf_vport *vport; int err = 0; idpf_vport_ctrl_lock(netdev); @@ -1056,10 +1040,12 @@ static int idpf_get_q_coalesce(struct net_device *netdev, } if (q_num < vport->num_rxq) - __idpf_get_q_coalesce(ec, idpf_find_rxq(vport, q_num)); + __idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num), + VIRTCHNL2_QUEUE_TYPE_RX); if (q_num < vport->num_txq) - __idpf_get_q_coalesce(ec, idpf_find_txq(vport, q_num)); + __idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num), + VIRTCHNL2_QUEUE_TYPE_TX); unlock_mutex: idpf_vport_ctrl_unlock(netdev); @@ -1103,16 +1089,15 @@ static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num, /** * __idpf_set_q_coalesce - set ITR values for specific queue * @ec: ethtool structure from user to update ITR settings - * @q: queue for which itr values has to be set + * @qv: queue vector for which the ITR values have to be set * @is_rxq: is queue type rx * * Returns 0 on success, negative otherwise. */ -static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec, - struct idpf_queue *q, bool is_rxq) +static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec, + struct idpf_q_vector *qv, bool is_rxq) { u32 use_adaptive_coalesce, coalesce_usecs; - struct idpf_q_vector *qv = q->q_vector; bool is_dim_ena = false; u16 itr_val; @@ -1128,7 +1113,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec, itr_val = qv->tx_itr_value; } if (coalesce_usecs != itr_val && use_adaptive_coalesce) { - netdev_err(q->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n"); + netdev_err(qv->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n"); return -EINVAL; } @@ -1137,7 +1122,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec, return 0; if (coalesce_usecs > IDPF_ITR_MAX) { - netdev_err(q->vport->netdev, + netdev_err(qv->vport->netdev, "Invalid value, %d-usecs range is 0-%d\n", coalesce_usecs, IDPF_ITR_MAX); @@ -1146,7 +1131,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec, if (coalesce_usecs % 2) { coalesce_usecs--; - netdev_info(q->vport->netdev, + netdev_info(qv->vport->netdev, "HW only supports even ITR values, ITR rounded to %d\n", coalesce_usecs); } @@ -1185,15 +1170,16 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec, * * Return 0 on success, and negative on failure */ -static int idpf_set_q_coalesce(struct idpf_vport *vport, - struct ethtool_coalesce *ec, +static int idpf_set_q_coalesce(const struct idpf_vport *vport, + const struct ethtool_coalesce *ec, int q_num, bool is_rxq) { - struct idpf_queue *q; + struct idpf_q_vector *qv; - q = is_rxq ?
idpf_find_rxq_vec(vport, q_num) : + idpf_find_txq_vec(vport, q_num); - if (q && __idpf_set_q_coalesce(ec, q, is_rxq)) + if (qv && __idpf_set_q_coalesce(ec, qv, is_rxq)) return -EINVAL; return 0; diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h index a5752dcab888..8c7f8ef8f1a1 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h @@ -4,6 +4,8 @@ #ifndef _IDPF_LAN_TXRX_H_ #define _IDPF_LAN_TXRX_H_ +#include <linux/bits.h> + enum idpf_rss_hash { IDPF_HASH_INVALID = 0, /* Values 1 - 28 are reserved for future use */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index f1ee5584e8fa..5dbf2b4ba1b0 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -4,8 +4,7 @@ #include "idpf.h" #include "idpf_virtchnl.h" -static const struct net_device_ops idpf_netdev_ops_splitq; -static const struct net_device_ops idpf_netdev_ops_singleq; +static const struct net_device_ops idpf_netdev_ops; /** * idpf_init_vector_stack - Fill the MSIX vector stack with vector index @@ -69,7 +68,7 @@ static void idpf_deinit_vector_stack(struct idpf_adapter *adapter) static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter) { clear_bit(IDPF_MB_INTR_MODE, adapter->flags); - free_irq(adapter->msix_entries[0].vector, adapter); + kfree(free_irq(adapter->msix_entries[0].vector, adapter)); queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); } @@ -124,15 +123,14 @@ static void idpf_mb_irq_enable(struct idpf_adapter *adapter) */ static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter) { - struct idpf_q_vector *mb_vector = &adapter->mb_vector; int irq_num, mb_vidx = 0, err; + char *name; irq_num = adapter->msix_entries[mb_vidx].vector; - mb_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d", - dev_driver_string(&adapter->pdev->dev), - "Mailbox", mb_vidx); - err = request_irq(irq_num, adapter->irq_mb_handler, 0, - mb_vector->name, adapter); + name = kasprintf(GFP_KERNEL, "%s-%s-%d", + dev_driver_string(&adapter->pdev->dev), + "Mailbox", mb_vidx); + err = request_irq(irq_num, adapter->irq_mb_handler, 0, name, adapter); if (err) { dev_err(&adapter->pdev->dev, "IRQ request for mailbox failed, error: %d\n", err); @@ -765,10 +763,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) } /* assign netdev_ops */ - if (idpf_is_queue_model_split(vport->txq_model)) - netdev->netdev_ops = &idpf_netdev_ops_splitq; - else - netdev->netdev_ops = &idpf_netdev_ops_singleq; + netdev->netdev_ops = &idpf_netdev_ops; /* setup watchdog timeout value to be 5 second */ netdev->watchdog_timeo = 5 * HZ; @@ -946,6 +941,9 @@ static void idpf_decfg_netdev(struct idpf_vport *vport) { struct idpf_adapter *adapter = vport->adapter; + kfree(vport->rx_ptype_lkup); + vport->rx_ptype_lkup = NULL; + unregister_netdev(vport->netdev); free_netdev(vport->netdev); vport->netdev = NULL; @@ -1318,14 +1316,14 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport) if (idpf_is_queue_model_split(vport->rxq_model)) { for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { - struct idpf_queue *q = + const struct idpf_buf_queue *q = &grp->splitq.bufq_sets[j].bufq; writel(q->next_to_alloc, q->tail); } } else { for (j = 0; j < grp->singleq.num_rxq; j++) { - struct idpf_queue *q = + const struct idpf_rx_queue *q = grp->singleq.rxqs[j]; writel(q->next_to_alloc, q->tail); @@ -1855,7 +1853,7 @@ int idpf_initiate_soft_reset(struct idpf_vport 
*vport, enum idpf_vport_state current_state = np->state; struct idpf_adapter *adapter = vport->adapter; struct idpf_vport *new_vport; - int err, i; + int err; /* If the system is low on memory, we can end up in bad state if we * free all the memory for queue resources and try to allocate them @@ -1929,46 +1927,6 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, */ memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps)); - /* Since idpf_vport_queues_alloc was called with new_port, the queue - * back pointers are currently pointing to the local new_vport. Reset - * the backpointers to the original vport here - */ - for (i = 0; i < vport->num_txq_grp; i++) { - struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; - int j; - - tx_qgrp->vport = vport; - for (j = 0; j < tx_qgrp->num_txq; j++) - tx_qgrp->txqs[j]->vport = vport; - - if (idpf_is_queue_model_split(vport->txq_model)) - tx_qgrp->complq->vport = vport; - } - - for (i = 0; i < vport->num_rxq_grp; i++) { - struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; - struct idpf_queue *q; - u16 num_rxq; - int j; - - rx_qgrp->vport = vport; - for (j = 0; j < vport->num_bufqs_per_qgrp; j++) - rx_qgrp->splitq.bufq_sets[j].bufq.vport = vport; - - if (idpf_is_queue_model_split(vport->rxq_model)) - num_rxq = rx_qgrp->splitq.num_rxq_sets; - else - num_rxq = rx_qgrp->singleq.num_rxq; - - for (j = 0; j < num_rxq; j++) { - if (idpf_is_queue_model_split(vport->rxq_model)) - q = &rx_qgrp->splitq.rxq_sets[j]->rxq; - else - q = rx_qgrp->singleq.rxqs[j]; - q->vport = vport; - } - } - if (reset_cause == IDPF_SR_Q_CHANGE) idpf_vport_alloc_vec_indexes(vport); @@ -2393,24 +2351,10 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem) mem->pa = 0; } -static const struct net_device_ops idpf_netdev_ops_splitq = { - .ndo_open = idpf_open, - .ndo_stop = idpf_stop, - .ndo_start_xmit = idpf_tx_splitq_start, - .ndo_features_check = idpf_features_check, - .ndo_set_rx_mode = idpf_set_rx_mode, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = idpf_set_mac, - .ndo_change_mtu = idpf_change_mtu, - .ndo_get_stats64 = idpf_get_stats64, - .ndo_set_features = idpf_set_features, - .ndo_tx_timeout = idpf_tx_timeout, -}; - -static const struct net_device_ops idpf_netdev_ops_singleq = { +static const struct net_device_ops idpf_netdev_ops = { .ndo_open = idpf_open, .ndo_stop = idpf_stop, - .ndo_start_xmit = idpf_tx_singleq_start, + .ndo_start_xmit = idpf_tx_start, .ndo_features_check = idpf_features_check, .ndo_set_rx_mode = idpf_set_rx_mode, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c index f784eea044bd..db476b3314c8 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_main.c +++ b/drivers/net/ethernet/intel/idpf/idpf_main.c @@ -8,6 +8,7 @@ #define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver" MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_IMPORT_NS(LIBETH); MODULE_LICENSE("GPL"); /** diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c index 27b93592c4ba..fe64febf7436 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2023 Intel Corporation */ +#include <net/libeth/rx.h> + #include "idpf.h" /** @@ -186,7 +188,7 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb, * and gets a physical address for each 
memory location and programs * it and the length into the transmit base mode descriptor. */ -static void idpf_tx_singleq_map(struct idpf_queue *tx_q, +static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q, struct idpf_tx_buf *first, struct idpf_tx_offload_params *offloads) { @@ -205,12 +207,12 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q, data_len = skb->data_len; size = skb_headlen(skb); - tx_desc = IDPF_BASE_TX_DESC(tx_q, i); + tx_desc = &tx_q->base_tx[i]; dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE); /* write each descriptor with CRC bit */ - if (tx_q->vport->crc_enable) + if (idpf_queue_has(CRC_EN, tx_q)) td_cmd |= IDPF_TX_DESC_CMD_ICRC; for (frag = &skb_shinfo(skb)->frags[0];; frag++) { @@ -239,7 +241,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q, i++; if (i == tx_q->desc_count) { - tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); + tx_desc = &tx_q->base_tx[0]; i = 0; } @@ -259,7 +261,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q, i++; if (i == tx_q->desc_count) { - tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); + tx_desc = &tx_q->base_tx[0]; i = 0; } @@ -285,7 +287,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q, /* set next_to_watch value indicating a packet is present */ first->next_to_watch = tx_desc; - nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); netdev_tx_sent_queue(nq, first->bytecount); idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more()); @@ -299,7 +301,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q, * ring entry to reflect that this index is a context descriptor */ static struct idpf_base_tx_ctx_desc * -idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq) +idpf_tx_singleq_get_ctx_desc(struct idpf_tx_queue *txq) { struct idpf_base_tx_ctx_desc *ctx_desc; int ntu = txq->next_to_use; @@ -307,7 +309,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq) memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf)); txq->tx_buf[ntu].ctx_entry = true; - ctx_desc = IDPF_BASE_TX_CTX_DESC(txq, ntu); + ctx_desc = &txq->base_ctx[ntu]; IDPF_SINGLEQ_BUMP_RING_IDX(txq, ntu); txq->next_to_use = ntu; @@ -320,7 +322,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq) * @txq: queue to send buffer on * @offload: offload parameter structure **/ -static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq, +static void idpf_tx_singleq_build_ctx_desc(struct idpf_tx_queue *txq, struct idpf_tx_offload_params *offload) { struct idpf_base_tx_ctx_desc *desc = idpf_tx_singleq_get_ctx_desc(txq); @@ -333,7 +335,7 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq, qw1 |= FIELD_PREP(IDPF_TXD_CTX_QW1_MSS_M, offload->mss); u64_stats_update_begin(&txq->stats_sync); - u64_stats_inc(&txq->q_stats.tx.lso_pkts); + u64_stats_inc(&txq->q_stats.lso_pkts); u64_stats_update_end(&txq->stats_sync); } @@ -351,8 +353,8 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq, * * Returns NETDEV_TX_OK if sent, else an error code */ -static netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, - struct idpf_queue *tx_q) +netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, + struct idpf_tx_queue *tx_q) { struct idpf_tx_offload_params offload = { }; struct idpf_tx_buf *first; @@ -409,53 +411,25 @@ out_drop: } /** - * idpf_tx_singleq_start - Selects the right Tx queue to send buffer - * @skb: send buffer - * @netdev: network interface device structure - * - * Returns NETDEV_TX_OK if sent, else an error code - */ -netdev_tx_t 
idpf_tx_singleq_start(struct sk_buff *skb, - struct net_device *netdev) -{ - struct idpf_vport *vport = idpf_netdev_to_vport(netdev); - struct idpf_queue *tx_q; - - tx_q = vport->txqs[skb_get_queue_mapping(skb)]; - - /* hardware can't handle really short frames, hardware padding works - * beyond this point - */ - if (skb_put_padto(skb, IDPF_TX_MIN_PKT_LEN)) { - idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); - - return NETDEV_TX_OK; - } - - return idpf_tx_singleq_frame(skb, tx_q); -} - -/** * idpf_tx_singleq_clean - Reclaim resources from queue * @tx_q: Tx queue to clean * @napi_budget: Used to determine if we are in netpoll * @cleaned: returns number of packets cleaned * */ -static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget, +static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget, int *cleaned) { - unsigned int budget = tx_q->vport->compln_clean_budget; unsigned int total_bytes = 0, total_pkts = 0; struct idpf_base_tx_desc *tx_desc; + u32 budget = tx_q->clean_budget; s16 ntc = tx_q->next_to_clean; struct idpf_netdev_priv *np; struct idpf_tx_buf *tx_buf; - struct idpf_vport *vport; struct netdev_queue *nq; bool dont_wake; - tx_desc = IDPF_BASE_TX_DESC(tx_q, ntc); + tx_desc = &tx_q->base_tx[ntc]; tx_buf = &tx_q->tx_buf[ntc]; ntc -= tx_q->desc_count; @@ -517,7 +491,7 @@ static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget, if (unlikely(!ntc)) { ntc -= tx_q->desc_count; tx_buf = tx_q->tx_buf; - tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); + tx_desc = &tx_q->base_tx[0]; } /* unmap any remaining paged data */ @@ -540,7 +514,7 @@ fetch_next_txq_desc: if (unlikely(!ntc)) { ntc -= tx_q->desc_count; tx_buf = tx_q->tx_buf; - tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); + tx_desc = &tx_q->base_tx[0]; } } while (likely(budget)); @@ -550,16 +524,15 @@ fetch_next_txq_desc: *cleaned += total_pkts; u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_add(&tx_q->q_stats.tx.packets, total_pkts); - u64_stats_add(&tx_q->q_stats.tx.bytes, total_bytes); + u64_stats_add(&tx_q->q_stats.packets, total_pkts); + u64_stats_add(&tx_q->q_stats.bytes, total_bytes); u64_stats_update_end(&tx_q->stats_sync); - vport = tx_q->vport; - np = netdev_priv(vport->netdev); - nq = netdev_get_tx_queue(vport->netdev, tx_q->idx); + np = netdev_priv(tx_q->netdev); + nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); dont_wake = np->state != __IDPF_VPORT_UP || - !netif_carrier_ok(vport->netdev); + !netif_carrier_ok(tx_q->netdev); __netif_txq_completed_wake(nq, total_pkts, total_bytes, IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH, dont_wake); @@ -584,7 +557,7 @@ static bool idpf_tx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget, budget_per_q = num_txq ? 
max(budget / num_txq, 1) : 0; for (i = 0; i < num_txq; i++) { - struct idpf_queue *q; + struct idpf_tx_queue *q; q = q_vec->tx[i]; clean_complete &= idpf_tx_singleq_clean(q, budget_per_q, @@ -614,14 +587,9 @@ static bool idpf_rx_singleq_test_staterr(const union virtchnl2_rx_desc *rx_desc, /** * idpf_rx_singleq_is_non_eop - process handling of non-EOP buffers - * @rxq: Rx ring being processed * @rx_desc: Rx descriptor for current buffer - * @skb: Current socket buffer containing buffer in progress - * @ntc: next to clean */ -static bool idpf_rx_singleq_is_non_eop(struct idpf_queue *rxq, - union virtchnl2_rx_desc *rx_desc, - struct sk_buff *skb, u16 ntc) +static bool idpf_rx_singleq_is_non_eop(const union virtchnl2_rx_desc *rx_desc) { /* if we are the last buffer then there is nothing else to do */ if (likely(idpf_rx_singleq_test_staterr(rx_desc, IDPF_RXD_EOF_SINGLEQ))) @@ -635,98 +603,82 @@ static bool idpf_rx_singleq_is_non_eop(struct idpf_queue *rxq, * @rxq: Rx ring being processed * @skb: skb currently being received and modified * @csum_bits: checksum bits from descriptor - * @ptype: the packet type decoded by hardware + * @decoded: the packet type decoded by hardware * * skb->protocol must be set before this function is called */ -static void idpf_rx_singleq_csum(struct idpf_queue *rxq, struct sk_buff *skb, - struct idpf_rx_csum_decoded *csum_bits, - u16 ptype) +static void idpf_rx_singleq_csum(struct idpf_rx_queue *rxq, + struct sk_buff *skb, + struct idpf_rx_csum_decoded csum_bits, + struct libeth_rx_pt decoded) { - struct idpf_rx_ptype_decoded decoded; bool ipv4, ipv6; /* check if Rx checksum is enabled */ - if (unlikely(!(rxq->vport->netdev->features & NETIF_F_RXCSUM))) + if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded)) return; /* check if HW has decoded the packet and checksum */ - if (unlikely(!(csum_bits->l3l4p))) - return; - - decoded = rxq->vport->rx_ptype_lkup[ptype]; - if (unlikely(!(decoded.known && decoded.outer_ip))) + if (unlikely(!csum_bits.l3l4p)) return; - ipv4 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV4); - ipv6 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV6); + ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4; + ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6; /* Check if there were any checksum errors */ - if (unlikely(ipv4 && (csum_bits->ipe || csum_bits->eipe))) + if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe))) goto checksum_fail; /* Device could not do any checksum offload for certain extension * headers as indicated by setting IPV6EXADD bit */ - if (unlikely(ipv6 && csum_bits->ipv6exadd)) + if (unlikely(ipv6 && csum_bits.ipv6exadd)) return; /* check for L4 errors and handle packets that were not able to be * checksummed due to arrival speed */ - if (unlikely(csum_bits->l4e)) + if (unlikely(csum_bits.l4e)) goto checksum_fail; - if (unlikely(csum_bits->nat && csum_bits->eudpe)) + if (unlikely(csum_bits.nat && csum_bits.eudpe)) goto checksum_fail; /* Handle packets that were not able to be checksummed due to arrival * speed, in this case the stack can compute the csum. */ - if (unlikely(csum_bits->pprs)) + if (unlikely(csum_bits.pprs)) return; /* If there is an outer header present that might contain a checksum * we need to bump the checksum level by 1 to reflect the fact that * we are indicating we validated the inner checksum. 
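 * For example (illustrative, not from this patch): a UDP-tunnelled TCP
 * packet whose inner and outer checksums both verified would leave
 * here with
 *
 *	skb->ip_summed  = CHECKSUM_UNNECESSARY;
 *	skb->csum_level = 1;
 *
 * meaning one checksum beyond the outermost was validated, while a
 * plain, untunnelled TCP packet keeps csum_level at 0.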
*/ - if (decoded.tunnel_type >= IDPF_RX_PTYPE_TUNNEL_IP_GRENAT) + if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT) skb->csum_level = 1; - /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */ - switch (decoded.inner_prot) { - case IDPF_RX_PTYPE_INNER_PROT_ICMP: - case IDPF_RX_PTYPE_INNER_PROT_TCP: - case IDPF_RX_PTYPE_INNER_PROT_UDP: - case IDPF_RX_PTYPE_INNER_PROT_SCTP: - skb->ip_summed = CHECKSUM_UNNECESSARY; - return; - default: - return; - } + skb->ip_summed = CHECKSUM_UNNECESSARY; + return; checksum_fail: u64_stats_update_begin(&rxq->stats_sync); - u64_stats_inc(&rxq->q_stats.rx.hw_csum_err); + u64_stats_inc(&rxq->q_stats.hw_csum_err); u64_stats_update_end(&rxq->stats_sync); } /** * idpf_rx_singleq_base_csum - Indicate in skb if hw indicated a good cksum - * @rx_q: Rx completion queue - * @skb: skb currently being received and modified * @rx_desc: the receive descriptor - * @ptype: Rx packet type * * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte * descriptor writeback format. + * + * Return: parsed checksum status. **/ -static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q, - struct sk_buff *skb, - union virtchnl2_rx_desc *rx_desc, - u16 ptype) +static struct idpf_rx_csum_decoded +idpf_rx_singleq_base_csum(const union virtchnl2_rx_desc *rx_desc) { - struct idpf_rx_csum_decoded csum_bits; + struct idpf_rx_csum_decoded csum_bits = { }; u32 rx_error, rx_status; u64 qword; @@ -745,28 +697,23 @@ static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q, rx_status); csum_bits.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_M, rx_status); - csum_bits.nat = 0; - csum_bits.eudpe = 0; - idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype); + return csum_bits; } /** * idpf_rx_singleq_flex_csum - Indicate in skb if hw indicated a good cksum - * @rx_q: Rx completion queue - * @skb: skb currently being received and modified * @rx_desc: the receive descriptor - * @ptype: Rx packet type * * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible * descriptor writeback format. + * + * Return: parsed checksum status. **/ -static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q, - struct sk_buff *skb, - union virtchnl2_rx_desc *rx_desc, - u16 ptype) +static struct idpf_rx_csum_decoded +idpf_rx_singleq_flex_csum(const union virtchnl2_rx_desc *rx_desc) { - struct idpf_rx_csum_decoded csum_bits; + struct idpf_rx_csum_decoded csum_bits = { }; u16 rx_status0, rx_status1; rx_status0 = le16_to_cpu(rx_desc->flex_nic_wb.status_error0); @@ -786,9 +733,8 @@ static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q, rx_status0); csum_bits.nat = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_M, rx_status1); - csum_bits.pprs = 0; - idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype); + return csum_bits; } /** @@ -801,14 +747,14 @@ static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q, * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte * descriptor writeback format. 
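 *
 * With libeth the old NETIF_F_RXHASH feature test collapses into a
 * single gate that also consults the parsed packet type; the shape of
 * the new code below is simply
 *
 *	if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded))
 *		return;
 *	...
 *	libeth_rx_pt_set_hash(skb, hash, decoded);
 *
 * where "hash" is the 32-bit RSS value read out of the descriptor.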
**/ -static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q, +static void idpf_rx_singleq_base_hash(struct idpf_rx_queue *rx_q, struct sk_buff *skb, - union virtchnl2_rx_desc *rx_desc, - struct idpf_rx_ptype_decoded *decoded) + const union virtchnl2_rx_desc *rx_desc, + struct libeth_rx_pt decoded) { u64 mask, qw1; - if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH))) + if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded)) return; mask = VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH_M; @@ -817,7 +763,7 @@ static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q, if (FIELD_GET(mask, qw1) == mask) { u32 hash = le32_to_cpu(rx_desc->base_wb.qword0.hi_dword.rss); - skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded)); + libeth_rx_pt_set_hash(skb, hash, decoded); } } @@ -831,18 +777,20 @@ static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q, * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible * descriptor writeback format. **/ -static void idpf_rx_singleq_flex_hash(struct idpf_queue *rx_q, +static void idpf_rx_singleq_flex_hash(struct idpf_rx_queue *rx_q, struct sk_buff *skb, - union virtchnl2_rx_desc *rx_desc, - struct idpf_rx_ptype_decoded *decoded) + const union virtchnl2_rx_desc *rx_desc, + struct libeth_rx_pt decoded) { - if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH))) + if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded)) return; if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_M, - le16_to_cpu(rx_desc->flex_nic_wb.status_error0))) - skb_set_hash(skb, le32_to_cpu(rx_desc->flex_nic_wb.rss_hash), - idpf_ptype_to_htype(decoded)); + le16_to_cpu(rx_desc->flex_nic_wb.status_error0))) { + u32 hash = le32_to_cpu(rx_desc->flex_nic_wb.rss_hash); + + libeth_rx_pt_set_hash(skb, hash, decoded); + } } /** @@ -857,25 +805,45 @@ static void idpf_rx_singleq_flex_hash(struct idpf_queue *rx_q, * order to populate the hash, checksum, VLAN, protocol, and * other fields within the skb. 
*/ -static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q, - struct sk_buff *skb, - union virtchnl2_rx_desc *rx_desc, - u16 ptype) +static void +idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q, + struct sk_buff *skb, + const union virtchnl2_rx_desc *rx_desc, + u16 ptype) { - struct idpf_rx_ptype_decoded decoded = - rx_q->vport->rx_ptype_lkup[ptype]; + struct libeth_rx_pt decoded = rx_q->rx_ptype_lkup[ptype]; + struct idpf_rx_csum_decoded csum_bits; /* modifies the skb - consumes the enet header */ - skb->protocol = eth_type_trans(skb, rx_q->vport->netdev); + skb->protocol = eth_type_trans(skb, rx_q->netdev); /* Check if we're using base mode descriptor IDs */ if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) { - idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, &decoded); - idpf_rx_singleq_base_csum(rx_q, skb, rx_desc, ptype); + idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, decoded); + csum_bits = idpf_rx_singleq_base_csum(rx_desc); } else { - idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, &decoded); - idpf_rx_singleq_flex_csum(rx_q, skb, rx_desc, ptype); + idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, decoded); + csum_bits = idpf_rx_singleq_flex_csum(rx_desc); } + + idpf_rx_singleq_csum(rx_q, skb, csum_bits, decoded); + skb_record_rx_queue(skb, rx_q->idx); +} + +/** + * idpf_rx_buf_hw_update - Store the new tail and head values + * @rxq: queue to bump + * @val: new head index + */ +static void idpf_rx_buf_hw_update(struct idpf_rx_queue *rxq, u32 val) +{ + rxq->next_to_use = val; + + if (unlikely(!rxq->tail)) + return; + + /* writel has an implicit memory barrier */ + writel(val, rxq->tail); } /** @@ -885,24 +853,28 @@ static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q, * * Returns false if all allocations were successful, true if any fail */ -bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q, +bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q, u16 cleaned_count) { struct virtchnl2_singleq_rx_buf_desc *desc; + const struct libeth_fq_fp fq = { + .pp = rx_q->pp, + .fqes = rx_q->rx_buf, + .truesize = rx_q->truesize, + .count = rx_q->desc_count, + }; u16 nta = rx_q->next_to_alloc; - struct idpf_rx_buf *buf; if (!cleaned_count) return false; - desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, nta); - buf = &rx_q->rx_buf.buf[nta]; + desc = &rx_q->single_buf[nta]; do { dma_addr_t addr; - addr = idpf_alloc_page(rx_q->pp, buf, rx_q->rx_buf_size); - if (unlikely(addr == DMA_MAPPING_ERROR)) + addr = libeth_rx_alloc(&fq, nta); + if (addr == DMA_MAPPING_ERROR) break; /* Refresh the desc even if buffer_addrs didn't change @@ -912,11 +884,9 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q, desc->hdr_addr = 0; desc++; - buf++; nta++; if (unlikely(nta == rx_q->desc_count)) { - desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, 0); - buf = rx_q->rx_buf.buf; + desc = &rx_q->single_buf[0]; nta = 0; } @@ -933,7 +903,6 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q, /** * idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor - * @rx_q: Rx descriptor queue * @rx_desc: the descriptor to process * @fields: storage for extracted values * @@ -943,9 +912,9 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q, * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte * descriptor writeback format. 
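 *
 * The extraction uses the same GENMASK()/FIELD_GET() idiom as the rest
 * of this file; a sketch with made-up bit positions (the EXAMPLE_*
 * masks are illustrative, the real ones live in the virtchnl2
 * descriptor headers):
 *
 *	qword = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len);
 *	fields->size     = FIELD_GET(EXAMPLE_QW1_LEN_M, qword);
 *	fields->rx_ptype = FIELD_GET(EXAMPLE_QW1_PTYPE_M, qword);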
*/ -static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q, - union virtchnl2_rx_desc *rx_desc, - struct idpf_rx_extracted *fields) +static void +idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc, + struct idpf_rx_extracted *fields) { u64 qword; @@ -957,7 +926,6 @@ static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q, /** * idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor - * @rx_q: Rx descriptor queue * @rx_desc: the descriptor to process * @fields: storage for extracted values * @@ -967,9 +935,9 @@ static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q, * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible * descriptor writeback format. */ -static void idpf_rx_singleq_extract_flex_fields(struct idpf_queue *rx_q, - union virtchnl2_rx_desc *rx_desc, - struct idpf_rx_extracted *fields) +static void +idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc, + struct idpf_rx_extracted *fields) { fields->size = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M, le16_to_cpu(rx_desc->flex_nic_wb.pkt_len)); @@ -984,14 +952,15 @@ static void idpf_rx_singleq_extract_flex_fields(struct idpf_queue *rx_q, * @fields: storage for extracted values * */ -static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q, - union virtchnl2_rx_desc *rx_desc, - struct idpf_rx_extracted *fields) +static void +idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q, + const union virtchnl2_rx_desc *rx_desc, + struct idpf_rx_extracted *fields) { if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) - idpf_rx_singleq_extract_base_fields(rx_q, rx_desc, fields); + idpf_rx_singleq_extract_base_fields(rx_desc, fields); else - idpf_rx_singleq_extract_flex_fields(rx_q, rx_desc, fields); + idpf_rx_singleq_extract_flex_fields(rx_desc, fields); } /** @@ -1001,7 +970,7 @@ static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q, * * Returns true if there's any budget left (e.g. 
the clean is finished) */ -static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget) +static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget) { unsigned int total_rx_bytes = 0, total_rx_pkts = 0; struct sk_buff *skb = rx_q->skb; @@ -1016,7 +985,7 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget) struct idpf_rx_buf *rx_buf; /* get the Rx desc from Rx queue based on 'next_to_clean' */ - rx_desc = IDPF_RX_DESC(rx_q, ntc); + rx_desc = &rx_q->rx[ntc]; /* status_error_ptype_len will always be zero for unused * descriptors because it's cleared in cleanup, and overlaps @@ -1036,29 +1005,27 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget) idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields); - rx_buf = &rx_q->rx_buf.buf[ntc]; - if (!fields.size) { - idpf_rx_put_page(rx_buf); + rx_buf = &rx_q->rx_buf[ntc]; + if (!libeth_rx_sync_for_cpu(rx_buf, fields.size)) goto skip_data; - } - idpf_rx_sync_for_cpu(rx_buf, fields.size); if (skb) idpf_rx_add_frag(rx_buf, skb, fields.size); else - skb = idpf_rx_construct_skb(rx_q, rx_buf, fields.size); + skb = idpf_rx_build_skb(rx_buf, fields.size); /* exit if we failed to retrieve a buffer */ if (!skb) break; skip_data: - IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc); + rx_buf->page = NULL; + IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc); cleaned_count++; /* skip if it is non EOP desc */ - if (idpf_rx_singleq_is_non_eop(rx_q, rx_desc, skb, ntc)) + if (idpf_rx_singleq_is_non_eop(rx_desc) || unlikely(!skb)) continue; #define IDPF_RXD_ERR_S FIELD_PREP(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, \ @@ -1084,7 +1051,7 @@ skip_data: rx_desc, fields.rx_ptype); /* send completed skb up the stack */ - napi_gro_receive(&rx_q->q_vector->napi, skb); + napi_gro_receive(rx_q->pp->p.napi, skb); skb = NULL; /* update budget accounting */ @@ -1095,12 +1062,13 @@ skip_data: rx_q->next_to_clean = ntc; + page_pool_nid_changed(rx_q->pp, numa_mem_id()); if (cleaned_count) failure = idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count); u64_stats_update_begin(&rx_q->stats_sync); - u64_stats_add(&rx_q->q_stats.rx.packets, total_rx_pkts); - u64_stats_add(&rx_q->q_stats.rx.bytes, total_rx_bytes); + u64_stats_add(&rx_q->q_stats.packets, total_rx_pkts); + u64_stats_add(&rx_q->q_stats.bytes, total_rx_bytes); u64_stats_update_end(&rx_q->stats_sync); /* guarantee a trip back through this routine if there was a failure */ @@ -1127,7 +1095,7 @@ static bool idpf_rx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget, */ budget_per_q = num_rxq ? 
max(budget / num_rxq, 1) : 0; for (i = 0; i < num_rxq; i++) { - struct idpf_queue *rxq = q_vec->rx[i]; + struct idpf_rx_queue *rxq = q_vec->rx[i]; int pkts_cleaned_per_q; pkts_cleaned_per_q = idpf_rx_singleq_clean(rxq, budget_per_q); diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index b023704bbbda..af2879f03b8d 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -1,9 +1,14 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2023 Intel Corporation */ +#include <net/libeth/rx.h> + #include "idpf.h" #include "idpf_virtchnl.h" +static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, + unsigned int count); + /** * idpf_buf_lifo_push - push a buffer pointer onto stack * @stack: pointer to stack struct @@ -60,7 +65,8 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue) * @tx_q: the queue that owns the buffer * @tx_buf: the buffer to free */ -static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf) +static void idpf_tx_buf_rel(struct idpf_tx_queue *tx_q, + struct idpf_tx_buf *tx_buf) { if (tx_buf->skb) { if (dma_unmap_len(tx_buf, len)) @@ -86,8 +92,9 @@ static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf) * idpf_tx_buf_rel_all - Free any empty Tx buffers * @txq: queue to be cleaned */ -static void idpf_tx_buf_rel_all(struct idpf_queue *txq) +static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq) { + struct idpf_buf_lifo *buf_stack; u16 i; /* Buffers already cleared, nothing to do */ @@ -101,39 +108,58 @@ static void idpf_tx_buf_rel_all(struct idpf_queue *txq) kfree(txq->tx_buf); txq->tx_buf = NULL; - if (!txq->buf_stack.bufs) + if (!idpf_queue_has(FLOW_SCH_EN, txq)) return; - for (i = 0; i < txq->buf_stack.size; i++) - kfree(txq->buf_stack.bufs[i]); + buf_stack = &txq->stash->buf_stack; + if (!buf_stack->bufs) + return; - kfree(txq->buf_stack.bufs); - txq->buf_stack.bufs = NULL; + for (i = 0; i < buf_stack->size; i++) + kfree(buf_stack->bufs[i]); + + kfree(buf_stack->bufs); + buf_stack->bufs = NULL; } /** * idpf_tx_desc_rel - Free Tx resources per queue * @txq: Tx descriptor ring for a specific queue - * @bufq: buffer q or completion q * * Free all transmit software resources */ -static void idpf_tx_desc_rel(struct idpf_queue *txq, bool bufq) +static void idpf_tx_desc_rel(struct idpf_tx_queue *txq) { - if (bufq) - idpf_tx_buf_rel_all(txq); + idpf_tx_buf_rel_all(txq); if (!txq->desc_ring) return; dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma); txq->desc_ring = NULL; - txq->next_to_alloc = 0; txq->next_to_use = 0; txq->next_to_clean = 0; } /** + * idpf_compl_desc_rel - Free completion resources per queue + * @complq: completion queue + * + * Free all completion software resources. 
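An aside on the stash split above: the out-of-order buffer stack now lives in a separate idpf_txq_stash and is only allocated when FLOW_SCH_EN is set, so queues without flow scheduling no longer carry it. A minimal sketch of such a bounded LIFO, with hypothetical names rather than the driver's exact fields:

    struct buf_lifo {
        void **bufs;    /* preallocated pointer array */
        u16 top;        /* number of valid entries */
        u16 size;       /* capacity */
    };

    /* Push fails once the stack is full. */
    static bool buf_lifo_push(struct buf_lifo *stack, void *buf)
    {
        if (stack->top == stack->size)
            return false;

        stack->bufs[stack->top++] = buf;

        return true;
    }

    /* Pop returns NULL once the stack is empty. */
    static void *buf_lifo_pop(struct buf_lifo *stack)
    {
        return stack->top ? stack->bufs[--stack->top] : NULL;
    }

The driver prefills the stack to capacity at allocation time (top == size == desc_count), so a pop failure means every stash entry is already in flight.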
+ */ +static void idpf_compl_desc_rel(struct idpf_compl_queue *complq) +{ + if (!complq->comp) + return; + + dma_free_coherent(complq->netdev->dev.parent, complq->size, + complq->comp, complq->dma); + complq->comp = NULL; + complq->next_to_use = 0; + complq->next_to_clean = 0; +} + +/** * idpf_tx_desc_rel_all - Free Tx Resources for All Queues * @vport: virtual port structure * @@ -150,10 +176,10 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport) struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; for (j = 0; j < txq_grp->num_txq; j++) - idpf_tx_desc_rel(txq_grp->txqs[j], true); + idpf_tx_desc_rel(txq_grp->txqs[j]); if (idpf_is_queue_model_split(vport->txq_model)) - idpf_tx_desc_rel(txq_grp->complq, false); + idpf_compl_desc_rel(txq_grp->complq); } } @@ -163,8 +189,9 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport) * * Returns 0 on success, negative on failure */ -static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q) +static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q) { + struct idpf_buf_lifo *buf_stack; int buf_size; int i; @@ -180,22 +207,26 @@ static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q) for (i = 0; i < tx_q->desc_count; i++) tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; + if (!idpf_queue_has(FLOW_SCH_EN, tx_q)) + return 0; + + buf_stack = &tx_q->stash->buf_stack; + /* Initialize tx buf stack for out-of-order completions if * flow scheduling offload is enabled */ - tx_q->buf_stack.bufs = - kcalloc(tx_q->desc_count, sizeof(struct idpf_tx_stash *), - GFP_KERNEL); - if (!tx_q->buf_stack.bufs) + buf_stack->bufs = kcalloc(tx_q->desc_count, sizeof(*buf_stack->bufs), + GFP_KERNEL); + if (!buf_stack->bufs) return -ENOMEM; - tx_q->buf_stack.size = tx_q->desc_count; - tx_q->buf_stack.top = tx_q->desc_count; + buf_stack->size = tx_q->desc_count; + buf_stack->top = tx_q->desc_count; for (i = 0; i < tx_q->desc_count; i++) { - tx_q->buf_stack.bufs[i] = kzalloc(sizeof(*tx_q->buf_stack.bufs[i]), - GFP_KERNEL); - if (!tx_q->buf_stack.bufs[i]) + buf_stack->bufs[i] = kzalloc(sizeof(*buf_stack->bufs[i]), + GFP_KERNEL); + if (!buf_stack->bufs[i]) return -ENOMEM; } @@ -204,28 +235,22 @@ static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q) /** * idpf_tx_desc_alloc - Allocate the Tx descriptors + * @vport: vport to allocate resources for * @tx_q: the tx ring to set up - * @bufq: buffer or completion queue * * Returns 0 on success, negative on failure */ -static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq) +static int idpf_tx_desc_alloc(const struct idpf_vport *vport, + struct idpf_tx_queue *tx_q) { struct device *dev = tx_q->dev; - u32 desc_sz; int err; - if (bufq) { - err = idpf_tx_buf_alloc_all(tx_q); - if (err) - goto err_alloc; - - desc_sz = sizeof(struct idpf_base_tx_desc); - } else { - desc_sz = sizeof(struct idpf_splitq_tx_compl_desc); - } + err = idpf_tx_buf_alloc_all(tx_q); + if (err) + goto err_alloc; - tx_q->size = tx_q->desc_count * desc_sz; + tx_q->size = tx_q->desc_count * sizeof(*tx_q->base_tx); /* Allocate descriptors also round up to nearest 4K */ tx_q->size = ALIGN(tx_q->size, 4096); @@ -238,20 +263,44 @@ static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq) goto err_alloc; } - tx_q->next_to_alloc = 0; tx_q->next_to_use = 0; tx_q->next_to_clean = 0; - set_bit(__IDPF_Q_GEN_CHK, tx_q->flags); + idpf_queue_set(GEN_CHK, tx_q); return 0; err_alloc: - idpf_tx_desc_rel(tx_q, bufq); + idpf_tx_desc_rel(tx_q); return err; } /** + * idpf_compl_desc_alloc - allocate completion descriptors + * @vport: vport 
to allocate resources for + * @complq: completion queue to set up + * + * Return: 0 on success, -errno on failure. + */ +static int idpf_compl_desc_alloc(const struct idpf_vport *vport, + struct idpf_compl_queue *complq) +{ + complq->size = array_size(complq->desc_count, sizeof(*complq->comp)); + + complq->comp = dma_alloc_coherent(complq->netdev->dev.parent, + complq->size, &complq->dma, + GFP_KERNEL); + if (!complq->comp) + return -ENOMEM; + + complq->next_to_use = 0; + complq->next_to_clean = 0; + idpf_queue_set(GEN_CHK, complq); + + return 0; +} + +/** * idpf_tx_desc_alloc_all - allocate all queues Tx resources * @vport: virtual port private structure * @@ -259,7 +308,6 @@ err_alloc: */ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) { - struct device *dev = &vport->adapter->pdev->dev; int err = 0; int i, j; @@ -268,13 +316,14 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) */ for (i = 0; i < vport->num_txq_grp; i++) { for (j = 0; j < vport->txq_grps[i].num_txq; j++) { - struct idpf_queue *txq = vport->txq_grps[i].txqs[j]; + struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j]; u8 gen_bits = 0; u16 bufidx_mask; - err = idpf_tx_desc_alloc(txq, true); + err = idpf_tx_desc_alloc(vport, txq); if (err) { - dev_err(dev, "Allocation for Tx Queue %u failed\n", + pci_err(vport->adapter->pdev, + "Allocation for Tx Queue %u failed\n", i); goto err_out; } @@ -312,9 +361,10 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) continue; /* Setup completion queues */ - err = idpf_tx_desc_alloc(vport->txq_grps[i].complq, false); + err = idpf_compl_desc_alloc(vport, vport->txq_grps[i].complq); if (err) { - dev_err(dev, "Allocation for Tx Completion Queue %u failed\n", + pci_err(vport->adapter->pdev, + "Allocation for Tx Completion Queue %u failed\n", i); goto err_out; } @@ -329,70 +379,97 @@ err_out: /** * idpf_rx_page_rel - Release an rx buffer page - * @rxq: the queue that owns the buffer * @rx_buf: the buffer to free */ -static void idpf_rx_page_rel(struct idpf_queue *rxq, struct idpf_rx_buf *rx_buf) +static void idpf_rx_page_rel(struct libeth_fqe *rx_buf) { if (unlikely(!rx_buf->page)) return; - page_pool_put_full_page(rxq->pp, rx_buf->page, false); + page_pool_put_full_page(rx_buf->page->pp, rx_buf->page, false); rx_buf->page = NULL; - rx_buf->page_offset = 0; + rx_buf->offset = 0; } /** * idpf_rx_hdr_buf_rel_all - Release header buffer memory - * @rxq: queue to use + * @bufq: queue to use */ -static void idpf_rx_hdr_buf_rel_all(struct idpf_queue *rxq) +static void idpf_rx_hdr_buf_rel_all(struct idpf_buf_queue *bufq) { - struct idpf_adapter *adapter = rxq->vport->adapter; + struct libeth_fq fq = { + .fqes = bufq->hdr_buf, + .pp = bufq->hdr_pp, + }; - dma_free_coherent(&adapter->pdev->dev, - rxq->desc_count * IDPF_HDR_BUF_SIZE, - rxq->rx_buf.hdr_buf_va, - rxq->rx_buf.hdr_buf_pa); - rxq->rx_buf.hdr_buf_va = NULL; + for (u32 i = 0; i < bufq->desc_count; i++) + idpf_rx_page_rel(&bufq->hdr_buf[i]); + + libeth_rx_fq_destroy(&fq); + bufq->hdr_buf = NULL; + bufq->hdr_pp = NULL; } /** - * idpf_rx_buf_rel_all - Free all Rx buffer resources for a queue - * @rxq: queue to be cleaned + * idpf_rx_buf_rel_bufq - Free all Rx buffer resources for a buffer queue + * @bufq: queue to be cleaned */ -static void idpf_rx_buf_rel_all(struct idpf_queue *rxq) +static void idpf_rx_buf_rel_bufq(struct idpf_buf_queue *bufq) { - u16 i; + struct libeth_fq fq = { + .fqes = bufq->buf, + .pp = bufq->pp, + }; /* queue already cleared, nothing to do */ - if (!rxq->rx_buf.buf) + if (!bufq->buf) 
return; /* Free all the bufs allocated and given to hw on Rx queue */ - for (i = 0; i < rxq->desc_count; i++) - idpf_rx_page_rel(rxq, &rxq->rx_buf.buf[i]); + for (u32 i = 0; i < bufq->desc_count; i++) + idpf_rx_page_rel(&bufq->buf[i]); - if (rxq->rx_hsplit_en) - idpf_rx_hdr_buf_rel_all(rxq); + if (idpf_queue_has(HSPLIT_EN, bufq)) + idpf_rx_hdr_buf_rel_all(bufq); - page_pool_destroy(rxq->pp); - rxq->pp = NULL; + libeth_rx_fq_destroy(&fq); + bufq->buf = NULL; + bufq->pp = NULL; +} + +/** + * idpf_rx_buf_rel_all - Free all Rx buffer resources for a receive queue + * @rxq: queue to be cleaned + */ +static void idpf_rx_buf_rel_all(struct idpf_rx_queue *rxq) +{ + struct libeth_fq fq = { + .fqes = rxq->rx_buf, + .pp = rxq->pp, + }; + + if (!rxq->rx_buf) + return; - kfree(rxq->rx_buf.buf); - rxq->rx_buf.buf = NULL; + for (u32 i = 0; i < rxq->desc_count; i++) + idpf_rx_page_rel(&rxq->rx_buf[i]); + + libeth_rx_fq_destroy(&fq); + rxq->rx_buf = NULL; + rxq->pp = NULL; } /** * idpf_rx_desc_rel - Free a specific Rx q resources * @rxq: queue to clean the resources from - * @bufq: buffer q or completion q - * @q_model: single or split q model + * @dev: device to free DMA memory + * @model: single or split queue model * * Free a specific rx queue resources */ -static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model) +static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev, + u32 model) { if (!rxq) return; @@ -402,7 +479,7 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model) rxq->skb = NULL; } - if (bufq || !idpf_is_queue_model_split(q_model)) + if (!idpf_is_queue_model_split(model)) idpf_rx_buf_rel_all(rxq); rxq->next_to_alloc = 0; @@ -411,11 +488,35 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model) if (!rxq->desc_ring) return; - dmam_free_coherent(rxq->dev, rxq->size, rxq->desc_ring, rxq->dma); + dmam_free_coherent(dev, rxq->size, rxq->desc_ring, rxq->dma); rxq->desc_ring = NULL; } /** + * idpf_rx_desc_rel_bufq - free buffer queue resources + * @bufq: buffer queue to clean the resources from + * @dev: device to free DMA memory + */ +static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq, + struct device *dev) +{ + if (!bufq) + return; + + idpf_rx_buf_rel_bufq(bufq); + + bufq->next_to_alloc = 0; + bufq->next_to_clean = 0; + bufq->next_to_use = 0; + + if (!bufq->split_buf) + return; + + dma_free_coherent(dev, bufq->size, bufq->split_buf, bufq->dma); + bufq->split_buf = NULL; +} + +/** * idpf_rx_desc_rel_all - Free Rx Resources for All Queues * @vport: virtual port structure * @@ -423,6 +524,7 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model) */ static void idpf_rx_desc_rel_all(struct idpf_vport *vport) { + struct device *dev = &vport->adapter->pdev->dev; struct idpf_rxq_group *rx_qgrp; u16 num_rxq; int i, j; @@ -435,15 +537,15 @@ static void idpf_rx_desc_rel_all(struct idpf_vport *vport) if (!idpf_is_queue_model_split(vport->rxq_model)) { for (j = 0; j < rx_qgrp->singleq.num_rxq; j++) - idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], - false, vport->rxq_model); + idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev, + VIRTCHNL2_QUEUE_MODEL_SINGLE); continue; } num_rxq = rx_qgrp->splitq.num_rxq_sets; for (j = 0; j < num_rxq; j++) idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq, - false, vport->rxq_model); + dev, VIRTCHNL2_QUEUE_MODEL_SPLIT); if (!rx_qgrp->splitq.bufq_sets) continue; @@ -452,45 +554,50 @@ static void idpf_rx_desc_rel_all(struct idpf_vport *vport) struct idpf_bufq_set 
*bufq_set = &rx_qgrp->splitq.bufq_sets[j]; - idpf_rx_desc_rel(&bufq_set->bufq, true, - vport->rxq_model); + idpf_rx_desc_rel_bufq(&bufq_set->bufq, dev); } } } /** * idpf_rx_buf_hw_update - Store the new tail and head values - * @rxq: queue to bump + * @bufq: queue to bump * @val: new head index */ -void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val) +static void idpf_rx_buf_hw_update(struct idpf_buf_queue *bufq, u32 val) { - rxq->next_to_use = val; + bufq->next_to_use = val; - if (unlikely(!rxq->tail)) + if (unlikely(!bufq->tail)) return; /* writel has an implicit memory barrier */ - writel(val, rxq->tail); + writel(val, bufq->tail); } /** * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers - * @rxq: ring to use + * @bufq: ring to use * * Returns 0 on success, negative on failure. */ -static int idpf_rx_hdr_buf_alloc_all(struct idpf_queue *rxq) +static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq) { - struct idpf_adapter *adapter = rxq->vport->adapter; - - rxq->rx_buf.hdr_buf_va = - dma_alloc_coherent(&adapter->pdev->dev, - IDPF_HDR_BUF_SIZE * rxq->desc_count, - &rxq->rx_buf.hdr_buf_pa, - GFP_KERNEL); - if (!rxq->rx_buf.hdr_buf_va) - return -ENOMEM; + struct libeth_fq fq = { + .count = bufq->desc_count, + .type = LIBETH_FQE_HDR, + .nid = idpf_q_vector_to_mem(bufq->q_vector), + }; + int ret; + + ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi); + if (ret) + return ret; + + bufq->hdr_pp = fq.pp; + bufq->hdr_buf = fq.fqes; + bufq->hdr_truesize = fq.truesize; + bufq->rx_hbuf_size = fq.buf_len; return 0; } @@ -502,19 +609,20 @@ static int idpf_rx_hdr_buf_alloc_all(struct idpf_queue *rxq) */ static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id) { - u16 nta = refillq->next_to_alloc; + u32 nta = refillq->next_to_use; /* store the buffer ID and the SW maintained GEN bit to the refillq */ refillq->ring[nta] = FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) | FIELD_PREP(IDPF_RX_BI_GEN_M, - test_bit(__IDPF_Q_GEN_CHK, refillq->flags)); + idpf_queue_has(GEN_CHK, refillq)); if (unlikely(++nta == refillq->desc_count)) { nta = 0; - change_bit(__IDPF_Q_GEN_CHK, refillq->flags); + idpf_queue_change(GEN_CHK, refillq); } - refillq->next_to_alloc = nta; + + refillq->next_to_use = nta; } /** @@ -524,24 +632,35 @@ static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id) * * Returns false if buffer could not be allocated, true otherwise. 
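A note on the software GEN bit driving the refill ring above: rather than exchanging head/tail indices, the producer tags each entry with the current generation and flips it on every wrap, so the consumer can tell freshly written entries from stale ones. A hedged sketch of the scheme on a generic u16 ring (simplified masks, not the IDPF_RX_BI_* layout):

    #define RING_GEN    BIT(15)
    #define RING_BUFID  GENMASK(14, 0)

    /* Producer: encode the buffer ID plus the current generation. */
    static void ring_post(u16 *ring, u32 count, u32 *ntu, bool *gen, u16 id)
    {
        ring[*ntu] = FIELD_PREP(RING_BUFID, id) | FIELD_PREP(RING_GEN, *gen);

        if (unlikely(++(*ntu) == count)) {
            *ntu = 0;
            *gen = !*gen;   /* wrapped: flip the generation */
        }
    }

    /* Consumer: a slot is valid only while its GEN matches ours. */
    static bool ring_entry_valid(const u16 *ring, u32 ntc, bool gen)
    {
        return FIELD_GET(RING_GEN, ring[ntc]) == gen;
    }

The consumer flips its own copy of the bit when it wraps, which is exactly what the idpf_queue_change(GEN_CHK, ...) calls in this patch do.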
*/ -static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id) +static bool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id) { struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL; + struct libeth_fq_fp fq = { + .count = bufq->desc_count, + }; u16 nta = bufq->next_to_alloc; - struct idpf_rx_buf *buf; dma_addr_t addr; - splitq_rx_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, nta); - buf = &bufq->rx_buf.buf[buf_id]; + splitq_rx_desc = &bufq->split_buf[nta]; - if (bufq->rx_hsplit_en) { - splitq_rx_desc->hdr_addr = - cpu_to_le64(bufq->rx_buf.hdr_buf_pa + - (u32)buf_id * IDPF_HDR_BUF_SIZE); + if (idpf_queue_has(HSPLIT_EN, bufq)) { + fq.pp = bufq->hdr_pp; + fq.fqes = bufq->hdr_buf; + fq.truesize = bufq->hdr_truesize; + + addr = libeth_rx_alloc(&fq, buf_id); + if (addr == DMA_MAPPING_ERROR) + return false; + + splitq_rx_desc->hdr_addr = cpu_to_le64(addr); } - addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size); - if (unlikely(addr == DMA_MAPPING_ERROR)) + fq.pp = bufq->pp; + fq.fqes = bufq->buf; + fq.truesize = bufq->truesize; + + addr = libeth_rx_alloc(&fq, buf_id); + if (addr == DMA_MAPPING_ERROR) return false; splitq_rx_desc->pkt_addr = cpu_to_le64(addr); @@ -562,7 +681,8 @@ static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id) * * Returns true if @working_set bufs were posted successfully, false otherwise. */ -static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set) +static bool idpf_rx_post_init_bufs(struct idpf_buf_queue *bufq, + u16 working_set) { int i; @@ -571,95 +691,114 @@ static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set) return false; } - idpf_rx_buf_hw_update(bufq, - bufq->next_to_alloc & ~(bufq->rx_buf_stride - 1)); + idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq->next_to_alloc, + IDPF_RX_BUF_STRIDE)); return true; } /** - * idpf_rx_create_page_pool - Create a page pool - * @rxbufq: RX queue to create page pool for + * idpf_rx_buf_alloc_singleq - Allocate memory for all buffer resources + * @rxq: queue for which the buffers are allocated + * + * Return: 0 on success, -ENOMEM on failure. + */ +static int idpf_rx_buf_alloc_singleq(struct idpf_rx_queue *rxq) +{ + if (idpf_rx_singleq_buf_hw_alloc_all(rxq, rxq->desc_count - 1)) + goto err; + + return 0; + +err: + idpf_rx_buf_rel_all(rxq); + + return -ENOMEM; +} + +/** + * idpf_rx_bufs_init_singleq - Initialize page pool and allocate Rx bufs + * @rxq: buffer queue to create page pool for * - * Returns &page_pool on success, casted -errno on failure + * Return: 0 on success, -errno on failure. 
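Worth spelling out from idpf_rx_post_init_bufs() above: the buffer queue tail is only bumped in whole multiples of the stride, which is why ALIGN_DOWN() replaces the old open-coded mask. Assuming IDPF_RX_BUF_STRIDE is 32 (its value in the idpf headers), a worked example:

    /* next_to_alloc == 493: tail is written as ALIGN_DOWN(493, 32) == 480,
     * so the remaining 13 filled descriptors stay invisible to HW until a
     * later post crosses the next 32-descriptor boundary.
     */
    idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq->next_to_alloc,
                                           IDPF_RX_BUF_STRIDE));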
*/ -static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq) +static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq) { - struct page_pool_params pp = { - .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, - .order = 0, - .pool_size = rxbufq->desc_count, - .nid = NUMA_NO_NODE, - .dev = rxbufq->vport->netdev->dev.parent, - .max_len = PAGE_SIZE, - .dma_dir = DMA_FROM_DEVICE, - .offset = 0, + struct libeth_fq fq = { + .count = rxq->desc_count, + .type = LIBETH_FQE_MTU, + .nid = idpf_q_vector_to_mem(rxq->q_vector), }; + int ret; + + ret = libeth_rx_fq_create(&fq, &rxq->q_vector->napi); + if (ret) + return ret; + + rxq->pp = fq.pp; + rxq->rx_buf = fq.fqes; + rxq->truesize = fq.truesize; + rxq->rx_buf_size = fq.buf_len; - return page_pool_create(&pp); + return idpf_rx_buf_alloc_singleq(rxq); } /** * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources - * @rxbufq: queue for which the buffers are allocated; equivalent to - * rxq when operating in singleq mode + * @rxbufq: queue for which the buffers are allocated * * Returns 0 on success, negative on failure */ -static int idpf_rx_buf_alloc_all(struct idpf_queue *rxbufq) +static int idpf_rx_buf_alloc_all(struct idpf_buf_queue *rxbufq) { int err = 0; - /* Allocate book keeping buffers */ - rxbufq->rx_buf.buf = kcalloc(rxbufq->desc_count, - sizeof(struct idpf_rx_buf), GFP_KERNEL); - if (!rxbufq->rx_buf.buf) { - err = -ENOMEM; - goto rx_buf_alloc_all_out; - } - - if (rxbufq->rx_hsplit_en) { + if (idpf_queue_has(HSPLIT_EN, rxbufq)) { err = idpf_rx_hdr_buf_alloc_all(rxbufq); if (err) goto rx_buf_alloc_all_out; } /* Allocate buffers to be given to HW. */ - if (idpf_is_queue_model_split(rxbufq->vport->rxq_model)) { - int working_set = IDPF_RX_BUFQ_WORKING_SET(rxbufq); - - if (!idpf_rx_post_init_bufs(rxbufq, working_set)) - err = -ENOMEM; - } else { - if (idpf_rx_singleq_buf_hw_alloc_all(rxbufq, - rxbufq->desc_count - 1)) - err = -ENOMEM; - } + if (!idpf_rx_post_init_bufs(rxbufq, IDPF_RX_BUFQ_WORKING_SET(rxbufq))) + err = -ENOMEM; rx_buf_alloc_all_out: if (err) - idpf_rx_buf_rel_all(rxbufq); + idpf_rx_buf_rel_bufq(rxbufq); return err; } /** * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW - * @rxbufq: RX queue to create page pool for + * @bufq: buffer queue to create page pool for + * @type: type of Rx buffers to allocate * * Returns 0 on success, negative on failure */ -static int idpf_rx_bufs_init(struct idpf_queue *rxbufq) +static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq, + enum libeth_fqe_type type) { - struct page_pool *pool; + struct libeth_fq fq = { + .truesize = bufq->truesize, + .count = bufq->desc_count, + .type = type, + .hsplit = idpf_queue_has(HSPLIT_EN, bufq), + .nid = idpf_q_vector_to_mem(bufq->q_vector), + }; + int ret; - pool = idpf_rx_create_page_pool(rxbufq); - if (IS_ERR(pool)) - return PTR_ERR(pool); + ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi); + if (ret) + return ret; - rxbufq->pp = pool; + bufq->pp = fq.pp; + bufq->buf = fq.fqes; + bufq->truesize = fq.truesize; + bufq->rx_buf_size = fq.buf_len; - return idpf_rx_buf_alloc_all(rxbufq); + return idpf_rx_buf_alloc_all(bufq); } /** @@ -670,20 +809,22 @@ static int idpf_rx_bufs_init(struct idpf_queue *rxbufq) */ int idpf_rx_bufs_init_all(struct idpf_vport *vport) { - struct idpf_rxq_group *rx_qgrp; - struct idpf_queue *q; + bool split = idpf_is_queue_model_split(vport->rxq_model); int i, j, err; for (i = 0; i < vport->num_rxq_grp; i++) { - rx_qgrp = &vport->rxq_grps[i]; + struct idpf_rxq_group *rx_qgrp 
= &vport->rxq_grps[i]; + u32 truesize = 0; /* Allocate bufs for the rxq itself in singleq */ - if (!idpf_is_queue_model_split(vport->rxq_model)) { + if (!split) { int num_rxq = rx_qgrp->singleq.num_rxq; for (j = 0; j < num_rxq; j++) { + struct idpf_rx_queue *q; + q = rx_qgrp->singleq.rxqs[j]; - err = idpf_rx_bufs_init(q); + err = idpf_rx_bufs_init_singleq(q); if (err) return err; } @@ -693,10 +834,19 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport) /* Otherwise, allocate bufs for the buffer queues */ for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + enum libeth_fqe_type type; + struct idpf_buf_queue *q; + q = &rx_qgrp->splitq.bufq_sets[j].bufq; - err = idpf_rx_bufs_init(q); + q->truesize = truesize; + + type = truesize ? LIBETH_FQE_SHORT : LIBETH_FQE_MTU; + + err = idpf_rx_bufs_init(q, type); if (err) return err; + + truesize = q->truesize >> 1; } } @@ -705,22 +855,17 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport) /** * idpf_rx_desc_alloc - Allocate queue Rx resources + * @vport: vport to allocate resources for * @rxq: Rx queue for which the resources are setup - * @bufq: buffer or completion queue - * @q_model: single or split queue model * * Returns 0 on success, negative on failure */ -static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model) +static int idpf_rx_desc_alloc(const struct idpf_vport *vport, + struct idpf_rx_queue *rxq) { - struct device *dev = rxq->dev; + struct device *dev = &vport->adapter->pdev->dev; - if (bufq) - rxq->size = rxq->desc_count * - sizeof(struct virtchnl2_splitq_rx_buf_desc); - else - rxq->size = rxq->desc_count * - sizeof(union virtchnl2_rx_desc); + rxq->size = rxq->desc_count * sizeof(union virtchnl2_rx_desc); /* Allocate descriptors and also round up to nearest 4K */ rxq->size = ALIGN(rxq->size, 4096); @@ -735,7 +880,35 @@ static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model) rxq->next_to_alloc = 0; rxq->next_to_clean = 0; rxq->next_to_use = 0; - set_bit(__IDPF_Q_GEN_CHK, rxq->flags); + idpf_queue_set(GEN_CHK, rxq); + + return 0; +} + +/** + * idpf_bufq_desc_alloc - Allocate buffer queue descriptor ring + * @vport: vport to allocate resources for + * @bufq: buffer queue for which the resources are set up + * + * Return: 0 on success, -ENOMEM on failure. 
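Stepping back to the bufs-init loop above: the first buffer queue is created with full-size LIBETH_FQE_MTU buffers, while each subsequent queue becomes a LIBETH_FQE_SHORT queue seeded with half the previous queue's truesize, giving HW a large and a small pool to choose from per packet. The shape of the loop, with illustrative sizes (a 4K first-queue truesize is an assumption, not an API guarantee):

    u32 truesize = 0;

    for (u32 j = 0; j < num_bufqs; j++) {
        /* j == 0: truesize == 0 -> LIBETH_FQE_MTU   (e.g. ~4K buffers)
         * j == 1: truesize != 0 -> LIBETH_FQE_SHORT (seeded at ~2K)
         */
        enum libeth_fqe_type type = truesize ? LIBETH_FQE_SHORT
                                             : LIBETH_FQE_MTU;

        bufq[j].truesize = truesize;
        if (bufq_init(&bufq[j], type))  /* hypothetical helper */
            return -ENOMEM;

        truesize = bufq[j].truesize >> 1;
    }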
+ */ +static int idpf_bufq_desc_alloc(const struct idpf_vport *vport, + struct idpf_buf_queue *bufq) +{ + struct device *dev = &vport->adapter->pdev->dev; + + bufq->size = array_size(bufq->desc_count, sizeof(*bufq->split_buf)); + + bufq->split_buf = dma_alloc_coherent(dev, bufq->size, &bufq->dma, + GFP_KERNEL); + if (!bufq->split_buf) + return -ENOMEM; + + bufq->next_to_alloc = 0; + bufq->next_to_clean = 0; + bufq->next_to_use = 0; + + idpf_queue_set(GEN_CHK, bufq); return 0; } @@ -748,9 +921,7 @@ static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model) */ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport) { - struct device *dev = &vport->adapter->pdev->dev; struct idpf_rxq_group *rx_qgrp; - struct idpf_queue *q; int i, j, err; u16 num_rxq; @@ -762,13 +933,17 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport) num_rxq = rx_qgrp->singleq.num_rxq; for (j = 0; j < num_rxq; j++) { + struct idpf_rx_queue *q; + if (idpf_is_queue_model_split(vport->rxq_model)) q = &rx_qgrp->splitq.rxq_sets[j]->rxq; else q = rx_qgrp->singleq.rxqs[j]; - err = idpf_rx_desc_alloc(q, false, vport->rxq_model); + + err = idpf_rx_desc_alloc(vport, q); if (err) { - dev_err(dev, "Memory allocation for Rx Queue %u failed\n", + pci_err(vport->adapter->pdev, + "Memory allocation for Rx Queue %u failed\n", i); goto err_out; } @@ -778,10 +953,14 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport) continue; for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + struct idpf_buf_queue *q; + q = &rx_qgrp->splitq.bufq_sets[j].bufq; - err = idpf_rx_desc_alloc(q, true, vport->rxq_model); + + err = idpf_bufq_desc_alloc(vport, q); if (err) { - dev_err(dev, "Memory allocation for Rx Buffer Queue %u failed\n", + pci_err(vport->adapter->pdev, + "Memory allocation for Rx Buffer Queue %u failed\n", i); goto err_out; } @@ -802,11 +981,16 @@ err_out: */ static void idpf_txq_group_rel(struct idpf_vport *vport) { + bool split, flow_sch_en; int i, j; if (!vport->txq_grps) return; + split = idpf_is_queue_model_split(vport->txq_model); + flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, + VIRTCHNL2_CAP_SPLITQ_QSCHED); + for (i = 0; i < vport->num_txq_grp; i++) { struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; @@ -814,8 +998,15 @@ static void idpf_txq_group_rel(struct idpf_vport *vport) kfree(txq_grp->txqs[j]); txq_grp->txqs[j] = NULL; } + + if (!split) + continue; + kfree(txq_grp->complq); txq_grp->complq = NULL; + + if (flow_sch_en) + kfree(txq_grp->stashes); } kfree(vport->txq_grps); vport->txq_grps = NULL; @@ -919,7 +1110,7 @@ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport) { int i, j, k = 0; - vport->txqs = kcalloc(vport->num_txq, sizeof(struct idpf_queue *), + vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs), GFP_KERNEL); if (!vport->txqs) @@ -967,17 +1158,11 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport, /* Adjust number of buffer queues per Rx queue group. 
*/ if (!idpf_is_queue_model_split(vport->rxq_model)) { vport->num_bufqs_per_qgrp = 0; - vport->bufq_size[0] = IDPF_RX_BUF_2048; return; } vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP; - /* Bufq[0] default buffer size is 4K - * Bufq[1] default buffer size is 2K - */ - vport->bufq_size[0] = IDPF_RX_BUF_4096; - vport->bufq_size[1] = IDPF_RX_BUF_2048; } /** @@ -1137,9 +1322,10 @@ static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport, * @q: rx queue for which descids are set * */ -static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q) +static void idpf_rxq_set_descids(const struct idpf_vport *vport, + struct idpf_rx_queue *q) { - if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { + if (idpf_is_queue_model_split(vport->rxq_model)) { q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M; } else { if (vport->base_rxd) @@ -1158,20 +1344,22 @@ static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q) */ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) { - bool flow_sch_en; - int err, i; + bool split, flow_sch_en; + int i; vport->txq_grps = kcalloc(vport->num_txq_grp, sizeof(*vport->txq_grps), GFP_KERNEL); if (!vport->txq_grps) return -ENOMEM; + split = idpf_is_queue_model_split(vport->txq_model); flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_SPLITQ_QSCHED); for (i = 0; i < vport->num_txq_grp; i++) { struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; struct idpf_adapter *adapter = vport->adapter; + struct idpf_txq_stash *stashes; int j; tx_qgrp->vport = vport; @@ -1180,45 +1368,62 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) for (j = 0; j < tx_qgrp->num_txq; j++) { tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]), GFP_KERNEL); - if (!tx_qgrp->txqs[j]) { - err = -ENOMEM; + if (!tx_qgrp->txqs[j]) goto err_alloc; - } + } + + if (split && flow_sch_en) { + stashes = kcalloc(num_txq, sizeof(*stashes), + GFP_KERNEL); + if (!stashes) + goto err_alloc; + + tx_qgrp->stashes = stashes; } for (j = 0; j < tx_qgrp->num_txq; j++) { - struct idpf_queue *q = tx_qgrp->txqs[j]; + struct idpf_tx_queue *q = tx_qgrp->txqs[j]; q->dev = &adapter->pdev->dev; q->desc_count = vport->txq_desc_count; q->tx_max_bufs = idpf_get_max_tx_bufs(adapter); q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter); - q->vport = vport; + q->netdev = vport->netdev; q->txq_grp = tx_qgrp; - hash_init(q->sched_buf_hash); - if (flow_sch_en) - set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags); + if (!split) { + q->clean_budget = vport->compln_clean_budget; + idpf_queue_assign(CRC_EN, q, + vport->crc_enable); + } + + if (!flow_sch_en) + continue; + + if (split) { + q->stash = &stashes[j]; + hash_init(q->stash->sched_buf_hash); + } + + idpf_queue_set(FLOW_SCH_EN, q); } - if (!idpf_is_queue_model_split(vport->txq_model)) + if (!split) continue; tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP, sizeof(*tx_qgrp->complq), GFP_KERNEL); - if (!tx_qgrp->complq) { - err = -ENOMEM; + if (!tx_qgrp->complq) goto err_alloc; - } - tx_qgrp->complq->dev = &adapter->pdev->dev; tx_qgrp->complq->desc_count = vport->complq_desc_count; - tx_qgrp->complq->vport = vport; tx_qgrp->complq->txq_grp = tx_qgrp; + tx_qgrp->complq->netdev = vport->netdev; + tx_qgrp->complq->clean_budget = vport->compln_clean_budget; if (flow_sch_en) - __set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags); + idpf_queue_set(FLOW_SCH_EN, tx_qgrp->complq); } return 0; @@ -1226,7 +1431,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 
num_txq) err_alloc: idpf_txq_group_rel(vport); - return err; + return -ENOMEM; } /** @@ -1238,8 +1443,6 @@ err_alloc: */ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) { - struct idpf_adapter *adapter = vport->adapter; - struct idpf_queue *q; int i, k, err = 0; bool hs; @@ -1292,21 +1495,13 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[j]; int swq_size = sizeof(struct idpf_sw_queue); + struct idpf_buf_queue *q; q = &rx_qgrp->splitq.bufq_sets[j].bufq; - q->dev = &adapter->pdev->dev; q->desc_count = vport->bufq_desc_count[j]; - q->vport = vport; - q->rxq_grp = rx_qgrp; - q->idx = j; - q->rx_buf_size = vport->bufq_size[j]; q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; - q->rx_buf_stride = IDPF_RX_BUF_STRIDE; - if (hs) { - q->rx_hsplit_en = true; - q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; - } + idpf_queue_assign(HSPLIT_EN, q, hs); bufq_set->num_refillqs = num_rxq; bufq_set->refillqs = kcalloc(num_rxq, swq_size, @@ -1319,13 +1514,12 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) struct idpf_sw_queue *refillq = &bufq_set->refillqs[k]; - refillq->dev = &vport->adapter->pdev->dev; refillq->desc_count = vport->bufq_desc_count[j]; - set_bit(__IDPF_Q_GEN_CHK, refillq->flags); - set_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags); + idpf_queue_set(GEN_CHK, refillq); + idpf_queue_set(RFL_GEN_CHK, refillq); refillq->ring = kcalloc(refillq->desc_count, - sizeof(u16), + sizeof(*refillq->ring), GFP_KERNEL); if (!refillq->ring) { err = -ENOMEM; @@ -1336,36 +1530,30 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) skip_splitq_rx_init: for (j = 0; j < num_rxq; j++) { + struct idpf_rx_queue *q; + if (!idpf_is_queue_model_split(vport->rxq_model)) { q = rx_qgrp->singleq.rxqs[j]; goto setup_rxq; } q = &rx_qgrp->splitq.rxq_sets[j]->rxq; - rx_qgrp->splitq.rxq_sets[j]->refillq0 = + rx_qgrp->splitq.rxq_sets[j]->refillq[0] = &rx_qgrp->splitq.bufq_sets[0].refillqs[j]; if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) - rx_qgrp->splitq.rxq_sets[j]->refillq1 = + rx_qgrp->splitq.rxq_sets[j]->refillq[1] = &rx_qgrp->splitq.bufq_sets[1].refillqs[j]; - if (hs) { - q->rx_hsplit_en = true; - q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; - } + idpf_queue_assign(HSPLIT_EN, q, hs); setup_rxq: - q->dev = &adapter->pdev->dev; q->desc_count = vport->rxq_desc_count; - q->vport = vport; - q->rxq_grp = rx_qgrp; + q->rx_ptype_lkup = vport->rx_ptype_lkup; + q->netdev = vport->netdev; + q->bufq_sets = rx_qgrp->splitq.bufq_sets; q->idx = (i * num_rxq) + j; - /* In splitq mode, RXQ buffer size should be - * set to that of the first buffer queue - * associated with this RXQ - */ - q->rx_buf_size = vport->bufq_size[0]; q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; q->rx_max_pkt_size = vport->netdev->mtu + - IDPF_PACKET_HDR_PAD; + LIBETH_RX_LL_LEN; idpf_rxq_set_descids(vport, q); } } @@ -1445,12 +1633,13 @@ err_out: * idpf_tx_handle_sw_marker - Handle queue marker packet * @tx_q: tx queue to handle software marker */ -static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q) +static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q) { - struct idpf_vport *vport = tx_q->vport; + struct idpf_netdev_priv *priv = netdev_priv(tx_q->netdev); + struct idpf_vport *vport = priv->vport; int i; - clear_bit(__IDPF_Q_SW_MARKER, tx_q->flags); + idpf_queue_clear(SW_MARKER, tx_q); /* Hardware must write marker packets to all queues associated with * completion queues. 
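One conversion pattern worth calling out before the Tx cleaning hunks: every open-coded set_bit()/test_bit()/change_bit() on q->flags becomes idpf_queue_set()/idpf_queue_has()/idpf_queue_change()/idpf_queue_assign(). These are, most likely, thin wrappers that token-paste the __IDPF_Q_ prefix onto the flag name, roughly:

    #define idpf_queue_set(f, q)    __set_bit(__IDPF_Q_##f, (q)->flags)
    #define idpf_queue_clear(f, q)  __clear_bit(__IDPF_Q_##f, (q)->flags)
    #define idpf_queue_change(f, q) __change_bit(__IDPF_Q_##f, (q)->flags)
    #define idpf_queue_has(f, q)    test_bit(__IDPF_Q_##f, (q)->flags)
    #define idpf_queue_assign(f, q, v) \
        __assign_bit(__IDPF_Q_##f, (q)->flags, v)

so that, e.g., idpf_queue_set(GEN_CHK, rxq) expands to __set_bit(__IDPF_Q_GEN_CHK, rxq->flags), and the flag name can no longer be mistyped independently of the accessor.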
So check if all queues received marker packets */ @@ -1458,7 +1647,7 @@ static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q) /* If we're still waiting on any other TXQ marker completions, * just return now since we cannot wake up the marker_wq yet. */ - if (test_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags)) + if (idpf_queue_has(SW_MARKER, vport->txqs[i])) return; /* Drain complete */ @@ -1474,7 +1663,7 @@ static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q) * @cleaned: pointer to stats struct to track cleaned packets/bytes * @napi_budget: Used to determine if we are in netpoll */ -static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q, +static void idpf_tx_splitq_clean_hdr(struct idpf_tx_queue *tx_q, struct idpf_tx_buf *tx_buf, struct idpf_cleaned_stats *cleaned, int napi_budget) @@ -1505,7 +1694,8 @@ static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q, * @cleaned: pointer to stats struct to track cleaned packets/bytes * @budget: Used to determine if we are in netpoll */ -static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag, +static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq, + u16 compl_tag, struct idpf_cleaned_stats *cleaned, int budget) { @@ -1513,7 +1703,7 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag, struct hlist_node *tmp_buf; /* Buffer completion */ - hash_for_each_possible_safe(txq->sched_buf_hash, stash, tmp_buf, + hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf, hlist, compl_tag) { if (unlikely(stash->buf.compl_tag != (int)compl_tag)) continue; @@ -1530,7 +1720,7 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag, } /* Push shadow buf back onto stack */ - idpf_buf_lifo_push(&txq->buf_stack, stash); + idpf_buf_lifo_push(&txq->stash->buf_stack, stash); hash_del(&stash->hlist); } @@ -1542,7 +1732,7 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag, * @txq: Tx queue to clean * @tx_buf: buffer to store */ -static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq, +static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq, struct idpf_tx_buf *tx_buf) { struct idpf_tx_stash *stash; @@ -1551,10 +1741,10 @@ static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq, !dma_unmap_len(tx_buf, len))) return 0; - stash = idpf_buf_lifo_pop(&txq->buf_stack); + stash = idpf_buf_lifo_pop(&txq->stash->buf_stack); if (unlikely(!stash)) { net_err_ratelimited("%s: No out-of-order TX buffers left!\n", - txq->vport->netdev->name); + netdev_name(txq->netdev)); return -ENOMEM; } @@ -1568,7 +1758,8 @@ static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq, stash->buf.compl_tag = tx_buf->compl_tag; /* Add buffer to buf_hash table to be freed later */ - hash_add(txq->sched_buf_hash, &stash->hlist, stash->buf.compl_tag); + hash_add(txq->stash->sched_buf_hash, &stash->hlist, + stash->buf.compl_tag); memset(tx_buf, 0, sizeof(struct idpf_tx_buf)); @@ -1584,7 +1775,7 @@ do { \ if (unlikely(!(ntc))) { \ ntc -= (txq)->desc_count; \ buf = (txq)->tx_buf; \ - desc = IDPF_FLEX_TX_DESC(txq, 0); \ + desc = &(txq)->flex_tx[0]; \ } else { \ (buf)++; \ (desc)++; \ @@ -1607,7 +1798,7 @@ do { \ * and the buffers will be cleaned separately. The stats are not updated from * this function when using flow-based scheduling. 
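For readers new to the flow-scheduling path touched above: completions can arrive out of order, so each in-flight buffer is filed in a hashtable keyed by its completion tag, and the RS completion later frees every entry matching that tag. The pattern, reduced to a standalone sketch with hypothetical types:

    #include <linux/hashtable.h>

    struct tx_stash {
        struct hlist_node hlist;
        int compl_tag;
    };

    DEFINE_HASHTABLE(sched_buf_hash, 12);

    static void stash_buf(struct tx_stash *stash, u16 tag)
    {
        stash->compl_tag = tag;
        hash_add(sched_buf_hash, &stash->hlist, tag);
    }

    static void complete_tag(u16 tag)
    {
        struct tx_stash *stash;
        struct hlist_node *tmp;

        /* _safe variant: entries are deleted while iterating */
        hash_for_each_possible_safe(sched_buf_hash, stash, tmp, hlist, tag) {
            if (stash->compl_tag != (int)tag)
                continue;   /* same bucket, different tag */

            hash_del(&stash->hlist);
            /* unmap/free the stashed buffer here */
        }
    }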
*/ -static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end, +static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end, int napi_budget, struct idpf_cleaned_stats *cleaned, bool descs_only) @@ -1617,8 +1808,8 @@ static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end, s16 ntc = tx_q->next_to_clean; struct idpf_tx_buf *tx_buf; - tx_desc = IDPF_FLEX_TX_DESC(tx_q, ntc); - next_pending_desc = IDPF_FLEX_TX_DESC(tx_q, end); + tx_desc = &tx_q->flex_tx[ntc]; + next_pending_desc = &tx_q->flex_tx[end]; tx_buf = &tx_q->tx_buf[ntc]; ntc -= tx_q->desc_count; @@ -1703,7 +1894,7 @@ do { \ * stashed. Returns the byte/segment count for the cleaned packet associated * this completion tag. */ -static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag, +static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag, struct idpf_cleaned_stats *cleaned, int budget) { @@ -1772,14 +1963,14 @@ static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag, * * Returns bytes/packets cleaned */ -static void idpf_tx_handle_rs_completion(struct idpf_queue *txq, +static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq, struct idpf_splitq_tx_compl_desc *desc, struct idpf_cleaned_stats *cleaned, int budget) { u16 compl_tag; - if (!test_bit(__IDPF_Q_FLOW_SCH_EN, txq->flags)) { + if (!idpf_queue_has(FLOW_SCH_EN, txq)) { u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head); return idpf_tx_splitq_clean(txq, head, budget, cleaned, false); @@ -1802,24 +1993,23 @@ static void idpf_tx_handle_rs_completion(struct idpf_queue *txq, * * Returns true if there's any budget left (e.g. the clean is finished) */ -static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget, +static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget, int *cleaned) { struct idpf_splitq_tx_compl_desc *tx_desc; - struct idpf_vport *vport = complq->vport; s16 ntc = complq->next_to_clean; struct idpf_netdev_priv *np; unsigned int complq_budget; bool complq_ok = true; int i; - complq_budget = vport->compln_clean_budget; - tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, ntc); + complq_budget = complq->clean_budget; + tx_desc = &complq->comp[ntc]; ntc -= complq->desc_count; do { struct idpf_cleaned_stats cleaned_stats = { }; - struct idpf_queue *tx_q; + struct idpf_tx_queue *tx_q; int rel_tx_qid; u16 hw_head; u8 ctype; /* completion type */ @@ -1828,7 +2018,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget, /* if the descriptor isn't done, no work yet to do */ gen = le16_get_bits(tx_desc->qid_comptype_gen, IDPF_TXD_COMPLQ_GEN_M); - if (test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen) + if (idpf_queue_has(GEN_CHK, complq) != gen) break; /* Find necessary info of TX queue to clean buffers */ @@ -1836,8 +2026,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget, IDPF_TXD_COMPLQ_QID_M); if (rel_tx_qid >= complq->txq_grp->num_txq || !complq->txq_grp->txqs[rel_tx_qid]) { - dev_err(&complq->vport->adapter->pdev->dev, - "TxQ not found\n"); + netdev_err(complq->netdev, "TxQ not found\n"); goto fetch_next_desc; } tx_q = complq->txq_grp->txqs[rel_tx_qid]; @@ -1860,15 +2049,14 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget, idpf_tx_handle_sw_marker(tx_q); break; default: - dev_err(&tx_q->vport->adapter->pdev->dev, - "Unknown TX completion type: %d\n", - ctype); + netdev_err(tx_q->netdev, + "Unknown TX completion type: %d\n", ctype); goto fetch_next_desc; } 
u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_add(&tx_q->q_stats.tx.packets, cleaned_stats.packets); - u64_stats_add(&tx_q->q_stats.tx.bytes, cleaned_stats.bytes); + u64_stats_add(&tx_q->q_stats.packets, cleaned_stats.packets); + u64_stats_add(&tx_q->q_stats.bytes, cleaned_stats.bytes); tx_q->cleaned_pkts += cleaned_stats.packets; tx_q->cleaned_bytes += cleaned_stats.bytes; complq->num_completions++; @@ -1879,8 +2067,8 @@ fetch_next_desc: ntc++; if (unlikely(!ntc)) { ntc -= complq->desc_count; - tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, 0); - change_bit(__IDPF_Q_GEN_CHK, complq->flags); + tx_desc = &complq->comp[0]; + idpf_queue_change(GEN_CHK, complq); } prefetch(tx_desc); @@ -1896,9 +2084,9 @@ fetch_next_desc: IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq))) complq_ok = false; - np = netdev_priv(complq->vport->netdev); + np = netdev_priv(complq->netdev); for (i = 0; i < complq->txq_grp->num_txq; ++i) { - struct idpf_queue *tx_q = complq->txq_grp->txqs[i]; + struct idpf_tx_queue *tx_q = complq->txq_grp->txqs[i]; struct netdev_queue *nq; bool dont_wake; @@ -1909,11 +2097,11 @@ fetch_next_desc: *cleaned += tx_q->cleaned_pkts; /* Update BQL */ - nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) || np->state != __IDPF_VPORT_UP || - !netif_carrier_ok(tx_q->vport->netdev); + !netif_carrier_ok(tx_q->netdev); /* Check if the TXQ needs to and can be restarted */ __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes, IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH, @@ -1976,7 +2164,7 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc, * * Returns 0 if stop is not needed */ -int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size) +int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size) { struct netdev_queue *nq; @@ -1984,10 +2172,10 @@ int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size) return 0; u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_inc(&tx_q->q_stats.tx.q_busy); + u64_stats_inc(&tx_q->q_stats.q_busy); u64_stats_update_end(&tx_q->stats_sync); - nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size); } @@ -1999,7 +2187,7 @@ int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size) * * Returns 0 if stop is not needed */ -static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q, +static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q, unsigned int descs_needed) { if (idpf_tx_maybe_stop_common(tx_q, descs_needed)) @@ -2023,9 +2211,9 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q, splitq_stop: u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_inc(&tx_q->q_stats.tx.q_busy); + u64_stats_inc(&tx_q->q_stats.q_busy); u64_stats_update_end(&tx_q->stats_sync); - netif_stop_subqueue(tx_q->vport->netdev, tx_q->idx); + netif_stop_subqueue(tx_q->netdev, tx_q->idx); return -EBUSY; } @@ -2040,12 +2228,12 @@ splitq_stop: * to do a register write to update our queue status. We know this can only * mean tail here as HW should be owning head for TX. 
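The completion loop above is also where BQL gets its byte accounting back. The contract in miniature: bytes reported with netdev_tx_sent_queue() on the xmit path must later be completed against the same netdev_queue, and __netif_txq_completed_wake() couples that completion with the queue-restart check (dont_wake folds in the carrier/state tests seen above):

    struct netdev_queue *nq = netdev_get_tx_queue(netdev, txq_idx);

    /* xmit path, once per frame */
    netdev_tx_sent_queue(nq, first->bytecount);

    /* completion path, batched per complq poll */
    __netif_txq_completed_wake(nq, cleaned_pkts, cleaned_bytes,
                               descs_unused, wake_thresh, dont_wake);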
*/ -void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val, +void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val, bool xmit_more) { struct netdev_queue *nq; - nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); tx_q->next_to_use = val; idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED); @@ -2069,7 +2257,7 @@ void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val, * * Returns number of data descriptors needed for this skb. */ -unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq, +unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq, struct sk_buff *skb) { const struct skb_shared_info *shinfo; @@ -2102,7 +2290,7 @@ unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq, count = idpf_size_to_txd_count(skb->len); u64_stats_update_begin(&txq->stats_sync); - u64_stats_inc(&txq->q_stats.tx.linearize); + u64_stats_inc(&txq->q_stats.linearize); u64_stats_update_end(&txq->stats_sync); } @@ -2116,11 +2304,11 @@ unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq, * @first: original first buffer info buffer for packet * @idx: starting point on ring to unwind */ -void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb, +void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb, struct idpf_tx_buf *first, u16 idx) { u64_stats_update_begin(&txq->stats_sync); - u64_stats_inc(&txq->q_stats.tx.dma_map_errs); + u64_stats_inc(&txq->q_stats.dma_map_errs); u64_stats_update_end(&txq->stats_sync); /* clear dma mappings for failed tx_buf map */ @@ -2143,7 +2331,7 @@ void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb, * used one additional descriptor for a context * descriptor. Reset that here. */ - tx_desc = IDPF_FLEX_TX_DESC(txq, idx); + tx_desc = &txq->flex_tx[idx]; memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc)); if (idx == 0) idx = txq->desc_count; @@ -2159,7 +2347,7 @@ void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb, * @txq: the tx ring to wrap * @ntu: ring index to bump */ -static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu) +static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu) { ntu++; @@ -2181,7 +2369,7 @@ static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu) * and gets a physical address for each memory location and programs * it and the length into the transmit flex descriptor. 
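Note the xmit_more argument threaded through idpf_tx_buf_hw_update() above: tail (doorbell) writes are comparatively expensive uncached MMIO, so they are skipped while the stack promises more frames in the burst. The tail of that function falls outside this hunk, but it almost certainly ends in the usual idiom:

    /* Ring the doorbell only if the queue stalled or the burst ended. */
    if (netif_xmit_stopped(nq) || !xmit_more)
        writel(val, tx_q->tail);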
*/ -static void idpf_tx_splitq_map(struct idpf_queue *tx_q, +static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q, struct idpf_tx_splitq_params *params, struct idpf_tx_buf *first) { @@ -2202,7 +2390,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q, data_len = skb->data_len; size = skb_headlen(skb); - tx_desc = IDPF_FLEX_TX_DESC(tx_q, i); + tx_desc = &tx_q->flex_tx[i]; dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE); @@ -2275,7 +2463,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q, i++; if (i == tx_q->desc_count) { - tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0); + tx_desc = &tx_q->flex_tx[0]; i = 0; tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); @@ -2320,7 +2508,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q, i++; if (i == tx_q->desc_count) { - tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0); + tx_desc = &tx_q->flex_tx[0]; i = 0; tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); } @@ -2348,7 +2536,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q, tx_q->txq_grp->num_completions_pending++; /* record bytecount for BQL */ - nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); netdev_tx_sent_queue(nq, first->bytecount); idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more()); @@ -2525,8 +2713,8 @@ static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs) * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO * header, 1 for segment payload, and then 7 for the fragments. */ -bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, - unsigned int count) +static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, + unsigned int count) { if (likely(count < max_bufs)) return false; @@ -2544,7 +2732,7 @@ bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, * ring entry to reflect that this index is a context descriptor */ static struct idpf_flex_tx_ctx_desc * -idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq) +idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq) { struct idpf_flex_tx_ctx_desc *desc; int i = txq->next_to_use; @@ -2553,7 +2741,7 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq) txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; /* grab the next descriptor */ - desc = IDPF_FLEX_TX_CTX_DESC(txq, i); + desc = &txq->flex_ctx[i]; txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i); return desc; @@ -2564,10 +2752,10 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq) * @tx_q: queue to send buffer on * @skb: pointer to skb */ -netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb) +netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb) { u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_inc(&tx_q->q_stats.tx.skb_drops); + u64_stats_inc(&tx_q->q_stats.skb_drops); u64_stats_update_end(&tx_q->stats_sync); idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); @@ -2585,7 +2773,7 @@ netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb) * Returns NETDEV_TX_OK if sent, else an error code */ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, - struct idpf_queue *tx_q) + struct idpf_tx_queue *tx_q) { struct idpf_tx_splitq_params tx_params = { }; struct idpf_tx_buf *first; @@ -2625,7 +2813,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len; u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_inc(&tx_q->q_stats.tx.lso_pkts); + 
u64_stats_inc(&tx_q->q_stats.lso_pkts); u64_stats_update_end(&tx_q->stats_sync); } @@ -2642,7 +2830,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); } - if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_q->flags)) { + if (idpf_queue_has(FLOW_SCH_EN, tx_q)) { tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE; tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP; /* Set the RE bit to catch any packets that may have not been @@ -2672,17 +2860,16 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, } /** - * idpf_tx_splitq_start - Selects the right Tx queue to send buffer + * idpf_tx_start - Selects the right Tx queue to send buffer * @skb: send buffer * @netdev: network interface device structure * * Returns NETDEV_TX_OK if sent, else an error code */ -netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb, - struct net_device *netdev) +netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev) { struct idpf_vport *vport = idpf_netdev_to_vport(netdev); - struct idpf_queue *tx_q; + struct idpf_tx_queue *tx_q; if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) { dev_kfree_skb_any(skb); @@ -2701,31 +2888,10 @@ netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb, return NETDEV_TX_OK; } - return idpf_tx_splitq_frame(skb, tx_q); -} - -/** - * idpf_ptype_to_htype - get a hash type - * @decoded: Decoded Rx packet type related fields - * - * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by - * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of - * Rx desc. - */ -enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded) -{ - if (!decoded->known) - return PKT_HASH_TYPE_NONE; - if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 && - decoded->inner_prot) - return PKT_HASH_TYPE_L4; - if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 && - decoded->outer_ip) - return PKT_HASH_TYPE_L3; - if (decoded->outer_ip == IDPF_RX_PTYPE_OUTER_L2) - return PKT_HASH_TYPE_L2; - - return PKT_HASH_TYPE_NONE; + if (idpf_is_queue_model_split(vport->txq_model)) + return idpf_tx_splitq_frame(skb, tx_q); + else + return idpf_tx_singleq_frame(skb, tx_q); } /** @@ -2735,20 +2901,21 @@ enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *deco * @rx_desc: Receive descriptor * @decoded: Decoded Rx packet type related fields */ -static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb, - struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, - struct idpf_rx_ptype_decoded *decoded) +static void +idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb, + const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, + struct libeth_rx_pt decoded) { u32 hash; - if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXHASH))) + if (!libeth_rx_pt_has_hash(rxq->netdev, decoded)) return; hash = le16_to_cpu(rx_desc->hash1) | (rx_desc->ff2_mirrid_hash2.hash2 << 16) | (rx_desc->hash3 << 24); - skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded)); + libeth_rx_pt_set_hash(skb, hash, decoded); } /** @@ -2760,92 +2927,83 @@ static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb, * * skb->protocol must be set before this function is called */ -static void idpf_rx_csum(struct idpf_queue *rxq, struct sk_buff *skb, - struct idpf_rx_csum_decoded *csum_bits, - struct idpf_rx_ptype_decoded *decoded) +static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb, + struct idpf_rx_csum_decoded csum_bits, + 
struct libeth_rx_pt decoded) { bool ipv4, ipv6; /* check if Rx checksum is enabled */ - if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXCSUM))) + if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded)) return; /* check if HW has decoded the packet and checksum */ - if (!(csum_bits->l3l4p)) + if (unlikely(!csum_bits.l3l4p)) return; - ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4); - ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6); + ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4; + ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6; - if (ipv4 && (csum_bits->ipe || csum_bits->eipe)) + if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe))) goto checksum_fail; - if (ipv6 && csum_bits->ipv6exadd) + if (unlikely(ipv6 && csum_bits.ipv6exadd)) return; /* check for L4 errors and handle packets that were not able to be * checksummed */ - if (csum_bits->l4e) + if (unlikely(csum_bits.l4e)) goto checksum_fail; - /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */ - switch (decoded->inner_prot) { - case IDPF_RX_PTYPE_INNER_PROT_ICMP: - case IDPF_RX_PTYPE_INNER_PROT_TCP: - case IDPF_RX_PTYPE_INNER_PROT_UDP: - if (!csum_bits->raw_csum_inv) { - u16 csum = csum_bits->raw_csum; - - skb->csum = csum_unfold((__force __sum16)~swab16(csum)); - skb->ip_summed = CHECKSUM_COMPLETE; - } else { - skb->ip_summed = CHECKSUM_UNNECESSARY; - } - break; - case IDPF_RX_PTYPE_INNER_PROT_SCTP: + if (csum_bits.raw_csum_inv || + decoded.inner_prot == LIBETH_RX_PT_INNER_SCTP) { skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - default: - break; + return; } + skb->csum = csum_unfold((__force __sum16)~swab16(csum_bits.raw_csum)); + skb->ip_summed = CHECKSUM_COMPLETE; + return; checksum_fail: u64_stats_update_begin(&rxq->stats_sync); - u64_stats_inc(&rxq->q_stats.rx.hw_csum_err); + u64_stats_inc(&rxq->q_stats.hw_csum_err); u64_stats_update_end(&rxq->stats_sync); } /** * idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor * @rx_desc: receive descriptor - * @csum: structure to extract checksum fields * + * Return: parsed checksum status. 
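On the CHECKSUM_COMPLETE conversion above: the descriptor's raw checksum is a byte-swapped, inverted fold of the packet sum, so it has to be massaged into the __wsum the stack expects. Read inside out (a best-effort gloss, not wording from the patch):

    /* swab16():      undo the descriptor byte order;
     * ~:             undo the ones'-complement inversion;
     * csum_unfold(): widen the folded 16-bit sum back to a 32-bit __wsum.
     */
    skb->csum = csum_unfold((__force __sum16)~swab16(csum_bits.raw_csum));
    skb->ip_summed = CHECKSUM_COMPLETE;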
**/ -static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, - struct idpf_rx_csum_decoded *csum) +static struct idpf_rx_csum_decoded +idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc) { + struct idpf_rx_csum_decoded csum = { }; u8 qword0, qword1; qword0 = rx_desc->status_err0_qw0; qword1 = rx_desc->status_err0_qw1; - csum->ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M, + csum.ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M, + qword1); + csum.eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M, qword1); - csum->eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M, + csum.l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M, + qword1); + csum.l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M, qword1); - csum->l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M, - qword1); - csum->l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M, - qword1); - csum->ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M, - qword0); - csum->raw_csum_inv = + csum.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M, + qword0); + csum.raw_csum_inv = le16_get_bits(rx_desc->ptype_err_fflags0, VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M); - csum->raw_csum = le16_to_cpu(rx_desc->misc.raw_cs); + csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs); + + return csum; } /** @@ -2860,23 +3018,24 @@ static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_n * Populate the skb fields with the total number of RSC segments, RSC payload * length and packet type. */ -static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb, - struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, - struct idpf_rx_ptype_decoded *decoded) +static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb, + const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, + struct libeth_rx_pt decoded) { u16 rsc_segments, rsc_seg_len; bool ipv4, ipv6; int len; - if (unlikely(!decoded->outer_ip)) + if (unlikely(libeth_rx_pt_get_ip_ver(decoded) == + LIBETH_RX_PT_OUTER_L2)) return -EINVAL; rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen); if (unlikely(!rsc_seg_len)) return -EINVAL; - ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4); - ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6); + ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4; + ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6; if (unlikely(!(ipv4 ^ ipv6))) return -EINVAL; @@ -2914,7 +3073,7 @@ static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb, tcp_gro_complete(skb); u64_stats_update_begin(&rxq->stats_sync); - u64_stats_inc(&rxq->q_stats.rx.rsc_pkts); + u64_stats_inc(&rxq->q_stats.rsc_pkts); u64_stats_update_end(&rxq->stats_sync); return 0; @@ -2930,35 +3089,31 @@ static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb, * order to populate the hash, checksum, protocol, and * other fields within the skb. 
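A quick aside on the FIELD_GET() conversions above: given a contiguous mask, FIELD_GET() masks and shifts the field out in one go, so the descriptor parsing stays free of hand-written shift constants. A self-contained example with made-up masks:

    #include <linux/bitfield.h>

    #define STATUS_ERR      BIT(3)
    #define STATUS_PTYPE    GENMASK(9, 0)

    u16 qword = 0x2a08;

    bool err  = FIELD_GET(STATUS_ERR, qword);   /* bit 3     -> 1     */
    u16 ptype = FIELD_GET(STATUS_PTYPE, qword); /* bits 9..0 -> 0x208 */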
*/ -static int idpf_rx_process_skb_fields(struct idpf_queue *rxq, - struct sk_buff *skb, - struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc) +static int +idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb, + const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc) { - struct idpf_rx_csum_decoded csum_bits = { }; - struct idpf_rx_ptype_decoded decoded; + struct idpf_rx_csum_decoded csum_bits; + struct libeth_rx_pt decoded; u16 rx_ptype; rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0, VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M); - - skb->protocol = eth_type_trans(skb, rxq->vport->netdev); - - decoded = rxq->vport->rx_ptype_lkup[rx_ptype]; - /* If we don't know the ptype we can't do anything else with it. Just - * pass it up the stack as-is. - */ - if (!decoded.known) - return 0; + decoded = rxq->rx_ptype_lkup[rx_ptype]; /* process RSS/hash */ - idpf_rx_hash(rxq, skb, rx_desc, &decoded); + idpf_rx_hash(rxq, skb, rx_desc, decoded); + + skb->protocol = eth_type_trans(skb, rxq->netdev); if (le16_get_bits(rx_desc->hdrlen_flags, VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M)) - return idpf_rx_rsc(rxq, skb, rx_desc, &decoded); + return idpf_rx_rsc(rxq, skb, rx_desc, decoded); + + csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc); + idpf_rx_csum(rxq, skb, csum_bits, decoded); - idpf_rx_splitq_extract_csum_bits(rx_desc, &csum_bits); - idpf_rx_csum(rxq, skb, &csum_bits, &decoded); + skb_record_rx_queue(skb, rxq->idx); return 0; } @@ -2976,103 +3131,73 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq, void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb, unsigned int size) { - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, - rx_buf->page_offset, size, rx_buf->truesize); + u32 hr = rx_buf->page->pp->p.offset; - rx_buf->page = NULL; + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, + rx_buf->offset + hr, size, rx_buf->truesize); } /** - * idpf_rx_construct_skb - Allocate skb and populate it - * @rxq: Rx descriptor queue - * @rx_buf: Rx buffer to pull data from - * @size: the length of the packet + * idpf_rx_hsplit_wa - handle header buffer overflows and split errors + * @hdr: Rx buffer for the headers + * @buf: Rx buffer for the payload + * @data_len: number of bytes received to the payload buffer * - * This function allocates an skb. It then populates it with the page - * data from the current receive descriptor, taking care to set up the - * skb correctly. + * When a header buffer overflow occurs or the HW was unable do parse the + * packet type to perform header split, the whole frame gets placed to the + * payload buffer. We can't build a valid skb around a payload buffer when + * the header split is active since it doesn't reserve any head- or tailroom. + * In that case, copy either the whole frame when it's short or just the + * Ethernet header to the header buffer to be able to build an skb and adjust + * the data offset in the payload buffer, IOW emulate the header split. + * + * Return: number of bytes copied to the header buffer. 
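The copy-size decision the comment above describes is compact enough to sketch on its own; the 64- and 14-byte constants stand in for L1_CACHE_BYTES and ETH_HLEN and are illustrative, not taken from a particular build:

    #include <stdint.h>

    #define TOY_L1_CACHE_BYTES      64      /* assumed cacheline size */
    #define TOY_ETH_HLEN            14

    /* Short frames are copied into the header buffer whole; longer
     * frames contribute only the MAC header, just enough to build an
     * skb around a buffer that reserves head- and tailroom. */
    static uint32_t toy_hsplit_copy_len(uint32_t data_len)
    {
            return data_len <= TOY_L1_CACHE_BYTES ? data_len
                                                  : TOY_ETH_HLEN;
    }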
*/ -struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq, - struct idpf_rx_buf *rx_buf, - unsigned int size) +static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr, + struct libeth_fqe *buf, u32 data_len) { - unsigned int headlen; - struct sk_buff *skb; - void *va; - - va = page_address(rx_buf->page) + rx_buf->page_offset; - - /* prefetch first cache line of first page */ - net_prefetch(va); - /* allocate a skb to store the frags */ - skb = napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE); - if (unlikely(!skb)) { - idpf_rx_put_page(rx_buf); - - return NULL; - } - - skb_record_rx_queue(skb, rxq->idx); - skb_mark_for_recycle(skb); + u32 copy = data_len <= L1_CACHE_BYTES ? data_len : ETH_HLEN; + const void *src; + void *dst; - /* Determine available headroom for copy */ - headlen = size; - if (headlen > IDPF_RX_HDR_SIZE) - headlen = eth_get_headlen(skb->dev, va, IDPF_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); - - /* if we exhaust the linear part then add what is left as a frag */ - size -= headlen; - if (!size) { - idpf_rx_put_page(rx_buf); - - return skb; - } + if (!libeth_rx_sync_for_cpu(buf, copy)) + return 0; - skb_add_rx_frag(skb, 0, rx_buf->page, rx_buf->page_offset + headlen, - size, rx_buf->truesize); + dst = page_address(hdr->page) + hdr->offset + hdr->page->pp->p.offset; + src = page_address(buf->page) + buf->offset + buf->page->pp->p.offset; + memcpy(dst, src, LARGEST_ALIGN(copy)); - /* Since we're giving the page to the stack, clear our reference to it. - * We'll get a new one during buffer posting. - */ - rx_buf->page = NULL; + buf->offset += copy; - return skb; + return copy; } /** - * idpf_rx_hdr_construct_skb - Allocate skb and populate it from header buffer - * @rxq: Rx descriptor queue - * @va: Rx buffer to pull data from + * idpf_rx_build_skb - Allocate skb and populate it from header buffer + * @buf: Rx buffer to pull data from * @size: the length of the packet * * This function allocates an skb. It then populates it with the page data from * the current receive descriptor, taking care to set up the skb correctly. - * This specifically uses a header buffer to start building the skb. */ -static struct sk_buff *idpf_rx_hdr_construct_skb(struct idpf_queue *rxq, - const void *va, - unsigned int size) +struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size) { + u32 hr = buf->page->pp->p.offset; struct sk_buff *skb; + void *va; - /* allocate a skb to store the frags */ - skb = napi_alloc_skb(&rxq->q_vector->napi, size); + va = page_address(buf->page) + buf->offset; + prefetch(va + hr); + + skb = napi_build_skb(va, buf->truesize); if (unlikely(!skb)) return NULL; - skb_record_rx_queue(skb, rxq->idx); - - memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - - /* More than likely, a payload fragment, which will use a page from - * page_pool will be added to the SKB so mark it for recycle - * preemptively. And if not, it's inconsequential. 
- */ skb_mark_for_recycle(skb); + skb_reserve(skb, hr); + __skb_put(skb, size); + return skb; } @@ -3115,31 +3240,27 @@ static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_de * * Returns amount of work completed */ -static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) +static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget) { int total_rx_bytes = 0, total_rx_pkts = 0; - struct idpf_queue *rx_bufq = NULL; + struct idpf_buf_queue *rx_bufq = NULL; struct sk_buff *skb = rxq->skb; u16 ntc = rxq->next_to_clean; /* Process Rx packets bounded by budget */ while (likely(total_rx_pkts < budget)) { struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc; + struct libeth_fqe *hdr, *rx_buf = NULL; struct idpf_sw_queue *refillq = NULL; struct idpf_rxq_set *rxq_set = NULL; - struct idpf_rx_buf *rx_buf = NULL; - union virtchnl2_rx_desc *desc; unsigned int pkt_len = 0; unsigned int hdr_len = 0; u16 gen_id, buf_id = 0; - /* Header buffer overflow only valid for header split */ - bool hbo = false; int bufq_id; u8 rxdid; /* get the Rx desc from Rx queue based on 'next_to_clean' */ - desc = IDPF_RX_DESC(rxq, ntc); - rx_desc = (struct virtchnl2_rx_flex_desc_adv_nic_3 *)desc; + rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb; /* This memory barrier is needed to keep us from reading * any other fields out of the rx_desc @@ -3150,7 +3271,7 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id, VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M); - if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags) != gen_id) + if (idpf_queue_has(GEN_CHK, rxq) != gen_id) break; rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M, @@ -3158,7 +3279,7 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) { IDPF_RX_BUMP_NTC(rxq, ntc); u64_stats_update_begin(&rxq->stats_sync); - u64_stats_inc(&rxq->q_stats.rx.bad_descs); + u64_stats_inc(&rxq->q_stats.bad_descs); u64_stats_update_end(&rxq->stats_sync); continue; } @@ -3166,71 +3287,79 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id, VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M); - hbo = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M, - rx_desc->status_err0_qw1); - - if (unlikely(hbo)) { - /* If a header buffer overflow, occurs, i.e. header is - * too large to fit in the header split buffer, HW will - * put the entire packet, including headers, in the - * data/payload buffer. 
- */ - u64_stats_update_begin(&rxq->stats_sync); - u64_stats_inc(&rxq->q_stats.rx.hsplit_buf_ovf); - u64_stats_update_end(&rxq->stats_sync); - goto bypass_hsplit; - } - - hdr_len = le16_get_bits(rx_desc->hdrlen_flags, - VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M); - -bypass_hsplit: bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id, VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M); rxq_set = container_of(rxq, struct idpf_rxq_set, rxq); - if (!bufq_id) - refillq = rxq_set->refillq0; - else - refillq = rxq_set->refillq1; + refillq = rxq_set->refillq[bufq_id]; /* retrieve buffer from the rxq */ - rx_bufq = &rxq->rxq_grp->splitq.bufq_sets[bufq_id].bufq; + rx_bufq = &rxq->bufq_sets[bufq_id].bufq; buf_id = le16_to_cpu(rx_desc->buf_id); - rx_buf = &rx_bufq->rx_buf.buf[buf_id]; + rx_buf = &rx_bufq->buf[buf_id]; + + if (!rx_bufq->hdr_pp) + goto payload; + +#define __HBO_BIT VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M +#define __HDR_LEN_MASK VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M + if (likely(!(rx_desc->status_err0_qw1 & __HBO_BIT))) + /* If a header buffer overflow, occurs, i.e. header is + * too large to fit in the header split buffer, HW will + * put the entire packet, including headers, in the + * data/payload buffer. + */ + hdr_len = le16_get_bits(rx_desc->hdrlen_flags, + __HDR_LEN_MASK); +#undef __HDR_LEN_MASK +#undef __HBO_BIT + + hdr = &rx_bufq->hdr_buf[buf_id]; - if (hdr_len) { - const void *va = (u8 *)rx_bufq->rx_buf.hdr_buf_va + - (u32)buf_id * IDPF_HDR_BUF_SIZE; + if (unlikely(!hdr_len && !skb)) { + hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len); + pkt_len -= hdr_len; - skb = idpf_rx_hdr_construct_skb(rxq, va, hdr_len); u64_stats_update_begin(&rxq->stats_sync); - u64_stats_inc(&rxq->q_stats.rx.hsplit_pkts); + u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf); u64_stats_update_end(&rxq->stats_sync); } - if (pkt_len) { - idpf_rx_sync_for_cpu(rx_buf, pkt_len); - if (skb) - idpf_rx_add_frag(rx_buf, skb, pkt_len); - else - skb = idpf_rx_construct_skb(rxq, rx_buf, - pkt_len); - } else { - idpf_rx_put_page(rx_buf); + if (libeth_rx_sync_for_cpu(hdr, hdr_len)) { + skb = idpf_rx_build_skb(hdr, hdr_len); + if (!skb) + break; + + u64_stats_update_begin(&rxq->stats_sync); + u64_stats_inc(&rxq->q_stats.hsplit_pkts); + u64_stats_update_end(&rxq->stats_sync); } + hdr->page = NULL; + +payload: + if (!libeth_rx_sync_for_cpu(rx_buf, pkt_len)) + goto skip_data; + + if (skb) + idpf_rx_add_frag(rx_buf, skb, pkt_len); + else + skb = idpf_rx_build_skb(rx_buf, pkt_len); + /* exit if we failed to retrieve a buffer */ if (!skb) break; - idpf_rx_post_buf_refill(refillq, buf_id); +skip_data: + rx_buf->page = NULL; + idpf_rx_post_buf_refill(refillq, buf_id); IDPF_RX_BUMP_NTC(rxq, ntc); + /* skip if it is non EOP desc */ - if (!idpf_rx_splitq_is_eop(rx_desc)) + if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!skb)) continue; /* pad skb if needed (to make valid ethernet frame) */ @@ -3250,7 +3379,7 @@ bypass_hsplit: } /* send completed skb up the stack */ - napi_gro_receive(&rxq->q_vector->napi, skb); + napi_gro_receive(rxq->napi, skb); skb = NULL; /* update budget accounting */ @@ -3261,8 +3390,8 @@ bypass_hsplit: rxq->skb = skb; u64_stats_update_begin(&rxq->stats_sync); - u64_stats_add(&rxq->q_stats.rx.packets, total_rx_pkts); - u64_stats_add(&rxq->q_stats.rx.bytes, total_rx_bytes); + u64_stats_add(&rxq->q_stats.packets, total_rx_pkts); + u64_stats_add(&rxq->q_stats.bytes, total_rx_bytes); u64_stats_update_end(&rxq->stats_sync); /* guarantee a trip back through this routine if there was a failure */ @@ -3272,34 +3401,41 @@ bypass_hsplit: 
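The done-test in the clean loop above keys off a software generation bit rather than a descriptor-done flag that would need clearing. A userspace model of the GEN_CHK handshake, including the flip on ring wrap that IDPF_RX_BUMP_NTC performs:

    #include <stdbool.h>
    #include <stdint.h>

    struct toy_ring {
            uint16_t ntc;           /* next descriptor to clean */
            uint16_t count;         /* ring length */
            bool     gen_chk;       /* generation value SW expects */
    };

    /* HW stamps the current generation into each written-back
     * descriptor; a mismatch means the slot is still stale. */
    static bool toy_desc_done(const struct toy_ring *r, bool desc_gen)
    {
            return desc_gen == r->gen_chk;
    }

    static void toy_bump_ntc(struct toy_ring *r)
    {
            if (++r->ntc == r->count) {
                    r->ntc = 0;
                    r->gen_chk = !r->gen_chk;       /* flip on wrap */
            }
    }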
/** * idpf_rx_update_bufq_desc - Update buffer queue descriptor * @bufq: Pointer to the buffer queue - * @refill_desc: SW Refill queue descriptor containing buffer ID + * @buf_id: buffer ID * @buf_desc: Buffer queue descriptor * * Return 0 on success and negative on failure. */ -static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc, +static int idpf_rx_update_bufq_desc(struct idpf_buf_queue *bufq, u32 buf_id, struct virtchnl2_splitq_rx_buf_desc *buf_desc) { - struct idpf_rx_buf *buf; + struct libeth_fq_fp fq = { + .pp = bufq->pp, + .fqes = bufq->buf, + .truesize = bufq->truesize, + .count = bufq->desc_count, + }; dma_addr_t addr; - u16 buf_id; - - buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc); - buf = &bufq->rx_buf.buf[buf_id]; - - addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size); - if (unlikely(addr == DMA_MAPPING_ERROR)) + addr = libeth_rx_alloc(&fq, buf_id); + if (addr == DMA_MAPPING_ERROR) return -ENOMEM; buf_desc->pkt_addr = cpu_to_le64(addr); buf_desc->qword0.buf_id = cpu_to_le16(buf_id); - if (!bufq->rx_hsplit_en) + if (!idpf_queue_has(HSPLIT_EN, bufq)) return 0; - buf_desc->hdr_addr = cpu_to_le64(bufq->rx_buf.hdr_buf_pa + - (u32)buf_id * IDPF_HDR_BUF_SIZE); + fq.pp = bufq->hdr_pp; + fq.fqes = bufq->hdr_buf; + fq.truesize = bufq->hdr_truesize; + + addr = libeth_rx_alloc(&fq, buf_id); + if (addr == DMA_MAPPING_ERROR) + return -ENOMEM; + + buf_desc->hdr_addr = cpu_to_le64(addr); return 0; } @@ -3311,38 +3447,37 @@ static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc, * * This function takes care of the buffer refill management */ -static void idpf_rx_clean_refillq(struct idpf_queue *bufq, +static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq, struct idpf_sw_queue *refillq) { struct virtchnl2_splitq_rx_buf_desc *buf_desc; u16 bufq_nta = bufq->next_to_alloc; u16 ntc = refillq->next_to_clean; int cleaned = 0; - u16 gen; - buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, bufq_nta); + buf_desc = &bufq->split_buf[bufq_nta]; /* make sure we stop at ring wrap in the unlikely case ring is full */ while (likely(cleaned < refillq->desc_count)) { - u16 refill_desc = IDPF_SPLITQ_RX_BI_DESC(refillq, ntc); + u32 buf_id, refill_desc = refillq->ring[ntc]; bool failure; - gen = FIELD_GET(IDPF_RX_BI_GEN_M, refill_desc); - if (test_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags) != gen) + if (idpf_queue_has(RFL_GEN_CHK, refillq) != + !!(refill_desc & IDPF_RX_BI_GEN_M)) break; - failure = idpf_rx_update_bufq_desc(bufq, refill_desc, - buf_desc); + buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc); + failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc); if (failure) break; if (unlikely(++ntc == refillq->desc_count)) { - change_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags); + idpf_queue_change(RFL_GEN_CHK, refillq); ntc = 0; } if (unlikely(++bufq_nta == bufq->desc_count)) { - buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, 0); + buf_desc = &bufq->split_buf[0]; bufq_nta = 0; } else { buf_desc++; @@ -3371,16 +3506,21 @@ static void idpf_rx_clean_refillq(struct idpf_queue *bufq, /** * idpf_rx_clean_refillq_all - Clean all refill queues * @bufq: buffer queue with refill queues + * @nid: ID of the closest NUMA node with memory * * Iterates through all refill queues assigned to the buffer queue assigned to * this vector. Returns true if clean is complete within budget, false * otherwise. 
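The refill entries consumed by idpf_rx_clean_refillq() are plain u32s laid out per the reworked masks: buffer ID in bits 15:0, generation in bit 16. A model of the encode/decode pair:

    #include <stdbool.h>
    #include <stdint.h>

    #define TOY_RX_BI_BUFID_M       0x0000ffffu     /* GENMASK(15, 0) */
    #define TOY_RX_BI_GEN_M         0x00010000u     /* BIT(16) */

    static uint32_t toy_refill_pack(uint16_t buf_id, bool gen)
    {
            return (uint32_t)buf_id | (gen ? TOY_RX_BI_GEN_M : 0);
    }

    static uint16_t toy_refill_bufid(uint32_t desc)
    {
            return desc & TOY_RX_BI_BUFID_M;
    }

    static bool toy_refill_gen(uint32_t desc)
    {
            return desc & TOY_RX_BI_GEN_M;
    }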
*/ -static void idpf_rx_clean_refillq_all(struct idpf_queue *bufq) +static void idpf_rx_clean_refillq_all(struct idpf_buf_queue *bufq, int nid) { struct idpf_bufq_set *bufq_set; int i; + page_pool_nid_changed(bufq->pp, nid); + if (bufq->hdr_pp) + page_pool_nid_changed(bufq->hdr_pp, nid); + bufq_set = container_of(bufq, struct idpf_bufq_set, bufq); for (i = 0; i < bufq_set->num_refillqs; i++) idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]); @@ -3441,12 +3581,16 @@ void idpf_vport_intr_rel(struct idpf_vport *vport) for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx]; + kfree(q_vector->complq); + q_vector->complq = NULL; kfree(q_vector->bufq); q_vector->bufq = NULL; kfree(q_vector->tx); q_vector->tx = NULL; kfree(q_vector->rx); q_vector->rx = NULL; + + free_cpumask_var(q_vector->affinity_mask); } /* Clean up the mapping of queues to vectors */ @@ -3495,7 +3639,7 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport) /* clear the affinity_mask in the IRQ descriptor */ irq_set_affinity_hint(irq_num, NULL); - free_irq(irq_num, q_vector); + kfree(free_irq(irq_num, q_vector)); } } @@ -3579,13 +3723,13 @@ static void idpf_net_dim(struct idpf_q_vector *q_vector) goto check_rx_itr; for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) { - struct idpf_queue *txq = q_vector->tx[i]; + struct idpf_tx_queue *txq = q_vector->tx[i]; unsigned int start; do { start = u64_stats_fetch_begin(&txq->stats_sync); - packets += u64_stats_read(&txq->q_stats.tx.packets); - bytes += u64_stats_read(&txq->q_stats.tx.bytes); + packets += u64_stats_read(&txq->q_stats.packets); + bytes += u64_stats_read(&txq->q_stats.bytes); } while (u64_stats_fetch_retry(&txq->stats_sync, start)); } @@ -3598,13 +3742,13 @@ check_rx_itr: return; for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) { - struct idpf_queue *rxq = q_vector->rx[i]; + struct idpf_rx_queue *rxq = q_vector->rx[i]; unsigned int start; do { start = u64_stats_fetch_begin(&rxq->stats_sync); - packets += u64_stats_read(&rxq->q_stats.rx.packets); - bytes += u64_stats_read(&rxq->q_stats.rx.bytes); + packets += u64_stats_read(&rxq->q_stats.packets); + bytes += u64_stats_read(&rxq->q_stats.bytes); } while (u64_stats_fetch_retry(&rxq->stats_sync, start)); } @@ -3646,6 +3790,7 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename) for (vector = 0; vector < vport->num_q_vectors; vector++) { struct idpf_q_vector *q_vector = &vport->q_vectors[vector]; + char *name; vidx = vport->q_vector_idxs[vector]; irq_num = adapter->msix_entries[vidx].vector; @@ -3659,18 +3804,18 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename) else continue; - q_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d", - basename, vec_name, vidx); + name = kasprintf(GFP_KERNEL, "%s-%s-%d", basename, vec_name, + vidx); err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0, - q_vector->name, q_vector); + name, q_vector); if (err) { netdev_err(vport->netdev, "Request_irq failed, error: %d\n", err); goto free_q_irqs; } /* assign the mask for this irq */ - irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); + irq_set_affinity_hint(irq_num, q_vector->affinity_mask); } return 0; @@ -3679,7 +3824,7 @@ free_q_irqs: while (--vector >= 0) { vidx = vport->q_vector_idxs[vector]; irq_num = adapter->msix_entries[vidx].vector; - free_irq(irq_num, &vport->q_vectors[vector]); + kfree(free_irq(irq_num, &vport->q_vectors[vector])); } return err; @@ -3846,16 +3991,17 @@ static 
void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport) static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget, int *cleaned) { - u16 num_txq = q_vec->num_txq; + u16 num_complq = q_vec->num_complq; bool clean_complete = true; int i, budget_per_q; - if (unlikely(!num_txq)) + if (unlikely(!num_complq)) return true; - budget_per_q = DIV_ROUND_UP(budget, num_txq); - for (i = 0; i < num_txq; i++) - clean_complete &= idpf_tx_clean_complq(q_vec->tx[i], + budget_per_q = DIV_ROUND_UP(budget, num_complq); + + for (i = 0; i < num_complq; i++) + clean_complete &= idpf_tx_clean_complq(q_vec->complq[i], budget_per_q, cleaned); return clean_complete; @@ -3876,13 +4022,14 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget, bool clean_complete = true; int pkts_cleaned = 0; int i, budget_per_q; + int nid; /* We attempt to distribute budget to each Rx queue fairly, but don't * allow the budget to go below 1 because that would exit polling early. */ budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0; for (i = 0; i < num_rxq; i++) { - struct idpf_queue *rxq = q_vec->rx[i]; + struct idpf_rx_queue *rxq = q_vec->rx[i]; int pkts_cleaned_per_q; pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q); @@ -3893,8 +4040,10 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget, } *cleaned = pkts_cleaned; + nid = numa_mem_id(); + for (i = 0; i < q_vec->num_bufq; i++) - idpf_rx_clean_refillq_all(q_vec->bufq[i]); + idpf_rx_clean_refillq_all(q_vec->bufq[i], nid); return clean_complete; } @@ -3937,8 +4086,8 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) * queues virtchnl message, as the interrupts will be disabled after * that */ - if (unlikely(q_vector->num_txq && test_bit(__IDPF_Q_POLL_MODE, - q_vector->tx[0]->flags))) + if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE, + q_vector->tx[0]))) return budget; else return work_done; @@ -3952,27 +4101,28 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) */ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport) { + bool split = idpf_is_queue_model_split(vport->rxq_model); u16 num_txq_grp = vport->num_txq_grp; - int i, j, qv_idx, bufq_vidx = 0; struct idpf_rxq_group *rx_qgrp; struct idpf_txq_group *tx_qgrp; - struct idpf_queue *q, *bufq; - u16 q_index; + u32 i, qv_idx, q_index; for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) { u16 num_rxq; + if (qv_idx >= vport->num_q_vectors) + qv_idx = 0; + rx_qgrp = &vport->rxq_grps[i]; - if (idpf_is_queue_model_split(vport->rxq_model)) + if (split) num_rxq = rx_qgrp->splitq.num_rxq_sets; else num_rxq = rx_qgrp->singleq.num_rxq; - for (j = 0; j < num_rxq; j++) { - if (qv_idx >= vport->num_q_vectors) - qv_idx = 0; + for (u32 j = 0; j < num_rxq; j++) { + struct idpf_rx_queue *q; - if (idpf_is_queue_model_split(vport->rxq_model)) + if (split) q = &rx_qgrp->splitq.rxq_sets[j]->rxq; else q = rx_qgrp->singleq.rxqs[j]; @@ -3980,52 +4130,53 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport) q_index = q->q_vector->num_rxq; q->q_vector->rx[q_index] = q; q->q_vector->num_rxq++; - qv_idx++; + + if (split) + q->napi = &q->q_vector->napi; } - if (idpf_is_queue_model_split(vport->rxq_model)) { - for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + if (split) { + for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) { + struct idpf_buf_queue *bufq; + bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; - bufq->q_vector = &vport->q_vectors[bufq_vidx]; + bufq->q_vector = 
&vport->q_vectors[qv_idx]; q_index = bufq->q_vector->num_bufq; bufq->q_vector->bufq[q_index] = bufq; bufq->q_vector->num_bufq++; } - if (++bufq_vidx >= vport->num_q_vectors) - bufq_vidx = 0; } + + qv_idx++; } + split = idpf_is_queue_model_split(vport->txq_model); + for (i = 0, qv_idx = 0; i < num_txq_grp; i++) { u16 num_txq; + if (qv_idx >= vport->num_q_vectors) + qv_idx = 0; + tx_qgrp = &vport->txq_grps[i]; num_txq = tx_qgrp->num_txq; - if (idpf_is_queue_model_split(vport->txq_model)) { - if (qv_idx >= vport->num_q_vectors) - qv_idx = 0; + for (u32 j = 0; j < num_txq; j++) { + struct idpf_tx_queue *q; - q = tx_qgrp->complq; + q = tx_qgrp->txqs[j]; q->q_vector = &vport->q_vectors[qv_idx]; - q_index = q->q_vector->num_txq; - q->q_vector->tx[q_index] = q; - q->q_vector->num_txq++; - qv_idx++; - } else { - for (j = 0; j < num_txq; j++) { - if (qv_idx >= vport->num_q_vectors) - qv_idx = 0; + q->q_vector->tx[q->q_vector->num_txq++] = q; + } - q = tx_qgrp->txqs[j]; - q->q_vector = &vport->q_vectors[qv_idx]; - q_index = q->q_vector->num_txq; - q->q_vector->tx[q_index] = q; - q->q_vector->num_txq++; + if (split) { + struct idpf_compl_queue *q = tx_qgrp->complq; - qv_idx++; - } + q->q_vector = &vport->q_vectors[qv_idx]; + q->q_vector->complq[q->q_vector->num_complq++] = q; } + + qv_idx++; } } @@ -4086,7 +4237,7 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport) /* only set affinity_mask if the CPU is online */ if (cpu_online(v_idx)) - cpumask_set_cpu(v_idx, &q_vector->affinity_mask); + cpumask_set_cpu(v_idx, q_vector->affinity_mask); } } @@ -4101,18 +4252,22 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport) { u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector; struct idpf_q_vector *q_vector; - int v_idx, err; + u32 complqs_per_vector, v_idx; vport->q_vectors = kcalloc(vport->num_q_vectors, sizeof(struct idpf_q_vector), GFP_KERNEL); if (!vport->q_vectors) return -ENOMEM; - txqs_per_vector = DIV_ROUND_UP(vport->num_txq, vport->num_q_vectors); - rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq, vport->num_q_vectors); + txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp, + vport->num_q_vectors); + rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp, + vport->num_q_vectors); bufqs_per_vector = vport->num_bufqs_per_qgrp * DIV_ROUND_UP(vport->num_rxq_grp, vport->num_q_vectors); + complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp, + vport->num_q_vectors); for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { q_vector = &vport->q_vectors[v_idx]; @@ -4126,32 +4281,33 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport) q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC; q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0; - q_vector->tx = kcalloc(txqs_per_vector, - sizeof(struct idpf_queue *), + if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL)) + goto error; + + q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx), GFP_KERNEL); - if (!q_vector->tx) { - err = -ENOMEM; + if (!q_vector->tx) goto error; - } - q_vector->rx = kcalloc(rxqs_per_vector, - sizeof(struct idpf_queue *), + q_vector->rx = kcalloc(rxqs_per_vector, sizeof(*q_vector->rx), GFP_KERNEL); - if (!q_vector->rx) { - err = -ENOMEM; + if (!q_vector->rx) goto error; - } if (!idpf_is_queue_model_split(vport->rxq_model)) continue; q_vector->bufq = kcalloc(bufqs_per_vector, - sizeof(struct idpf_queue *), + sizeof(*q_vector->bufq), GFP_KERNEL); - if (!q_vector->bufq) { - err = -ENOMEM; + if (!q_vector->bufq) + goto error; + + q_vector->complq = kcalloc(complqs_per_vector, + sizeof(*q_vector->complq), + GFP_KERNEL); + if 
(!q_vector->complq) goto error; - } } return 0; @@ -4159,7 +4315,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport) error: idpf_vport_intr_rel(vport); - return err; + return -ENOMEM; } /** diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index 551391e20464..6215dbee5546 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -4,10 +4,13 @@ #ifndef _IDPF_TXRX_H_ #define _IDPF_TXRX_H_ -#include <net/page_pool/helpers.h> +#include <linux/dim.h> + +#include <net/libeth/cache.h> #include <net/tcp.h> #include <net/netdev_queues.h> +#include "idpf_lan_txrx.h" #include "virtchnl2_lan_desc.h" #define IDPF_LARGE_MAX_Q 256 @@ -83,7 +86,7 @@ do { \ if (unlikely(++(ntc) == (rxq)->desc_count)) { \ ntc = 0; \ - change_bit(__IDPF_Q_GEN_CHK, (rxq)->flags); \ + idpf_queue_change(GEN_CHK, rxq); \ } \ } while (0) @@ -93,16 +96,10 @@ do { \ idx = 0; \ } while (0) -#define IDPF_RX_HDR_SIZE 256 -#define IDPF_RX_BUF_2048 2048 -#define IDPF_RX_BUF_4096 4096 #define IDPF_RX_BUF_STRIDE 32 #define IDPF_RX_BUF_POST_STRIDE 16 #define IDPF_LOW_WATERMARK 64 -/* Size of header buffer specifically for header split */ -#define IDPF_HDR_BUF_SIZE 256 -#define IDPF_PACKET_HDR_PAD \ - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN * 2) + #define IDPF_TX_TSO_MIN_MSS 88 /* Minimum number of descriptors between 2 descriptors with the RE bit set; @@ -110,36 +107,17 @@ do { \ */ #define IDPF_TX_SPLITQ_RE_MIN_GAP 64 -#define IDPF_RX_BI_BUFID_S 0 -#define IDPF_RX_BI_BUFID_M GENMASK(14, 0) -#define IDPF_RX_BI_GEN_S 15 -#define IDPF_RX_BI_GEN_M BIT(IDPF_RX_BI_GEN_S) +#define IDPF_RX_BI_GEN_M BIT(16) +#define IDPF_RX_BI_BUFID_M GENMASK(15, 0) + #define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M #define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M -#define IDPF_SINGLEQ_RX_BUF_DESC(rxq, i) \ - (&(((struct virtchnl2_singleq_rx_buf_desc *)((rxq)->desc_ring))[i])) -#define IDPF_SPLITQ_RX_BUF_DESC(rxq, i) \ - (&(((struct virtchnl2_splitq_rx_buf_desc *)((rxq)->desc_ring))[i])) -#define IDPF_SPLITQ_RX_BI_DESC(rxq, i) ((((rxq)->ring))[i]) - -#define IDPF_BASE_TX_DESC(txq, i) \ - (&(((struct idpf_base_tx_desc *)((txq)->desc_ring))[i])) -#define IDPF_BASE_TX_CTX_DESC(txq, i) \ - (&(((struct idpf_base_tx_ctx_desc *)((txq)->desc_ring))[i])) -#define IDPF_SPLITQ_TX_COMPLQ_DESC(txcq, i) \ - (&(((struct idpf_splitq_tx_compl_desc *)((txcq)->desc_ring))[i])) - -#define IDPF_FLEX_TX_DESC(txq, i) \ - (&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i])) -#define IDPF_FLEX_TX_CTX_DESC(txq, i) \ - (&(((struct idpf_flex_tx_ctx_desc *)((txq)->desc_ring))[i])) - #define IDPF_DESC_UNUSED(txq) \ ((((txq)->next_to_clean > (txq)->next_to_use) ? 
0 : (txq)->desc_count) + \ (txq)->next_to_clean - (txq)->next_to_use - 1) -#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->buf_stack.top) +#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->stash->buf_stack.top) #define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \ (txq)->desc_count >> 2) @@ -315,16 +293,7 @@ struct idpf_rx_extracted { #define IDPF_TX_MAX_DESC_DATA_ALIGNED \ ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE) -#define IDPF_RX_DMA_ATTR \ - (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) -#define IDPF_RX_DESC(rxq, i) \ - (&(((union virtchnl2_rx_desc *)((rxq)->desc_ring))[i])) - -struct idpf_rx_buf { - struct page *page; - unsigned int page_offset; - u16 truesize; -}; +#define idpf_rx_buf libeth_fqe #define IDPF_RX_MAX_PTYPE_PROTO_IDS 32 #define IDPF_RX_MAX_PTYPE_SZ (sizeof(struct virtchnl2_ptype) + \ @@ -348,72 +317,6 @@ struct idpf_rx_buf { #define IDPF_RX_MAX_BASE_PTYPE 256 #define IDPF_INVALID_PTYPE_ID 0xFFFF -/* Packet type non-ip values */ -enum idpf_rx_ptype_l2 { - IDPF_RX_PTYPE_L2_RESERVED = 0, - IDPF_RX_PTYPE_L2_MAC_PAY2 = 1, - IDPF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, - IDPF_RX_PTYPE_L2_FIP_PAY2 = 3, - IDPF_RX_PTYPE_L2_OUI_PAY2 = 4, - IDPF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, - IDPF_RX_PTYPE_L2_LLDP_PAY2 = 6, - IDPF_RX_PTYPE_L2_ECP_PAY2 = 7, - IDPF_RX_PTYPE_L2_EVB_PAY2 = 8, - IDPF_RX_PTYPE_L2_QCN_PAY2 = 9, - IDPF_RX_PTYPE_L2_EAPOL_PAY2 = 10, - IDPF_RX_PTYPE_L2_ARP = 11, -}; - -enum idpf_rx_ptype_outer_ip { - IDPF_RX_PTYPE_OUTER_L2 = 0, - IDPF_RX_PTYPE_OUTER_IP = 1, -}; - -#define IDPF_RX_PTYPE_TO_IPV(ptype, ipv) \ - (((ptype)->outer_ip == IDPF_RX_PTYPE_OUTER_IP) && \ - ((ptype)->outer_ip_ver == (ipv))) - -enum idpf_rx_ptype_outer_ip_ver { - IDPF_RX_PTYPE_OUTER_NONE = 0, - IDPF_RX_PTYPE_OUTER_IPV4 = 1, - IDPF_RX_PTYPE_OUTER_IPV6 = 2, -}; - -enum idpf_rx_ptype_outer_fragmented { - IDPF_RX_PTYPE_NOT_FRAG = 0, - IDPF_RX_PTYPE_FRAG = 1, -}; - -enum idpf_rx_ptype_tunnel_type { - IDPF_RX_PTYPE_TUNNEL_NONE = 0, - IDPF_RX_PTYPE_TUNNEL_IP_IP = 1, - IDPF_RX_PTYPE_TUNNEL_IP_GRENAT = 2, - IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, - IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, -}; - -enum idpf_rx_ptype_tunnel_end_prot { - IDPF_RX_PTYPE_TUNNEL_END_NONE = 0, - IDPF_RX_PTYPE_TUNNEL_END_IPV4 = 1, - IDPF_RX_PTYPE_TUNNEL_END_IPV6 = 2, -}; - -enum idpf_rx_ptype_inner_prot { - IDPF_RX_PTYPE_INNER_PROT_NONE = 0, - IDPF_RX_PTYPE_INNER_PROT_UDP = 1, - IDPF_RX_PTYPE_INNER_PROT_TCP = 2, - IDPF_RX_PTYPE_INNER_PROT_SCTP = 3, - IDPF_RX_PTYPE_INNER_PROT_ICMP = 4, - IDPF_RX_PTYPE_INNER_PROT_TIMESYNC = 5, -}; - -enum idpf_rx_ptype_payload_layer { - IDPF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, - IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, - IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, - IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, -}; - enum idpf_tunnel_state { IDPF_PTYPE_TUNNEL_IP = BIT(0), IDPF_PTYPE_TUNNEL_IP_GRENAT = BIT(1), @@ -421,22 +324,9 @@ enum idpf_tunnel_state { }; struct idpf_ptype_state { - bool outer_ip; - bool outer_frag; - u8 tunnel_state; -}; - -struct idpf_rx_ptype_decoded { - u32 ptype:10; - u32 known:1; - u32 outer_ip:1; - u32 outer_ip_ver:2; - u32 outer_frag:1; - u32 tunnel_type:3; - u32 tunnel_end_prot:2; - u32 tunnel_end_frag:1; - u32 inner_prot:4; - u32 payload_layer:3; + bool outer_ip:1; + bool outer_frag:1; + u8 tunnel_state:6; }; /** @@ -452,23 +342,37 @@ struct idpf_rx_ptype_decoded { * to 1 and knows that reading a gen bit of 1 in any * descriptor on the initial pass of the ring indicates a * writeback. It also flips on every ring wrap. 
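The IDPF_DESC_UNUSED() macro retained above is the classic ring-occupancy formula; transcribed directly, it always leaves one slot unused so that a full ring and an empty ring stay distinguishable:

    /* Free slots between next_to_clean (ntc) and next_to_use (ntu)
     * on a ring of 'count' descriptors. */
    static unsigned int toy_desc_unused(unsigned int ntc,
                                        unsigned int ntu,
                                        unsigned int count)
    {
            return (ntc > ntu ? 0 : count) + ntc - ntu - 1;
    }

With ntc == ntu (everything cleaned) this reports count - 1 free descriptors, never count.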
- * @__IDPF_RFLQ_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW bit - * and RFLGQ_GEN is the SW bit. + * @__IDPF_Q_RFL_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW + * bit and Q_RFL_GEN is the SW bit. * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions * @__IDPF_Q_POLL_MODE: Enable poll mode + * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode + * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq) * @__IDPF_Q_FLAGS_NBITS: Must be last */ enum idpf_queue_flags_t { __IDPF_Q_GEN_CHK, - __IDPF_RFLQ_GEN_CHK, + __IDPF_Q_RFL_GEN_CHK, __IDPF_Q_FLOW_SCH_EN, __IDPF_Q_SW_MARKER, __IDPF_Q_POLL_MODE, + __IDPF_Q_CRC_EN, + __IDPF_Q_HSPLIT_EN, __IDPF_Q_FLAGS_NBITS, }; +#define idpf_queue_set(f, q) __set_bit(__IDPF_Q_##f, (q)->flags) +#define idpf_queue_clear(f, q) __clear_bit(__IDPF_Q_##f, (q)->flags) +#define idpf_queue_change(f, q) __change_bit(__IDPF_Q_##f, (q)->flags) +#define idpf_queue_has(f, q) test_bit(__IDPF_Q_##f, (q)->flags) + +#define idpf_queue_has_clear(f, q) \ + __test_and_clear_bit(__IDPF_Q_##f, (q)->flags) +#define idpf_queue_assign(f, q, v) \ + __assign_bit(__IDPF_Q_##f, (q)->flags, v) + /** * struct idpf_vec_regs * @dyn_ctl_reg: Dynamic control interrupt register offset @@ -509,54 +413,68 @@ struct idpf_intr_reg { /** * struct idpf_q_vector * @vport: Vport back pointer - * @affinity_mask: CPU affinity mask - * @napi: napi handler - * @v_idx: Vector index - * @intr_reg: See struct idpf_intr_reg + * @num_rxq: Number of RX queues * @num_txq: Number of TX queues + * @num_bufq: Number of buffer queues + * @num_complq: number of completion queues + * @rx: Array of RX queues to service * @tx: Array of TX queues to service + * @bufq: Array of buffer queues to service + * @complq: array of completion queues + * @intr_reg: See struct idpf_intr_reg + * @napi: napi handler + * @total_events: Number of interrupts processed * @tx_dim: Data for TX net_dim algorithm * @tx_itr_value: TX interrupt throttling rate * @tx_intr_mode: Dynamic ITR or not * @tx_itr_idx: TX ITR index - * @num_rxq: Number of RX queues - * @rx: Array of RX queues to service * @rx_dim: Data for RX net_dim algorithm * @rx_itr_value: RX interrupt throttling rate * @rx_intr_mode: Dynamic ITR or not * @rx_itr_idx: RX ITR index - * @num_bufq: Number of buffer queues - * @bufq: Array of buffer queues to service - * @total_events: Number of interrupts processed - * @name: Queue vector name + * @v_idx: Vector index + * @affinity_mask: CPU affinity mask */ struct idpf_q_vector { + __cacheline_group_begin_aligned(read_mostly); struct idpf_vport *vport; - cpumask_t affinity_mask; - struct napi_struct napi; - u16 v_idx; - struct idpf_intr_reg intr_reg; + u16 num_rxq; u16 num_txq; - struct idpf_queue **tx; + u16 num_bufq; + u16 num_complq; + struct idpf_rx_queue **rx; + struct idpf_tx_queue **tx; + struct idpf_buf_queue **bufq; + struct idpf_compl_queue **complq; + + struct idpf_intr_reg intr_reg; + __cacheline_group_end_aligned(read_mostly); + + __cacheline_group_begin_aligned(read_write); + struct napi_struct napi; + u16 total_events; + struct dim tx_dim; u16 tx_itr_value; bool tx_intr_mode; u32 tx_itr_idx; - u16 num_rxq; - struct idpf_queue **rx; struct dim rx_dim; u16 rx_itr_value; bool rx_intr_mode; u32 rx_itr_idx; + __cacheline_group_end_aligned(read_write); - u16 num_bufq; - struct idpf_queue **bufq; + __cacheline_group_begin_aligned(cold); + u16 v_idx; - u16 total_events; - char *name; + cpumask_var_t affinity_mask; + 
__cacheline_group_end_aligned(cold); }; +libeth_cacheline_set_assert(struct idpf_q_vector, 104, + 424 + 2 * sizeof(struct dim), + 8 + sizeof(cpumask_var_t)); struct idpf_rx_queue_stats { u64_stats_t packets; @@ -583,11 +501,6 @@ struct idpf_cleaned_stats { u32 bytes; }; -union idpf_queue_stats { - struct idpf_rx_queue_stats rx; - struct idpf_tx_queue_stats tx; -}; - #define IDPF_ITR_DYNAMIC 1 #define IDPF_ITR_MAX 0x1FE0 #define IDPF_ITR_20K 0x0032 @@ -603,68 +516,123 @@ union idpf_queue_stats { #define IDPF_DIM_DEFAULT_PROFILE_IX 1 /** - * struct idpf_queue - * @dev: Device back pointer for DMA mapping - * @vport: Back pointer to associated vport - * @txq_grp: See struct idpf_txq_group - * @rxq_grp: See struct idpf_rxq_group - * @idx: For buffer queue, it is used as group id, either 0 or 1. On clean, - * buffer queue uses this index to determine which group of refill queues - * to clean. - * For TX queue, it is used as index to map between TX queue group and - * hot path TX pointers stored in vport. Used in both singleq/splitq. - * For RX queue, it is used to index to total RX queue across groups and + * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode + * @buf_stack: Stack of empty buffers to store buffer info for out of order + * buffer completions. See struct idpf_buf_lifo + * @sched_buf_hash: Hash table to store buffers + */ +struct idpf_txq_stash { + struct idpf_buf_lifo buf_stack; + DECLARE_HASHTABLE(sched_buf_hash, 12); +} ____cacheline_aligned; + +/** + * struct idpf_rx_queue - software structure representing a receive queue + * @rx: universal receive descriptor array + * @single_buf: buffer descriptor array in singleq + * @desc_ring: virtual descriptor ring address + * @bufq_sets: Pointer to the array of buffer queues in splitq mode + * @napi: NAPI instance corresponding to this queue (splitq) + * @rx_buf: See struct &libeth_fqe + * @pp: Page pool pointer in singleq mode + * @netdev: &net_device corresponding to this queue + * @tail: Tail offset. Used for both queue models single and split. + * @flags: See enum idpf_queue_flags_t + * @idx: For RX queue, it is used to index to total RX queue across groups and * used for skb reporting. - * @tail: Tail offset. Used for both queue models single and split. In splitq - * model relevant only for TX queue and RX queue. - * @tx_buf: See struct idpf_tx_buf - * @rx_buf: Struct with RX buffer related members - * @rx_buf.buf: See struct idpf_rx_buf - * @rx_buf.hdr_buf_pa: DMA handle - * @rx_buf.hdr_buf_va: Virtual address - * @pp: Page pool pointer - * @skb: Pointer to the skb - * @q_type: Queue type (TX, RX, TX completion, RX buffer) - * @q_id: Queue id * @desc_count: Number of descriptors - * @next_to_use: Next descriptor to use. Relevant in both split & single txq - * and bufq. - * @next_to_clean: Next descriptor to clean. In split queue model, only - * relevant to TX completion queue and RX queue. - * @next_to_alloc: RX buffer to allocate at. Used only for RX. In splitq model - * only relevant to RX queue. 
- * @flags: See enum idpf_queue_flags_t - * @q_stats: See union idpf_queue_stats + * @rxdids: Supported RX descriptor ids + * @rx_ptype_lkup: LUT of Rx ptypes + * @next_to_use: Next descriptor to use + * @next_to_clean: Next descriptor to clean + * @next_to_alloc: RX buffer to allocate at + * @skb: Pointer to the skb + * @truesize: data buffer truesize in singleq * @stats_sync: See struct u64_stats_sync - * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on - * the TX completion queue, it can be for any TXQ associated - * with that completion queue. This means we can clean up to - * N TXQs during a single call to clean the completion queue. - * cleaned_bytes|pkts tracks the clean stats per TXQ during - * that single call to clean the completion queue. By doing so, - * we can update BQL with aggregate cleaned stats for each TXQ - * only once at the end of the cleaning routine. - * @cleaned_pkts: Number of packets cleaned for the above said case - * @rx_hsplit_en: RX headsplit enable + * @q_stats: See union idpf_rx_queue_stats + * @q_id: Queue id + * @size: Length of descriptor ring in bytes + * @dma: Physical address of ring + * @q_vector: Backreference to associated vector + * @rx_buffer_low_watermark: RX buffer low watermark * @rx_hbuf_size: Header buffer size * @rx_buf_size: Buffer size * @rx_max_pkt_size: RX max packet size - * @rx_buf_stride: RX buffer stride - * @rx_buffer_low_watermark: RX buffer low watermark - * @rxdids: Supported RX descriptor ids - * @q_vector: Backreference to associated vector - * @size: Length of descriptor ring in bytes - * @dma: Physical address of ring - * @desc_ring: Descriptor ring memory - * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather + */ +struct idpf_rx_queue { + __cacheline_group_begin_aligned(read_mostly); + union { + union virtchnl2_rx_desc *rx; + struct virtchnl2_singleq_rx_buf_desc *single_buf; + + void *desc_ring; + }; + union { + struct { + struct idpf_bufq_set *bufq_sets; + struct napi_struct *napi; + }; + struct { + struct libeth_fqe *rx_buf; + struct page_pool *pp; + }; + }; + struct net_device *netdev; + void __iomem *tail; + + DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); + u16 idx; + u16 desc_count; + + u32 rxdids; + const struct libeth_rx_pt *rx_ptype_lkup; + __cacheline_group_end_aligned(read_mostly); + + __cacheline_group_begin_aligned(read_write); + u16 next_to_use; + u16 next_to_clean; + u16 next_to_alloc; + + struct sk_buff *skb; + u32 truesize; + + struct u64_stats_sync stats_sync; + struct idpf_rx_queue_stats q_stats; + __cacheline_group_end_aligned(read_write); + + __cacheline_group_begin_aligned(cold); + u32 q_id; + u32 size; + dma_addr_t dma; + + struct idpf_q_vector *q_vector; + + u16 rx_buffer_low_watermark; + u16 rx_hbuf_size; + u16 rx_buf_size; + u16 rx_max_pkt_size; + __cacheline_group_end_aligned(cold); +}; +libeth_cacheline_set_assert(struct idpf_rx_queue, 64, + 80 + sizeof(struct u64_stats_sync), + 32); + +/** + * struct idpf_tx_queue - software structure representing a transmit queue + * @base_tx: base Tx descriptor array + * @base_ctx: base Tx context descriptor array + * @flex_tx: flex Tx descriptor array + * @flex_ctx: flex Tx context descriptor array + * @desc_ring: virtual descriptor ring address + * @tx_buf: See struct idpf_tx_buf + * @txq_grp: See struct idpf_txq_group + * @dev: Device back pointer for DMA mapping + * @tail: Tail offset. 
Used for both queue models single and split + * @flags: See enum idpf_queue_flags_t + * @idx: For TX queue, it is used as index to map between TX queue group and + * hot path TX pointers stored in vport. Used in both singleq/splitq. + * @desc_count: Number of descriptors * @tx_min_pkt_len: Min supported packet length - * @num_completions: Only relevant for TX completion queue. It tracks the - * number of completions received to compare against the - * number of completions pending, as accumulated by the - * TX queues. - * @buf_stack: Stack of empty buffers to store buffer info for out of order - * buffer completions. See struct idpf_buf_lifo. - * @compl_tag_bufid_m: Completion tag buffer id mask * @compl_tag_gen_s: Completion tag generation bit * The format of the completion tag will change based on the TXQ * descriptor ring size so that we can maintain roughly the same level @@ -685,108 +653,238 @@ union idpf_queue_stats { * -------------------------------- * * This gives us 8*8160 = 65280 possible unique values. + * @netdev: &net_device corresponding to this queue + * @next_to_use: Next descriptor to use + * @next_to_clean: Next descriptor to clean + * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on + * the TX completion queue, it can be for any TXQ associated + * with that completion queue. This means we can clean up to + * N TXQs during a single call to clean the completion queue. + * cleaned_bytes|pkts tracks the clean stats per TXQ during + * that single call to clean the completion queue. By doing so, + * we can update BQL with aggregate cleaned stats for each TXQ + * only once at the end of the cleaning routine. + * @clean_budget: singleq only, queue cleaning budget + * @cleaned_pkts: Number of packets cleaned for the above said case + * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather + * @stash: Tx buffer stash for Flow-based scheduling mode + * @compl_tag_bufid_m: Completion tag buffer id mask * @compl_tag_cur_gen: Used to keep track of current completion tag generation * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset - * @sched_buf_hash: Hash table to stores buffers + * @stats_sync: See struct u64_stats_sync + * @q_stats: See union idpf_tx_queue_stats + * @q_id: Queue id + * @size: Length of descriptor ring in bytes + * @dma: Physical address of ring + * @q_vector: Backreference to associated vector */ -struct idpf_queue { - struct device *dev; - struct idpf_vport *vport; +struct idpf_tx_queue { + __cacheline_group_begin_aligned(read_mostly); union { - struct idpf_txq_group *txq_grp; - struct idpf_rxq_group *rxq_grp; + struct idpf_base_tx_desc *base_tx; + struct idpf_base_tx_ctx_desc *base_ctx; + union idpf_tx_flex_desc *flex_tx; + struct idpf_flex_tx_ctx_desc *flex_ctx; + + void *desc_ring; }; - u16 idx; + struct idpf_tx_buf *tx_buf; + struct idpf_txq_group *txq_grp; + struct device *dev; void __iomem *tail; - union { - struct idpf_tx_buf *tx_buf; - struct { - struct idpf_rx_buf *buf; - dma_addr_t hdr_buf_pa; - void *hdr_buf_va; - } rx_buf; - }; - struct page_pool *pp; - struct sk_buff *skb; - u16 q_type; - u32 q_id; + + DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); + u16 idx; u16 desc_count; + u16 tx_min_pkt_len; + u16 compl_tag_gen_s; + + struct net_device *netdev; + __cacheline_group_end_aligned(read_mostly); + + __cacheline_group_begin_aligned(read_write); u16 next_to_use; u16 next_to_clean; - u16 next_to_alloc; - DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); - union idpf_queue_stats q_stats; + union { 
+ u32 cleaned_bytes; + u32 clean_budget; + }; + u16 cleaned_pkts; + + u16 tx_max_bufs; + struct idpf_txq_stash *stash; + + u16 compl_tag_bufid_m; + u16 compl_tag_cur_gen; + u16 compl_tag_gen_max; + struct u64_stats_sync stats_sync; + struct idpf_tx_queue_stats q_stats; + __cacheline_group_end_aligned(read_write); - u32 cleaned_bytes; - u16 cleaned_pkts; + __cacheline_group_begin_aligned(cold); + u32 q_id; + u32 size; + dma_addr_t dma; - bool rx_hsplit_en; - u16 rx_hbuf_size; - u16 rx_buf_size; - u16 rx_max_pkt_size; - u16 rx_buf_stride; - u8 rx_buffer_low_watermark; - u64 rxdids; struct idpf_q_vector *q_vector; - unsigned int size; + __cacheline_group_end_aligned(cold); +}; +libeth_cacheline_set_assert(struct idpf_tx_queue, 64, + 88 + sizeof(struct u64_stats_sync), + 24); + +/** + * struct idpf_buf_queue - software structure representing a buffer queue + * @split_buf: buffer descriptor array + * @hdr_buf: &libeth_fqe for header buffers + * @hdr_pp: &page_pool for header buffers + * @buf: &libeth_fqe for data buffers + * @pp: &page_pool for data buffers + * @tail: Tail offset + * @flags: See enum idpf_queue_flags_t + * @desc_count: Number of descriptors + * @next_to_use: Next descriptor to use + * @next_to_clean: Next descriptor to clean + * @next_to_alloc: RX buffer to allocate at + * @hdr_truesize: truesize for buffer headers + * @truesize: truesize for data buffers + * @q_id: Queue id + * @size: Length of descriptor ring in bytes + * @dma: Physical address of ring + * @q_vector: Backreference to associated vector + * @rx_buffer_low_watermark: RX buffer low watermark + * @rx_hbuf_size: Header buffer size + * @rx_buf_size: Buffer size + */ +struct idpf_buf_queue { + __cacheline_group_begin_aligned(read_mostly); + struct virtchnl2_splitq_rx_buf_desc *split_buf; + struct libeth_fqe *hdr_buf; + struct page_pool *hdr_pp; + struct libeth_fqe *buf; + struct page_pool *pp; + void __iomem *tail; + + DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); + u32 desc_count; + __cacheline_group_end_aligned(read_mostly); + + __cacheline_group_begin_aligned(read_write); + u32 next_to_use; + u32 next_to_clean; + u32 next_to_alloc; + + u32 hdr_truesize; + u32 truesize; + __cacheline_group_end_aligned(read_write); + + __cacheline_group_begin_aligned(cold); + u32 q_id; + u32 size; dma_addr_t dma; - void *desc_ring; - u16 tx_max_bufs; - u8 tx_min_pkt_len; + struct idpf_q_vector *q_vector; - u32 num_completions; + u16 rx_buffer_low_watermark; + u16 rx_hbuf_size; + u16 rx_buf_size; + __cacheline_group_end_aligned(cold); +}; +libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32); - struct idpf_buf_lifo buf_stack; +/** + * struct idpf_compl_queue - software structure representing a completion queue + * @comp: completion descriptor array + * @txq_grp: See struct idpf_txq_group + * @flags: See enum idpf_queue_flags_t + * @desc_count: Number of descriptors + * @clean_budget: queue cleaning budget + * @netdev: &net_device corresponding to this queue + * @next_to_use: Next descriptor to use. Relevant in both split & single txq + * and bufq. + * @next_to_clean: Next descriptor to clean + * @num_completions: Only relevant for TX completion queue. It tracks the + * number of completions received to compare against the + * number of completions pending, as accumulated by the + * TX queues. 
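All of the reworked queue structures share the same layout discipline: read_mostly fields first, per-packet read_write state next, cold configuration last, with the libeth_cacheline_*_assert() lines pinning the group sizes at build time. A hedged sketch of the underlying idea using plain GCC attributes rather than the kernel's __cacheline_group_* macros, assuming 64-byte lines:

    /* Start each group on its own cache line so per-packet writers
     * never dirty the lines the fast path only reads. */
    struct toy_queue_layout {
            /* read_mostly: written at init, read per packet */
            void *desc_ring __attribute__((aligned(64)));
            unsigned int desc_count;

            /* read_write: dirtied on every packet */
            unsigned int next_to_clean __attribute__((aligned(64)));
            unsigned long long bytes;

            /* cold: configuration and teardown only */
            unsigned int q_id __attribute__((aligned(64)));
    };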
+ * @q_id: Queue id + * @size: Length of descriptor ring in bytes + * @dma: Physical address of ring + * @q_vector: Backreference to associated vector + */ +struct idpf_compl_queue { + __cacheline_group_begin_aligned(read_mostly); + struct idpf_splitq_tx_compl_desc *comp; + struct idpf_txq_group *txq_grp; - u16 compl_tag_bufid_m; - u16 compl_tag_gen_s; + DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); + u32 desc_count; - u16 compl_tag_cur_gen; - u16 compl_tag_gen_max; + u32 clean_budget; + struct net_device *netdev; + __cacheline_group_end_aligned(read_mostly); - DECLARE_HASHTABLE(sched_buf_hash, 12); -} ____cacheline_internodealigned_in_smp; + __cacheline_group_begin_aligned(read_write); + u32 next_to_use; + u32 next_to_clean; + + u32 num_completions; + __cacheline_group_end_aligned(read_write); + + __cacheline_group_begin_aligned(cold); + u32 q_id; + u32 size; + dma_addr_t dma; + + struct idpf_q_vector *q_vector; + __cacheline_group_end_aligned(cold); +}; +libeth_cacheline_set_assert(struct idpf_compl_queue, 40, 16, 24); /** * struct idpf_sw_queue - * @next_to_clean: Next descriptor to clean - * @next_to_alloc: Buffer to allocate at - * @flags: See enum idpf_queue_flags_t * @ring: Pointer to the ring + * @flags: See enum idpf_queue_flags_t * @desc_count: Descriptor count - * @dev: Device back pointer for DMA mapping + * @next_to_use: Buffer to allocate at + * @next_to_clean: Next descriptor to clean * * Software queues are used in splitq mode to manage buffers between rxq * producer and the bufq consumer. These are required in order to maintain a * lockless buffer management system and are strictly software only constructs. */ struct idpf_sw_queue { - u16 next_to_clean; - u16 next_to_alloc; + __cacheline_group_begin_aligned(read_mostly); + u32 *ring; + DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); - u16 *ring; - u16 desc_count; - struct device *dev; -} ____cacheline_internodealigned_in_smp; + u32 desc_count; + __cacheline_group_end_aligned(read_mostly); + + __cacheline_group_begin_aligned(read_write); + u32 next_to_use; + u32 next_to_clean; + __cacheline_group_end_aligned(read_write); +}; +libeth_cacheline_group_assert(struct idpf_sw_queue, read_mostly, 24); +libeth_cacheline_group_assert(struct idpf_sw_queue, read_write, 8); +libeth_cacheline_struct_assert(struct idpf_sw_queue, 24, 8); /** * struct idpf_rxq_set * @rxq: RX queue - * @refillq0: Pointer to refill queue 0 - * @refillq1: Pointer to refill queue 1 + * @refillq: pointers to refill queues * * Splitq only. idpf_rxq_set associates an rxq with at an array of refillqs. * Each rxq needs a refillq to return used buffers back to the respective bufq. * Bufqs then clean these refillqs for buffers to give to hardware. */ struct idpf_rxq_set { - struct idpf_queue rxq; - struct idpf_sw_queue *refillq0; - struct idpf_sw_queue *refillq1; + struct idpf_rx_queue rxq; + struct idpf_sw_queue *refillq[IDPF_MAX_BUFQS_PER_RXQ_GRP]; }; /** @@ -805,7 +903,7 @@ struct idpf_rxq_set { * managed by at most two bufqs (depending on performance configuration). 
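Storing the refill queues as an array inside idpf_rxq_set is what lets the hot path index by the descriptor's buffer-queue ID instead of branching on it. A minimal model of that call-site change, assuming the two-bufq maximum mentioned above:

    #define TOY_MAX_BUFQS_PER_RXQ_GRP 2     /* assumed maximum */

    struct toy_refillq;

    struct toy_rxq_set {
            struct toy_refillq *refillq[TOY_MAX_BUFQS_PER_RXQ_GRP];
    };

    /* was: bufq_id ? set->refillq1 : set->refillq0 */
    static struct toy_refillq *
    toy_pick_refillq(const struct toy_rxq_set *set, unsigned int bufq_id)
    {
            return set->refillq[bufq_id];
    }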
*/ struct idpf_bufq_set { - struct idpf_queue bufq; + struct idpf_buf_queue bufq; int num_refillqs; struct idpf_sw_queue *refillqs; }; @@ -831,7 +929,7 @@ struct idpf_rxq_group { union { struct { u16 num_rxq; - struct idpf_queue *rxqs[IDPF_LARGE_MAX_Q]; + struct idpf_rx_queue *rxqs[IDPF_LARGE_MAX_Q]; } singleq; struct { u16 num_rxq_sets; @@ -846,6 +944,7 @@ struct idpf_rxq_group { * @vport: Vport back pointer * @num_txq: Number of TX queues associated * @txqs: Array of TX queue pointers + * @stashes: array of OOO stashes for the queues * @complq: Associated completion queue pointer, split queue only * @num_completions_pending: Total number of completions pending for the * completion queue, acculumated for all TX queues @@ -859,13 +958,26 @@ struct idpf_txq_group { struct idpf_vport *vport; u16 num_txq; - struct idpf_queue *txqs[IDPF_LARGE_MAX_Q]; + struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q]; + struct idpf_txq_stash *stashes; - struct idpf_queue *complq; + struct idpf_compl_queue *complq; u32 num_completions_pending; }; +static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector) +{ + u32 cpu; + + if (!q_vector) + return NUMA_NO_NODE; + + cpu = cpumask_first(q_vector->affinity_mask); + + return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE; +} + /** * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag * @size: transmit request size in bytes @@ -921,60 +1033,6 @@ static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc, idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size); } -/** - * idpf_alloc_page - Allocate a new RX buffer from the page pool - * @pool: page_pool to allocate from - * @buf: metadata struct to populate with page info - * @buf_size: 2K or 4K - * - * Returns &dma_addr_t to be passed to HW for Rx, %DMA_MAPPING_ERROR otherwise. 
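The new idpf_q_vector_to_mem() helper above turns a vector's CPU affinity into a preferred allocation node so ring memory lands near the servicing CPU. A userspace model of the lookup, with a flat table standing in for cpu_to_mem() and a single 64-bit word for the cpumask:

    #include <stdint.h>

    #define TOY_NR_CPUS     64
    #define TOY_NO_NODE     (-1)

    /* The first CPU set in the affinity mask picks the node; an empty
     * mask falls back to "no preference", like NUMA_NO_NODE. */
    static int toy_vector_to_node(uint64_t affinity_mask,
                                  const int cpu_to_node[TOY_NR_CPUS])
    {
            for (unsigned int cpu = 0; cpu < TOY_NR_CPUS; cpu++)
                    if (affinity_mask & (1ull << cpu))
                            return cpu_to_node[cpu];

            return TOY_NO_NODE;
    }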
- */ -static inline dma_addr_t idpf_alloc_page(struct page_pool *pool, - struct idpf_rx_buf *buf, - unsigned int buf_size) -{ - if (buf_size == IDPF_RX_BUF_2048) - buf->page = page_pool_dev_alloc_frag(pool, &buf->page_offset, - buf_size); - else - buf->page = page_pool_dev_alloc_pages(pool); - - if (!buf->page) - return DMA_MAPPING_ERROR; - - buf->truesize = buf_size; - - return page_pool_get_dma_addr(buf->page) + buf->page_offset + - pool->p.offset; -} - -/** - * idpf_rx_put_page - Return RX buffer page to pool - * @rx_buf: RX buffer metadata struct - */ -static inline void idpf_rx_put_page(struct idpf_rx_buf *rx_buf) -{ - page_pool_put_page(rx_buf->page->pp, rx_buf->page, - rx_buf->truesize, true); - rx_buf->page = NULL; -} - -/** - * idpf_rx_sync_for_cpu - Synchronize DMA buffer - * @rx_buf: RX buffer metadata struct - * @len: frame length from descriptor - */ -static inline void idpf_rx_sync_for_cpu(struct idpf_rx_buf *rx_buf, u32 len) -{ - struct page *page = rx_buf->page; - struct page_pool *pp = page->pp; - - dma_sync_single_range_for_cpu(pp->p.dev, - page_pool_get_dma_addr(page), - rx_buf->page_offset + pp->p.offset, len, - page_pool_get_dma_dir(pp)); -} - int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget); void idpf_vport_init_num_qs(struct idpf_vport *vport, struct virtchnl2_create_vport *vport_msg); @@ -991,35 +1049,27 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector); void idpf_vport_intr_deinit(struct idpf_vport *vport); int idpf_vport_intr_init(struct idpf_vport *vport); void idpf_vport_intr_ena(struct idpf_vport *vport); -enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded); int idpf_config_rss(struct idpf_vport *vport); int idpf_init_rss(struct idpf_vport *vport); void idpf_deinit_rss(struct idpf_vport *vport); int idpf_rx_bufs_init_all(struct idpf_vport *vport); void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb, unsigned int size); -struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq, - struct idpf_rx_buf *rx_buf, - unsigned int size); -bool idpf_init_rx_buf_hw_alloc(struct idpf_queue *rxq, struct idpf_rx_buf *buf); -void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val); -void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val, +struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size); +void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val, bool xmit_more); unsigned int idpf_size_to_txd_count(unsigned int size); -netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb); -void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb, +netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb); +void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb, struct idpf_tx_buf *first, u16 ring_idx); -unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq, +unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq, struct sk_buff *skb); -bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, - unsigned int count); -int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size); +int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size); void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue); -netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb, - struct net_device *netdev); -netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb, - struct net_device *netdev); -bool idpf_rx_singleq_buf_hw_alloc_all(struct 
idpf_queue *rxq, +netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, + struct idpf_tx_queue *tx_q); +netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev); +bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq, u16 cleaned_count); int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off); diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index a5f9b7a5effe..70986e12da28 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2023 Intel Corporation */ +#include <net/libeth/rx.h> + #include "idpf.h" #include "idpf_virtchnl.h" @@ -750,7 +752,7 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport) int i; for (i = 0; i < vport->num_txq; i++) - set_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags); + idpf_queue_set(SW_MARKER, vport->txqs[i]); event = wait_event_timeout(vport->sw_marker_wq, test_and_clear_bit(IDPF_VPORT_SW_MARKER, @@ -758,7 +760,7 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport) msecs_to_jiffies(500)); for (i = 0; i < vport->num_txq; i++) - clear_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags); + idpf_queue_clear(POLL_MODE, vport->txqs[i]); if (event) return 0; @@ -1092,7 +1094,6 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals, int num_regs, u32 q_type) { struct idpf_adapter *adapter = vport->adapter; - struct idpf_queue *q; int i, j, k = 0; switch (q_type) { @@ -1111,6 +1112,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals, u16 num_rxq = rx_qgrp->singleq.num_rxq; for (j = 0; j < num_rxq && k < num_regs; j++, k++) { + struct idpf_rx_queue *q; + q = rx_qgrp->singleq.rxqs[j]; q->tail = idpf_get_reg_addr(adapter, reg_vals[k]); @@ -1123,6 +1126,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals, u8 num_bufqs = vport->num_bufqs_per_qgrp; for (j = 0; j < num_bufqs && k < num_regs; j++, k++) { + struct idpf_buf_queue *q; + q = &rx_qgrp->splitq.bufq_sets[j].bufq; q->tail = idpf_get_reg_addr(adapter, reg_vals[k]); @@ -1253,12 +1258,12 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter, vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT); vport_msg->vport_index = cpu_to_le16(idx); - if (adapter->req_tx_splitq) + if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ)) vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); else vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); - if (adapter->req_rx_splitq) + if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ)) vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); else vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); @@ -1320,10 +1325,17 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport) vport_msg = adapter->vport_params_recvd[vport->idx]; + if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ) && + (vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE || + vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) { + pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n"); + return -EOPNOTSUPP; + } + rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids); tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids); - if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { + if (idpf_is_queue_model_split(vport->rxq_model)) { if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) { 
dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n"); vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); @@ -1333,7 +1345,7 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport) vport->base_rxd = true; } - if (vport->txq_model != VIRTCHNL2_QUEUE_MODEL_SPLIT) + if (!idpf_is_queue_model_split(vport->txq_model)) return 0; if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) { @@ -1449,19 +1461,19 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) qi[k].model = cpu_to_le16(vport->txq_model); qi[k].type = - cpu_to_le32(tx_qgrp->txqs[j]->q_type); + cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX); qi[k].ring_len = cpu_to_le16(tx_qgrp->txqs[j]->desc_count); qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->txqs[j]->dma); if (idpf_is_queue_model_split(vport->txq_model)) { - struct idpf_queue *q = tx_qgrp->txqs[j]; + struct idpf_tx_queue *q = tx_qgrp->txqs[j]; qi[k].tx_compl_queue_id = cpu_to_le16(tx_qgrp->complq->q_id); qi[k].relative_queue_id = cpu_to_le16(j); - if (test_bit(__IDPF_Q_FLOW_SCH_EN, q->flags)) + if (idpf_queue_has(FLOW_SCH_EN, q)) qi[k].sched_mode = cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW); else @@ -1478,11 +1490,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id); qi[k].model = cpu_to_le16(vport->txq_model); - qi[k].type = cpu_to_le32(tx_qgrp->complq->q_type); + qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION); qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count); qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma); - if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags)) + if (idpf_queue_has(FLOW_SCH_EN, tx_qgrp->complq)) sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW; else sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE; @@ -1567,17 +1579,18 @@ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport) goto setup_rxqs; for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { - struct idpf_queue *bufq = + struct idpf_buf_queue *bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; qi[k].queue_id = cpu_to_le32(bufq->q_id); qi[k].model = cpu_to_le16(vport->rxq_model); - qi[k].type = cpu_to_le32(bufq->q_type); + qi[k].type = + cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER); qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); qi[k].ring_len = cpu_to_le16(bufq->desc_count); qi[k].dma_ring_addr = cpu_to_le64(bufq->dma); qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size); - qi[k].buffer_notif_stride = bufq->rx_buf_stride; + qi[k].buffer_notif_stride = IDPF_RX_BUF_STRIDE; qi[k].rx_buffer_low_watermark = cpu_to_le16(bufq->rx_buffer_low_watermark); if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) @@ -1591,35 +1604,47 @@ setup_rxqs: num_rxq = rx_qgrp->singleq.num_rxq; for (j = 0; j < num_rxq; j++, k++) { - struct idpf_queue *rxq; + const struct idpf_bufq_set *sets; + struct idpf_rx_queue *rxq; if (!idpf_is_queue_model_split(vport->rxq_model)) { rxq = rx_qgrp->singleq.rxqs[j]; goto common_qi_fields; } + rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq; - qi[k].rx_bufq1_id = - cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[0].bufq.q_id); + sets = rxq->bufq_sets; + + /* In splitq mode, RXQ buffer size should be + * set to that of the first buffer queue + * associated with this RXQ. 
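+ * (Editor's illustration, not part of the patch: with a hypothetical
+ * 4096-byte bufq 0 and a 2048-byte bufq 1, the RXQ inherits 4096 here.)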
+ */ + rxq->rx_buf_size = sets[0].bufq.rx_buf_size; + + qi[k].rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id); if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) { qi[k].bufq2_ena = IDPF_BUFQ2_ENA; qi[k].rx_bufq2_id = - cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[1].bufq.q_id); + cpu_to_le16(sets[1].bufq.q_id); } qi[k].rx_buffer_low_watermark = cpu_to_le16(rxq->rx_buffer_low_watermark); if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC); -common_qi_fields: - if (rxq->rx_hsplit_en) { + rxq->rx_hbuf_size = sets[0].bufq.rx_hbuf_size; + + if (idpf_queue_has(HSPLIT_EN, rxq)) { qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT); qi[k].hdr_buffer_size = cpu_to_le16(rxq->rx_hbuf_size); } + +common_qi_fields: qi[k].queue_id = cpu_to_le32(rxq->q_id); qi[k].model = cpu_to_le16(vport->rxq_model); - qi[k].type = cpu_to_le32(rxq->q_type); + qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); qi[k].ring_len = cpu_to_le16(rxq->desc_count); qi[k].dma_ring_addr = cpu_to_le64(rxq->dma); qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size); @@ -1706,7 +1731,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena) struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; for (j = 0; j < tx_qgrp->num_txq; j++, k++) { - qc[k].type = cpu_to_le32(tx_qgrp->txqs[j]->q_type); + qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX); qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); } @@ -1720,7 +1745,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena) for (i = 0; i < vport->num_txq_grp; i++, k++) { struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; - qc[k].type = cpu_to_le32(tx_qgrp->complq->q_type); + qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION); qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id); qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); } @@ -1741,12 +1766,12 @@ setup_rx: qc[k].start_queue_id = cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id); qc[k].type = - cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_type); + cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); } else { qc[k].start_queue_id = cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id); qc[k].type = - cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_type); + cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); } qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); } @@ -1761,10 +1786,11 @@ setup_rx: struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { - struct idpf_queue *q; + const struct idpf_buf_queue *q; q = &rx_qgrp->splitq.bufq_sets[j].bufq; - qc[k].type = cpu_to_le32(q->q_type); + qc[k].type = + cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER); qc[k].start_queue_id = cpu_to_le32(q->q_id); qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); } @@ -1849,7 +1875,8 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; for (j = 0; j < tx_qgrp->num_txq; j++, k++) { - vqv[k].queue_type = cpu_to_le32(tx_qgrp->txqs[j]->q_type); + vqv[k].queue_type = + cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX); vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); if (idpf_is_queue_model_split(vport->txq_model)) { @@ -1879,14 +1906,15 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) num_rxq = rx_qgrp->singleq.num_rxq; for (j = 0; j < num_rxq; j++, k++) { - struct idpf_queue *rxq; + struct idpf_rx_queue *rxq; if (idpf_is_queue_model_split(vport->rxq_model)) rxq = 
&rx_qgrp->splitq.rxq_sets[j]->rxq; else rxq = rx_qgrp->singleq.rxqs[j]; - vqv[k].queue_type = cpu_to_le32(rxq->q_type); + vqv[k].queue_type = + cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); vqv[k].queue_id = cpu_to_le32(rxq->q_id); vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx); vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx); @@ -1975,7 +2003,7 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport) * queues virtchnl message is sent */ for (i = 0; i < vport->num_txq; i++) - set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags); + idpf_queue_set(POLL_MODE, vport->txqs[i]); /* schedule the napi to receive all the marker packets */ local_bh_disable(); @@ -2469,39 +2497,52 @@ do_memcpy: * @frag: fragmentation allowed * */ -static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype, +static void idpf_fill_ptype_lookup(struct libeth_rx_pt *ptype, struct idpf_ptype_state *pstate, bool ipv4, bool frag) { if (!pstate->outer_ip || !pstate->outer_frag) { - ptype->outer_ip = IDPF_RX_PTYPE_OUTER_IP; pstate->outer_ip = true; if (ipv4) - ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV4; + ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4; else - ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV6; + ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6; if (frag) { - ptype->outer_frag = IDPF_RX_PTYPE_FRAG; + ptype->outer_frag = LIBETH_RX_PT_FRAG; pstate->outer_frag = true; } } else { - ptype->tunnel_type = IDPF_RX_PTYPE_TUNNEL_IP_IP; + ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP; pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP; if (ipv4) - ptype->tunnel_end_prot = - IDPF_RX_PTYPE_TUNNEL_END_IPV4; + ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4; else - ptype->tunnel_end_prot = - IDPF_RX_PTYPE_TUNNEL_END_IPV6; + ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6; if (frag) - ptype->tunnel_end_frag = IDPF_RX_PTYPE_FRAG; + ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG; } } +static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype) +{ + if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 && + ptype->inner_prot) + ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4; + else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 && + ptype->outer_ip) + ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3; + else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2) + ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2; + else + ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE; + + libeth_rx_pt_gen_hash_type(ptype); +} + /** * idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info * @vport: virtual port data structure @@ -2512,7 +2553,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) { struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL; struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL; - struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup; + struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL; int max_ptype, ptypes_recvd = 0, ptype_offset; struct idpf_adapter *adapter = vport->adapter; struct idpf_vc_xn_params xn_params = {}; @@ -2520,12 +2561,17 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) ssize_t reply_sz; int i, j, k; + if (vport->rx_ptype_lkup) + return 0; + if (idpf_is_queue_model_split(vport->rxq_model)) max_ptype = IDPF_RX_MAX_PTYPE; else max_ptype = IDPF_RX_MAX_BASE_PTYPE; - memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup)); + ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL); + if (!ptype_lkup) + return -ENOMEM; get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL); if (!get_ptype_info) @@ -2583,16 +2629,13 @@ 
int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) /* 0xFFFF indicates end of ptypes */ if (le16_to_cpu(ptype->ptype_id_10) == IDPF_INVALID_PTYPE_ID) - return 0; + goto out; if (idpf_is_queue_model_split(vport->rxq_model)) k = le16_to_cpu(ptype->ptype_id_10); else k = ptype->ptype_id_8; - if (ptype->proto_id_count) - ptype_lkup[k].known = 1; - for (j = 0; j < ptype->proto_id_count; j++) { id = le16_to_cpu(ptype->proto_id[j]); switch (id) { @@ -2600,18 +2643,18 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) if (pstate.tunnel_state == IDPF_PTYPE_TUNNEL_IP) { ptype_lkup[k].tunnel_type = - IDPF_RX_PTYPE_TUNNEL_IP_GRENAT; + LIBETH_RX_PT_TUNNEL_IP_GRENAT; pstate.tunnel_state |= IDPF_PTYPE_TUNNEL_IP_GRENAT; } break; case VIRTCHNL2_PROTO_HDR_MAC: ptype_lkup[k].outer_ip = - IDPF_RX_PTYPE_OUTER_L2; + LIBETH_RX_PT_OUTER_L2; if (pstate.tunnel_state == IDPF_TUN_IP_GRE) { ptype_lkup[k].tunnel_type = - IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC; + LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC; pstate.tunnel_state |= IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC; } @@ -2638,23 +2681,23 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) break; case VIRTCHNL2_PROTO_HDR_UDP: ptype_lkup[k].inner_prot = - IDPF_RX_PTYPE_INNER_PROT_UDP; + LIBETH_RX_PT_INNER_UDP; break; case VIRTCHNL2_PROTO_HDR_TCP: ptype_lkup[k].inner_prot = - IDPF_RX_PTYPE_INNER_PROT_TCP; + LIBETH_RX_PT_INNER_TCP; break; case VIRTCHNL2_PROTO_HDR_SCTP: ptype_lkup[k].inner_prot = - IDPF_RX_PTYPE_INNER_PROT_SCTP; + LIBETH_RX_PT_INNER_SCTP; break; case VIRTCHNL2_PROTO_HDR_ICMP: ptype_lkup[k].inner_prot = - IDPF_RX_PTYPE_INNER_PROT_ICMP; + LIBETH_RX_PT_INNER_ICMP; break; case VIRTCHNL2_PROTO_HDR_PAY: ptype_lkup[k].payload_layer = - IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2; + LIBETH_RX_PT_PAYLOAD_L2; break; case VIRTCHNL2_PROTO_HDR_ICMPV6: case VIRTCHNL2_PROTO_HDR_IPV6_EH: @@ -2708,9 +2751,14 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) break; } } + + idpf_finalize_ptype_lookup(&ptype_lkup[k]); } } +out: + vport->rx_ptype_lkup = no_free_ptr(ptype_lkup); + return 0; } @@ -3125,7 +3173,7 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size); ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr); - vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - IDPF_PACKET_HDR_PAD; + vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN; /* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */ memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS); @@ -3242,7 +3290,6 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, int num_qids, u32 q_type) { - struct idpf_queue *q; int i, j, k = 0; switch (q_type) { @@ -3250,11 +3297,8 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, for (i = 0; i < vport->num_txq_grp; i++) { struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; - for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) { + for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) tx_qgrp->txqs[j]->q_id = qids[k]; - tx_qgrp->txqs[j]->q_type = - VIRTCHNL2_QUEUE_TYPE_TX; - } } break; case VIRTCHNL2_QUEUE_TYPE_RX: @@ -3268,12 +3312,13 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, num_rxq = rx_qgrp->singleq.num_rxq; for (j = 0; j < num_rxq && k < num_qids; j++, k++) { + struct idpf_rx_queue *q; + if (idpf_is_queue_model_split(vport->rxq_model)) q = &rx_qgrp->splitq.rxq_sets[j]->rxq; else q = rx_qgrp->singleq.rxqs[j]; q->q_id = qids[k]; - q->q_type = 
VIRTCHNL2_QUEUE_TYPE_RX; } } break; @@ -3282,8 +3327,6 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; tx_qgrp->complq->q_id = qids[k]; - tx_qgrp->complq->q_type = - VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; } break; case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER: @@ -3292,9 +3335,10 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, u8 num_bufqs = vport->num_bufqs_per_qgrp; for (j = 0; j < num_bufqs && k < num_qids; j++, k++) { + struct idpf_buf_queue *q; + q = &rx_qgrp->splitq.bufq_sets[j].bufq; q->q_id = qids[k]; - q->q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; } } break; diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c index bd135d6dccca..f20926669318 100644 --- a/drivers/net/ethernet/intel/libeth/rx.c +++ b/drivers/net/ethernet/intel/libeth/rx.c @@ -6,7 +6,7 @@ /* Rx buffer management */ /** - * libeth_rx_hw_len - get the actual buffer size to be passed to HW + * libeth_rx_hw_len_mtu - get the actual buffer size to be passed to HW * @pp: &page_pool_params of the netdev to calculate the size for * @max_len: maximum buffer size for a single descriptor * @@ -14,7 +14,7 @@ * MTU the @dev has, HW required alignment, minimum and maximum allowed values, * and system's page size. */ -static u32 libeth_rx_hw_len(const struct page_pool_params *pp, u32 max_len) +static u32 libeth_rx_hw_len_mtu(const struct page_pool_params *pp, u32 max_len) { u32 len; @@ -27,6 +27,118 @@ static u32 libeth_rx_hw_len(const struct page_pool_params *pp, u32 max_len) } /** + * libeth_rx_hw_len_truesize - get the short buffer size to be passed to HW + * @pp: &page_pool_params of the netdev to calculate the size for + * @max_len: maximum buffer size for a single descriptor + * @truesize: desired truesize for the buffers + * + * Return: HW-writeable length per one buffer to pass to the HW, ignoring the + * MTU and as close as possible to the passed truesize. Can be used for "short" + * buffer queues to fragment pages more efficiently. + */ +static u32 libeth_rx_hw_len_truesize(const struct page_pool_params *pp, + u32 max_len, u32 truesize) +{ + u32 min, len; + + min = SKB_HEAD_ALIGN(pp->offset + LIBETH_RX_BUF_STRIDE); + truesize = clamp(roundup_pow_of_two(truesize), roundup_pow_of_two(min), + PAGE_SIZE << LIBETH_RX_PAGE_ORDER); + + len = SKB_WITH_OVERHEAD(truesize - pp->offset); + len = ALIGN_DOWN(len, LIBETH_RX_BUF_STRIDE) ? : LIBETH_RX_BUF_STRIDE; + len = min3(len, ALIGN_DOWN(max_len ? : U32_MAX, LIBETH_RX_BUF_STRIDE), + pp->max_len); + + return len; +} + +/** + * libeth_rx_page_pool_params - calculate params with the stack overhead + * @fq: buffer queue to calculate the size for + * @pp: &page_pool_params of the netdev + * + * Set the PP params to fit all needed stack overhead (headroom, tailroom) and + * both the HW buffer length and the truesize for all types of buffers. For + * "short" buffers, truesize never exceeds the "wanted" one; for the rest, + * it can be up to the page size. + * + * Return: true on success, false on invalid input params.
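+ *
+ * Editor's illustration (hypothetical numbers, not from the patch): on 4 KiB
+ * pages with pp->offset = 64, a short queue asking for a 3000-byte truesize
+ * gets it rounded up to the next power of two (4096); the HW length then
+ * becomes SKB_WITH_OVERHEAD(4096 - 64), aligned down to LIBETH_RX_BUF_STRIDE
+ * and clamped against max_len.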
+ */ +static bool libeth_rx_page_pool_params(struct libeth_fq *fq, + struct page_pool_params *pp) +{ + pp->offset = LIBETH_SKB_HEADROOM; + /* HW-writeable / syncable length per one page */ + pp->max_len = LIBETH_RX_PAGE_LEN(pp->offset); + + /* HW-writeable length per buffer */ + switch (fq->type) { + case LIBETH_FQE_MTU: + fq->buf_len = libeth_rx_hw_len_mtu(pp, fq->buf_len); + break; + case LIBETH_FQE_SHORT: + fq->buf_len = libeth_rx_hw_len_truesize(pp, fq->buf_len, + fq->truesize); + break; + case LIBETH_FQE_HDR: + fq->buf_len = ALIGN(LIBETH_MAX_HEAD, LIBETH_RX_BUF_STRIDE); + break; + default: + return false; + } + + /* Buffer size to allocate */ + fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp->offset + + fq->buf_len)); + + return true; +} + +/** + * libeth_rx_page_pool_params_zc - calculate params without the stack overhead + * @fq: buffer queue to calculate the size for + * @pp: &page_pool_params of the netdev + * + * Set the PP params to exclude the stack overhead and both the buffer length + * and the truesize, which are equal for the data buffers. Note that this + * requires separate header buffers to always be active and to account for the + * overhead. + * With the MTU == ``PAGE_SIZE``, this allows the kernel to enable the zerocopy + * mode. + * + * Return: true on success, false on invalid input params. + */ +static bool libeth_rx_page_pool_params_zc(struct libeth_fq *fq, + struct page_pool_params *pp) +{ + u32 mtu, max; + + pp->offset = 0; + pp->max_len = PAGE_SIZE << LIBETH_RX_PAGE_ORDER; + + switch (fq->type) { + case LIBETH_FQE_MTU: + mtu = READ_ONCE(pp->netdev->mtu); + break; + case LIBETH_FQE_SHORT: + mtu = fq->truesize; + break; + default: + return false; + } + + mtu = roundup_pow_of_two(mtu); + max = min(rounddown_pow_of_two(fq->buf_len ?
: U32_MAX), + pp->max_len); + + fq->buf_len = clamp(mtu, LIBETH_RX_BUF_STRIDE, max); + fq->truesize = fq->buf_len; + + return true; +} + +/** * libeth_rx_fq_create - create a PP with the default libeth settings * @fq: buffer queue struct to fill * @napi: &napi_struct covering this PP (no usage outside its poll loops) @@ -44,19 +156,17 @@ int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi) .netdev = napi->dev, .napi = napi, .dma_dir = DMA_FROM_DEVICE, - .offset = LIBETH_SKB_HEADROOM, }; struct libeth_fqe *fqes; struct page_pool *pool; + bool ret; - /* HW-writeable / syncable length per one page */ - pp.max_len = LIBETH_RX_PAGE_LEN(pp.offset); - - /* HW-writeable length per buffer */ - fq->buf_len = libeth_rx_hw_len(&pp, fq->buf_len); - /* Buffer size to allocate */ - fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp.offset + - fq->buf_len)); + if (!fq->hsplit) + ret = libeth_rx_page_pool_params(fq, &pp); + else + ret = libeth_rx_page_pool_params_zc(fq, &pp); + if (!ret) + return -EINVAL; pool = page_pool_create(&pp); if (IS_ERR(pool)) diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 5352fee62d2b..0b9982804370 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -217,9 +217,9 @@ ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch) if (ch->dma.irq) free_irq(ch->dma.irq, priv); if (IS_RX(ch->idx)) { - int desc; + struct ltq_dma_channel *dma = &ch->dma; - for (desc = 0; desc < LTQ_DESC_NUM; desc++) + for (dma->desc = 0; dma->desc < LTQ_DESC_NUM; dma->desc++) dev_kfree_skb_any(ch->skb[ch->dma.desc]); } } diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 9adf4301c9b1..1e52256a9ea8 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -2766,29 +2766,29 @@ static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, } } -static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) +static u32 mvpp2_usec_to_cycles(u32 usec, u32 clk_hz) { u64 tmp = (u64)clk_hz * usec; do_div(tmp, USEC_PER_SEC); - return tmp > U32_MAX ? U32_MAX : tmp; + return min(tmp, U32_MAX); } -static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz) +static u32 mvpp2_cycles_to_usec(u32 cycles, u32 clk_hz) { u64 tmp = (u64)cycles * USEC_PER_SEC; do_div(tmp, clk_hz); - return tmp > U32_MAX ? 
U32_MAX : tmp; + return min(tmp, U32_MAX); } /* Set the time delay in usec before Rx interrupt */ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { - unsigned long freq = port->priv->tclk; + u32 freq = port->priv->tclk; u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); if (val > MVPP2_MAX_ISR_RX_THRESHOLD) { @@ -2804,7 +2804,7 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, static void mvpp2_tx_time_coal_set(struct mvpp2_port *port) { - unsigned long freq = port->priv->tclk; + u32 freq = port->priv->tclk; u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); if (val > MVPP2_MAX_ISR_TX_THRESHOLD) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index ebf84c240d6d..ed2160cc5acb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -1753,7 +1753,7 @@ struct cpt_lf_alloc_req_msg { u16 nix_pf_func; u16 sso_pf_func; u16 eng_grpmsk; - int blkaddr; + u8 blkaddr; u8 ctx_ilen_valid : 1; u8 ctx_ilen : 7; }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index d883157393ea..6c3aca6f278d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -63,8 +63,13 @@ enum npc_kpu_lb_ltype { NPC_LT_LB_CUSTOM1 = 0xF, }; +/* Don't modify ltypes up to IP6_EXT, otherwise length and checksum of IP + * headers may not be checked correctly. IPv4 ltypes and IPv6 ltypes must + * differ only at bit 0 so mask 0xE can be used to detect extended headers. + */ enum npc_kpu_lc_ltype { - NPC_LT_LC_IP = 1, + NPC_LT_LC_PTP = 1, + NPC_LT_LC_IP, NPC_LT_LC_IP_OPT, NPC_LT_LC_IP6, NPC_LT_LC_IP6_EXT, @@ -72,7 +77,6 @@ enum npc_kpu_lc_ltype { NPC_LT_LC_RARP, NPC_LT_LC_MPLS, NPC_LT_LC_NSH, - NPC_LT_LC_PTP, NPC_LT_LC_FCOE, NPC_LT_LC_NGIO, NPC_LT_LC_CUSTOM0 = 0xE, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 5a4f774aed64..ac7ee3f3598c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -1643,7 +1643,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu, if (req->ssow > block->lf.max) { dev_err(&rvu->pdev->dev, "Func 0x%x: Invalid SSOW req, %d > max %d\n", - pcifunc, req->sso, block->lf.max); + pcifunc, req->ssow, block->lf.max); return -EINVAL; } mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c index f047185f38e0..3e09d2285814 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c @@ -696,7 +696,8 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req, struct cpt_rd_wr_reg_msg *rsp) { - int blkaddr; + u64 offset = req->reg_offset; + int blkaddr, lf; blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); if (blkaddr < 0) @@ -707,17 +708,25 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu, !is_cpt_vf(rvu, req->hdr.pcifunc)) return CPT_AF_ERR_ACCESS_DENIED; - rsp->reg_offset = req->reg_offset; - rsp->ret_val = req->ret_val; - rsp->is_write = req->is_write; - if (!is_valid_offset(rvu, req)) return CPT_AF_ERR_ACCESS_DENIED; + /* Translate local LF used by VFs to global CPT LF */ + lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], req->hdr.pcifunc, + (offset & 
0xFFF) >> 3); + + /* Translate local LF's offset to global CPT LF's offset */ + offset &= 0xFF000; + offset += lf << 3; + + rsp->reg_offset = offset; + rsp->ret_val = req->ret_val; + rsp->is_write = req->is_write; + if (req->is_write) - rvu_write64(rvu, blkaddr, req->reg_offset, req->val); + rvu_write64(rvu, blkaddr, offset, req->val); else - rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset); + rsp->val = rvu_read64(rvu, blkaddr, offset); return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 4dbbaa719cbf..222f9e00b836 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -3862,6 +3862,11 @@ static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg) return -ERANGE; } +/* Mask to match ipv6(NPC_LT_LC_IP6) and ipv6 ext(NPC_LT_LC_IP6_EXT) */ +#define NPC_LT_LC_IP6_MATCH_MSK ((~(NPC_LT_LC_IP6 ^ NPC_LT_LC_IP6_EXT)) & 0xf) +/* Mask to match both ipv4(NPC_LT_LC_IP) and ipv4 ext(NPC_LT_LC_IP_OPT) */ +#define NPC_LT_LC_IP_MATCH_MSK ((~(NPC_LT_LC_IP ^ NPC_LT_LC_IP_OPT)) & 0xf) + static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) { int idx, nr_field, key_off, field_marker, keyoff_marker; @@ -3931,7 +3936,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) field->hdr_offset = 9; /* offset */ field->bytesm1 = 0; /* 1 byte */ field->ltype_match = NPC_LT_LC_IP; - field->ltype_mask = 0xF; + field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK; break; case NIX_FLOW_KEY_TYPE_IPV4: case NIX_FLOW_KEY_TYPE_INNR_IPV4: @@ -3958,8 +3963,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) field->bytesm1 = 3; /* DIP, 4 bytes */ } } - - field->ltype_mask = 0xF; /* Match only IPv4 */ + field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK; keyoff_marker = false; break; case NIX_FLOW_KEY_TYPE_IPV6: @@ -3988,7 +3992,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) field->bytesm1 = 15; /* DIP,16 bytes */ } } - field->ltype_mask = 0xF; /* Match only IPv6 */ + field->ltype_mask = NPC_LT_LC_IP6_MATCH_MSK; break; case NIX_FLOW_KEY_TYPE_TCP: case NIX_FLOW_KEY_TYPE_UDP: diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c index 31aebeb2e285..25989c79c92e 100644 --- a/drivers/net/ethernet/mediatek/mtk_star_emac.c +++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c @@ -1524,6 +1524,7 @@ static int mtk_star_probe(struct platform_device *pdev) { struct device_node *of_node; struct mtk_star_priv *priv; + struct phy_device *phydev; struct net_device *ndev; struct device *dev; void __iomem *base; @@ -1649,6 +1650,12 @@ static int mtk_star_probe(struct platform_device *pdev) netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll); netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll); + phydev = of_phy_find_device(priv->phy_node); + if (phydev) { + phydev->mac_managed_pm = true; + put_device(&phydev->mdio.dev); + } + return devm_register_netdev(dev, ndev); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 4d6225e0eec7..1e8b7d330701 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -154,6 +154,19 @@ struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs); struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs); struct mlx5_flow_namespace 
*mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress); void mlx5e_fs_set_ns(struct mlx5e_flow_steering *fs, struct mlx5_flow_namespace *ns, bool egress); + +static inline bool mlx5e_fs_has_arfs(struct net_device *netdev) +{ + return IS_ENABLED(CONFIG_MLX5_EN_ARFS) && + netdev->hw_features & NETIF_F_NTUPLE; +} + +static inline bool mlx5e_fs_want_arfs(struct net_device *netdev) +{ + return IS_ENABLED(CONFIG_MLX5_EN_ARFS) && + netdev->features & NETIF_F_NTUPLE; +} + #ifdef CONFIG_MLX5_EN_RXNFC struct mlx5e_ethtool_steering *mlx5e_fs_get_ethtool(struct mlx5e_flow_steering *fs); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 3320f12ba2db..5582c93a62f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -525,7 +525,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, opened = test_bit(MLX5E_STATE_OPENED, &priv->state); - arfs_enabled = opened && (priv->netdev->features & NETIF_F_NTUPLE); + arfs_enabled = opened && mlx5e_fs_want_arfs(priv->netdev); if (arfs_enabled) mlx5e_arfs_disable(priv->fs); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 8c5b291a171f..05058710d2c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -1307,8 +1307,7 @@ int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs, return -EOPNOTSUPP; mlx5e_fs_set_ns(fs, ns, false); - err = mlx5e_arfs_create_tables(fs, rx_res, - !!(netdev->hw_features & NETIF_F_NTUPLE)); + err = mlx5e_arfs_create_tables(fs, rx_res, mlx5e_fs_has_arfs(netdev)); if (err) { fs_err(fs, "Failed to create arfs tables, err=%d\n", err); netdev->hw_features &= ~NETIF_F_NTUPLE; @@ -1355,7 +1354,7 @@ err_destroy_ttc_table: err_destroy_inner_ttc_table: mlx5e_destroy_inner_ttc_table(fs); err_destroy_arfs_tables: - mlx5e_arfs_destroy_tables(fs, !!(netdev->hw_features & NETIF_F_NTUPLE)); + mlx5e_arfs_destroy_tables(fs, mlx5e_fs_has_arfs(netdev)); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index ff335527c10a..6f686fabed44 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -5556,8 +5556,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) netdev->hw_features |= NETIF_F_HW_TC; #endif -#ifdef CONFIG_MLX5_EN_ARFS +#if IS_ENABLED(CONFIG_MLX5_EN_ARFS) netdev->hw_features |= NETIF_F_NTUPLE; +#elif IS_ENABLED(CONFIG_MLX5_EN_RXNFC) + netdev->features |= NETIF_F_NTUPLE; #endif } @@ -5731,7 +5733,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) err_tc_nic_cleanup: mlx5e_tc_nic_cleanup(priv); err_destroy_flow_steering: - mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE), + mlx5e_destroy_flow_steering(priv->fs, mlx5e_fs_has_arfs(priv->netdev), priv->profile); err_destroy_rx_res: mlx5e_rx_res_destroy(priv->rx_res); @@ -5747,7 +5749,7 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) { mlx5e_accel_cleanup_rx(priv); mlx5e_tc_nic_cleanup(priv); - mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE), + mlx5e_destroy_flow_steering(priv->fs, mlx5e_fs_has_arfs(priv->netdev), priv->profile); mlx5e_rx_res_destroy(priv->rx_res); priv->rx_res = NULL; diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index ac1565c0c8af..4326aa42bf2d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -1187,7 +1187,6 @@ static int get_num_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *eq_table = dev->priv.eq_table; int max_dev_eqs; - int max_eqs_sf; int num_eqs; /* If ethernet is disabled we use just a single completion vector to @@ -1202,7 +1201,11 @@ static int get_num_eqs(struct mlx5_core_dev *dev) num_eqs = min_t(int, mlx5_irq_table_get_num_comp(eq_table->irq_table), max_dev_eqs - MLX5_MAX_ASYNC_EQS); if (mlx5_core_is_sf(dev)) { - max_eqs_sf = min_t(int, MLX5_COMP_EQS_PER_SF, + int max_eqs_sf = MLX5_CAP_GEN_2(dev, sf_eq_usage) ? + MLX5_CAP_GEN_2(dev, max_num_eqs_24b) : + MLX5_COMP_EQS_PER_SF; + + max_eqs_sf = min_t(int, max_eqs_sf, mlx5_irq_table_get_sfs_vec(eq_table->irq_table)); num_eqs = min_t(int, num_eqs, max_eqs_sf); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 88745dc6aed5..578466d69f21 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -223,6 +223,7 @@ struct mlx5_vport { u16 vport; bool enabled; + bool max_eqs_set; enum mlx5_eswitch_vport_event enabled_events; int index; struct mlx5_devlink_port *dl_port; @@ -579,6 +580,8 @@ int mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port, int mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs, struct netlink_ext_ack *extack); +int mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port, + struct netlink_ext_ack *extack); void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 72949cb85244..768199d2255a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -68,6 +68,7 @@ #define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1) #define MLX5_ESW_MAX_CTRL_EQS 4 +#define MLX5_ESW_DEFAULT_SF_COMP_EQS 8 static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = { .max_fte = MLX5_ESW_VPORT_TBL_SIZE, @@ -4676,13 +4677,25 @@ mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs, hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); MLX5_SET(cmd_hca_cap_2, hca_caps, max_num_eqs_24b, max_eqs); + if (mlx5_esw_is_sf_vport(esw, vport_num)) + MLX5_SET(cmd_hca_cap_2, hca_caps, sf_eq_usage, 1); + err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2); if (err) NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA caps"); - + vport->max_eqs_set = true; out: mutex_unlock(&esw->state_lock); kfree(query_ctx); return err; } + +int +mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port, + struct netlink_ext_ack *extack) +{ + return mlx5_devlink_port_fn_max_io_eqs_set(port, + MLX5_ESW_DEFAULT_SF_COMP_EQS, + extack); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 8e0404c0d1ca..0979d672d47f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -372,7 +372,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) 
mlx5e_fs_set_ns(priv->fs, ns, false); err = mlx5e_arfs_create_tables(priv->fs, priv->rx_res, - !!(priv->netdev->hw_features & NETIF_F_NTUPLE)); + mlx5e_fs_has_arfs(priv->netdev)); if (err) { netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n", err); @@ -391,8 +391,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) return 0; err_destroy_arfs_tables: - mlx5e_arfs_destroy_tables(priv->fs, - !!(priv->netdev->hw_features & NETIF_F_NTUPLE)); + mlx5e_arfs_destroy_tables(priv->fs, mlx5e_fs_has_arfs(priv->netdev)); return err; } @@ -400,8 +399,7 @@ err_destroy_arfs_tables: static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv) { mlx5e_destroy_ttc_table(priv->fs); - mlx5e_arfs_destroy_tables(priv->fs, - !!(priv->netdev->hw_features & NETIF_F_NTUPLE)); + mlx5e_arfs_destroy_tables(priv->fs, mlx5e_fs_has_arfs(priv->netdev)); mlx5e_ethtool_cleanup_steering(priv->fs); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 401d39069680..86208b86eea8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -16,6 +16,7 @@ #endif #define MLX5_SFS_PER_CTRL_IRQ 64 +#define MLX5_MAX_MSIX_PER_SF 256 #define MLX5_IRQ_CTRL_SF_MAX 8 /* min num of vectors for SFs to be enabled */ #define MLX5_IRQ_VEC_COMP_BASE_SF 2 @@ -589,8 +590,6 @@ static void irq_pool_free(struct mlx5_irq_pool *pool) static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec) { struct mlx5_irq_table *table = dev->priv.irq_table; - int num_sf_ctrl_by_msix; - int num_sf_ctrl_by_sfs; int num_sf_ctrl; int err; @@ -608,10 +607,8 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec) } /* init sf_ctrl_pool */ - num_sf_ctrl_by_msix = DIV_ROUND_UP(sf_vec, MLX5_COMP_EQS_PER_SF); - num_sf_ctrl_by_sfs = DIV_ROUND_UP(mlx5_sf_max_functions(dev), - MLX5_SFS_PER_CTRL_IRQ); - num_sf_ctrl = min_t(int, num_sf_ctrl_by_msix, num_sf_ctrl_by_sfs); + num_sf_ctrl = DIV_ROUND_UP(mlx5_sf_max_functions(dev), + MLX5_SFS_PER_CTRL_IRQ); num_sf_ctrl = min_t(int, MLX5_IRQ_CTRL_SF_MAX, num_sf_ctrl); table->sf_ctrl_pool = irq_pool_alloc(dev, pcif_vec, num_sf_ctrl, "mlx5_sf_ctrl", @@ -726,8 +723,7 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev) total_vec = pcif_vec; if (mlx5_sf_max_functions(dev)) - total_vec += MLX5_IRQ_CTRL_SF_MAX + - MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev); + total_vec += MLX5_MAX_MSIX_PER_SF * mlx5_sf_max_functions(dev); total_vec = min_t(int, total_vec, pci_msix_vec_count(dev->pdev)); pcif_vec = min_t(int, pcif_vec, pci_msix_vec_count(dev->pdev)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c index 6c11e075cab0..a96be98be032 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c @@ -161,6 +161,7 @@ int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port, static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf, struct netlink_ext_ack *extack) { + struct mlx5_vport *vport; int err; if (mlx5_sf_is_active(sf)) @@ -170,6 +171,13 @@ static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf, return -EBUSY; } + vport = mlx5_devlink_port_vport_get(&sf->dl_port.dl_port); + if (!vport->max_eqs_set && MLX5_CAP_GEN_2(dev, max_num_eqs_24b)) { + err = mlx5_devlink_port_fn_max_io_eqs_set_sf_default(&sf->dl_port.dl_port, + extack); + if (err) + return 
err; + } err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id); if (err) return err; @@ -318,7 +326,11 @@ int mlx5_devlink_sf_port_new(struct devlink *devlink, static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf) { + struct mlx5_vport *vport; + mutex_lock(&table->sf_state_lock); + vport = mlx5_devlink_port_vport_get(&sf->dl_port.dl_port); + vport->max_eqs_set = false; mlx5_sf_function_id_erase(table, sf); diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c index 6453c92f0fa7..7fa1820db9cc 100644 --- a/drivers/net/ethernet/micrel/ks8851_common.c +++ b/drivers/net/ethernet/micrel/ks8851_common.c @@ -352,11 +352,11 @@ static irqreturn_t ks8851_irq(int irq, void *_ks) netif_dbg(ks, intr, ks->netdev, "%s: txspace %d\n", __func__, tx_space); - spin_lock(&ks->statelock); + spin_lock_bh(&ks->statelock); ks->tx_space = tx_space; if (netif_queue_stopped(ks->netdev)) netif_wake_queue(ks->netdev); - spin_unlock(&ks->statelock); + spin_unlock_bh(&ks->statelock); } if (status & IRQ_SPIBEI) { @@ -482,6 +482,7 @@ static int ks8851_net_open(struct net_device *dev) ks8851_wrreg16(ks, KS_IER, ks->rc_ier); ks->queued_len = 0; + ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR); netif_start_queue(ks->netdev); netif_dbg(ks, ifup, ks->netdev, "network device up\n"); @@ -635,14 +636,14 @@ static void ks8851_set_rx_mode(struct net_device *dev) /* schedule work to do the actual set of the data if needed */ - spin_lock(&ks->statelock); + spin_lock_bh(&ks->statelock); if (memcmp(&rxctrl, &ks->rxctrl, sizeof(rxctrl)) != 0) { memcpy(&ks->rxctrl, &rxctrl, sizeof(ks->rxctrl)); schedule_work(&ks->rxctrl_work); } - spin_unlock(&ks->statelock); + spin_unlock_bh(&ks->statelock); } static int ks8851_set_mac_address(struct net_device *dev, void *addr) @@ -1101,7 +1102,6 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev, int ret; ks->netdev = netdev; - ks->tx_space = 6144; ks->gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); ret = PTR_ERR_OR_ZERO(ks->gpio); diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c index 670c1de966db..3062cc0f9199 100644 --- a/drivers/net/ethernet/micrel/ks8851_spi.c +++ b/drivers/net/ethernet/micrel/ks8851_spi.c @@ -340,10 +340,10 @@ static void ks8851_tx_work(struct work_struct *work) tx_space = ks8851_rdreg16_spi(ks, KS_TXMIR); - spin_lock(&ks->statelock); + spin_lock_bh(&ks->statelock); ks->queued_len -= dequeued_len; ks->tx_space = tx_space; - spin_unlock(&ks->statelock); + spin_unlock_bh(&ks->statelock); ks8851_unlock_spi(ks, &flags); } diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c index 7a1c9337081b..36114ce88034 100644 --- a/drivers/net/ethernet/sfc/falcon/falcon.c +++ b/drivers/net/ethernet/sfc/falcon/falcon.c @@ -367,7 +367,7 @@ static const struct i2c_algo_bit_data falcon_i2c_bit_operations = { .getsda = falcon_getsda, .getscl = falcon_getscl, .udelay = 5, - /* Wait up to 50 ms for slave to let us pull SCL high */ + /* Wait up to 50 ms for target to let us pull SCL high */ .timeout = DIV_ROUND_UP(HZ, 20), }; diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c index f9afea25044f..4dc057c121f5 100644 --- a/drivers/net/mctp/mctp-i2c.c +++ b/drivers/net/mctp/mctp-i2c.c @@ -442,6 +442,42 @@ static void mctp_i2c_unlock_reset(struct mctp_i2c_dev *midev) i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT); } +static void mctp_i2c_invalidate_tx_flow(struct mctp_i2c_dev *midev, + struct 
sk_buff *skb) +{ + struct mctp_sk_key *key; + struct mctp_flow *flow; + unsigned long flags; + bool release; + + flow = skb_ext_find(skb, SKB_EXT_MCTP); + if (!flow) + return; + + key = flow->key; + if (!key) + return; + + spin_lock_irqsave(&key->lock, flags); + if (key->manual_alloc) { + /* we don't have control over lifetimes for manually-allocated + * keys, so cannot assume we can invalidate all future flows + * that would use this key. + */ + release = false; + } else { + release = key->dev_flow_state == MCTP_I2C_FLOW_STATE_ACTIVE; + key->dev_flow_state = MCTP_I2C_FLOW_STATE_INVALID; + } + spin_unlock_irqrestore(&key->lock, flags); + + /* if we have changed state from active, the flow held a reference on + * the lock; release that now. + */ + if (release) + mctp_i2c_unlock_nest(midev); +} + static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb) { struct net_device_stats *stats = &midev->ndev->stats; @@ -500,6 +536,11 @@ static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb) case MCTP_I2C_TX_FLOW_EXISTING: /* existing flow: we already have the lock; just tx */ rc = __i2c_transfer(midev->adapter, &msg, 1); + + /* on tx errors, the flow can no longer be considered valid */ + if (rc) + mctp_i2c_invalidate_tx_flow(midev, skb); + break; case MCTP_I2C_TX_FLOW_INVALID: diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index ab8a0623b1a1..5ef680cf994a 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -344,7 +344,7 @@ static ssize_t enabled_store(struct config_item *item, goto out_unlock; err = -EINVAL; - if ((bool)enabled == nt->enabled) { + if (enabled == nt->enabled) { pr_info("network logging has already %s\n", nt->enabled ? "started" : "stopped"); goto out_unlock; @@ -369,6 +369,7 @@ static ssize_t enabled_store(struct config_item *item, if (err) goto out_unlock; + nt->enabled = true; pr_info("network logging started\n"); } else { /* false */ /* We need to disable the netconsole before cleaning it up @@ -381,8 +382,6 @@ netpoll_cleanup(&nt->np); } - nt->enabled = enabled; - mutex_unlock(&dynamic_netconsole_mutex); return strnlen(buf, count); out_unlock: diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c index d7616b13c594..551e37786c2d 100644 --- a/drivers/net/phy/dp83td510.c +++ b/drivers/net/phy/dp83td510.c @@ -4,6 +4,7 @@ */ #include <linux/bitfield.h> +#include <linux/ethtool_netlink.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/phy.h> @@ -29,6 +30,10 @@ #define DP83TD510E_INT1_LINK BIT(13) #define DP83TD510E_INT1_LINK_EN BIT(5) +#define DP83TD510E_CTRL 0x1f +#define DP83TD510E_CTRL_HW_RESET BIT(15) +#define DP83TD510E_CTRL_SW_RESET BIT(14) + #define DP83TD510E_AN_STAT_1 0x60c #define DP83TD510E_MASTER_SLAVE_RESOL_FAIL BIT(15) @@ -53,6 +58,117 @@ static const u16 dp83td510_mse_sqi_map[] = { 0x0000 /* 24dB =< SNR */ }; +/* Time Domain Reflectometry (TDR) Functionality of DP83TD510 PHY + * + * I assume that this PHY is using a variation of Spread Spectrum Time Domain + * Reflectometry (SSTDR) rather than the commonly used TDR found in many PHYs. + * The following observations likely confirm this: + * - The DP83TD510 PHY transmits a modulated signal of configurable length + * (default 16000 µs) instead of a single pulse pattern, which is typical + * for traditional TDR. + * - The pulse observed on the wire, triggered by the HW RESET register, is not + * part of the cable testing process.
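+ *
+ * Editor's aside (general background, not from the datasheet): classic TDR
+ * sends a single pulse and times its echo directly, while SSTDR correlates a
+ * long pseudo-noise-modulated sequence against its reflection, trading a
+ * longer measurement (16000 µs by default here) for much better noise
+ * immunity.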
+ * + * SSTDR seems to be a logical choice for the 10BaseT1L + * environment due to its improved noise resistance, making it suitable for + * environments with significant electrical noise, such as long 10BaseT1L cable + * runs. + * + * Configuration Variables: + * The SSTDR variation used in this PHY involves more configuration variables + * that can dramatically affect the functionality and precision of cable + * testing. Since most of these configuration options are either not well + * documented or documented with minimal details, the following sections + * describe my understanding and observations of these variables and their + * impact on TDR functionality. + * + * Timeline: + * ,<--cfg_pre_silence_time + * | ,<-SSTDR Modulated Transmission + * | | ,<--cfg_post_silence_time + * | | | ,<--Force Link Mode + * |<--'-->|<-------'------->|<--'-->|<--------'------->| + * + * - cfg_pre_silence_time: Optional silence time before TDR transmission starts. + * - SSTDR Modulated Transmission: Transmission duration configured by + * cfg_tdr_tx_duration and amplitude configured by cfg_tdr_tx_type. + * - cfg_post_silence_time: Silence time after TDR transmission. + * - Force Link Mode: If nothing is configured after cfg_post_silence_time, + * the PHY continues in force link mode without autonegotiation. + */ + +#define DP83TD510E_TDR_CFG 0x1e +#define DP83TD510E_TDR_START BIT(15) +#define DP83TD510E_TDR_DONE BIT(1) +#define DP83TD510E_TDR_FAIL BIT(0) + +#define DP83TD510E_TDR_CFG1 0x300 +/* cfg_tdr_tx_type: Transmit voltage level for TDR. + * 0 = 1V, 1 = 2.4V + * Note: Using different voltage levels may not work + * in all configuration variations. For example, setting + * 2.4V may give different cable length measurements. + * Other settings may be needed to make it work properly. + */ +#define DP83TD510E_TDR_TX_TYPE BIT(12) +#define DP83TD510E_TDR_TX_TYPE_1V 0 +#define DP83TD510E_TDR_TX_TYPE_2_4V 1 +/* cfg_post_silence_time: Time after the TDR sequence. Since we force master mode + * for the TDR, the PHY will proceed with a forced link state after this time. + * For Linux it is better to set the max value to avoid false link state + * detection. + */ +#define DP83TD510E_TDR_CFG1_POST_SILENCE_TIME GENMASK(3, 2) +#define DP83TD510E_TDR_CFG1_POST_SILENCE_TIME_0MS 0 +#define DP83TD510E_TDR_CFG1_POST_SILENCE_TIME_10MS 1 +#define DP83TD510E_TDR_CFG1_POST_SILENCE_TIME_100MS 2 +#define DP83TD510E_TDR_CFG1_POST_SILENCE_TIME_1000MS 3 +/* cfg_pre_silence_time: Time before the TDR sequence. It should be enough to + * settle down all pulses and reflections. Since for 10BASE-T1L we have a + * maximum 2000m cable length, we can set it to 10ms. + */ +#define DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME GENMASK(1, 0) +#define DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME_0MS 0 +#define DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME_10MS 1 +#define DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME_100MS 2 +#define DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME_1000MS 3 + +#define DP83TD510E_TDR_CFG2 0x301 +#define DP83TD510E_TDR_END_TAP_INDEX_1 GENMASK(14, 8) +#define DP83TD510E_TDR_END_TAP_INDEX_1_DEF 36 +#define DP83TD510E_TDR_START_TAP_INDEX_1 GENMASK(6, 0) +#define DP83TD510E_TDR_START_TAP_INDEX_1_DEF 4 + +#define DP83TD510E_TDR_CFG3 0x302 +/* cfg_tdr_tx_duration: Duration of the TDR transmission in microseconds. + * This value sets the duration of the modulated signal used for TDR + * measurements. + * - Default: 16000 µs + * - Observation: A minimum duration of 6000 µs is recommended to ensure + * accurate detection of cable faults.
Durations shorter than 6000 µs may + * result in incomplete data, especially for shorter cables (e.g., 20 meters), + * leading to false "OK" results. Longer durations (e.g., 6000 µs or more) + * provide better accuracy, particularly for detecting open circuits. + */ +#define DP83TD510E_TDR_TX_DURATION_US GENMASK(15, 0) +#define DP83TD510E_TDR_TX_DURATION_US_DEF 16000 + +#define DP83TD510E_TDR_FAULT_CFG1 0x303 +#define DP83TD510E_TDR_FLT_LOC_OFFSET_1 GENMASK(14, 8) +#define DP83TD510E_TDR_FLT_LOC_OFFSET_1_DEF 4 +#define DP83TD510E_TDR_FLT_INIT_1 GENMASK(7, 0) +#define DP83TD510E_TDR_FLT_INIT_1_DEF 62 + +#define DP83TD510E_TDR_FAULT_STAT 0x30c +#define DP83TD510E_TDR_PEAK_DETECT BIT(11) +#define DP83TD510E_TDR_PEAK_SIGN BIT(10) +#define DP83TD510E_TDR_PEAK_LOCATION GENMASK(9, 0) + +/* Undocumented registers and values, but recommended according to + * "DP83TD510E Cable Diagnostics Toolkit revC" + */ +#define DP83TD510E_UNKN_030E 0x30e +#define DP83TD510E_030E_VAL 0x2520 + static int dp83td510_config_intr(struct phy_device *phydev) { int ret; @@ -198,6 +314,151 @@ static int dp83td510_get_sqi_max(struct phy_device *phydev) return DP83TD510_SQI_MAX; } +/** + * dp83td510_cable_test_start - Start the cable test for the DP83TD510 PHY. + * @phydev: Pointer to the phy_device structure. + * + * This sequence is implemented according to the "Application Note DP83TD510E + * Cable Diagnostics Toolkit revC". + * + * Returns: 0 on success, a negative error code on failure. + */ +static int dp83td510_cable_test_start(struct phy_device *phydev) +{ + int ret; + + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_CTRL, + DP83TD510E_CTRL_HW_RESET); + if (ret) + return ret; + + ret = genphy_c45_an_disable_aneg(phydev); + if (ret) + return ret; + + /* Force master mode */ + ret = phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL, + MDIO_PMA_PMD_BT1_CTRL_CFG_MST); + if (ret) + return ret; + + /* There is no official recommendation for this register, but it is + * better to use 1V for TDR since other values seem to be optimized + * for this amplitude. Apart from the amplitude, it is better to configure + * the pre TDR silence time to 10ms to avoid false reflections (value 0 + * seems to be too short, otherwise we would need to implement our own + * silence time). Also, the post TDR silence time should be set to 1000ms + * to avoid false link state detection; this fits the polling interval of + * the PHY framework. The idea is to wait until + * dp83td510_cable_test_get_status() is called and to reconfigure + * the PHY to the default state within the post silence time window.
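+ *
+ * Editor's aside (rough arithmetic, assuming a ~0.7c propagation speed): a
+ * reflection on a worst-case 2000 m 10BASE-T1L segment returns within about
+ * 2 * 2000 m / (0.7 * 3e8 m/s) ~= 19 us, so even the shortest non-zero
+ * pre-silence setting (10 ms) leaves ample settling margin.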
+ */ + ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG1, + DP83TD510E_TDR_TX_TYPE | + DP83TD510E_TDR_CFG1_POST_SILENCE_TIME | + DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME, + DP83TD510E_TDR_TX_TYPE_1V | + DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME_10MS | + DP83TD510E_TDR_CFG1_POST_SILENCE_TIME_1000MS); + if (ret) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG2, + FIELD_PREP(DP83TD510E_TDR_END_TAP_INDEX_1, + DP83TD510E_TDR_END_TAP_INDEX_1_DEF) | + FIELD_PREP(DP83TD510E_TDR_START_TAP_INDEX_1, + DP83TD510E_TDR_START_TAP_INDEX_1_DEF)); + if (ret) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_FAULT_CFG1, + FIELD_PREP(DP83TD510E_TDR_FLT_LOC_OFFSET_1, + DP83TD510E_TDR_FLT_LOC_OFFSET_1_DEF) | + FIELD_PREP(DP83TD510E_TDR_FLT_INIT_1, + DP83TD510E_TDR_FLT_INIT_1_DEF)); + if (ret) + return ret; + + /* Undocumented register, from the "Application Note DP83TD510E Cable + * Diagnostics Toolkit revC". + */ + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_UNKN_030E, + DP83TD510E_030E_VAL); + if (ret) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG3, + DP83TD510E_TDR_TX_DURATION_US_DEF); + if (ret) + return ret; + + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_CTRL, + DP83TD510E_CTRL_SW_RESET); + if (ret) + return ret; + + return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG, + DP83TD510E_TDR_START); +} + +/** + * dp83td510_cable_test_get_status - Get the status of the cable test for the + * DP83TD510 PHY. + * @phydev: Pointer to the phy_device structure. + * @finished: Pointer to a boolean that indicates whether the test is finished. + * + * The function sets the @finished flag to true if the test is complete. + * + * Returns: 0 on success or a negative error code on failure. + */ +static int dp83td510_cable_test_get_status(struct phy_device *phydev, + bool *finished) +{ + int ret, stat; + + *finished = false; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG); + if (ret < 0) + return ret; + + if (!(ret & DP83TD510E_TDR_DONE)) + return 0; + + if (!(ret & DP83TD510E_TDR_FAIL)) { + int location; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, + DP83TD510E_TDR_FAULT_STAT); + if (ret < 0) + return ret; + + if (ret & DP83TD510E_TDR_PEAK_DETECT) { + if (ret & DP83TD510E_TDR_PEAK_SIGN) + stat = ETHTOOL_A_CABLE_RESULT_CODE_OPEN; + else + stat = ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT; + + location = FIELD_GET(DP83TD510E_TDR_PEAK_LOCATION, + ret) * 100; + ethnl_cable_test_fault_length(phydev, + ETHTOOL_A_CABLE_PAIR_A, + location); + } else { + stat = ETHTOOL_A_CABLE_RESULT_CODE_OK; + } + } else { + /* Most probably we have an active link partner */ + stat = ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC; + } + + *finished = true; + + ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, stat); + + return phy_init_hw(phydev); +} + static int dp83td510_get_features(struct phy_device *phydev) { /* This PHY can't respond on MDIO bus if no RMII clock is enabled.
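Editor's sketch, not part of the patch: a self-contained C program modelling how dp83td510_cable_test_get_status() above decodes DP83TD510E_TDR_FAULT_STAT. The register value is hypothetical; the open/short decision and the *100 centimetre scaling mirror the driver code above.

#include <stdint.h>
#include <stdio.h>

#define TDR_PEAK_DETECT   (1u << 11)	/* DP83TD510E_TDR_PEAK_DETECT */
#define TDR_PEAK_SIGN     (1u << 10)	/* DP83TD510E_TDR_PEAK_SIGN */
#define TDR_PEAK_LOCATION 0x3ffu	/* GENMASK(9, 0) */

int main(void)
{
	uint16_t stat = 0x0c14;	/* hypothetical DP83TD510E_TDR_FAULT_STAT read */

	if (stat & TDR_PEAK_DETECT) {
		/* sign bit set -> open circuit, clear -> same-pair short */
		const char *kind = (stat & TDR_PEAK_SIGN) ? "open" : "short";
		/* location field scaled by 100 to centimetres, as above */
		unsigned int cm = (stat & TDR_PEAK_LOCATION) * 100;

		printf("%s circuit at ~%u cm\n", kind, cm); /* open at 2000 cm */
	} else {
		printf("no peak detected: cable looks OK\n");
	}
	return 0;
}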
@@ -221,6 +482,7 @@ static struct phy_driver dp83td510_driver[] = {
 	PHY_ID_MATCH_MODEL(DP83TD510E_PHY_ID),
 	.name = "TI DP83TD510E",
+	.flags = PHY_POLL_CABLE_TEST,
 	.config_aneg = dp83td510_config_aneg,
 	.read_status = dp83td510_read_status,
 	.get_features = dp83td510_get_features,
@@ -228,6 +490,8 @@ static struct phy_driver dp83td510_driver[] = {
 	.handle_interrupt = dp83td510_handle_interrupt,
 	.get_sqi = dp83td510_get_sqi,
 	.get_sqi_max = dp83td510_get_sqi_max,
+	.cable_test_start = dp83td510_cable_test_start,
+	.cable_test_get_status = dp83td510_cable_test_get_status,
 	.suspend = genphy_suspend,
 	.resume = genphy_resume,
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index a838b61cd844..a35528497a57 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -748,7 +748,7 @@ static int lan87xx_cable_test_report(struct phy_device *phydev)
 	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
 				lan87xx_cable_test_report_trans(detect));
 
-	return 0;
+	return phy_init_hw(phydev);
 }
 
 static int lan87xx_cable_test_get_status(struct phy_device *phydev,
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 0a65b6d690fe..eb9acfcaeb09 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -70,6 +70,7 @@
 #define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */
 
 #define PPP_PROTO_LEN	2
+#define PPP_LCP_HDRLEN	4
 
 /*
  * An instance of /dev/ppp can be associated with either a ppp
@@ -493,6 +494,15 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
 	return ret;
 }
 
+static bool ppp_check_packet(struct sk_buff *skb, size_t count)
+{
+	/* LCP packets must include the LCP header, which is 4 bytes long:
+	 * 1-byte code, 1-byte identifier, and 2-byte length.
+	 */
+	return get_unaligned_be16(skb->data) != PPP_LCP ||
+	       count >= PPP_PROTO_LEN + PPP_LCP_HDRLEN;
+}
+
 static ssize_t ppp_write(struct file *file, const char __user *buf,
 			 size_t count, loff_t *ppos)
 {
@@ -515,6 +525,11 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
 		kfree_skb(skb);
 		goto out;
 	}
+	ret = -EINVAL;
+	if (unlikely(!ppp_check_packet(skb, count))) {
+		kfree_skb(skb);
+		goto out;
+	}
 
 	switch (pf->kind) {
 	case INTERFACE:
diff --git a/drivers/net/pse-pd/pd692x0.c b/drivers/net/pse-pd/pd692x0.c
index 29cc76a66c13..0af7db80b2f8 100644
--- a/drivers/net/pse-pd/pd692x0.c
+++ b/drivers/net/pse-pd/pd692x0.c
@@ -589,7 +589,7 @@ static int pd692x0_pi_set_pw_from_table(struct device *dev,
 
 	if (pw < pw_table->class_pw) {
 		dev_err(dev,
-			"Power limit %dmW not supported. Ranges availables: [%d-%d] or [%d-%d]\n",
+			"Power limit %dmW not supported.
Ranges available: [%d-%d] or [%d-%d]\n", pw, (pw_table - 1)->class_pw, (pw_table - 1)->class_pw + (pw_table - 1)->max_added_class_pw, diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c index 0ba714ca5185..4b8528206cc8 100644 --- a/drivers/net/wireguard/allowedips.c +++ b/drivers/net/wireguard/allowedips.c @@ -15,8 +15,8 @@ static void swap_endian(u8 *dst, const u8 *src, u8 bits) if (bits == 32) { *(u32 *)dst = be32_to_cpu(*(const __be32 *)src); } else if (bits == 128) { - ((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]); - ((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]); + ((u64 *)dst)[0] = get_unaligned_be64(src); + ((u64 *)dst)[1] = get_unaligned_be64(src + 8); } } diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h index 1ea4f874e367..7eb76724b3ed 100644 --- a/drivers/net/wireguard/queueing.h +++ b/drivers/net/wireguard/queueing.h @@ -124,10 +124,10 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id) */ static inline int wg_cpumask_next_online(int *last_cpu) { - int cpu = cpumask_next(*last_cpu, cpu_online_mask); + int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask); if (cpu >= nr_cpu_ids) cpu = cpumask_first(cpu_online_mask); - *last_cpu = cpu; + WRITE_ONCE(*last_cpu, cpu); return cpu; } diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c index 0d48e0f4a1ba..26e09c30d596 100644 --- a/drivers/net/wireguard/send.c +++ b/drivers/net/wireguard/send.c @@ -222,7 +222,7 @@ void wg_packet_send_keepalive(struct wg_peer *peer) { struct sk_buff *skb; - if (skb_queue_empty(&peer->staged_packet_queue)) { + if (skb_queue_empty_lockless(&peer->staged_packet_queue)) { skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH, GFP_ATOMIC); if (unlikely(!skb)) diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index 198fb359a688..86485580dd89 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -1877,8 +1877,7 @@ static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu) CHECKSUM_NONE : CHECKSUM_UNNECESSARY; } -static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, - enum hal_encrypt_type enctype) +int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype) { switch (enctype) { case HAL_ENCRYPT_TYPE_OPEN: diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h index 623da3bf9dc8..c322e30caa96 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.h +++ b/drivers/net/wireless/ath/ath11k/dp_rx.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef ATH11K_DP_RX_H #define ATH11K_DP_RX_H @@ -95,4 +96,6 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab); int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer); +int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype); + #endif /* ATH11K_DP_RX_H */ diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c index 21819b741701..8522c67baabf 100644 --- a/drivers/net/wireless/ath/ath11k/dp_tx.c +++ b/drivers/net/wireless/ath/ath11k/dp_tx.c @@ -353,8 +353,12 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab, if (ts->acked) { if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { info->flags |= IEEE80211_TX_STAT_ACK; - info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR + - ts->ack_rssi; + info->status.ack_signal = ts->ack_rssi; + + if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT, + ab->wmi_ab.svc_map)) + info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR; + info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID; } else { @@ -584,8 +588,12 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar, if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED && !(info->flags & IEEE80211_TX_CTL_NO_ACK)) { info->flags |= IEEE80211_TX_STAT_ACK; - info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR + - ts->ack_rssi; + info->status.ack_signal = ts->ack_rssi; + + if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT, + ab->wmi_ab.svc_map)) + info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR; + info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID; } diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.h b/drivers/net/wireless/ath/ath11k/dp_tx.h index 61be2265e09f..795fe3b8fa0d 100644 --- a/drivers/net/wireless/ath/ath11k/dp_tx.h +++ b/drivers/net/wireless/ath/ath11k/dp_tx.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021, 2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021, 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH11K_DP_TX_H @@ -13,7 +13,7 @@ struct ath11k_dp_htt_wbm_tx_status { u32 msdu_id; bool acked; - int ack_rssi; + s8 ack_rssi; u16 peer_id; }; diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.h b/drivers/net/wireless/ath/ath11k/hal_tx.h index c5e88364afe5..46d17abd808b 100644 --- a/drivers/net/wireless/ath/ath11k/hal_tx.h +++ b/drivers/net/wireless/ath/ath11k/hal_tx.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
 */
 
 #ifndef ATH11K_HAL_TX_H
@@ -54,7 +54,7 @@ struct hal_tx_info {
 struct hal_tx_status {
 	enum hal_wbm_rel_src_module buf_rel_source;
 	enum hal_wbm_tqm_rel_reason status;
-	u8 ack_rssi;
+	s8 ack_rssi;
 	u32 flags; /* %HAL_TX_STATUS_FLAGS_ */
 	u32 ppdu_id;
 	u8 try_cnt;
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index c9a13b88c804..ba910ae2c676 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -4229,6 +4229,7 @@ static int ath11k_install_key(struct ath11k_vif *arvif,
 	switch (key->cipher) {
 	case WLAN_CIPHER_SUITE_CCMP:
+	case WLAN_CIPHER_SUITE_CCMP_256:
 		arg.key_cipher = WMI_CIPHER_AES_CCM;
 		/* TODO: Re-check if flag is valid */
 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
@@ -4238,12 +4239,10 @@ static int ath11k_install_key(struct ath11k_vif *arvif,
 		arg.key_txmic_len = 8;
 		arg.key_rxmic_len = 8;
 		break;
-	case WLAN_CIPHER_SUITE_CCMP_256:
-		arg.key_cipher = WMI_CIPHER_AES_CCM;
-		break;
 	case WLAN_CIPHER_SUITE_GCMP:
 	case WLAN_CIPHER_SUITE_GCMP_256:
 		arg.key_cipher = WMI_CIPHER_AES_GCM;
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
 		break;
 	default:
 		ath11k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
@@ -5903,7 +5902,10 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
 {
 	struct ath11k_base *ab = ar->ab;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
 	struct ieee80211_tx_info *info;
+	enum hal_encrypt_type enctype;
+	unsigned int mic_len;
 	dma_addr_t paddr;
 	int buf_id;
 	int ret;
@@ -5927,7 +5929,12 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
 	     ieee80211_is_deauth(hdr->frame_control) ||
 	     ieee80211_is_disassoc(hdr->frame_control)) &&
 	     ieee80211_has_protected(hdr->frame_control)) {
-		skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+		if (!(skb_cb->flags & ATH11K_SKB_CIPHER_SET))
+			ath11k_warn(ab, "WMI management tx frame without ATH11K_SKB_CIPHER_SET");
+
+		enctype = ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);
+		mic_len = ath11k_dp_rx_crypto_mic_len(ar, enctype);
+		skb_put(skb, mic_len);
 	}
 }
@@ -8977,8 +8984,11 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
 	}
 
-	sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi) +
-		ATH11K_DEFAULT_NOISE_FLOOR;
+	sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi);
+
+	if (!db2dbm)
+		sinfo->signal_avg += ATH11K_DEFAULT_NOISE_FLOOR;
+
 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
 }
@@ -9020,7 +9030,12 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
 	offload = &arvif->arp_ns_offload;
 	count = 0;
 
-	/* Note: read_lock_bh() calls rcu_read_lock() */
+	/* The _ipv6_changed() callback is called with the RCU lock already
+	 * held in atomic_notifier_call_chain(), so we don't need to call
+	 * rcu_read_lock() again here. But note that with CONFIG_PREEMPT_RT
+	 * enabled, read_lock_bh() also calls rcu_read_lock(). This is OK
+	 * because RCU read-side critical sections are allowed to nest.
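+	 * (Illustration: a nested rcu_read_lock() just increases the nesting
+	 * depth; the read-side critical section only ends at the outermost
+	 * rcu_read_unlock().)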
+	 */
 	read_lock_bh(&idev->lock);
 
 	memset(offload->ipv6_addr, 0, sizeof(offload->ipv6_addr));
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index aa160e6fe24f..1bc648920ab6 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -2859,7 +2859,7 @@ int ath11k_qmi_firmware_start(struct ath11k_base *ab,
 
 int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab)
 {
-	int timeout;
+	long time_left;
 
 	if (!ath11k_core_coldboot_cal_support(ab) ||
 	    ab->hw_params.cbcal_restart_fw == 0)
@@ -2867,11 +2867,11 @@ int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab)
 
 	ath11k_dbg(ab, ATH11K_DBG_QMI, "wait for cold boot done\n");
 
-	timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
-				     (ab->qmi.cal_done == 1),
-				     ATH11K_COLD_BOOT_FW_RESET_DELAY);
+	time_left = wait_event_timeout(ab->qmi.cold_boot_waitq,
+				       (ab->qmi.cal_done == 1),
+				       ATH11K_COLD_BOOT_FW_RESET_DELAY);
 
-	if (timeout <= 0) {
+	if (time_left <= 0) {
 		ath11k_warn(ab, "Coldboot Calibration timed out\n");
 		return -ETIMEDOUT;
 	}
@@ -2886,7 +2886,7 @@ EXPORT_SYMBOL(ath11k_qmi_fwreset_from_cold_boot);
 
 static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab)
 {
-	int timeout;
+	long time_left;
 	int ret;
 
 	ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_COLD_BOOT);
@@ -2897,10 +2897,10 @@ static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab)
 
 	ath11k_dbg(ab, ATH11K_DBG_QMI, "Coldboot calibration wait started\n");
 
-	timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
-				     (ab->qmi.cal_done == 1),
-				     ATH11K_COLD_BOOT_FW_RESET_DELAY);
-	if (timeout <= 0) {
+	time_left = wait_event_timeout(ab->qmi.cold_boot_waitq,
+				       (ab->qmi.cal_done == 1),
+				       ATH11K_COLD_BOOT_FW_RESET_DELAY);
+	if (time_left <= 0) {
 		ath11k_warn(ab, "coldboot calibration timed out\n");
 		return 0;
 	}
diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile
index d42480db7463..5a1ed20d730e 100644
--- a/drivers/net/wireless/ath/ath12k/Makefile
+++ b/drivers/net/wireless/ath/ath12k/Makefile
@@ -23,9 +23,10 @@ ath12k-y += core.o \
 	    fw.o \
 	    p2p.o
 
-ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o
+ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o
 ath12k-$(CONFIG_ACPI) += acpi.o
 ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
+ath12k-$(CONFIG_PM) += wow.o
 
 # for tracing framework to find trace.h
 CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath12k/acpi.c b/drivers/net/wireless/ath/ath12k/acpi.c
index 443ba12e01f3..0555d35aab47 100644
--- a/drivers/net/wireless/ath/ath12k/acpi.c
+++ b/drivers/net/wireless/ath/ath12k/acpi.c
@@ -391,4 +391,6 @@ void ath12k_acpi_stop(struct ath12k_base *ab)
 
 	acpi_remove_notify_handler(ACPI_HANDLE(ab->dev),
 				   ACPI_DEVICE_NOTIFY,
 				   ath12k_acpi_dsm_notify);
+
+	memset(&ab->acpi, 0, sizeof(ab->acpi));
 }
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 4c3eab4686c2..51252e8bc1ae 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -16,6 +16,7 @@
 #include "hif.h"
 #include "fw.h"
 #include "debugfs.h"
+#include "wow.h"
 
 unsigned int ath12k_debug_mask;
 module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
@@ -42,14 +43,38 @@ static int ath12k_core_rfkill_config(struct ath12k_base *ab)
 	return ret;
 }
 
-int ath12k_core_suspend(struct ath12k_base *ab)
+/* Check if we need to continue with suspend/resume operation.
+ * Return:
+ *	a negative value: an error happened; don't continue.
+ * 0: no error but don't continue. + * positive value: no error and do continue. + */ +static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab) { struct ath12k *ar; - int ret, i; if (!ab->hw_params->supports_suspend) return -EOPNOTSUPP; + /* so far single_pdev_only chips have supports_suspend as true + * so pass 0 as a dummy pdev_id here. + */ + ar = ab->pdevs[0].ar; + if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF) + return 0; + + return 1; +} + +int ath12k_core_suspend(struct ath12k_base *ab) +{ + struct ath12k *ar; + int ret, i; + + ret = ath12k_core_continue_suspend_resume(ab); + if (ret <= 0) + return ret; + for (i = 0; i < ab->num_radios; i++) { ar = ab->pdevs[i].ar; if (!ar) @@ -80,8 +105,13 @@ EXPORT_SYMBOL(ath12k_core_suspend); int ath12k_core_suspend_late(struct ath12k_base *ab) { - if (!ab->hw_params->supports_suspend) - return -EOPNOTSUPP; + int ret; + + ret = ath12k_core_continue_suspend_resume(ab); + if (ret <= 0) + return ret; + + ath12k_acpi_stop(ab); ath12k_hif_irq_disable(ab); ath12k_hif_ce_irq_disable(ab); @@ -96,8 +126,9 @@ int ath12k_core_resume_early(struct ath12k_base *ab) { int ret; - if (!ab->hw_params->supports_suspend) - return -EOPNOTSUPP; + ret = ath12k_core_continue_suspend_resume(ab); + if (ret <= 0) + return ret; reinit_completion(&ab->restart_completed); ret = ath12k_hif_power_up(ab); @@ -111,9 +142,11 @@ EXPORT_SYMBOL(ath12k_core_resume_early); int ath12k_core_resume(struct ath12k_base *ab) { long time_left; + int ret; - if (!ab->hw_params->supports_suspend) - return -EOPNOTSUPP; + ret = ath12k_core_continue_suspend_resume(ab); + if (ret <= 0) + return ret; time_left = wait_for_completion_timeout(&ab->restart_completed, ATH12K_RESET_TIMEOUT_HZ); @@ -1059,8 +1092,6 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab) mutex_unlock(&ar->conf_mutex); } - /* Restart after all the link/radio halt */ - ieee80211_restart_hw(ah->hw); break; case ATH12K_HW_STATE_OFF: ath12k_warn(ab, @@ -1087,7 +1118,8 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab) static void ath12k_core_restart(struct work_struct *work) { struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work); - int ret; + struct ath12k_hw *ah; + int ret, i; ret = ath12k_core_reconfigure_on_crash(ab); if (ret) { @@ -1095,8 +1127,12 @@ static void ath12k_core_restart(struct work_struct *work) return; } - if (ab->is_reset) - complete_all(&ab->reconfigure_complete); + if (ab->is_reset) { + for (i = 0; i < ab->num_hw; i++) { + ah = ab->ah[i]; + ieee80211_restart_hw(ah->hw); + } + } complete(&ab->restart_completed); } @@ -1150,20 +1186,14 @@ static void ath12k_core_reset(struct work_struct *work) ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n"); ab->is_reset = true; - atomic_set(&ab->recovery_start_count, 0); - reinit_completion(&ab->recovery_start); atomic_set(&ab->recovery_count, 0); ath12k_core_pre_reconfigure_recovery(ab); - reinit_completion(&ab->reconfigure_complete); ath12k_core_post_reconfigure_recovery(ab); ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n"); - time_left = wait_for_completion_timeout(&ab->recovery_start, - ATH12K_RECOVER_START_TIMEOUT_HZ); - ath12k_hif_irq_disable(ab); ath12k_hif_ce_irq_disable(ab); @@ -1275,8 +1305,6 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size, mutex_init(&ab->core_lock); spin_lock_init(&ab->base_lock); init_completion(&ab->reset_complete); - init_completion(&ab->reconfigure_complete); - init_completion(&ab->recovery_start); 
 	INIT_LIST_HEAD(&ab->peers);
 	init_waitqueue_head(&ab->peer_mapping_wq);
@@ -1288,6 +1316,7 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
 	timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
 	init_completion(&ab->htc_suspend);
 	init_completion(&ab->restart_completed);
+	init_completion(&ab->wow.wakeup_completed);
 
 	ab->dev = dev;
 	ab->hif.bus = bus;
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index d03326a68a9f..cdfd43a7321a 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -28,6 +28,8 @@
 #include "dbring.h"
 #include "fw.h"
 #include "acpi.h"
+#include "wow.h"
+#include "debugfs_htt_stats.h"
 
 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
 
@@ -229,6 +231,13 @@ struct ath12k_vif_cache {
 	u32 bss_conf_changed;
 };
 
+struct ath12k_rekey_data {
+	u8 kck[NL80211_KCK_LEN];
+	u8 kek[NL80211_KEK_LEN];
+	u64 replay_ctr;
+	bool enable_offload;
+};
+
 struct ath12k_vif {
 	u32 vdev_id;
 	enum wmi_vdev_type vdev_type;
@@ -285,6 +294,7 @@ struct ath12k_vif {
 	u32 punct_bitmap;
 	bool ps;
 	struct ath12k_vif_cache *cache;
+	struct ath12k_rekey_data rekey_data;
 };
 
 struct ath12k_vif_iter {
@@ -472,8 +482,17 @@ struct ath12k_fw_stats {
 	struct list_head bcn;
 };
 
+struct ath12k_dbg_htt_stats {
+	enum ath12k_dbg_htt_ext_stats_type type;
+	u32 cfg_param[4];
+	u8 reset;
+	struct debug_htt_stats_req *stats_req;
+};
+
 struct ath12k_debug {
 	struct dentry *debugfs_pdev;
+	struct dentry *debugfs_pdev_symlink;
+	struct ath12k_dbg_htt_stats htt_stats;
 };
 
 struct ath12k_per_peer_tx_stats {
@@ -606,6 +625,9 @@ struct ath12k {
 	struct work_struct wmi_mgmt_tx_work;
 	struct sk_buff_head wmi_mgmt_tx_queue;
 
+	struct ath12k_wow wow;
+	struct completion target_suspend;
+	bool target_suspend_ack;
 	struct ath12k_per_peer_tx_stats peer_tx_stats;
 	struct list_head ppdu_stats_info;
 	u32 ppdu_stat_list_depth;
@@ -625,12 +647,12 @@ struct ath12k {
 
 	u32 freq_low;
 	u32 freq_high;
+
+	bool nlo_enabled;
 };
 
 struct ath12k_hw {
 	struct ieee80211_hw *hw;
-	struct ath12k_base *ab;
-
 	/* Protect the write operation of the hardware state ath12k_hw::state
 	 * between hardware start<=>reconfigure<=>stop transitions.
 	 */
@@ -766,6 +788,11 @@ struct ath12k_base {
 		const struct ath12k_hif_ops *ops;
 	} hif;
 
+	struct {
+		struct completion wakeup_completed;
+		u32 wmi_conf_rx_decap_mode;
+	} wow;
+
 	struct ath12k_ce ce;
 	struct timer_list rx_replenish_retry;
 	struct ath12k_hal hal;
@@ -848,11 +875,8 @@ struct ath12k_base {
 	struct work_struct reset_work;
 	atomic_t reset_count;
 	atomic_t recovery_count;
-	atomic_t recovery_start_count;
 	bool is_reset;
 	struct completion reset_complete;
-	struct completion reconfigure_complete;
-	struct completion recovery_start;
 	/* continuous recovery fail count */
 	atomic_t fail_cont_count;
 	unsigned long reset_fail_timeout;
diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h
index aa685295f8a4..f7005917362c 100644
--- a/drivers/net/wireless/ath/ath12k/debug.h
+++ b/drivers/net/wireless/ath/ath12k/debug.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/ #ifndef _ATH12K_DEBUG_H_ @@ -25,6 +25,7 @@ enum ath12k_debug_mask { ATH12K_DBG_PCI = 0x00001000, ATH12K_DBG_DP_TX = 0x00002000, ATH12K_DBG_DP_RX = 0x00004000, + ATH12K_DBG_WOW = 0x00008000, ATH12K_DBG_ANY = 0xffffffff, }; diff --git a/drivers/net/wireless/ath/ath12k/debugfs.c b/drivers/net/wireless/ath/ath12k/debugfs.c index 8d8ba951093b..2a977c36af00 100644 --- a/drivers/net/wireless/ath/ath12k/debugfs.c +++ b/drivers/net/wireless/ath/ath12k/debugfs.c @@ -6,6 +6,7 @@ #include "core.h" #include "debugfs.h" +#include "debugfs_htt_stats.h" static ssize_t ath12k_write_simulate_radar(struct file *file, const char __user *user_buf, @@ -80,11 +81,27 @@ void ath12k_debugfs_register(struct ath12k *ar) /* Create a symlink under ieee80211/phy* */ scnprintf(buf, sizeof(buf), "../../ath12k/%pd2", ar->debug.debugfs_pdev); - debugfs_create_symlink("ath12k", hw->wiphy->debugfsdir, buf); + ar->debug.debugfs_pdev_symlink = debugfs_create_symlink("ath12k", + hw->wiphy->debugfsdir, + buf); if (ar->mac.sbands[NL80211_BAND_5GHZ].channels) { debugfs_create_file("dfs_simulate_radar", 0200, ar->debug.debugfs_pdev, ar, &fops_simulate_radar); } + + ath12k_debugfs_htt_stats_register(ar); +} + +void ath12k_debugfs_unregister(struct ath12k *ar) +{ + if (!ar->debug.debugfs_pdev) + return; + + /* Remove symlink under ieee80211/phy* */ + debugfs_remove(ar->debug.debugfs_pdev_symlink); + debugfs_remove_recursive(ar->debug.debugfs_pdev); + ar->debug.debugfs_pdev_symlink = NULL; + ar->debug.debugfs_pdev = NULL; } diff --git a/drivers/net/wireless/ath/ath12k/debugfs.h b/drivers/net/wireless/ath/ath12k/debugfs.h index a62f2a550b23..8d64ba03aa9a 100644 --- a/drivers/net/wireless/ath/ath12k/debugfs.h +++ b/drivers/net/wireless/ath/ath12k/debugfs.h @@ -11,7 +11,7 @@ void ath12k_debugfs_soc_create(struct ath12k_base *ab); void ath12k_debugfs_soc_destroy(struct ath12k_base *ab); void ath12k_debugfs_register(struct ath12k *ar); - +void ath12k_debugfs_unregister(struct ath12k *ar); #else static inline void ath12k_debugfs_soc_create(struct ath12k_base *ab) { @@ -25,6 +25,10 @@ static inline void ath12k_debugfs_register(struct ath12k *ar) { } +static inline void ath12k_debugfs_unregister(struct ath12k *ar) +{ +} + #endif /* CONFIG_ATH12K_DEBUGFS */ #endif /* _ATH12K_DEBUGFS_H_ */ diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c new file mode 100644 index 000000000000..ce80e7b5175b --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c @@ -0,0 +1,1540 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include <linux/vmalloc.h> +#include "core.h" +#include "debug.h" +#include "debugfs_htt_stats.h" +#include "dp_tx.h" +#include "dp_rx.h" + +static u32 +print_array_to_buf(u8 *buf, u32 offset, const char *header, + const __le32 *array, u32 array_len, const char *footer) +{ + int index = 0; + u8 i; + + if (header) { + index += scnprintf(buf + offset, + ATH12K_HTT_STATS_BUF_SIZE - offset, + "%s = ", header); + } + for (i = 0; i < array_len; i++) { + index += scnprintf(buf + offset + index, + (ATH12K_HTT_STATS_BUF_SIZE - offset) - index, + " %u:%u,", i, le32_to_cpu(array[i])); + } + /* To overwrite the last trailing comma */ + index--; + *(buf + offset + index) = '\0'; + + if (footer) { + index += scnprintf(buf + offset + index, + (ATH12K_HTT_STATS_BUF_SIZE - offset) - index, + "%s", footer); + } + return index; +} + +static void +htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_stats_cmn_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "comp_delivered = %u\n", + le32_to_cpu(htt_stats_buf->comp_delivered)); + len += scnprintf(buf + len, buf_len - len, "self_triggers = %u\n", + le32_to_cpu(htt_stats_buf->self_triggers)); + len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n", + le32_to_cpu(htt_stats_buf->hw_queued)); + len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n", + le32_to_cpu(htt_stats_buf->hw_reaped)); + len += scnprintf(buf + len, buf_len - len, "underrun = %u\n", + le32_to_cpu(htt_stats_buf->underrun)); + len += scnprintf(buf + len, buf_len - len, "hw_paused = %u\n", + le32_to_cpu(htt_stats_buf->hw_paused)); + len += scnprintf(buf + len, buf_len - len, "hw_flush = %u\n", + le32_to_cpu(htt_stats_buf->hw_flush)); + len += scnprintf(buf + len, buf_len - len, "hw_filt = %u\n", + le32_to_cpu(htt_stats_buf->hw_filt)); + len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n", + le32_to_cpu(htt_stats_buf->tx_abort)); + len += scnprintf(buf + len, buf_len - len, "ppdu_ok = %u\n", + le32_to_cpu(htt_stats_buf->ppdu_ok)); + len += scnprintf(buf + len, buf_len - len, "mpdu_requeued = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_requed)); + len += scnprintf(buf + len, buf_len - len, "tx_xretry = %u\n", + le32_to_cpu(htt_stats_buf->tx_xretry)); + len += scnprintf(buf + len, buf_len - len, "data_rc = %u\n", + le32_to_cpu(htt_stats_buf->data_rc)); + len += scnprintf(buf + len, buf_len - len, "mpdu_dropped_xretry = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_dropped_xretry)); + len += scnprintf(buf + len, buf_len - len, "illegal_rate_phy_err = %u\n", + le32_to_cpu(htt_stats_buf->illgl_rate_phy_err)); + len += scnprintf(buf + len, buf_len - len, "cont_xretry = %u\n", + le32_to_cpu(htt_stats_buf->cont_xretry)); + len += scnprintf(buf + len, buf_len - len, "tx_timeout = %u\n", + le32_to_cpu(htt_stats_buf->tx_timeout)); + len += scnprintf(buf + len, buf_len - len, "tx_time_dur_data = %u\n", + le32_to_cpu(htt_stats_buf->tx_time_dur_data)); + len += scnprintf(buf + len, buf_len - len, "pdev_resets = %u\n", + 
le32_to_cpu(htt_stats_buf->pdev_resets)); + len += scnprintf(buf + len, buf_len - len, "phy_underrun = %u\n", + le32_to_cpu(htt_stats_buf->phy_underrun)); + len += scnprintf(buf + len, buf_len - len, "txop_ovf = %u\n", + le32_to_cpu(htt_stats_buf->txop_ovf)); + len += scnprintf(buf + len, buf_len - len, "seq_posted = %u\n", + le32_to_cpu(htt_stats_buf->seq_posted)); + len += scnprintf(buf + len, buf_len - len, "seq_failed_queueing = %u\n", + le32_to_cpu(htt_stats_buf->seq_failed_queueing)); + len += scnprintf(buf + len, buf_len - len, "seq_completed = %u\n", + le32_to_cpu(htt_stats_buf->seq_completed)); + len += scnprintf(buf + len, buf_len - len, "seq_restarted = %u\n", + le32_to_cpu(htt_stats_buf->seq_restarted)); + len += scnprintf(buf + len, buf_len - len, "seq_txop_repost_stop = %u\n", + le32_to_cpu(htt_stats_buf->seq_txop_repost_stop)); + len += scnprintf(buf + len, buf_len - len, "next_seq_cancel = %u\n", + le32_to_cpu(htt_stats_buf->next_seq_cancel)); + len += scnprintf(buf + len, buf_len - len, "dl_mu_mimo_seq_posted = %u\n", + le32_to_cpu(htt_stats_buf->mu_seq_posted)); + len += scnprintf(buf + len, buf_len - len, "dl_mu_ofdma_seq_posted = %u\n", + le32_to_cpu(htt_stats_buf->mu_ofdma_seq_posted)); + len += scnprintf(buf + len, buf_len - len, "ul_mu_mimo_seq_posted = %u\n", + le32_to_cpu(htt_stats_buf->ul_mumimo_seq_posted)); + len += scnprintf(buf + len, buf_len - len, "ul_mu_ofdma_seq_posted = %u\n", + le32_to_cpu(htt_stats_buf->ul_ofdma_seq_posted)); + len += scnprintf(buf + len, buf_len - len, "mu_mimo_peer_blacklisted = %u\n", + le32_to_cpu(htt_stats_buf->num_mu_peer_blacklisted)); + len += scnprintf(buf + len, buf_len - len, "seq_qdepth_repost_stop = %u\n", + le32_to_cpu(htt_stats_buf->seq_qdepth_repost_stop)); + len += scnprintf(buf + len, buf_len - len, "seq_min_msdu_repost_stop = %u\n", + le32_to_cpu(htt_stats_buf->seq_min_msdu_repost_stop)); + len += scnprintf(buf + len, buf_len - len, "mu_seq_min_msdu_repost_stop = %u\n", + le32_to_cpu(htt_stats_buf->mu_seq_min_msdu_repost_stop)); + len += scnprintf(buf + len, buf_len - len, "seq_switch_hw_paused = %u\n", + le32_to_cpu(htt_stats_buf->seq_switch_hw_paused)); + len += scnprintf(buf + len, buf_len - len, "next_seq_posted_dsr = %u\n", + le32_to_cpu(htt_stats_buf->next_seq_posted_dsr)); + len += scnprintf(buf + len, buf_len - len, "seq_posted_isr = %u\n", + le32_to_cpu(htt_stats_buf->seq_posted_isr)); + len += scnprintf(buf + len, buf_len - len, "seq_ctrl_cached = %u\n", + le32_to_cpu(htt_stats_buf->seq_ctrl_cached)); + len += scnprintf(buf + len, buf_len - len, "mpdu_count_tqm = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_count_tqm)); + len += scnprintf(buf + len, buf_len - len, "msdu_count_tqm = %u\n", + le32_to_cpu(htt_stats_buf->msdu_count_tqm)); + len += scnprintf(buf + len, buf_len - len, "mpdu_removed_tqm = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_removed_tqm)); + len += scnprintf(buf + len, buf_len - len, "msdu_removed_tqm = %u\n", + le32_to_cpu(htt_stats_buf->msdu_removed_tqm)); + len += scnprintf(buf + len, buf_len - len, "remove_mpdus_max_retries = %u\n", + le32_to_cpu(htt_stats_buf->remove_mpdus_max_retries)); + len += scnprintf(buf + len, buf_len - len, "mpdus_sw_flush = %u\n", + le32_to_cpu(htt_stats_buf->mpdus_sw_flush)); + len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n", + le32_to_cpu(htt_stats_buf->mpdus_hw_filter)); + len += scnprintf(buf + len, buf_len - len, "mpdus_truncated = %u\n", + le32_to_cpu(htt_stats_buf->mpdus_truncated)); + len += scnprintf(buf + len, buf_len - len, 
"mpdus_ack_failed = %u\n", + le32_to_cpu(htt_stats_buf->mpdus_ack_failed)); + len += scnprintf(buf + len, buf_len - len, "mpdus_expired = %u\n", + le32_to_cpu(htt_stats_buf->mpdus_expired)); + len += scnprintf(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u\n", + le32_to_cpu(htt_stats_buf->mpdus_seq_hw_retry)); + len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n", + le32_to_cpu(htt_stats_buf->ack_tlv_proc)); + len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u\n", + le32_to_cpu(htt_stats_buf->coex_abort_mpdu_cnt_valid)); + len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u\n", + le32_to_cpu(htt_stats_buf->coex_abort_mpdu_cnt)); + len += scnprintf(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u\n", + le32_to_cpu(htt_stats_buf->num_total_ppdus_tried_ota)); + len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u\n", + le32_to_cpu(htt_stats_buf->num_data_ppdus_tried_ota)); + len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u\n", + le32_to_cpu(htt_stats_buf->local_ctrl_mgmt_enqued)); + len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u\n", + le32_to_cpu(htt_stats_buf->local_ctrl_mgmt_freed)); + len += scnprintf(buf + len, buf_len - len, "local_data_enqued = %u\n", + le32_to_cpu(htt_stats_buf->local_data_enqued)); + len += scnprintf(buf + len, buf_len - len, "local_data_freed = %u\n", + le32_to_cpu(htt_stats_buf->local_data_freed)); + len += scnprintf(buf + len, buf_len - len, "mpdu_tried = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_tried)); + len += scnprintf(buf + len, buf_len - len, "isr_wait_seq_posted = %u\n", + le32_to_cpu(htt_stats_buf->isr_wait_seq_posted)); + len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_low = %u\n", + le32_to_cpu(htt_stats_buf->tx_active_dur_us_low)); + len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n", + le32_to_cpu(htt_stats_buf->tx_active_dur_us_high)); + len += scnprintf(buf + len, buf_len - len, "fes_offsets_err_cnt = %u\n\n", + le32_to_cpu(htt_stats_buf->fes_offsets_err_cnt)); + + stats_req->buf_len = len; +} + +static void +htt_print_tx_pdev_stats_urrn_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_stats_urrn_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elems = min_t(u16, (tag_len >> 2), + HTT_TX_PDEV_MAX_URRN_STATS); + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_STATS_URRN_TLV:\n"); + + len += print_array_to_buf(buf, len, "urrn_stats", htt_stats_buf->urrn_stats, + num_elems, "\n\n"); + + stats_req->buf_len = len; +} + +static void +htt_print_tx_pdev_stats_flush_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_stats_flush_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elems = min_t(u16, (tag_len >> 2), + ATH12K_HTT_TX_PDEV_MAX_FLUSH_REASON_STATS); + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_STATS_FLUSH_TLV:\n"); + + len += print_array_to_buf(buf, len, "flush_errs", htt_stats_buf->flush_errs, + num_elems, "\n\n"); + + stats_req->buf_len = len; +} + +static void +htt_print_tx_pdev_stats_sifs_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_stats_sifs_tlv *htt_stats_buf = 
tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elems = min_t(u16, (tag_len >> 2), + ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_STATS); + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_STATS_SIFS_TLV:\n"); + + len += print_array_to_buf(buf, len, "sifs_status", htt_stats_buf->sifs_status, + num_elems, "\n\n"); + + stats_req->buf_len = len; +} + +static void +htt_print_tx_pdev_mu_ppdu_dist_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv *htt_stats_buf = tag_buf; + char *mode; + u8 j, hw_mode, i, str_buf_len; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 stats_value; + u8 max_ppdu = ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST; + u8 max_sched = ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS; + char str_buf[ATH12K_HTT_MAX_STRING_LEN]; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + hw_mode = le32_to_cpu(htt_stats_buf->hw_mode); + + switch (hw_mode) { + case ATH12K_HTT_STATS_HWMODE_AC: + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_AC_MU_PPDU_DISTRIBUTION_STATS:\n"); + mode = "ac"; + break; + case ATH12K_HTT_STATS_HWMODE_AX: + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_AX_MU_PPDU_DISTRIBUTION_STATS:\n"); + mode = "ax"; + break; + case ATH12K_HTT_STATS_HWMODE_BE: + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_BE_MU_PPDU_DISTRIBUTION_STATS:\n"); + mode = "be"; + break; + default: + return; + } + + for (i = 0; i < ATH12K_HTT_STATS_NUM_NR_BINS ; i++) { + len += scnprintf(buf + len, buf_len - len, + "%s_mu_mimo_num_seq_posted_nr%u = %u\n", mode, + ((i + 1) * 4), htt_stats_buf->num_seq_posted[i]); + str_buf_len = 0; + memset(str_buf, 0x0, sizeof(str_buf)); + for (j = 0; j < ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST ; j++) { + stats_value = le32_to_cpu(htt_stats_buf->num_ppdu_posted_per_burst + [i * max_ppdu + j]); + str_buf_len += scnprintf(&str_buf[str_buf_len], + ATH12K_HTT_MAX_STRING_LEN - str_buf_len, + " %u:%u,", j, stats_value); + } + /* To overwrite the last trailing comma */ + str_buf[str_buf_len - 1] = '\0'; + len += scnprintf(buf + len, buf_len - len, + "%s_mu_mimo_num_ppdu_posted_per_burst_nr%u = %s\n", + mode, ((i + 1) * 4), str_buf); + str_buf_len = 0; + memset(str_buf, 0x0, sizeof(str_buf)); + for (j = 0; j < ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST ; j++) { + stats_value = le32_to_cpu(htt_stats_buf->num_ppdu_cmpl_per_burst + [i * max_ppdu + j]); + str_buf_len += scnprintf(&str_buf[str_buf_len], + ATH12K_HTT_MAX_STRING_LEN - str_buf_len, + " %u:%u,", j, stats_value); + } + /* To overwrite the last trailing comma */ + str_buf[str_buf_len - 1] = '\0'; + len += scnprintf(buf + len, buf_len - len, + "%s_mu_mimo_num_ppdu_completed_per_burst_nr%u = %s\n", + mode, ((i + 1) * 4), str_buf); + str_buf_len = 0; + memset(str_buf, 0x0, sizeof(str_buf)); + for (j = 0; j < ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS ; j++) { + stats_value = le32_to_cpu(htt_stats_buf->num_seq_term_status + [i * max_sched + j]); + str_buf_len += scnprintf(&str_buf[str_buf_len], + ATH12K_HTT_MAX_STRING_LEN - str_buf_len, + " %u:%u,", j, stats_value); + } + /* To overwrite the last trailing comma */ + str_buf[str_buf_len - 1] = '\0'; + len += scnprintf(buf + len, buf_len - len, + "%s_mu_mimo_num_seq_term_status_nr%u = %s\n\n", + mode, ((i + 1) * 4), str_buf); + } + + stats_req->buf_len = len; +} + +static void +htt_print_tx_pdev_stats_sifs_hist_tlv(const 
void *tag_buf,
+				      u16 tag_len,
+				      struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_tx_pdev_stats_sifs_hist_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u16 num_elems = min_t(u16, (tag_len >> 2),
+			      ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_PDEV_STATS_SIFS_HIST_TLV:\n");
+
+	len += print_array_to_buf(buf, len, "sifs_hist_status",
+				  htt_stats_buf->sifs_hist_status, num_elems, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+static void
+htt_print_pdev_ctrl_path_tx_stats_tlv(const void *tag_buf, u16 tag_len,
+				      struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_pdev_ctrl_path_tx_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_PDEV_STATS_CTRL_PATH_TX_STATS:\n");
+	len += print_array_to_buf(buf, len, "fw_tx_mgmt_subtype",
+				  htt_stats_buf->fw_tx_mgmt_subtype,
+				  ATH12K_HTT_STATS_SUBTYPE_MAX, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_stats_tx_sched_cmn_tlv(const void *tag_buf,
+					u16 tag_len,
+					struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_stats_tx_sched_cmn_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 mac_id_word;
+
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+	len += scnprintf(buf + len, buf_len - len, "current_timestamp = %u\n\n",
+			 le32_to_cpu(htt_stats_buf->current_timestamp));
+
+	stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_pdev_stats_sched_per_txq_tlv(const void *tag_buf,
+						 u16 tag_len,
+						 struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_tx_pdev_stats_sched_per_txq_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 mac_id_word;
+
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word,
+				      ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID));
+	len += scnprintf(buf + len, buf_len - len, "txq_id = %u\n",
+			 u32_get_bits(mac_id_word,
+				      ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID));
+	len += scnprintf(buf + len, buf_len - len, "sched_policy = %u\n",
+			 le32_to_cpu(htt_stats_buf->sched_policy));
+	len += scnprintf(buf + len, buf_len - len,
+			 "last_sched_cmd_posted_timestamp = %u\n",
+			 le32_to_cpu(htt_stats_buf->last_sched_cmd_posted_timestamp));
+	len += scnprintf(buf + len, buf_len - len,
+			 "last_sched_cmd_compl_timestamp = %u\n",
+			 le32_to_cpu(htt_stats_buf->last_sched_cmd_compl_timestamp));
+	len += scnprintf(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u\n",
+			 le32_to_cpu(htt_stats_buf->sched_2_tac_lwm_count));
+	len += scnprintf(buf + len, buf_len - len, "sched_2_tac_ring_full = %u\n",
+			 le32_to_cpu(htt_stats_buf->sched_2_tac_ring_full));
+	len += scnprintf(buf
+ len, buf_len - len, "sched_cmd_post_failure = %u\n", + le32_to_cpu(htt_stats_buf->sched_cmd_post_failure)); + len += scnprintf(buf + len, buf_len - len, "num_active_tids = %u\n", + le32_to_cpu(htt_stats_buf->num_active_tids)); + len += scnprintf(buf + len, buf_len - len, "num_ps_schedules = %u\n", + le32_to_cpu(htt_stats_buf->num_ps_schedules)); + len += scnprintf(buf + len, buf_len - len, "sched_cmds_pending = %u\n", + le32_to_cpu(htt_stats_buf->sched_cmds_pending)); + len += scnprintf(buf + len, buf_len - len, "num_tid_register = %u\n", + le32_to_cpu(htt_stats_buf->num_tid_register)); + len += scnprintf(buf + len, buf_len - len, "num_tid_unregister = %u\n", + le32_to_cpu(htt_stats_buf->num_tid_unregister)); + len += scnprintf(buf + len, buf_len - len, "num_qstats_queried = %u\n", + le32_to_cpu(htt_stats_buf->num_qstats_queried)); + len += scnprintf(buf + len, buf_len - len, "qstats_update_pending = %u\n", + le32_to_cpu(htt_stats_buf->qstats_update_pending)); + len += scnprintf(buf + len, buf_len - len, "last_qstats_query_timestamp = %u\n", + le32_to_cpu(htt_stats_buf->last_qstats_query_timestamp)); + len += scnprintf(buf + len, buf_len - len, "num_tqm_cmdq_full = %u\n", + le32_to_cpu(htt_stats_buf->num_tqm_cmdq_full)); + len += scnprintf(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u\n", + le32_to_cpu(htt_stats_buf->num_de_sched_algo_trigger)); + len += scnprintf(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u\n", + le32_to_cpu(htt_stats_buf->num_rt_sched_algo_trigger)); + len += scnprintf(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u\n", + le32_to_cpu(htt_stats_buf->num_tqm_sched_algo_trigger)); + len += scnprintf(buf + len, buf_len - len, "notify_sched = %u\n", + le32_to_cpu(htt_stats_buf->notify_sched)); + len += scnprintf(buf + len, buf_len - len, "dur_based_sendn_term = %u\n", + le32_to_cpu(htt_stats_buf->dur_based_sendn_term)); + len += scnprintf(buf + len, buf_len - len, "su_notify2_sched = %u\n", + le32_to_cpu(htt_stats_buf->su_notify2_sched)); + len += scnprintf(buf + len, buf_len - len, "su_optimal_queued_msdus_sched = %u\n", + le32_to_cpu(htt_stats_buf->su_optimal_queued_msdus_sched)); + len += scnprintf(buf + len, buf_len - len, "su_delay_timeout_sched = %u\n", + le32_to_cpu(htt_stats_buf->su_delay_timeout_sched)); + len += scnprintf(buf + len, buf_len - len, "su_min_txtime_sched_delay = %u\n", + le32_to_cpu(htt_stats_buf->su_min_txtime_sched_delay)); + len += scnprintf(buf + len, buf_len - len, "su_no_delay = %u\n", + le32_to_cpu(htt_stats_buf->su_no_delay)); + len += scnprintf(buf + len, buf_len - len, "num_supercycles = %u\n", + le32_to_cpu(htt_stats_buf->num_supercycles)); + len += scnprintf(buf + len, buf_len - len, "num_subcycles_with_sort = %u\n", + le32_to_cpu(htt_stats_buf->num_subcycles_with_sort)); + len += scnprintf(buf + len, buf_len - len, "num_subcycles_no_sort = %u\n\n", + le32_to_cpu(htt_stats_buf->num_subcycles_no_sort)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_sched_txq_cmd_posted_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sched_txq_cmd_posted_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elements = tag_len >> 2; + + len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV:\n"); + len += print_array_to_buf(buf, len, "sched_cmd_posted", + htt_stats_buf->sched_cmd_posted, num_elements, "\n\n"); + + stats_req->buf_len = 
len; +} + +static void +ath12k_htt_print_sched_txq_cmd_reaped_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sched_txq_cmd_reaped_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elements = tag_len >> 2; + + len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV:\n"); + len += print_array_to_buf(buf, len, "sched_cmd_reaped", + htt_stats_buf->sched_cmd_reaped, num_elements, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_sched_txq_sched_order_su_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sched_txq_sched_order_su_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 sched_order_su_num_entries = min_t(u32, (tag_len >> 2), + ATH12K_HTT_TX_PDEV_NUM_SCHED_ORDER_LOG); + + len += scnprintf(buf + len, buf_len - len, + "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV:\n"); + len += print_array_to_buf(buf, len, "sched_order_su", + htt_stats_buf->sched_order_su, + sched_order_su_num_entries, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_sched_txq_sched_ineligibility_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sched_txq_sched_ineligibility_tlv *htt_stats_buf = + tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 sched_ineligibility_num_entries = tag_len >> 2; + + len += scnprintf(buf + len, buf_len - len, + "HTT_SCHED_TXQ_SCHED_INELIGIBILITY:\n"); + len += print_array_to_buf(buf, len, "sched_ineligibility", + htt_stats_buf->sched_ineligibility, + sched_ineligibility_num_entries, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_sched_txq_supercycle_trigger_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sched_txq_supercycle_triggers_tlv *htt_stats_buf = + tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elems = min_t(u16, (tag_len >> 2), + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX); + + len += scnprintf(buf + len, buf_len - len, + "HTT_SCHED_TXQ_SUPERCYCLE_TRIGGER:\n"); + len += print_array_to_buf(buf, len, "supercycle_triggers", + htt_stats_buf->supercycle_triggers, num_elems, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_hw_stats_pdev_errs_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_hw_stats_pdev_errs_tlv *htt_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_buf)) + return; + + mac_id_word = le32_to_cpu(htt_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n", + le32_to_cpu(htt_buf->tx_abort)); + len += scnprintf(buf + len, buf_len - len, "tx_abort_fail_count = %u\n", + le32_to_cpu(htt_buf->tx_abort_fail_count)); + len += scnprintf(buf + len, buf_len - len, "rx_abort = %u\n", + le32_to_cpu(htt_buf->rx_abort)); + len += 
scnprintf(buf + len, buf_len - len, "rx_abort_fail_count = %u\n", + le32_to_cpu(htt_buf->rx_abort_fail_count)); + len += scnprintf(buf + len, buf_len - len, "rx_flush_cnt = %u\n", + le32_to_cpu(htt_buf->rx_flush_cnt)); + len += scnprintf(buf + len, buf_len - len, "warm_reset = %u\n", + le32_to_cpu(htt_buf->warm_reset)); + len += scnprintf(buf + len, buf_len - len, "cold_reset = %u\n", + le32_to_cpu(htt_buf->cold_reset)); + len += scnprintf(buf + len, buf_len - len, "mac_cold_reset_restore_cal = %u\n", + le32_to_cpu(htt_buf->mac_cold_reset_restore_cal)); + len += scnprintf(buf + len, buf_len - len, "mac_cold_reset = %u\n", + le32_to_cpu(htt_buf->mac_cold_reset)); + len += scnprintf(buf + len, buf_len - len, "mac_warm_reset = %u\n", + le32_to_cpu(htt_buf->mac_warm_reset)); + len += scnprintf(buf + len, buf_len - len, "mac_only_reset = %u\n", + le32_to_cpu(htt_buf->mac_only_reset)); + len += scnprintf(buf + len, buf_len - len, "phy_warm_reset = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset)); + len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_ucode_trig = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_ucode_trig)); + len += scnprintf(buf + len, buf_len - len, "mac_warm_reset_restore_cal = %u\n", + le32_to_cpu(htt_buf->mac_warm_reset_restore_cal)); + len += scnprintf(buf + len, buf_len - len, "mac_sfm_reset = %u\n", + le32_to_cpu(htt_buf->mac_sfm_reset)); + len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_m3_ssr = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_m3_ssr)); + len += scnprintf(buf + len, buf_len - len, "fw_rx_rings_reset = %u\n", + le32_to_cpu(htt_buf->fw_rx_rings_reset)); + len += scnprintf(buf + len, buf_len - len, "tx_flush = %u\n", + le32_to_cpu(htt_buf->tx_flush)); + len += scnprintf(buf + len, buf_len - len, "tx_glb_reset = %u\n", + le32_to_cpu(htt_buf->tx_glb_reset)); + len += scnprintf(buf + len, buf_len - len, "tx_txq_reset = %u\n", + le32_to_cpu(htt_buf->tx_txq_reset)); + len += scnprintf(buf + len, buf_len - len, "rx_timeout_reset = %u\n\n", + le32_to_cpu(htt_buf->rx_timeout_reset)); + + len += scnprintf(buf + len, buf_len - len, "PDEV_PHY_WARM_RESET_REASONS:\n"); + len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_reason_phy_m3 = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_phy_m3)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_tx_hw_stuck = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_hw_stuck)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_num_cca_rx_frame_stuck = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_num_rx_frame_stuck)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_wal_rx_recovery_rst_rx_busy = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_wal_rx_rec_rx_busy)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_wal_rx_recovery_rst_mac_hang = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_wal_rx_rec_mac_hng)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_mac_reset_converted_phy_reset = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_mac_conv_phy_reset)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_tx_lifetime_expiry_cca_stuck = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_exp_cca_stuck)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_tx_consecutive_flush9_war = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_consec_flsh_war)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_tx_hwsch_reset_war = 
%u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_hwsch_reset_war)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_hwsch_wdog_or_cca_wdog_war = %u\n\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_hwsch_cca_wdog_war)); + + len += scnprintf(buf + len, buf_len - len, "WAL_RX_RECOVERY_STATS:\n"); + len += scnprintf(buf + len, buf_len - len, + "wal_rx_recovery_rst_mac_hang_count = %u\n", + le32_to_cpu(htt_buf->wal_rx_recovery_rst_mac_hang_cnt)); + len += scnprintf(buf + len, buf_len - len, + "wal_rx_recovery_rst_known_sig_count = %u\n", + le32_to_cpu(htt_buf->wal_rx_recovery_rst_known_sig_cnt)); + len += scnprintf(buf + len, buf_len - len, + "wal_rx_recovery_rst_no_rx_count = %u\n", + le32_to_cpu(htt_buf->wal_rx_recovery_rst_no_rx_cnt)); + len += scnprintf(buf + len, buf_len - len, + "wal_rx_recovery_rst_no_rx_consecutive_count = %u\n", + le32_to_cpu(htt_buf->wal_rx_recovery_rst_no_rx_consec_cnt)); + len += scnprintf(buf + len, buf_len - len, + "wal_rx_recovery_rst_rx_busy_count = %u\n", + le32_to_cpu(htt_buf->wal_rx_recovery_rst_rx_busy_cnt)); + len += scnprintf(buf + len, buf_len - len, + "wal_rx_recovery_rst_phy_mac_hang_count = %u\n\n", + le32_to_cpu(htt_buf->wal_rx_recovery_rst_phy_mac_hang_cnt)); + + len += scnprintf(buf + len, buf_len - len, "HTT_RX_DEST_DRAIN_STATS:\n"); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_rx_descs_leak_prevention_done = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_rx_descs_leak_prevented)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_rx_descs_saved_cnt = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_rx_descs_saved_cnt)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_rxdma2reo_leak_detected = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_rxdma2reo_leak_detected)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_rxdma2fw_leak_detected = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_rxdma2fw_leak_detected)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_rxdma2wbm_leak_detected = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_rxdma2wbm_leak_detected)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_rxdma1_2sw_leak_detected = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_rxdma1_2sw_leak_detected)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_rx_drain_ok_mac_idle = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_rx_drain_ok_mac_idle)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_ok_mac_not_idle = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_ok_mac_not_idle)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_prerequisite_invld = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_prerequisite_invld)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_skip_for_non_lmac_reset = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_skip_non_lmac_reset)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_hw_fifo_not_empty_post_drain_wait = %u\n\n", + le32_to_cpu(htt_buf->rx_dest_drain_hw_fifo_notempty_post_wait)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_hw_stats_intr_misc_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_hw_stats_intr_misc_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:\n"); + len += scnprintf(buf + 
len, buf_len - len, "hw_intr_name = %s\n", + htt_stats_buf->hw_intr_name); + len += scnprintf(buf + len, buf_len - len, "mask = %u\n", + le32_to_cpu(htt_stats_buf->mask)); + len += scnprintf(buf + len, buf_len - len, "count = %u\n\n", + le32_to_cpu(htt_stats_buf->count)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_hw_stats_whal_tx_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_hw_stats_whal_tx_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "last_unpause_ppdu_id = %u\n", + le32_to_cpu(htt_stats_buf->last_unpause_ppdu_id)); + len += scnprintf(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_unpause_wait_tqm_write)); + len += scnprintf(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_dummy_tlv_skipped)); + len += scnprintf(buf + len, buf_len - len, + "hwsch_misaligned_offset_received = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_misaligned_offset_received)); + len += scnprintf(buf + len, buf_len - len, "hwsch_reset_count = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_reset_count)); + len += scnprintf(buf + len, buf_len - len, "hwsch_dev_reset_war = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_dev_reset_war)); + len += scnprintf(buf + len, buf_len - len, "hwsch_delayed_pause = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_delayed_pause)); + len += scnprintf(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_long_delayed_pause)); + len += scnprintf(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u\n", + le32_to_cpu(htt_stats_buf->sch_rx_ppdu_no_response)); + len += scnprintf(buf + len, buf_len - len, "sch_selfgen_response = %u\n", + le32_to_cpu(htt_stats_buf->sch_selfgen_response)); + len += scnprintf(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger= %u\n\n", + le32_to_cpu(htt_stats_buf->sch_rx_sifs_resp_trigger)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_hw_war_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_hw_war_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 fixed_len, array_len; + u8 i, array_words; + u32 mac_id; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id = __le32_to_cpu(htt_stats_buf->mac_id__word); + fixed_len = sizeof(*htt_stats_buf); + array_len = tag_len - fixed_len; + array_words = array_len >> 2; + + len += scnprintf(buf + len, buf_len - len, "HTT_HW_WAR_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id, ATH12K_HTT_STATS_MAC_ID)); + + for (i = 0; i < array_words; i++) { + len += scnprintf(buf + len, buf_len - len, "hw_war %u = %u\n\n", + i, le32_to_cpu(htt_stats_buf->hw_wars[i])); + } + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_tqm_cmn_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct 
ath12k_htt_tx_tqm_cmn_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "max_cmdq_id = %u\n", + le32_to_cpu(htt_stats_buf->max_cmdq_id)); + len += scnprintf(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u\n", + le32_to_cpu(htt_stats_buf->list_mpdu_cnt_hist_intvl)); + len += scnprintf(buf + len, buf_len - len, "add_msdu = %u\n", + le32_to_cpu(htt_stats_buf->add_msdu)); + len += scnprintf(buf + len, buf_len - len, "q_empty = %u\n", + le32_to_cpu(htt_stats_buf->q_empty)); + len += scnprintf(buf + len, buf_len - len, "q_not_empty = %u\n", + le32_to_cpu(htt_stats_buf->q_not_empty)); + len += scnprintf(buf + len, buf_len - len, "drop_notification = %u\n", + le32_to_cpu(htt_stats_buf->drop_notification)); + len += scnprintf(buf + len, buf_len - len, "desc_threshold = %u\n", + le32_to_cpu(htt_stats_buf->desc_threshold)); + len += scnprintf(buf + len, buf_len - len, "hwsch_tqm_invalid_status = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_tqm_invalid_status)); + len += scnprintf(buf + len, buf_len - len, "missed_tqm_gen_mpdus = %u\n", + le32_to_cpu(htt_stats_buf->missed_tqm_gen_mpdus)); + len += scnprintf(buf + len, buf_len - len, + "total_msduq_timestamp_updates = %u\n", + le32_to_cpu(htt_stats_buf->msduq_timestamp_updates)); + len += scnprintf(buf + len, buf_len - len, + "total_msduq_timestamp_updates_by_get_mpdu_head_info_cmd = %u\n", + le32_to_cpu(htt_stats_buf->msduq_updates_mpdu_head_info_cmd)); + len += scnprintf(buf + len, buf_len - len, + "total_msduq_timestamp_updates_by_emp_to_nonemp_status = %u\n", + le32_to_cpu(htt_stats_buf->msduq_updates_emp_to_nonemp_status)); + len += scnprintf(buf + len, buf_len - len, + "total_get_mpdu_head_info_cmds_by_sched_algo_la_query = %u\n", + le32_to_cpu(htt_stats_buf->get_mpdu_head_info_cmds_by_query)); + len += scnprintf(buf + len, buf_len - len, + "total_get_mpdu_head_info_cmds_by_tac = %u\n", + le32_to_cpu(htt_stats_buf->get_mpdu_head_info_cmds_by_tac)); + len += scnprintf(buf + len, buf_len - len, + "total_gen_mpdu_cmds_by_sched_algo_la_query = %u\n", + le32_to_cpu(htt_stats_buf->gen_mpdu_cmds_by_query)); + len += scnprintf(buf + len, buf_len - len, "active_tqm_tids = %u\n", + le32_to_cpu(htt_stats_buf->tqm_active_tids)); + len += scnprintf(buf + len, buf_len - len, "inactive_tqm_tids = %u\n", + le32_to_cpu(htt_stats_buf->tqm_inactive_tids)); + len += scnprintf(buf + len, buf_len - len, "tqm_active_msduq_flows = %u\n", + le32_to_cpu(htt_stats_buf->tqm_active_msduq_flows)); + len += scnprintf(buf + len, buf_len - len, "hi_prio_q_not_empty = %u\n\n", + le32_to_cpu(htt_stats_buf->high_prio_q_not_empty)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_tqm_error_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_tqm_error_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:\n"); + len += 
scnprintf(buf + len, buf_len - len, "q_empty_failure = %u\n", + le32_to_cpu(htt_stats_buf->q_empty_failure)); + len += scnprintf(buf + len, buf_len - len, "q_not_empty_failure = %u\n", + le32_to_cpu(htt_stats_buf->q_not_empty_failure)); + len += scnprintf(buf + len, buf_len - len, "add_msdu_failure = %u\n\n", + le32_to_cpu(htt_stats_buf->add_msdu_failure)); + + len += scnprintf(buf + len, buf_len - len, "TQM_ERROR_RESET_STATS:\n"); + len += scnprintf(buf + len, buf_len - len, "tqm_cache_ctl_err = %u\n", + le32_to_cpu(htt_stats_buf->tqm_cache_ctl_err)); + len += scnprintf(buf + len, buf_len - len, "tqm_soft_reset = %u\n", + le32_to_cpu(htt_stats_buf->tqm_soft_reset)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_total_num_in_use_link_descs = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_num_in_use_link_descs)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_worst_case_num_lost_link_descs = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_num_lost_link_descs)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_worst_case_num_lost_host_tx_bufs_count = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_num_lost_host_tx_buf_cnt)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_num_in_use_link_descs_internal_tqm = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_num_in_use_internal_tqm)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_num_in_use_link_descs_wbm_idle_link_ring = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_num_in_use_idle_link_rng)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_time_to_tqm_hang_delta_ms = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_time_to_tqm_hang_delta_ms)); + len += scnprintf(buf + len, buf_len - len, "tqm_reset_recovery_time_ms = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_recovery_time_ms)); + len += scnprintf(buf + len, buf_len - len, "tqm_reset_num_peers_hdl = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_num_peers_hdl)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_cumm_dirty_hw_mpduq_proc_cnt = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_cumm_dirty_hw_mpduq_cnt)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_cumm_dirty_hw_msduq_proc = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_cumm_dirty_hw_msduq_proc)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_flush_cache_cmd_su_cnt = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_su_cnt)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_flush_cache_cmd_other_cnt = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_other_cnt)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_flush_cache_cmd_trig_type = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_trig_type)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_flush_cache_cmd_trig_cfg = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_trig_cfg)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_flush_cache_cmd_skip_cmd_status_null = %u\n\n", + le32_to_cpu(htt_stats_buf->tqm_reset_flush_cmd_skp_status_null)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_tqm_gen_mpdu_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_tqm_gen_mpdu_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elements = tag_len >> 2; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV:\n"); + len 
+= print_array_to_buf(buf, len, "gen_mpdu_end_reason", + htt_stats_buf->gen_mpdu_end_reason, num_elements, + "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_tqm_list_mpdu_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_tqm_list_mpdu_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elems = min_t(u16, (tag_len >> 2), + ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_END_REASON); + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_STATS_TLV:\n"); + len += print_array_to_buf(buf, len, "list_mpdu_end_reason", + htt_stats_buf->list_mpdu_end_reason, num_elems, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_tqm_list_mpdu_cnt_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_tqm_list_mpdu_cnt_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elems = min_t(u16, (tag_len >> 2), + ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS); + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:\n"); + len += print_array_to_buf(buf, len, "list_mpdu_cnt_hist", + htt_stats_buf->list_mpdu_cnt_hist, num_elems, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_tqm_pdev_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_tqm_pdev_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:\n"); + len += scnprintf(buf + len, buf_len - len, "msdu_count = %u\n", + le32_to_cpu(htt_stats_buf->msdu_count)); + len += scnprintf(buf + len, buf_len - len, "mpdu_count = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_count)); + len += scnprintf(buf + len, buf_len - len, "remove_msdu = %u\n", + le32_to_cpu(htt_stats_buf->remove_msdu)); + len += scnprintf(buf + len, buf_len - len, "remove_mpdu = %u\n", + le32_to_cpu(htt_stats_buf->remove_mpdu)); + len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl = %u\n", + le32_to_cpu(htt_stats_buf->remove_msdu_ttl)); + len += scnprintf(buf + len, buf_len - len, "send_bar = %u\n", + le32_to_cpu(htt_stats_buf->send_bar)); + len += scnprintf(buf + len, buf_len - len, "bar_sync = %u\n", + le32_to_cpu(htt_stats_buf->bar_sync)); + len += scnprintf(buf + len, buf_len - len, "notify_mpdu = %u\n", + le32_to_cpu(htt_stats_buf->notify_mpdu)); + len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n", + le32_to_cpu(htt_stats_buf->sync_cmd)); + len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n", + le32_to_cpu(htt_stats_buf->write_cmd)); + len += scnprintf(buf + len, buf_len - len, "hwsch_trigger = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_trigger)); + len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n", + le32_to_cpu(htt_stats_buf->ack_tlv_proc)); + len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n", + le32_to_cpu(htt_stats_buf->gen_mpdu_cmd)); + len += scnprintf(buf + len, buf_len - len, "gen_list_cmd = %u\n", + le32_to_cpu(htt_stats_buf->gen_list_cmd)); + len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n", + 
le32_to_cpu(htt_stats_buf->remove_mpdu_cmd)); + len += scnprintf(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u\n", + le32_to_cpu(htt_stats_buf->remove_mpdu_tried_cmd)); + len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_queue_stats_cmd)); + len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_head_info_cmd)); + len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n", + le32_to_cpu(htt_stats_buf->msdu_flow_stats_cmd)); + len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n", + le32_to_cpu(htt_stats_buf->remove_msdu_cmd)); + len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u\n", + le32_to_cpu(htt_stats_buf->remove_msdu_ttl_cmd)); + len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n", + le32_to_cpu(htt_stats_buf->flush_cache_cmd)); + len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n", + le32_to_cpu(htt_stats_buf->update_mpduq_cmd)); + len += scnprintf(buf + len, buf_len - len, "enqueue = %u\n", + le32_to_cpu(htt_stats_buf->enqueue)); + len += scnprintf(buf + len, buf_len - len, "enqueue_notify = %u\n", + le32_to_cpu(htt_stats_buf->enqueue_notify)); + len += scnprintf(buf + len, buf_len - len, "notify_mpdu_at_head = %u\n", + le32_to_cpu(htt_stats_buf->notify_mpdu_at_head)); + len += scnprintf(buf + len, buf_len - len, "notify_mpdu_state_valid = %u\n", + le32_to_cpu(htt_stats_buf->notify_mpdu_state_valid)); + len += scnprintf(buf + len, buf_len - len, "sched_udp_notify1 = %u\n", + le32_to_cpu(htt_stats_buf->sched_udp_notify1)); + len += scnprintf(buf + len, buf_len - len, "sched_udp_notify2 = %u\n", + le32_to_cpu(htt_stats_buf->sched_udp_notify2)); + len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify1 = %u\n", + le32_to_cpu(htt_stats_buf->sched_nonudp_notify1)); + len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n\n", + le32_to_cpu(htt_stats_buf->sched_nonudp_notify2)); + + stats_req->buf_len = len; +} + +static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab, + u16 tag, u16 len, const void *tag_buf, + void *user_data) +{ + struct debug_htt_stats_req *stats_req = user_data; + + switch (tag) { + case HTT_STATS_TX_PDEV_CMN_TAG: + htt_print_tx_pdev_stats_cmn_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_UNDERRUN_TAG: + htt_print_tx_pdev_stats_urrn_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_SIFS_TAG: + htt_print_tx_pdev_stats_sifs_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_FLUSH_TAG: + htt_print_tx_pdev_stats_flush_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_SIFS_HIST_TAG: + htt_print_tx_pdev_stats_sifs_hist_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG: + htt_print_pdev_ctrl_path_tx_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_MU_PPDU_DIST_TAG: + htt_print_tx_pdev_mu_ppdu_dist_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_SCHED_CMN_TAG: + ath12k_htt_print_stats_tx_sched_cmn_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG: + ath12k_htt_print_tx_pdev_stats_sched_per_txq_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG: + ath12k_htt_print_sched_txq_cmd_posted_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG: + ath12k_htt_print_sched_txq_cmd_reaped_tlv(tag_buf, len, stats_req); + break; + case 
HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG: + ath12k_htt_print_sched_txq_sched_order_su_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG: + ath12k_htt_print_sched_txq_sched_ineligibility_tlv(tag_buf, len, + stats_req); + break; + case HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG: + ath12k_htt_print_sched_txq_supercycle_trigger_tlv(tag_buf, len, + stats_req); + break; + case HTT_STATS_HW_PDEV_ERRS_TAG: + ath12k_htt_print_hw_stats_pdev_errs_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_HW_INTR_MISC_TAG: + ath12k_htt_print_hw_stats_intr_misc_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_WHAL_TX_TAG: + ath12k_htt_print_hw_stats_whal_tx_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_HW_WAR_TAG: + ath12k_htt_print_hw_war_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_TQM_CMN_TAG: + ath12k_htt_print_tx_tqm_cmn_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_TQM_ERROR_STATS_TAG: + ath12k_htt_print_tx_tqm_error_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_TQM_GEN_MPDU_TAG: + ath12k_htt_print_tx_tqm_gen_mpdu_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_TQM_LIST_MPDU_TAG: + ath12k_htt_print_tx_tqm_list_mpdu_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG: + ath12k_htt_print_tx_tqm_list_mpdu_cnt_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_TQM_PDEV_TAG: + ath12k_htt_print_tx_tqm_pdev_stats_tlv(tag_buf, len, stats_req); + break; + default: + break; + } + + return 0; +} + +void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab, + struct sk_buff *skb) +{ + struct ath12k_htt_extd_stats_msg *msg; + struct debug_htt_stats_req *stats_req; + struct ath12k *ar; + u32 len, pdev_id, stats_info; + u64 cookie; + int ret; + bool send_completion = false; + + msg = (struct ath12k_htt_extd_stats_msg *)skb->data; + cookie = le64_to_cpu(msg->cookie); + + if (u64_get_bits(cookie, ATH12K_HTT_STATS_COOKIE_MSB) != + ATH12K_HTT_STATS_MAGIC_VALUE) { + ath12k_warn(ab, "received invalid htt ext stats event\n"); + return; + } + + pdev_id = u64_get_bits(cookie, ATH12K_HTT_STATS_COOKIE_LSB); + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); + if (!ar) { + ath12k_warn(ab, "failed to get ar for pdev_id %d\n", pdev_id); + goto exit; + } + + stats_req = ar->debug.htt_stats.stats_req; + if (!stats_req) + goto exit; + + spin_lock_bh(&ar->data_lock); + + stats_info = le32_to_cpu(msg->info1); + stats_req->done = u32_get_bits(stats_info, ATH12K_HTT_T2H_EXT_STATS_INFO1_DONE); + if (stats_req->done) + send_completion = true; + + spin_unlock_bh(&ar->data_lock); + + len = u32_get_bits(stats_info, ATH12K_HTT_T2H_EXT_STATS_INFO1_LENGTH); + if (len > skb->len) { + ath12k_warn(ab, "invalid length %d for HTT stats", len); + goto exit; + } + + ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len, + ath12k_dbg_htt_ext_stats_parse, + stats_req); + if (ret) + ath12k_warn(ab, "Failed to parse tlv %d\n", ret); + + if (send_completion) + complete(&stats_req->htt_stats_rcvd); +exit: + rcu_read_unlock(); +} + +static ssize_t ath12k_read_htt_stats_type(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath12k *ar = file->private_data; + enum ath12k_dbg_htt_ext_stats_type type; + char buf[32]; + size_t len; + + mutex_lock(&ar->conf_mutex); + type = ar->debug.htt_stats.type; + mutex_unlock(&ar->conf_mutex); + + len = scnprintf(buf, sizeof(buf), "%u\n", type); + + return simple_read_from_buffer(user_buf, count, ppos, 
buf, len);
+}
+
+static ssize_t ath12k_write_htt_stats_type(struct file *file,
+					   const char __user *user_buf,
+					   size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	enum ath12k_dbg_htt_ext_stats_type type;
+	unsigned int cfg_param[4] = {0};
+	const int size = 32;
+	int num_args;
+
+	char *buf __free(kfree) = kzalloc(size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* Reject writes that would overflow the fixed-size buffer and keep
+	 * at least one zeroed byte so sscanf() always sees a NUL-terminated
+	 * string.
+	 */
+	if (count >= size)
+		return -EINVAL;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	num_args = sscanf(buf, "%u %u %u %u %u\n", &type, &cfg_param[0],
+			  &cfg_param[1], &cfg_param[2], &cfg_param[3]);
+	if (num_args < 1 || num_args > 5)
+		return -EINVAL;
+
+	if (type == ATH12K_DBG_HTT_EXT_STATS_RESET ||
+	    type >= ATH12K_DBG_HTT_NUM_EXT_STATS)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	ar->debug.htt_stats.type = type;
+	ar->debug.htt_stats.cfg_param[0] = cfg_param[0];
+	ar->debug.htt_stats.cfg_param[1] = cfg_param[1];
+	ar->debug.htt_stats.cfg_param[2] = cfg_param[2];
+	ar->debug.htt_stats.cfg_param[3] = cfg_param[3];
+
+	mutex_unlock(&ar->conf_mutex);
+
+	return count;
+}
+
+static const struct file_operations fops_htt_stats_type = {
+	.read = ath12k_read_htt_stats_type,
+	.write = ath12k_write_htt_stats_type,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static int ath12k_debugfs_htt_stats_req(struct ath12k *ar)
+{
+	struct debug_htt_stats_req *stats_req = ar->debug.htt_stats.stats_req;
+	enum ath12k_dbg_htt_ext_stats_type type = stats_req->type;
+	u64 cookie;
+	int ret, pdev_id;
+	struct htt_ext_stats_cfg_params cfg_params = { 0 };
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	init_completion(&stats_req->htt_stats_rcvd);
+
+	pdev_id = ath12k_mac_get_target_pdev_id(ar);
+	stats_req->done = false;
+	stats_req->pdev_id = pdev_id;
+
+	cookie = u64_encode_bits(ATH12K_HTT_STATS_MAGIC_VALUE,
+				 ATH12K_HTT_STATS_COOKIE_MSB);
+	cookie |= u64_encode_bits(pdev_id, ATH12K_HTT_STATS_COOKIE_LSB);
+
+	if (stats_req->override_cfg_param) {
+		cfg_params.cfg0 = stats_req->cfg_param[0];
+		cfg_params.cfg1 = stats_req->cfg_param[1];
+		cfg_params.cfg2 = stats_req->cfg_param[2];
+		cfg_params.cfg3 = stats_req->cfg_param[3];
+	}
+
+	ret = ath12k_dp_tx_htt_h2t_ext_stats_req(ar, type, &cfg_params, cookie);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
+		return ret;
+	}
+	if (!wait_for_completion_timeout(&stats_req->htt_stats_rcvd, 3 * HZ)) {
+		spin_lock_bh(&ar->data_lock);
+		if (!stats_req->done) {
+			stats_req->done = true;
+			spin_unlock_bh(&ar->data_lock);
+			ath12k_warn(ar->ab, "stats request timed out\n");
+			return -ETIMEDOUT;
+		}
+		spin_unlock_bh(&ar->data_lock);
+	}
+
+	return 0;
+}
+
+static int ath12k_open_htt_stats(struct inode *inode,
+				 struct file *file)
+{
+	struct ath12k *ar = inode->i_private;
+	struct debug_htt_stats_req *stats_req;
+	enum ath12k_dbg_htt_ext_stats_type type = ar->debug.htt_stats.type;
+	struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+	int ret;
+
+	if (type == ATH12K_DBG_HTT_EXT_STATS_RESET)
+		return -EPERM;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ah->state != ATH12K_HW_STATE_ON) {
+		ret = -ENETDOWN;
+		goto err_unlock;
+	}
+
+	if (ar->debug.htt_stats.stats_req) {
+		ret = -EAGAIN;
+		goto err_unlock;
+	}
+
+	stats_req = kzalloc(sizeof(*stats_req) + ATH12K_HTT_STATS_BUF_SIZE, GFP_KERNEL);
+	if (!stats_req) {
+		ret = -ENOMEM;
+		goto err_unlock;
+	}
+
+	ar->debug.htt_stats.stats_req = stats_req;
+	stats_req->type = type;
+	stats_req->cfg_param[0] = ar->debug.htt_stats.cfg_param[0];
+	stats_req->cfg_param[1] = ar->debug.htt_stats.cfg_param[1];
+
stats_req->cfg_param[2] = ar->debug.htt_stats.cfg_param[2]; + stats_req->cfg_param[3] = ar->debug.htt_stats.cfg_param[3]; + stats_req->override_cfg_param = !!stats_req->cfg_param[0] || + !!stats_req->cfg_param[1] || + !!stats_req->cfg_param[2] || + !!stats_req->cfg_param[3]; + + ret = ath12k_debugfs_htt_stats_req(ar); + if (ret < 0) + goto out; + + file->private_data = stats_req; + + mutex_unlock(&ar->conf_mutex); + + return 0; +out: + kfree(stats_req); + ar->debug.htt_stats.stats_req = NULL; +err_unlock: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static int ath12k_release_htt_stats(struct inode *inode, + struct file *file) +{ + struct ath12k *ar = inode->i_private; + + mutex_lock(&ar->conf_mutex); + kfree(file->private_data); + ar->debug.htt_stats.stats_req = NULL; + mutex_unlock(&ar->conf_mutex); + + return 0; +} + +static ssize_t ath12k_read_htt_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct debug_htt_stats_req *stats_req = file->private_data; + char *buf; + u32 length; + + buf = stats_req->buf; + length = min_t(u32, stats_req->buf_len, ATH12K_HTT_STATS_BUF_SIZE); + return simple_read_from_buffer(user_buf, count, ppos, buf, length); +} + +static const struct file_operations fops_dump_htt_stats = { + .open = ath12k_open_htt_stats, + .release = ath12k_release_htt_stats, + .read = ath12k_read_htt_stats, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath12k_write_htt_stats_reset(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath12k *ar = file->private_data; + enum ath12k_dbg_htt_ext_stats_type type; + struct htt_ext_stats_cfg_params cfg_params = { 0 }; + u8 param_pos; + int ret; + + ret = kstrtou32_from_user(user_buf, count, 0, &type); + if (ret) + return ret; + + if (type >= ATH12K_DBG_HTT_NUM_EXT_STATS || + type == ATH12K_DBG_HTT_EXT_STATS_RESET) + return -E2BIG; + + mutex_lock(&ar->conf_mutex); + cfg_params.cfg0 = HTT_STAT_DEFAULT_RESET_START_OFFSET; + param_pos = (type >> 5) + 1; + + switch (param_pos) { + case ATH12K_HTT_STATS_RESET_PARAM_CFG_32_BYTES: + cfg_params.cfg1 = 1 << (cfg_params.cfg0 + type); + break; + case ATH12K_HTT_STATS_RESET_PARAM_CFG_64_BYTES: + cfg_params.cfg2 = ATH12K_HTT_STATS_RESET_BITMAP32_BIT(cfg_params.cfg0 + + type); + break; + case ATH12K_HTT_STATS_RESET_PARAM_CFG_128_BYTES: + cfg_params.cfg3 = ATH12K_HTT_STATS_RESET_BITMAP64_BIT(cfg_params.cfg0 + + type); + break; + default: + break; + } + + ret = ath12k_dp_tx_htt_h2t_ext_stats_req(ar, + ATH12K_DBG_HTT_EXT_STATS_RESET, + &cfg_params, + 0ULL); + if (ret) { + ath12k_warn(ar->ab, "failed to send htt stats request: %d\n", ret); + mutex_unlock(&ar->conf_mutex); + return ret; + } + + ar->debug.htt_stats.reset = type; + mutex_unlock(&ar->conf_mutex); + + return count; +} + +static const struct file_operations fops_htt_stats_reset = { + .write = ath12k_write_htt_stats_reset, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +void ath12k_debugfs_htt_stats_register(struct ath12k *ar) +{ + debugfs_create_file("htt_stats_type", 0600, ar->debug.debugfs_pdev, + ar, &fops_htt_stats_type); + debugfs_create_file("htt_stats", 0400, ar->debug.debugfs_pdev, + ar, &fops_dump_htt_stats); + debugfs_create_file("htt_stats_reset", 0200, ar->debug.debugfs_pdev, + ar, &fops_htt_stats_reset); +} diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h new file mode 100644 index 000000000000..6294a082cf8a --- /dev/null 
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
@@ -0,0 +1,567 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef DEBUG_HTT_STATS_H
+#define DEBUG_HTT_STATS_H
+
+#define ATH12K_HTT_STATS_BUF_SIZE		(1024 * 512)
+#define ATH12K_HTT_STATS_COOKIE_LSB		GENMASK_ULL(31, 0)
+#define ATH12K_HTT_STATS_COOKIE_MSB		GENMASK_ULL(63, 32)
+#define ATH12K_HTT_STATS_MAGIC_VALUE		0xF0F0F0F0
+#define ATH12K_HTT_STATS_SUBTYPE_MAX		16
+#define ATH12K_HTT_MAX_STRING_LEN		256
+
+#define ATH12K_HTT_STATS_RESET_BITMAP32_OFFSET(_idx)	((_idx) & 0x1f)
+#define ATH12K_HTT_STATS_RESET_BITMAP64_OFFSET(_idx)	((_idx) & 0x3f)
+#define ATH12K_HTT_STATS_RESET_BITMAP32_BIT(_idx)	(1U << \
+		ATH12K_HTT_STATS_RESET_BITMAP32_OFFSET(_idx))
+#define ATH12K_HTT_STATS_RESET_BITMAP64_BIT(_idx)	(1ULL << \
+		ATH12K_HTT_STATS_RESET_BITMAP64_OFFSET(_idx))
+
+void ath12k_debugfs_htt_stats_register(struct ath12k *ar);
+
+#ifdef CONFIG_ATH12K_DEBUGFS
+void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab,
+					  struct sk_buff *skb);
+#else /* CONFIG_ATH12K_DEBUGFS */
+static inline void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab,
+							struct sk_buff *skb)
+{
+}
+#endif
+
+/**
+ * DOC: target -> host extended statistics upload
+ *
+ * The following field definitions describe the format of the HTT
+ * target to host stats upload confirmation message.
+ * The message contains a cookie echoed from the HTT host->target stats
+ * upload request, which identifies which request the confirmation is
+ * for. A single stats request can span multiple HTT stats indications
+ * due to the HTT message size limitation, so every HTT ext stats
+ * indication carries tag-length-value stats information elements.
+ * The tag-length header for each HTT stats IND message also includes a
+ * status field, to indicate whether the request for the stat type in
+ * question was fully met, partially met, unable to be met, or invalid
+ * (if the stat type in question is disabled in the target).
+ * A DONE bit set to 1 indicates the end of the stats info elements.
+ *
+ *
+ * |31                        16|15    12|11|10  8|7        5|4      0|
+ * |--------------------------------------------------------------|
+ * |                   reserved                   |    msg type    |
+ * |--------------------------------------------------------------|
+ * |                        cookie LSBs                           |
+ * |--------------------------------------------------------------|
+ * |                        cookie MSBs                           |
+ * |--------------------------------------------------------------|
+ * |      stats entry length    |   rsvd | D|  S  |   stat type    |
+ * |--------------------------------------------------------------|
+ * |                   type-specific stats info                   |
+ * |                   (see debugfs_htt_stats.h)                  |
+ * |--------------------------------------------------------------|
+ * Header fields:
+ *  - MSG_TYPE
+ *    Bits 7:0
+ *    Purpose: Identifies this as an extended statistics upload confirmation
+ *        message.
+ *    Value: 0x1c
+ *  - COOKIE_LSBS
+ *    Bits 31:0
+ *    Purpose: Provide a mechanism to match a target->host stats confirmation
+ *        message with its preceding host->target stats request message.
+ *    Value: LSBs of the opaque cookie specified by the host-side requestor
+ *  - COOKIE_MSBS
+ *    Bits 31:0
+ *    Purpose: Provide a mechanism to match a target->host stats confirmation
+ *        message with its preceding host->target stats request message.
+ *    Value: MSBs of the opaque cookie specified by the host-side requestor
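+ *
+ * As a minimal illustration (mirroring what
+ * ath12k_debugfs_htt_ext_stats_handler() does; the *_get_bits() helpers
+ * come from linux/bitfield.h and the masks are the ones defined in this
+ * header), a received indication can be decomposed as:
+ *
+ *	u64 cookie = le64_to_cpu(msg->cookie);
+ *	u32 info1 = le32_to_cpu(msg->info1);
+ *	u32 magic = u64_get_bits(cookie, ATH12K_HTT_STATS_COOKIE_MSB);
+ *	u32 pdev_id = u64_get_bits(cookie, ATH12K_HTT_STATS_COOKIE_LSB);
+ *	bool done = u32_get_bits(info1, ATH12K_HTT_T2H_EXT_STATS_INFO1_DONE);
+ *	u32 payload_len = u32_get_bits(info1, ATH12K_HTT_T2H_EXT_STATS_INFO1_LENGTH);
+ *
+ * where magic must equal ATH12K_HTT_STATS_MAGIC_VALUE for a valid event,
+ * and the TLVs in msg->data[] are walked until an indication arrives with
+ * the DONE bit set.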
+ *
+ * Stats Information Element tag-length header fields:
+ *  - STAT_TYPE
+ *    Bits 7:0
+ *    Purpose: identifies the type of statistics info held in the
+ *        following information element
+ *    Value: ath12k_dbg_htt_ext_stats_type
+ *  - STATUS
+ *    Bits 10:8
+ *    Purpose: indicate whether the requested stats are present
+ *    Value:
+ *        0 -> The requested stats have been delivered in full
+ *        1 -> The requested stats have been delivered in part
+ *        2 -> The requested stats could not be delivered (error case)
+ *        3 -> The requested stat type is not recognized (invalid)
+ *  - DONE
+ *    Bit 11
+ *    Purpose:
+ *        Indicates the completion of the stats entry; this will be the last
+ *        stats conf HTT segment for the requested stats type.
+ *    Value:
+ *        0 -> the stats retrieval is ongoing
+ *        1 -> the stats retrieval is complete
+ *  - LENGTH
+ *    Bits 31:16
+ *    Purpose: indicate the stats information size
+ *    Value: This field specifies the number of bytes of stats information
+ *        that follows the element tag-length header.
+ *        It is expected but not required that this length is a multiple of
+ *        4 bytes.
+ */
+
+#define ATH12K_HTT_T2H_EXT_STATS_INFO1_DONE		BIT(11)
+#define ATH12K_HTT_T2H_EXT_STATS_INFO1_LENGTH		GENMASK(31, 16)
+
+struct ath12k_htt_extd_stats_msg {
+	__le32 info0;
+	__le64 cookie;
+	__le32 info1;
+	u8 data[];
+} __packed;
+
+/* htt_dbg_ext_stats_type */
+enum ath12k_dbg_htt_ext_stats_type {
+	ATH12K_DBG_HTT_EXT_STATS_RESET			= 0,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_TX		= 1,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED		= 4,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR		= 5,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM		= 6,
+
+	/* keep this last */
+	ATH12K_DBG_HTT_NUM_EXT_STATS,
+};
+
+enum ath12k_dbg_htt_tlv_tag {
+	HTT_STATS_TX_PDEV_CMN_TAG			= 0,
+	HTT_STATS_TX_PDEV_UNDERRUN_TAG			= 1,
+	HTT_STATS_TX_PDEV_SIFS_TAG			= 2,
+	HTT_STATS_TX_PDEV_FLUSH_TAG			= 3,
+	HTT_STATS_TX_TQM_GEN_MPDU_TAG			= 11,
+	HTT_STATS_TX_TQM_LIST_MPDU_TAG			= 12,
+	HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG		= 13,
+	HTT_STATS_TX_TQM_CMN_TAG			= 14,
+	HTT_STATS_TX_TQM_PDEV_TAG			= 15,
+	HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG	= 36,
+	HTT_STATS_TX_SCHED_CMN_TAG			= 37,
+	HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG		= 39,
+	HTT_STATS_TX_TQM_ERROR_STATS_TAG		= 43,
+	HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG		= 44,
+	HTT_STATS_HW_INTR_MISC_TAG			= 54,
+	HTT_STATS_HW_PDEV_ERRS_TAG			= 56,
+	HTT_STATS_WHAL_TX_TAG				= 66,
+	HTT_STATS_TX_PDEV_SIFS_HIST_TAG			= 67,
+	HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG		= 86,
+	HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG	= 87,
+	HTT_STATS_HW_WAR_TAG				= 89,
+	HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG	= 100,
+	HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG		= 102,
+	HTT_STATS_MU_PPDU_DIST_TAG			= 129,
+
+	HTT_STATS_MAX_TAG,
+};
+
+#define ATH12K_HTT_STATS_MAC_ID				GENMASK(7, 0)
+
+#define ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_STATS		9
+#define ATH12K_HTT_TX_PDEV_MAX_FLUSH_REASON_STATS	150
+
+/* MU MIMO distribution stats is a 2-dimensional array,
+ * with the first dimension denoting stats for nr4 [0] or nr8 [1]
+ */
+#define ATH12K_HTT_STATS_NUM_NR_BINS			2
+#define ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST	10
+#define ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS	10
+#define ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS		9
+#define ATH12K_HTT_STATS_NUM_SCHED_STATUS_WORDS	\
+	(ATH12K_HTT_STATS_NUM_NR_BINS * ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS)
+#define ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS	\
+	(ATH12K_HTT_STATS_NUM_NR_BINS * ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST)
+
+enum ath12k_htt_tx_pdev_underrun_enum {
+
HTT_STATS_TX_PDEV_NO_DATA_UNDERRUN = 0, + HTT_STATS_TX_PDEV_DATA_UNDERRUN_BETWEEN_MPDU = 1, + HTT_STATS_TX_PDEV_DATA_UNDERRUN_WITHIN_MPDU = 2, + HTT_TX_PDEV_MAX_URRN_STATS = 3, +}; + +enum ath12k_htt_stats_reset_cfg_param_alloc_pos { + ATH12K_HTT_STATS_RESET_PARAM_CFG_32_BYTES = 1, + ATH12K_HTT_STATS_RESET_PARAM_CFG_64_BYTES, + ATH12K_HTT_STATS_RESET_PARAM_CFG_128_BYTES, +}; + +struct debug_htt_stats_req { + bool done; + bool override_cfg_param; + u8 pdev_id; + enum ath12k_dbg_htt_ext_stats_type type; + u32 cfg_param[4]; + u8 peer_addr[ETH_ALEN]; + struct completion htt_stats_rcvd; + u32 buf_len; + u8 buf[]; +}; + +struct ath12k_htt_tx_pdev_stats_cmn_tlv { + __le32 mac_id__word; + __le32 hw_queued; + __le32 hw_reaped; + __le32 underrun; + __le32 hw_paused; + __le32 hw_flush; + __le32 hw_filt; + __le32 tx_abort; + __le32 mpdu_requed; + __le32 tx_xretry; + __le32 data_rc; + __le32 mpdu_dropped_xretry; + __le32 illgl_rate_phy_err; + __le32 cont_xretry; + __le32 tx_timeout; + __le32 pdev_resets; + __le32 phy_underrun; + __le32 txop_ovf; + __le32 seq_posted; + __le32 seq_failed_queueing; + __le32 seq_completed; + __le32 seq_restarted; + __le32 mu_seq_posted; + __le32 seq_switch_hw_paused; + __le32 next_seq_posted_dsr; + __le32 seq_posted_isr; + __le32 seq_ctrl_cached; + __le32 mpdu_count_tqm; + __le32 msdu_count_tqm; + __le32 mpdu_removed_tqm; + __le32 msdu_removed_tqm; + __le32 mpdus_sw_flush; + __le32 mpdus_hw_filter; + __le32 mpdus_truncated; + __le32 mpdus_ack_failed; + __le32 mpdus_expired; + __le32 mpdus_seq_hw_retry; + __le32 ack_tlv_proc; + __le32 coex_abort_mpdu_cnt_valid; + __le32 coex_abort_mpdu_cnt; + __le32 num_total_ppdus_tried_ota; + __le32 num_data_ppdus_tried_ota; + __le32 local_ctrl_mgmt_enqued; + __le32 local_ctrl_mgmt_freed; + __le32 local_data_enqued; + __le32 local_data_freed; + __le32 mpdu_tried; + __le32 isr_wait_seq_posted; + + __le32 tx_active_dur_us_low; + __le32 tx_active_dur_us_high; + __le32 remove_mpdus_max_retries; + __le32 comp_delivered; + __le32 ppdu_ok; + __le32 self_triggers; + __le32 tx_time_dur_data; + __le32 seq_qdepth_repost_stop; + __le32 mu_seq_min_msdu_repost_stop; + __le32 seq_min_msdu_repost_stop; + __le32 seq_txop_repost_stop; + __le32 next_seq_cancel; + __le32 fes_offsets_err_cnt; + __le32 num_mu_peer_blacklisted; + __le32 mu_ofdma_seq_posted; + __le32 ul_mumimo_seq_posted; + __le32 ul_ofdma_seq_posted; + + __le32 thermal_suspend_cnt; + __le32 dfs_suspend_cnt; + __le32 tx_abort_suspend_cnt; + __le32 tgt_specific_opaque_txq_suspend_info; + __le32 last_suspend_reason; +} __packed; + +struct ath12k_htt_tx_pdev_stats_urrn_tlv { + DECLARE_FLEX_ARRAY(__le32, urrn_stats); +} __packed; + +struct ath12k_htt_tx_pdev_stats_flush_tlv { + DECLARE_FLEX_ARRAY(__le32, flush_errs); +} __packed; + +struct ath12k_htt_tx_pdev_stats_phy_err_tlv { + DECLARE_FLEX_ARRAY(__le32, phy_errs); +} __packed; + +struct ath12k_htt_tx_pdev_stats_sifs_tlv { + DECLARE_FLEX_ARRAY(__le32, sifs_status); +} __packed; + +struct ath12k_htt_pdev_ctrl_path_tx_stats_tlv { + __le32 fw_tx_mgmt_subtype[ATH12K_HTT_STATS_SUBTYPE_MAX]; +} __packed; + +struct ath12k_htt_tx_pdev_stats_sifs_hist_tlv { + DECLARE_FLEX_ARRAY(__le32, sifs_hist_status); +} __packed; + +enum ath12k_htt_stats_hw_mode { + ATH12K_HTT_STATS_HWMODE_AC = 0, + ATH12K_HTT_STATS_HWMODE_AX = 1, + ATH12K_HTT_STATS_HWMODE_BE = 2, +}; + +struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv { + __le32 hw_mode; + __le32 num_seq_term_status[ATH12K_HTT_STATS_NUM_SCHED_STATUS_WORDS]; + __le32 
num_ppdu_cmpl_per_burst[ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS]; + __le32 num_seq_posted[ATH12K_HTT_STATS_NUM_NR_BINS]; + __le32 num_ppdu_posted_per_burst[ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS]; +} __packed; + +#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID GENMASK(7, 0) +#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID GENMASK(15, 8) + +#define ATH12K_HTT_TX_PDEV_NUM_SCHED_ORDER_LOG 20 + +struct ath12k_htt_stats_tx_sched_cmn_tlv { + __le32 mac_id__word; + __le32 current_timestamp; +} __packed; + +struct ath12k_htt_tx_pdev_stats_sched_per_txq_tlv { + __le32 mac_id__word; + __le32 sched_policy; + __le32 last_sched_cmd_posted_timestamp; + __le32 last_sched_cmd_compl_timestamp; + __le32 sched_2_tac_lwm_count; + __le32 sched_2_tac_ring_full; + __le32 sched_cmd_post_failure; + __le32 num_active_tids; + __le32 num_ps_schedules; + __le32 sched_cmds_pending; + __le32 num_tid_register; + __le32 num_tid_unregister; + __le32 num_qstats_queried; + __le32 qstats_update_pending; + __le32 last_qstats_query_timestamp; + __le32 num_tqm_cmdq_full; + __le32 num_de_sched_algo_trigger; + __le32 num_rt_sched_algo_trigger; + __le32 num_tqm_sched_algo_trigger; + __le32 notify_sched; + __le32 dur_based_sendn_term; + __le32 su_notify2_sched; + __le32 su_optimal_queued_msdus_sched; + __le32 su_delay_timeout_sched; + __le32 su_min_txtime_sched_delay; + __le32 su_no_delay; + __le32 num_supercycles; + __le32 num_subcycles_with_sort; + __le32 num_subcycles_no_sort; +} __packed; + +struct ath12k_htt_sched_txq_cmd_posted_tlv { + DECLARE_FLEX_ARRAY(__le32, sched_cmd_posted); +} __packed; + +struct ath12k_htt_sched_txq_cmd_reaped_tlv { + DECLARE_FLEX_ARRAY(__le32, sched_cmd_reaped); +} __packed; + +struct ath12k_htt_sched_txq_sched_order_su_tlv { + DECLARE_FLEX_ARRAY(__le32, sched_order_su); +} __packed; + +struct ath12k_htt_sched_txq_sched_ineligibility_tlv { + DECLARE_FLEX_ARRAY(__le32, sched_ineligibility); +} __packed; + +enum ath12k_htt_sched_txq_supercycle_triggers_tlv_enum { + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_NONE = 0, + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_FORCED, + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_LESS_NUM_TIDQ_ENTRIES, + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_LESS_NUM_ACTIVE_TIDS, + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX_ITR_REACHED, + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_DUR_THRESHOLD_REACHED, + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_TWT_TRIGGER, + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX, +}; + +struct ath12k_htt_sched_txq_supercycle_triggers_tlv { + DECLARE_FLEX_ARRAY(__le32, supercycle_triggers); +} __packed; + +struct ath12k_htt_hw_stats_pdev_errs_tlv { + __le32 mac_id__word; + __le32 tx_abort; + __le32 tx_abort_fail_count; + __le32 rx_abort; + __le32 rx_abort_fail_count; + __le32 warm_reset; + __le32 cold_reset; + __le32 tx_flush; + __le32 tx_glb_reset; + __le32 tx_txq_reset; + __le32 rx_timeout_reset; + __le32 mac_cold_reset_restore_cal; + __le32 mac_cold_reset; + __le32 mac_warm_reset; + __le32 mac_only_reset; + __le32 phy_warm_reset; + __le32 phy_warm_reset_ucode_trig; + __le32 mac_warm_reset_restore_cal; + __le32 mac_sfm_reset; + __le32 phy_warm_reset_m3_ssr; + __le32 phy_warm_reset_reason_phy_m3; + __le32 phy_warm_reset_reason_tx_hw_stuck; + __le32 phy_warm_reset_reason_num_rx_frame_stuck; + __le32 phy_warm_reset_reason_wal_rx_rec_rx_busy; + __le32 phy_warm_reset_reason_wal_rx_rec_mac_hng; + __le32 phy_warm_reset_reason_mac_conv_phy_reset; + __le32 wal_rx_recovery_rst_mac_hang_cnt; + __le32 wal_rx_recovery_rst_known_sig_cnt; + __le32 wal_rx_recovery_rst_no_rx_cnt; + __le32 
wal_rx_recovery_rst_no_rx_consec_cnt; + __le32 wal_rx_recovery_rst_rx_busy_cnt; + __le32 wal_rx_recovery_rst_phy_mac_hang_cnt; + __le32 rx_flush_cnt; + __le32 phy_warm_reset_reason_tx_exp_cca_stuck; + __le32 phy_warm_reset_reason_tx_consec_flsh_war; + __le32 phy_warm_reset_reason_tx_hwsch_reset_war; + __le32 phy_warm_reset_reason_hwsch_cca_wdog_war; + __le32 fw_rx_rings_reset; + __le32 rx_dest_drain_rx_descs_leak_prevented; + __le32 rx_dest_drain_rx_descs_saved_cnt; + __le32 rx_dest_drain_rxdma2reo_leak_detected; + __le32 rx_dest_drain_rxdma2fw_leak_detected; + __le32 rx_dest_drain_rxdma2wbm_leak_detected; + __le32 rx_dest_drain_rxdma1_2sw_leak_detected; + __le32 rx_dest_drain_rx_drain_ok_mac_idle; + __le32 rx_dest_drain_ok_mac_not_idle; + __le32 rx_dest_drain_prerequisite_invld; + __le32 rx_dest_drain_skip_non_lmac_reset; + __le32 rx_dest_drain_hw_fifo_notempty_post_wait; +} __packed; + +#define ATH12K_HTT_STATS_MAX_HW_INTR_NAME_LEN 8 +struct ath12k_htt_hw_stats_intr_misc_tlv { + u8 hw_intr_name[ATH12K_HTT_STATS_MAX_HW_INTR_NAME_LEN]; + __le32 mask; + __le32 count; +} __packed; + +struct ath12k_htt_hw_stats_whal_tx_tlv { + __le32 mac_id__word; + __le32 last_unpause_ppdu_id; + __le32 hwsch_unpause_wait_tqm_write; + __le32 hwsch_dummy_tlv_skipped; + __le32 hwsch_misaligned_offset_received; + __le32 hwsch_reset_count; + __le32 hwsch_dev_reset_war; + __le32 hwsch_delayed_pause; + __le32 hwsch_long_delayed_pause; + __le32 sch_rx_ppdu_no_response; + __le32 sch_selfgen_response; + __le32 sch_rx_sifs_resp_trigger; +} __packed; + +struct ath12k_htt_hw_war_stats_tlv { + __le32 mac_id__word; + DECLARE_FLEX_ARRAY(__le32, hw_wars); +} __packed; + +struct ath12k_htt_tx_tqm_cmn_stats_tlv { + __le32 mac_id__word; + __le32 max_cmdq_id; + __le32 list_mpdu_cnt_hist_intvl; + __le32 add_msdu; + __le32 q_empty; + __le32 q_not_empty; + __le32 drop_notification; + __le32 desc_threshold; + __le32 hwsch_tqm_invalid_status; + __le32 missed_tqm_gen_mpdus; + __le32 tqm_active_tids; + __le32 tqm_inactive_tids; + __le32 tqm_active_msduq_flows; + __le32 msduq_timestamp_updates; + __le32 msduq_updates_mpdu_head_info_cmd; + __le32 msduq_updates_emp_to_nonemp_status; + __le32 get_mpdu_head_info_cmds_by_query; + __le32 get_mpdu_head_info_cmds_by_tac; + __le32 gen_mpdu_cmds_by_query; + __le32 high_prio_q_not_empty; +} __packed; + +struct ath12k_htt_tx_tqm_error_stats_tlv { + __le32 q_empty_failure; + __le32 q_not_empty_failure; + __le32 add_msdu_failure; + __le32 tqm_cache_ctl_err; + __le32 tqm_soft_reset; + __le32 tqm_reset_num_in_use_link_descs; + __le32 tqm_reset_num_lost_link_descs; + __le32 tqm_reset_num_lost_host_tx_buf_cnt; + __le32 tqm_reset_num_in_use_internal_tqm; + __le32 tqm_reset_num_in_use_idle_link_rng; + __le32 tqm_reset_time_to_tqm_hang_delta_ms; + __le32 tqm_reset_recovery_time_ms; + __le32 tqm_reset_num_peers_hdl; + __le32 tqm_reset_cumm_dirty_hw_mpduq_cnt; + __le32 tqm_reset_cumm_dirty_hw_msduq_proc; + __le32 tqm_reset_flush_cache_cmd_su_cnt; + __le32 tqm_reset_flush_cache_cmd_other_cnt; + __le32 tqm_reset_flush_cache_cmd_trig_type; + __le32 tqm_reset_flush_cache_cmd_trig_cfg; + __le32 tqm_reset_flush_cmd_skp_status_null; +} __packed; + +struct ath12k_htt_tx_tqm_gen_mpdu_stats_tlv { + DECLARE_FLEX_ARRAY(__le32, gen_mpdu_end_reason); +} __packed; + +#define ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_END_REASON 16 +#define ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS 16 + +struct ath12k_htt_tx_tqm_list_mpdu_stats_tlv { + DECLARE_FLEX_ARRAY(__le32, list_mpdu_end_reason); +} __packed; + +struct 
ath12k_htt_tx_tqm_list_mpdu_cnt_tlv { + DECLARE_FLEX_ARRAY(__le32, list_mpdu_cnt_hist); +} __packed; + +struct ath12k_htt_tx_tqm_pdev_stats_tlv { + __le32 msdu_count; + __le32 mpdu_count; + __le32 remove_msdu; + __le32 remove_mpdu; + __le32 remove_msdu_ttl; + __le32 send_bar; + __le32 bar_sync; + __le32 notify_mpdu; + __le32 sync_cmd; + __le32 write_cmd; + __le32 hwsch_trigger; + __le32 ack_tlv_proc; + __le32 gen_mpdu_cmd; + __le32 gen_list_cmd; + __le32 remove_mpdu_cmd; + __le32 remove_mpdu_tried_cmd; + __le32 mpdu_queue_stats_cmd; + __le32 mpdu_head_info_cmd; + __le32 msdu_flow_stats_cmd; + __le32 remove_msdu_cmd; + __le32 remove_msdu_ttl_cmd; + __le32 flush_cache_cmd; + __le32 update_mpduq_cmd; + __le32 enqueue; + __le32 enqueue_notify; + __le32 notify_mpdu_at_head; + __le32 notify_mpdu_state_valid; + __le32 sched_udp_notify1; + __le32 sched_udp_notify2; + __le32 sched_nonudp_notify1; + __le32 sched_nonudp_notify2; +} __packed; + +#endif diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h index 742094545089..b77497c14ac4 100644 --- a/drivers/net/wireless/ath/ath12k/dp.h +++ b/drivers/net/wireless/ath/ath12k/dp.h @@ -333,6 +333,7 @@ struct ath12k_dp { struct dp_srng reo_except_ring; struct dp_srng reo_cmd_ring; struct dp_srng reo_status_ring; + enum ath12k_peer_metadata_version peer_metadata_ver; struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX]; struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX]; struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX]; diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c index cb1f308f096b..14236d0a0c89 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.c +++ b/drivers/net/wireless/ath/ath12k/dp_rx.c @@ -17,6 +17,7 @@ #include "dp_tx.h" #include "peer.h" #include "dp_mon.h" +#include "debugfs_htt_stats.h" #define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ) @@ -1268,10 +1269,10 @@ static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab, return 0; } -static int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len, - int (*iter)(struct ath12k_base *ar, u16 tag, u16 len, - const void *ptr, void *data), - void *data) +int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len, + int (*iter)(struct ath12k_base *ar, u16 tag, u16 len, + const void *ptr, void *data), + void *data) { const struct htt_tlv *tlv; const void *begin = ptr; @@ -1741,6 +1742,7 @@ void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab, ath12k_htt_pull_ppdu_stats(ab, skb); break; case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: + ath12k_debugfs_htt_ext_stats_handler(ab, skb); break; case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND: ath12k_htt_mlo_offset_event_handler(ab, skb); @@ -2583,6 +2585,29 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab, rcu_read_unlock(); } +static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab, + enum ath12k_peer_metadata_version ver, + __le32 peer_metadata) +{ + switch (ver) { + default: + ath12k_warn(ab, "Unknown peer metadata version: %d", ver); + fallthrough; + case ATH12K_PEER_METADATA_V0: + return le32_get_bits(peer_metadata, + RX_MPDU_DESC_META_DATA_V0_PEER_ID); + case ATH12K_PEER_METADATA_V1: + return le32_get_bits(peer_metadata, + RX_MPDU_DESC_META_DATA_V1_PEER_ID); + case ATH12K_PEER_METADATA_V1A: + return le32_get_bits(peer_metadata, + RX_MPDU_DESC_META_DATA_V1A_PEER_ID); + case ATH12K_PEER_METADATA_V1B: + return le32_get_bits(peer_metadata, + RX_MPDU_DESC_META_DATA_V1B_PEER_ID); + } +} + 
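+/* Worked example (illustrative value only, not taken from the driver):
+ * with the V1 layout defined in hal_desc.h further below, a
+ * peer_meta_data word of 0x04409abc decodes as
+ *	peer_id = u32_get_bits(0x04409abc, GENMASK(13, 0))  = 0x1abc
+ *	vdev_id = u32_get_bits(0x04409abc, GENMASK(23, 16)) = 0x40
+ * Only the low 14 bits identify the peer from version 1 onwards (16 bits
+ * in version 0), which is why ath12k_dp_rx_get_peer_id() above must apply
+ * a version-specific mask rather than one common field.
+ */
+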
int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id, struct napi_struct *napi, int budget) { @@ -2611,6 +2636,8 @@ try_again: ath12k_hal_srng_access_begin(ab, srng); while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { + struct rx_mpdu_desc *mpdu_info; + struct rx_msdu_desc *msdu_info; enum hal_reo_dest_ring_push_reason push_reason; u32 cookie; @@ -2658,16 +2685,19 @@ try_again: continue; } - rxcb->is_first_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) & + msdu_info = &desc->rx_msdu_info; + mpdu_info = &desc->rx_mpdu_info; + + rxcb->is_first_msdu = !!(le32_to_cpu(msdu_info->info0) & RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); - rxcb->is_last_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) & + rxcb->is_last_msdu = !!(le32_to_cpu(msdu_info->info0) & RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); - rxcb->is_continuation = !!(le32_to_cpu(desc->rx_msdu_info.info0) & + rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); rxcb->mac_id = mac_id; - rxcb->peer_id = le32_get_bits(desc->rx_mpdu_info.peer_meta_data, - RX_MPDU_DESC_META_DATA_PEER_ID); - rxcb->tid = le32_get_bits(desc->rx_mpdu_info.info0, + rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver, + mpdu_info->peer_meta_data); + rxcb->tid = le32_get_bits(mpdu_info->info0, RX_MPDU_DESC_INFO0_TID); __skb_queue_tail(&msdu_list, msdu); @@ -3402,7 +3432,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, struct ath12k *ar; dma_addr_t paddr; bool is_frag; - bool drop = false; + bool drop; int pdev_id; tot_n_bufs_reaped = 0; @@ -3420,7 +3450,9 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, while (budget && (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { + drop = false; ab->soc_stats.err_ring_pkts++; + ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr, &desc_bank); if (ret) { diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h index 2ff421160181..eb1f92559179 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.h +++ b/drivers/net/wireless/ath/ath12k/dp_rx.h @@ -139,4 +139,8 @@ ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu); int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab); int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab); +int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len, + int (*iter)(struct ath12k_base *ar, u16 tag, u16 len, + const void *ptr, void *data), + void *data); #endif /* ATH12K_DP_RX_H */ diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c index c4cfa7cf7cb9..d08c04343e90 100644 --- a/drivers/net/wireless/ath/ath12k/dp_tx.c +++ b/drivers/net/wireless/ath/ath12k/dp_tx.c @@ -1086,6 +1086,7 @@ ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type, struct htt_ext_stats_cfg_cmd *cmd; int len = sizeof(*cmd); int ret; + u32 pdev_id; skb = ath12k_htc_alloc_skb(ab, len); if (!skb) @@ -1097,7 +1098,8 @@ ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type, memset(cmd, 0, sizeof(*cmd)); cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG; - cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id; + pdev_id = ath12k_mac_get_target_pdev_id(ar); + cmd->hdr.pdev_mask = 1 << pdev_id; cmd->hdr.stats_type = type; cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0); diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h index 02b7db06b24e..739f73370015 100644 --- 
a/drivers/net/wireless/ath/ath12k/hal_desc.h +++ b/drivers/net/wireless/ath/ath12k/hal_desc.h @@ -597,8 +597,30 @@ struct hal_tlv_64_hdr { #define RX_MPDU_DESC_INFO0_MPDU_QOS_CTRL_VALID BIT(27) #define RX_MPDU_DESC_INFO0_TID GENMASK(31, 28) -/* TODO revisit after meta data is concluded */ -#define RX_MPDU_DESC_META_DATA_PEER_ID GENMASK(15, 0) +/* Peer Metadata classification */ + +/* Version 0 */ +#define RX_MPDU_DESC_META_DATA_V0_PEER_ID GENMASK(15, 0) +#define RX_MPDU_DESC_META_DATA_V0_VDEV_ID GENMASK(23, 16) + +/* Version 1 */ +#define RX_MPDU_DESC_META_DATA_V1_PEER_ID GENMASK(13, 0) +#define RX_MPDU_DESC_META_DATA_V1_LOGICAL_LINK_ID GENMASK(15, 14) +#define RX_MPDU_DESC_META_DATA_V1_VDEV_ID GENMASK(23, 16) +#define RX_MPDU_DESC_META_DATA_V1_LMAC_ID GENMASK(25, 24) +#define RX_MPDU_DESC_META_DATA_V1_DEVICE_ID GENMASK(28, 26) + +/* Version 1A */ +#define RX_MPDU_DESC_META_DATA_V1A_PEER_ID GENMASK(13, 0) +#define RX_MPDU_DESC_META_DATA_V1A_VDEV_ID GENMASK(21, 14) +#define RX_MPDU_DESC_META_DATA_V1A_LOGICAL_LINK_ID GENMASK(25, 22) +#define RX_MPDU_DESC_META_DATA_V1A_DEVICE_ID GENMASK(28, 26) + +/* Version 1B */ +#define RX_MPDU_DESC_META_DATA_V1B_PEER_ID GENMASK(13, 0) +#define RX_MPDU_DESC_META_DATA_V1B_VDEV_ID GENMASK(21, 14) +#define RX_MPDU_DESC_META_DATA_V1B_HW_LINK_ID GENMASK(25, 22) +#define RX_MPDU_DESC_META_DATA_V1B_DEVICE_ID GENMASK(28, 26) struct rx_mpdu_desc { __le32 info0; /* %RX_MPDU_DESC_INFO */ diff --git a/drivers/net/wireless/ath/ath12k/htc.c b/drivers/net/wireless/ath/ath12k/htc.c index 2f2230f565bb..d13616bf07f4 100644 --- a/drivers/net/wireless/ath/ath12k/htc.c +++ b/drivers/net/wireless/ath/ath12k/htc.c @@ -244,6 +244,11 @@ static void ath12k_htc_suspend_complete(struct ath12k_base *ab, bool ack) complete(&ab->htc_suspend); } +static void ath12k_htc_wakeup_from_suspend(struct ath12k_base *ab) +{ + ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot wakeup from suspend is received\n"); +} + void ath12k_htc_rx_completion_handler(struct ath12k_base *ab, struct sk_buff *skb) { @@ -349,6 +354,7 @@ void ath12k_htc_rx_completion_handler(struct ath12k_base *ab, ath12k_htc_suspend_complete(ab, false); break; case ATH12K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID: + ath12k_htc_wakeup_from_suspend(ab); break; default: ath12k_warn(ab, "ignoring unsolicited htc ep0 event %u\n", diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h index af33bf11416b..e792eb6b249b 100644 --- a/drivers/net/wireless/ath/ath12k/hw.h +++ b/drivers/net/wireless/ath/ath12k/hw.h @@ -78,8 +78,6 @@ #define TARGET_NUM_WDS_ENTRIES 32 #define TARGET_DMA_BURST_SIZE 1 #define TARGET_RX_BATCHMODE 1 -#define TARGET_RX_PEER_METADATA_VER_V1A 2 -#define TARGET_RX_PEER_METADATA_VER_V1B 3 #define TARGET_EMA_MAX_PROFILE_PERIOD 8 #define ATH12K_HW_DEFAULT_QUEUE 0 diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index bd3e021e2a81..8106297f0bc1 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -6,6 +6,7 @@ #include <net/mac80211.h> #include <linux/etherdevice.h> + #include "mac.h" #include "core.h" #include "debug.h" @@ -15,6 +16,8 @@ #include "dp_rx.h" #include "peer.h" #include "debugfs.h" +#include "hif.h" +#include "wow.h" #define CHAN2G(_channel, _freq, _flags) { \ .band = NL80211_BAND_2GHZ, \ @@ -670,6 +673,82 @@ static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw, return NULL; } +static struct ath12k_vif *ath12k_mac_get_vif_up(struct ath12k *ar) +{ + struct ath12k_vif *arvif; + + 
lockdep_assert_held(&ar->conf_mutex); + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->is_up) + return arvif; + } + + return NULL; +} + +static bool ath12k_mac_band_match(enum nl80211_band band1, enum WMI_HOST_WLAN_BAND band2) +{ + switch (band1) { + case NL80211_BAND_2GHZ: + if (band2 & WMI_HOST_WLAN_2G_CAP) + return true; + break; + case NL80211_BAND_5GHZ: + case NL80211_BAND_6GHZ: + if (band2 & WMI_HOST_WLAN_5G_CAP) + return true; + break; + default: + return false; + } + + return false; +} + +static u8 ath12k_mac_get_target_pdev_id_from_vif(struct ath12k_vif *arvif) +{ + struct ath12k *ar = arvif->ar; + struct ath12k_base *ab = ar->ab; + struct ieee80211_vif *vif = arvif->vif; + struct cfg80211_chan_def def; + enum nl80211_band band; + u8 pdev_id = ab->fw_pdev[0].pdev_id; + int i; + + if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + return pdev_id; + + band = def.chan->band; + + for (i = 0; i < ab->fw_pdev_count; i++) { + if (ath12k_mac_band_match(band, ab->fw_pdev[i].supported_bands)) + return ab->fw_pdev[i].pdev_id; + } + + return pdev_id; +} + +u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar) +{ + struct ath12k_vif *arvif; + struct ath12k_base *ab = ar->ab; + + if (!ab->hw_params->single_pdev_only) + return ar->pdev->pdev_id; + + arvif = ath12k_mac_get_vif_up(ar); + + /* fw_pdev array has pdev ids derived from phy capability + * service ready event (pdev_and_hw_link_ids). + * If no vif is active, return default first index. + */ + if (!arvif) + return ar->ab->fw_pdev[0].pdev_id; + + /* If active vif is found, return the pdev id matching chandef band */ + return ath12k_mac_get_target_pdev_id_from_vif(arvif); +} + static void ath12k_pdev_caps_update(struct ath12k *ar) { struct ath12k_base *ab = ar->ab; @@ -2050,7 +2129,9 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar, { const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; int i; - u8 ampdu_factor, rx_mcs_80, rx_mcs_160, max_nss; + u8 ampdu_factor, max_nss; + u8 rx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED; + u8 rx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED; u16 mcs_160_map, mcs_80_map; bool support_160; u16 v; @@ -2255,9 +2336,6 @@ static int ath12k_get_smps_from_capa(const struct ieee80211_sta_ht_cap *ht_cap, const struct ieee80211_he_6ghz_capa *he_6ghz_capa, int *smps) { - if (!ht_cap->ht_supported && !he_6ghz_capa->capa) - return -EOPNOTSUPP; - if (ht_cap->ht_supported) *smps = u16_get_bits(ht_cap->cap, IEEE80211_HT_CAP_SM_PS); else @@ -2277,6 +2355,9 @@ static void ath12k_peer_assoc_h_smps(struct ieee80211_sta *sta, const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; int smps; + if (!ht_cap->ht_supported && !he_6ghz_capa->capa) + return; + if (ath12k_get_smps_from_capa(ht_cap, he_6ghz_capa, &smps)) return; @@ -2756,6 +2837,9 @@ static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif, { int smps, ret = 0; + if (!ht_cap->ht_supported && !he_6ghz_capa) + return 0; + ret = ath12k_get_smps_from_capa(ht_cap, he_6ghz_capa, &smps); if (ret < 0) return ret; @@ -2834,6 +2918,7 @@ static void ath12k_bss_assoc(struct ath12k *ar, } arvif->is_up = true; + arvif->rekey_data.enable_offload = false; ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d up (associated) bssid %pM aid %d\n", @@ -2881,6 +2966,8 @@ static void ath12k_bss_disassoc(struct ath12k *ar, arvif->is_up = false; + memset(&arvif->rekey_data, 0, sizeof(arvif->rekey_data)); + cancel_delayed_work(&arvif->connection_loss_work); } @@ -3372,7 +3459,7 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, 
 static struct ath12k*
 ath12k_mac_select_scan_device(struct ieee80211_hw *hw,
 			      struct ieee80211_vif *vif,
-			      struct ieee80211_scan_request *req)
+			      u32 center_freq)
 {
 	struct ath12k_hw *ah = hw->priv;
 	enum nl80211_band band;
@@ -3389,9 +3476,9 @@ ath12k_mac_select_scan_device(struct ieee80211_hw *hw,
 	 * split the hw request and perform multiple scans
 	 */
 
-	if (req->req.channels[0]->center_freq < ATH12K_MIN_5G_FREQ)
+	if (center_freq < ATH12K_MIN_5G_FREQ)
 		band = NL80211_BAND_2GHZ;
-	else if (req->req.channels[0]->center_freq < ATH12K_MIN_6G_FREQ)
+	else if (center_freq < ATH12K_MIN_6G_FREQ)
 		band = NL80211_BAND_5GHZ;
 	else
 		band = NL80211_BAND_6GHZ;
@@ -3591,7 +3678,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
 	/* Since the targeted scan device could depend on the frequency
 	 * requested in the hw_req, select the corresponding radio
 	 */
-	ar = ath12k_mac_select_scan_device(hw, vif, hw_req);
+	ar = ath12k_mac_select_scan_device(hw, vif, hw_req->req.channels[0]->center_freq);
 	if (!ar)
 		return -EINVAL;
 
@@ -4087,6 +4174,11 @@ static int ath12k_station_assoc(struct ath12k *ar,
 
 	ath12k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc);
 
+	if (peer_arg.peer_nss < 1) {
+		ath12k_warn(ar->ab,
+			    "invalid peer NSS %d\n", peer_arg.peer_nss);
+		return -EINVAL;
+	}
 	ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
 	if (ret) {
 		ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
@@ -5834,28 +5926,6 @@ static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable)
 	/* TODO: Need to support new monitor mode */
 }
 
-static void ath12k_mac_wait_reconfigure(struct ath12k_base *ab)
-{
-	int recovery_start_count;
-
-	if (!ab->is_reset)
-		return;
-
-	recovery_start_count = atomic_inc_return(&ab->recovery_start_count);
-
-	ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery start count %d\n", recovery_start_count);
-
-	if (recovery_start_count == ab->num_radios) {
-		complete(&ab->recovery_start);
-		ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery started success\n");
-	}
-
-	ath12k_dbg(ab, ATH12K_DBG_MAC, "waiting reconfigure...\n");
-
-	wait_for_completion_timeout(&ab->reconfigure_complete,
-				    ATH12K_RECONFIGURE_TIMEOUT_HZ);
-}
-
 static int ath12k_mac_start(struct ath12k *ar)
 {
 	struct ath12k_hw *ah = ar->ah;
@@ -5987,7 +6057,6 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
 		break;
 	case ATH12K_HW_STATE_RESTARTING:
 		ah->state = ATH12K_HW_STATE_RESTARTED;
-		ath12k_mac_wait_reconfigure(ah->ab);
 		break;
 	case ATH12K_HW_STATE_RESTARTED:
 	case ATH12K_HW_STATE_WEDGED:
@@ -8314,10 +8383,6 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
 	if (!sband)
 		sband = hw->wiphy->bands[NL80211_BAND_6GHZ];
 
-	if (!sband || idx >= sband->n_channels) {
-		idx -= sband->n_channels;
-		sband = NULL;
-	}
 	if (!sband || idx >= sband->n_channels)
 		return -ENOENT;
 
@@ -8416,12 +8481,68 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
 	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
 	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
 	struct ath12k_wmi_scan_req_arg arg;
-	struct ath12k *ar;
+	struct ath12k *ar, *prev_ar;
 	u32 scan_time_msec;
+	bool create = true;
 	int ret;
 
-	ar = ath12k_ah_to_ar(ah, 0);
+	if (ah->num_radio == 1) {
+		WARN_ON(!arvif->is_created);
+		ar = ath12k_ah_to_ar(ah, 0);
+		goto scan;
+	}
+
+	ar = ath12k_mac_select_scan_device(hw, vif, chan->center_freq);
+	if (!ar)
+		return -EINVAL;
+
+	/* If the vif is already assigned to a specific vdev of an ar,
+	 * check whether it is already started; a vdev that is started
+	 * is not allowed to switch to a new radio.
+ * If the vdev is not started, but was earlier created on a + * different ar, delete that vdev and create a new one. We don't + * delete it at scan stop, as an optimization that avoids redundant + * delete-create cycles of vdevs for the same ar when the requests + * for this vif always stay on the same band + */ + if (arvif->is_created) { + if (WARN_ON(!arvif->ar)) + return -EINVAL; + + if (ar != arvif->ar && arvif->is_started) + return -EBUSY; + if (ar != arvif->ar) { + /* backup the previously used ar ptr, since the vdev delete + * would assign the arvif->ar to NULL after the call + */ + prev_ar = arvif->ar; + mutex_lock(&prev_ar->conf_mutex); + ret = ath12k_mac_vdev_delete(prev_ar, vif); + mutex_unlock(&prev_ar->conf_mutex); + if (ret) { + ath12k_warn(prev_ar->ab, + "unable to delete scan vdev for roc: %d\n", + ret); + return ret; + } + } else { + create = false; + } + } + + if (create) { + mutex_lock(&ar->conf_mutex); + ret = ath12k_mac_vdev_create(ar, vif); + mutex_unlock(&ar->conf_mutex); + if (ret) { + ath12k_warn(ar->ab, "unable to create scan vdev for roc: %d\n", + ret); + return -EINVAL; + } + } + +scan: mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); @@ -8503,6 +8624,40 @@ exit: return ret; } +static void ath12k_mac_op_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) +{ + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_rekey_data *rekey_data = &arvif->rekey_data; + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar = ath12k_ah_to_ar(ah, 0); + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set rekey data vdev %d\n", + arvif->vdev_id); + + mutex_lock(&ar->conf_mutex); + + memcpy(rekey_data->kck, data->kck, NL80211_KCK_LEN); + memcpy(rekey_data->kek, data->kek, NL80211_KEK_LEN); + + /* The supplicant provides the replay counter in big-endian, while + * the firmware expects it in little-endian.
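 + * (get_unaligned_be64() below brings the counter into host order; + * it is re-encoded as little-endian by cpu_to_le64() when the GTK + * offload WMI command is built in ath12k_wmi_gtk_rekey_offload().)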
+ */ + rekey_data->replay_ctr = get_unaligned_be64(data->replay_ctr); + + arvif->rekey_data.enable_offload = true; + + ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "kck", NULL, + rekey_data->kck, NL80211_KCK_LEN); + ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "kek", NULL, + rekey_data->kek, NL80211_KEK_LEN); + ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "replay ctr", NULL, + &rekey_data->replay_ctr, sizeof(rekey_data->replay_ctr)); + + mutex_unlock(&ar->conf_mutex); +} + static const struct ieee80211_ops ath12k_ops = { .tx = ath12k_mac_op_tx, .wake_tx_queue = ieee80211_handle_wake_tx_queue, @@ -8518,6 +8673,7 @@ static const struct ieee80211_ops ath12k_ops = { .hw_scan = ath12k_mac_op_hw_scan, .cancel_hw_scan = ath12k_mac_op_cancel_hw_scan, .set_key = ath12k_mac_op_set_key, + .set_rekey_data = ath12k_mac_op_set_rekey_data, .sta_state = ath12k_mac_op_sta_state, .sta_set_txpwr = ath12k_mac_op_sta_set_txpwr, .sta_rc_update = ath12k_mac_op_sta_rc_update, @@ -8539,6 +8695,12 @@ static const struct ieee80211_ops ath12k_ops = { .sta_statistics = ath12k_mac_op_sta_statistics, .remain_on_channel = ath12k_mac_op_remain_on_channel, .cancel_remain_on_channel = ath12k_mac_op_cancel_remain_on_channel, + +#ifdef CONFIG_PM + .suspend = ath12k_wow_op_suspend, + .resume = ath12k_wow_op_resume, + .set_wakeup = ath12k_wow_op_set_wakeup, +#endif }; static void ath12k_mac_update_ch_list(struct ath12k *ar, @@ -8836,8 +8998,10 @@ static void ath12k_mac_hw_unregister(struct ath12k_hw *ah) struct ath12k *ar; int i; - for_each_ar(ah, ar, i) + for_each_ar(ah, ar, i) { cancel_work_sync(&ar->regd_update_work); + ath12k_debugfs_unregister(ar); + } ieee80211_unregister_hw(hw); @@ -8901,6 +9065,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) u32 ht_cap = U32_MAX, antennas_rx = 0, antennas_tx = 0; bool is_6ghz = false, is_raw_mode = false, is_monitor_disable = false; u8 *mac_addr = NULL; + u8 mbssid_max_interfaces = 0; wiphy->max_ap_assoc_sta = 0; @@ -8944,6 +9109,8 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) mac_addr = ar->mac_addr; else mac_addr = ab->mac_addr; + + mbssid_max_interfaces += TARGET_NUM_VDEVS; } wiphy->available_antennas_rx = antennas_rx; @@ -9036,7 +9203,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) wiphy->iftype_ext_capab = ath12k_iftypes_ext_capa; wiphy->num_iftype_ext_capab = ARRAY_SIZE(ath12k_iftypes_ext_capa); - wiphy->mbssid_max_interfaces = TARGET_NUM_VDEVS; + wiphy->mbssid_max_interfaces = mbssid_max_interfaces; wiphy->ema_max_profile_periodicity = TARGET_EMA_MAX_PROFILE_PERIOD; if (is_6ghz) { @@ -9056,6 +9223,24 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); } + if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) { + wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS; + wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS; + wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH; + wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS; + wiphy->max_sched_scan_plan_interval = + WMI_PNO_MAX_SCHED_SCAN_PLAN_INT; + wiphy->max_sched_scan_plan_iterations = + WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS; + wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR; + } + + ret = ath12k_wow_init(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to init wow: %d\n", ret); + goto err_free_if_combs; + } + ret = ieee80211_register_hw(hw); if (ret) { ath12k_err(ab, "ieee80211 registration failed: %d\n", ret); @@ -9077,13 +9262,16 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) ath12k_err(ar->ab, "ath12k regd update failed: 
%d\n", ret); goto err_unregister_hw; } - } - ath12k_debugfs_register(ar); + ath12k_debugfs_register(ar); + } return 0; err_unregister_hw: + for_each_ar(ah, ar, i) + ath12k_debugfs_unregister(ar); + ieee80211_unregister_hw(hw); err_free_if_combs: @@ -9216,7 +9404,6 @@ static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab, ah = ath12k_hw_to_ah(hw); ah->hw = hw; - ah->ab = ab; ah->num_radio = num_pdev_map; mutex_init(&ah->hw_mutex); @@ -9307,3 +9494,34 @@ err: return ret; } + +int ath12k_mac_vif_set_keepalive(struct ath12k_vif *arvif, + enum wmi_sta_keepalive_method method, + u32 interval) +{ + struct wmi_sta_keepalive_arg arg = {}; + struct ath12k *ar = arvif->ar; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + return 0; + + if (!test_bit(WMI_TLV_SERVICE_STA_KEEP_ALIVE, ar->ab->wmi_ab.svc_map)) + return 0; + + arg.vdev_id = arvif->vdev_id; + arg.enabled = 1; + arg.method = method; + arg.interval = interval; + + ret = ath12k_wmi_sta_keepalive(ar, &arg); + if (ret) { + ath12k_warn(ar->ab, "failed to set keepalive on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + return 0; +} diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h index 69fd282b9dd3..5efbb6822628 100644 --- a/drivers/net/wireless/ath/ath12k/mac.h +++ b/drivers/net/wireless/ath/ath12k/mac.h @@ -9,6 +9,7 @@ #include <net/mac80211.h> #include <net/cfg80211.h> +#include "wmi.h" struct ath12k; struct ath12k_base; @@ -81,5 +82,9 @@ int ath12k_mac_rfkill_config(struct ath12k *ar); int ath12k_mac_wait_tx_complete(struct ath12k *ar); void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb); void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id); +int ath12k_mac_vif_set_keepalive(struct ath12k_vif *arvif, + enum wmi_sta_keepalive_method method, + u32 interval); +u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar); #endif diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c index d6e1d1398cdb..9f6be557365e 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.c +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@ -233,7 +233,7 @@ void ath12k_wmi_init_qcn9274(struct ath12k_base *ab, config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt; if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map)) - config->dp_peer_meta_data_ver = TARGET_RX_PEER_METADATA_VER_V1B; + config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B; } void ath12k_wmi_init_wcn7850(struct ath12k_base *ab, @@ -3498,7 +3498,7 @@ ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cf wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params); wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count); wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count); - wmi_cfg->flags2 = le32_encode_bits(tg_cfg->dp_peer_meta_data_ver, + wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver, WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION); wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported << WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT); @@ -3717,6 +3717,7 @@ int ath12k_wmi_cmd_init(struct ath12k_base *ab) arg.res_cfg.is_reg_cc_ext_event_supported = true; ab->hw_params->wmi_init(ab, &arg.res_cfg); + ab->wow.wmi_conf_rx_decap_mode = arg.res_cfg.rx_decap_mode; arg.num_mem_chunks = wmi_ab->num_mem_chunks; arg.hw_mode_id = wmi_ab->preferred_hw_mode; @@ -3728,6 +3729,8 @@ int ath12k_wmi_cmd_init(struct ath12k_base 
*ab) arg.num_band_to_mac = ab->num_radios; ath12k_fill_band_to_mac_param(ab, arg.band_to_mac); + ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver; + return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg); } @@ -7030,6 +7033,116 @@ exit: kfree(tb); } +static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab, + u16 tag, u16 len, + const void *ptr, void *data) +{ + const struct wmi_wow_ev_pg_fault_param *pf_param; + const struct wmi_wow_ev_param *param; + struct wmi_wow_ev_arg *arg = data; + int pf_len; + + switch (tag) { + case WMI_TAG_WOW_EVENT_INFO: + param = ptr; + arg->wake_reason = le32_to_cpu(param->wake_reason); + ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n", + arg->wake_reason, wow_reason(arg->wake_reason)); + break; + + case WMI_TAG_ARRAY_BYTE: + if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) { + pf_param = ptr; + pf_len = le32_to_cpu(pf_param->len); + if (pf_len > len - sizeof(pf_len) || + pf_len < 0) { + ath12k_warn(ab, "invalid wow reason page fault buffer len %d\n", + pf_len); + return -EINVAL; + } + ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n", + pf_len); + ath12k_dbg_dump(ab, ATH12K_DBG_WMI, + "wow_reason_page_fault packet present", + "wow_pg_fault ", + pf_param->data, + pf_len); + } + break; + default: + break; + } + + return 0; +} + +static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb) +{ + struct wmi_wow_ev_arg arg = { }; + int ret; + + ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, + ath12k_wmi_wow_wakeup_host_parse, + &arg); + if (ret) { + ath12k_warn(ab, "failed to parse wmi wow wakeup host event tlv: %d\n", + ret); + return; + } + + complete(&ab->wow.wakeup_completed); +} + +static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab, + struct sk_buff *skb) +{ + const struct wmi_gtk_offload_status_event *ev; + struct ath12k_vif *arvif; + __be64 replay_ctr_be; + u64 replay_ctr; + const void **tb; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return; + } + + ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch gtk offload status ev\n"); + kfree(tb); + return; + } + + rcu_read_lock(); + arvif = ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id)); + if (!arvif) { + rcu_read_unlock(); + ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n", + le32_to_cpu(ev->vdev_id)); + kfree(tb); + return; + } + + replay_ctr = le64_to_cpu(ev->replay_ctr); + arvif->rekey_data.replay_ctr = replay_ctr; + ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n", + le32_to_cpu(ev->refresh_cnt), replay_ctr); + + /* supplicant expects big-endian replay counter */ + replay_ctr_be = cpu_to_be64(replay_ctr); + + ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid, + (void *)&replay_ctr_be, GFP_ATOMIC); + + rcu_read_unlock(); + + kfree(tb); +} + static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb) { struct wmi_cmd_hdr *cmd_hdr; @@ -7150,6 +7263,12 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb) case WMI_DIAG_EVENTID: ath12k_wmi_diag_event(ab, skb); break; + case WMI_WOW_WAKEUP_HOST_EVENTID: + ath12k_wmi_event_wow_wakeup_host(ab, skb); + break; + case WMI_GTK_OFFLOAD_STATUS_EVENTID: + ath12k_wmi_gtk_offload_status_event(ab, skb); + break; /* TODO: Add remaining events */ default: ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id); @@ 
-7359,3 +7478,608 @@ void ath12k_wmi_detach(struct ath12k_base *ab) ath12k_wmi_free_dbring_caps(ab); } + +int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg) +{ + struct wmi_hw_data_filter_cmd *cmd; + struct sk_buff *skb; + int len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_hw_data_filter_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(arg->vdev_id); + cmd->enable = cpu_to_le32(arg->enable ? 1 : 0); + + /* Set all modes in case of disable */ + if (arg->enable) + cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap); + else + cmd->hw_filter_bitmap = cpu_to_le32((u32)~0U); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "wmi hw data filter enable %d filter_bitmap 0x%x\n", + arg->enable, arg->hw_filter_bitmap); + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID); +} + +int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar) +{ + struct wmi_wow_host_wakeup_cmd *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD, + sizeof(*cmd)); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n"); + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID); +} + +int ath12k_wmi_wow_enable(struct ath12k *ar) +{ + struct wmi_wow_enable_cmd *cmd; + struct sk_buff *skb; + int len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_wow_enable_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD, + sizeof(*cmd)); + + cmd->enable = cpu_to_le32(1); + cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED); + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n"); + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID); +} + +int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id, + enum wmi_wow_wakeup_event event, + u32 enable) +{ + struct wmi_wow_add_del_event_cmd *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_wow_add_del_event_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->is_add = cpu_to_le32(enable); + cmd->event_bitmap = cpu_to_le32((1 << event)); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n", + wow_wakeup_event(event), enable, vdev_id); + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID); +} + +int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id, + const u8 *pattern, const u8 *mask, + int pattern_len, int pattern_offset) +{ + struct wmi_wow_add_pattern_cmd *cmd; + struct wmi_wow_bitmap_pattern_params *bitmap; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *ptr; + size_t len; + + len = sizeof(*cmd) + + sizeof(*tlv) + /* array struct */ + sizeof(*bitmap) + /* bitmap */ + sizeof(*tlv) + /* empty ipv4 sync */ + sizeof(*tlv) + /* empty ipv6 sync */ + sizeof(*tlv) + /* empty magic */ + sizeof(*tlv) + /* empty info timeout */ + sizeof(*tlv) + 
sizeof(u32); /* ratelimit interval */ + + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + /* cmd */ + ptr = skb->data; + cmd = ptr; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->pattern_id = cpu_to_le32(pattern_id); + cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN); + + ptr += sizeof(*cmd); + + /* bitmap */ + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap)); + + ptr += sizeof(*tlv); + + bitmap = ptr; + bitmap->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T, + sizeof(*bitmap)); + memcpy(bitmap->patternbuf, pattern, pattern_len); + memcpy(bitmap->bitmaskbuf, mask, pattern_len); + bitmap->pattern_offset = cpu_to_le32(pattern_offset); + bitmap->pattern_len = cpu_to_le32(pattern_len); + bitmap->bitmask_len = cpu_to_le32(pattern_len); + bitmap->pattern_id = cpu_to_le32(pattern_id); + + ptr += sizeof(*bitmap); + + /* ipv4 sync */ + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0); + + ptr += sizeof(*tlv); + + /* ipv6 sync */ + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0); + + ptr += sizeof(*tlv); + + /* magic */ + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0); + + ptr += sizeof(*tlv); + + /* pattern info timeout */ + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0); + + ptr += sizeof(*tlv); + + /* ratelimit interval */ + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32)); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n", + vdev_id, pattern_id, pattern_offset, pattern_len); + + ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ", + bitmap->patternbuf, pattern_len); + ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ", + bitmap->bitmaskbuf, pattern_len); + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID); +} + +int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id) +{ + struct wmi_wow_del_pattern_cmd *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_wow_del_pattern_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->pattern_id = cpu_to_le32(pattern_id); + cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n", + vdev_id, pattern_id); + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID); +} + +static struct sk_buff * +ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id, + struct wmi_pno_scan_req_arg *pno) +{ + struct nlo_configured_params *nlo_list; + size_t len, nlo_list_len, channel_list_len; + struct wmi_wow_nlo_config_cmd *cmd; + __le32 *channel_list; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *ptr; + u32 i; + + len = sizeof(*cmd) + + sizeof(*tlv) + + /* TLV place holder for array of structures + * nlo_configured_params(nlo_list) + */ + sizeof(*tlv); + /* TLV place holder for array of uint32 channel_list */ + + channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count; + len += channel_list_len; + + nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count; + len += nlo_list_len; + + skb = 
ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = skb->data; + cmd = ptr; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd)); + + cmd->vdev_id = cpu_to_le32(pno->vdev_id); + cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN); + + /* current FW does not support min-max range for dwell time */ + cmd->active_dwell_time = cpu_to_le32(pno->active_max_time); + cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time); + + if (pno->do_passive_scan) + cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE); + + cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period); + cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period); + cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles); + cmd->delay_start_time = cpu_to_le32(pno->delay_start_time); + + if (pno->enable_pno_scan_randomization) { + cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ | + WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ); + ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr); + ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask); + } + + ptr += sizeof(*cmd); + + /* nlo_configured_params(nlo_list) */ + cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count); + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len); + + ptr += sizeof(*tlv); + nlo_list = ptr; + for (i = 0; i < pno->uc_networks_count; i++) { + tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header); + tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE, + sizeof(*nlo_list)); + + nlo_list[i].ssid.valid = cpu_to_le32(1); + nlo_list[i].ssid.ssid.ssid_len = + cpu_to_le32(pno->a_networks[i].ssid.ssid_len); + memcpy(nlo_list[i].ssid.ssid.ssid, + pno->a_networks[i].ssid.ssid, + le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len)); + + if (pno->a_networks[i].rssi_threshold && + pno->a_networks[i].rssi_threshold > -300) { + nlo_list[i].rssi_cond.valid = cpu_to_le32(1); + nlo_list[i].rssi_cond.rssi = + cpu_to_le32(pno->a_networks[i].rssi_threshold); + } + + nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1); + nlo_list[i].bcast_nw_type.bcast_nw_type = + cpu_to_le32(pno->a_networks[i].bcast_nw_type); + } + + ptr += nlo_list_len; + cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count); + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len); + ptr += sizeof(*tlv); + channel_list = ptr; + + for (i = 0; i < pno->a_networks[0].channel_count; i++) + channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n", + vdev_id); + + return skb; +} + +static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar, + u32 vdev_id) +{ + struct wmi_wow_nlo_config_cmd *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_wow_nlo_config_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, len); + + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "wmi tlv stop pno config vdev_id %d\n", vdev_id); + return skb; +} + +int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id, + struct wmi_pno_scan_req_arg *pno_scan) +{ + struct sk_buff *skb; + + if (pno_scan->enable) + skb = ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan); + else + skb = 
ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id); + + if (IS_ERR_OR_NULL(skb)) + return -ENOMEM; + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID); +} + +static void ath12k_wmi_fill_ns_offload(struct ath12k *ar, + struct wmi_arp_ns_offload_arg *offload, + void **ptr, + bool enable, + bool ext) +{ + struct wmi_ns_offload_params *ns; + struct wmi_tlv *tlv; + void *buf_ptr = *ptr; + u32 ns_cnt, ns_ext_tuples; + int i, max_offloads; + + ns_cnt = offload->ipv6_count; + + tlv = buf_ptr; + + if (ext) { + ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, + ns_ext_tuples * sizeof(*ns)); + i = WMI_MAX_NS_OFFLOADS; + max_offloads = offload->ipv6_count; + } else { + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, + WMI_MAX_NS_OFFLOADS * sizeof(*ns)); + i = 0; + max_offloads = WMI_MAX_NS_OFFLOADS; + } + + buf_ptr += sizeof(*tlv); + + for (; i < max_offloads; i++) { + ns = buf_ptr; + ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE, + sizeof(*ns)); + + if (enable) { + if (i < ns_cnt) + ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID); + + memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16); + memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16); + + if (offload->ipv6_type[i]) + ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST); + + memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN); + + if (!is_zero_ether_addr(ns->target_mac.addr)) + ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "wmi index %d ns_solicited %pI6 target %pI6", + i, ns->solicitation_ipaddr, + ns->target_ipaddr[0]); + } + + buf_ptr += sizeof(*ns); + } + + *ptr = buf_ptr; +} + +static void ath12k_wmi_fill_arp_offload(struct ath12k *ar, + struct wmi_arp_ns_offload_arg *offload, + void **ptr, + bool enable) +{ + struct wmi_arp_offload_params *arp; + struct wmi_tlv *tlv; + void *buf_ptr = *ptr; + int i; + + /* fill arp tuple */ + tlv = buf_ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, + WMI_MAX_ARP_OFFLOADS * sizeof(*arp)); + buf_ptr += sizeof(*tlv); + + for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) { + arp = buf_ptr; + arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE, + sizeof(*arp)); + + if (enable && i < offload->ipv4_count) { + /* Copy the target ip addr and flags */ + arp->flags = cpu_to_le32(WMI_ARPOL_FLAGS_VALID); + memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4", + arp->target_ipaddr); + } + + buf_ptr += sizeof(*arp); + } + + *ptr = buf_ptr; +} + +int ath12k_wmi_arp_ns_offload(struct ath12k *ar, + struct ath12k_vif *arvif, + struct wmi_arp_ns_offload_arg *offload, + bool enable) +{ + struct wmi_set_arp_ns_offload_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *buf_ptr; + size_t len; + u8 ns_cnt, ns_ext_tuples = 0; + + ns_cnt = offload->ipv6_count; + + len = sizeof(*cmd) + + sizeof(*tlv) + + WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) + + sizeof(*tlv) + + WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params); + + if (ns_cnt > WMI_MAX_NS_OFFLOADS) { + ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS; + len += sizeof(*tlv) + + ns_ext_tuples * sizeof(struct wmi_ns_offload_params); + } + + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + buf_ptr = skb->data; + cmd = buf_ptr; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD, + sizeof(*cmd)); + cmd->flags = 
cpu_to_le32(0); + cmd->vdev_id = cpu_to_le32(arvif->vdev_id); + cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples); + + buf_ptr += sizeof(*cmd); + + ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 0); + ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable); + + if (ns_ext_tuples) + ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 1); + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID); +} + +int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar, + struct ath12k_vif *arvif, bool enable) +{ + struct ath12k_rekey_data *rekey_data = &arvif->rekey_data; + struct wmi_gtk_rekey_offload_cmd *cmd; + struct sk_buff *skb; + __le64 replay_ctr; + int len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(arvif->vdev_id); + + if (enable) { + cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE); + + /* the length in rekey_data and cmd is equal */ + memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck)); + memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek)); + + replay_ctr = cpu_to_le64(rekey_data->replay_ctr); + memcpy(cmd->replay_ctr, &replay_ctr, + sizeof(replay_ctr)); + } else { + cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE); + } + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n", + arvif->vdev_id, enable); + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID); +} + +int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar, + struct ath12k_vif *arvif) +{ + struct wmi_gtk_rekey_offload_cmd *cmd; + struct sk_buff *skb; + int len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(arvif->vdev_id); + cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n", + arvif->vdev_id); + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID); +} + +int ath12k_wmi_sta_keepalive(struct ath12k *ar, + const struct wmi_sta_keepalive_arg *arg) +{ + struct wmi_sta_keepalive_arp_resp_params *arp; + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_sta_keepalive_cmd *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd) + sizeof(*arp); + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_sta_keepalive_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(arg->vdev_id); + cmd->enabled = cpu_to_le32(arg->enabled); + cmd->interval = cpu_to_le32(arg->interval); + cmd->method = cpu_to_le32(arg->method); + + arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1); + arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE, + sizeof(*arp)); + if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE || + arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) { + arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr); + arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr); + ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr); + } + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "wmi sta keepalive vdev %d enabled %d method %d interval %d\n", + arg->vdev_id, 
arg->enabled, arg->method, arg->interval); + + return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID); +} diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h index c2b86e187a03..f1f52175a52b 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.h +++ b/drivers/net/wireless/ath/ath12k/wmi.h @@ -24,6 +24,7 @@ struct ath12k_base; struct ath12k; +struct ath12k_vif; /* There is no signed version of __le32, so for a temporary solution come * up with our own version. The idea is from fs/ntfs/endian.h. @@ -2293,6 +2294,13 @@ struct ath12k_wmi_host_mem_chunk_arg { u32 req_id; }; +enum ath12k_peer_metadata_version { + ATH12K_PEER_METADATA_V0, + ATH12K_PEER_METADATA_V1, + ATH12K_PEER_METADATA_V1A, + ATH12K_PEER_METADATA_V1B +}; + struct ath12k_wmi_resource_config_arg { u32 num_vdevs; u32 num_peers; @@ -2355,10 +2363,10 @@ struct ath12k_wmi_resource_config_arg { u32 sched_params; u32 twt_ap_pdev_count; u32 twt_ap_sta_count; - bool is_reg_cc_ext_event_supported; - u8 dp_peer_meta_data_ver; + enum ath12k_peer_metadata_version peer_metadata_ver; u32 ema_max_vap_cnt; u32 ema_max_profile_period; + bool is_reg_cc_ext_event_supported; }; struct ath12k_wmi_init_cmd_arg { @@ -4902,6 +4910,556 @@ struct wmi_twt_disable_event { __le32 status; } __packed; +/* WOW structures */ +enum wmi_wow_wakeup_event { + WOW_BMISS_EVENT = 0, + WOW_BETTER_AP_EVENT, + WOW_DEAUTH_RECVD_EVENT, + WOW_MAGIC_PKT_RECVD_EVENT, + WOW_GTK_ERR_EVENT, + WOW_FOURWAY_HSHAKE_EVENT, + WOW_EAPOL_RECVD_EVENT, + WOW_NLO_DETECTED_EVENT, + WOW_DISASSOC_RECVD_EVENT, + WOW_PATTERN_MATCH_EVENT, + WOW_CSA_IE_EVENT, + WOW_PROBE_REQ_WPS_IE_EVENT, + WOW_AUTH_REQ_EVENT, + WOW_ASSOC_REQ_EVENT, + WOW_HTT_EVENT, + WOW_RA_MATCH_EVENT, + WOW_HOST_AUTO_SHUTDOWN_EVENT, + WOW_IOAC_MAGIC_EVENT, + WOW_IOAC_SHORT_EVENT, + WOW_IOAC_EXTEND_EVENT, + WOW_IOAC_TIMER_EVENT, + WOW_DFS_PHYERR_RADAR_EVENT, + WOW_BEACON_EVENT, + WOW_CLIENT_KICKOUT_EVENT, + WOW_EVENT_MAX, +}; + +enum wmi_wow_interface_cfg { + WOW_IFACE_PAUSE_ENABLED, + WOW_IFACE_PAUSE_DISABLED +}; + +#define C2S(x) case x: return #x + +static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev) +{ + switch (ev) { + C2S(WOW_BMISS_EVENT); + C2S(WOW_BETTER_AP_EVENT); + C2S(WOW_DEAUTH_RECVD_EVENT); + C2S(WOW_MAGIC_PKT_RECVD_EVENT); + C2S(WOW_GTK_ERR_EVENT); + C2S(WOW_FOURWAY_HSHAKE_EVENT); + C2S(WOW_EAPOL_RECVD_EVENT); + C2S(WOW_NLO_DETECTED_EVENT); + C2S(WOW_DISASSOC_RECVD_EVENT); + C2S(WOW_PATTERN_MATCH_EVENT); + C2S(WOW_CSA_IE_EVENT); + C2S(WOW_PROBE_REQ_WPS_IE_EVENT); + C2S(WOW_AUTH_REQ_EVENT); + C2S(WOW_ASSOC_REQ_EVENT); + C2S(WOW_HTT_EVENT); + C2S(WOW_RA_MATCH_EVENT); + C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT); + C2S(WOW_IOAC_MAGIC_EVENT); + C2S(WOW_IOAC_SHORT_EVENT); + C2S(WOW_IOAC_EXTEND_EVENT); + C2S(WOW_IOAC_TIMER_EVENT); + C2S(WOW_DFS_PHYERR_RADAR_EVENT); + C2S(WOW_BEACON_EVENT); + C2S(WOW_CLIENT_KICKOUT_EVENT); + C2S(WOW_EVENT_MAX); + default: + return NULL; + } +} + +enum wmi_wow_wake_reason { + WOW_REASON_UNSPECIFIED = -1, + WOW_REASON_NLOD = 0, + WOW_REASON_AP_ASSOC_LOST, + WOW_REASON_LOW_RSSI, + WOW_REASON_DEAUTH_RECVD, + WOW_REASON_DISASSOC_RECVD, + WOW_REASON_GTK_HS_ERR, + WOW_REASON_EAP_REQ, + WOW_REASON_FOURWAY_HS_RECV, + WOW_REASON_TIMER_INTR_RECV, + WOW_REASON_PATTERN_MATCH_FOUND, + WOW_REASON_RECV_MAGIC_PATTERN, + WOW_REASON_P2P_DISC, + WOW_REASON_WLAN_HB, + WOW_REASON_CSA_EVENT, + WOW_REASON_PROBE_REQ_WPS_IE_RECV, + WOW_REASON_AUTH_REQ_RECV, + WOW_REASON_ASSOC_REQ_RECV, + WOW_REASON_HTT_EVENT, + WOW_REASON_RA_MATCH, + 
WOW_REASON_HOST_AUTO_SHUTDOWN, + WOW_REASON_IOAC_MAGIC_EVENT, + WOW_REASON_IOAC_SHORT_EVENT, + WOW_REASON_IOAC_EXTEND_EVENT, + WOW_REASON_IOAC_TIMER_EVENT, + WOW_REASON_ROAM_HO, + WOW_REASON_DFS_PHYERR_RADADR_EVENT, + WOW_REASON_BEACON_RECV, + WOW_REASON_CLIENT_KICKOUT_EVENT, + WOW_REASON_PAGE_FAULT = 0x3a, + WOW_REASON_DEBUG_TEST = 0xFF, +}; + +static inline const char *wow_reason(enum wmi_wow_wake_reason reason) +{ + switch (reason) { + C2S(WOW_REASON_UNSPECIFIED); + C2S(WOW_REASON_NLOD); + C2S(WOW_REASON_AP_ASSOC_LOST); + C2S(WOW_REASON_LOW_RSSI); + C2S(WOW_REASON_DEAUTH_RECVD); + C2S(WOW_REASON_DISASSOC_RECVD); + C2S(WOW_REASON_GTK_HS_ERR); + C2S(WOW_REASON_EAP_REQ); + C2S(WOW_REASON_FOURWAY_HS_RECV); + C2S(WOW_REASON_TIMER_INTR_RECV); + C2S(WOW_REASON_PATTERN_MATCH_FOUND); + C2S(WOW_REASON_RECV_MAGIC_PATTERN); + C2S(WOW_REASON_P2P_DISC); + C2S(WOW_REASON_WLAN_HB); + C2S(WOW_REASON_CSA_EVENT); + C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV); + C2S(WOW_REASON_AUTH_REQ_RECV); + C2S(WOW_REASON_ASSOC_REQ_RECV); + C2S(WOW_REASON_HTT_EVENT); + C2S(WOW_REASON_RA_MATCH); + C2S(WOW_REASON_HOST_AUTO_SHUTDOWN); + C2S(WOW_REASON_IOAC_MAGIC_EVENT); + C2S(WOW_REASON_IOAC_SHORT_EVENT); + C2S(WOW_REASON_IOAC_EXTEND_EVENT); + C2S(WOW_REASON_IOAC_TIMER_EVENT); + C2S(WOW_REASON_ROAM_HO); + C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT); + C2S(WOW_REASON_BEACON_RECV); + C2S(WOW_REASON_CLIENT_KICKOUT_EVENT); + C2S(WOW_REASON_PAGE_FAULT); + C2S(WOW_REASON_DEBUG_TEST); + default: + return NULL; + } +} + +#undef C2S + +#define WOW_DEFAULT_BITMAP_PATTERN_SIZE 148 +#define WOW_DEFAULT_BITMASK_SIZE 148 + +#define WOW_MIN_PATTERN_SIZE 1 +#define WOW_MAX_PATTERN_SIZE 148 +#define WOW_MAX_PKT_OFFSET 128 +#define WOW_HDR_LEN (sizeof(struct ieee80211_hdr_3addr) + \ + sizeof(struct rfc1042_hdr)) +#define WOW_MAX_REDUCE (WOW_HDR_LEN - sizeof(struct ethhdr) - \ + offsetof(struct ieee80211_hdr_3addr, addr1)) + +struct wmi_wow_bitmap_pattern_params { + __le32 tlv_header; + u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE]; + u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE]; + __le32 pattern_offset; + __le32 pattern_len; + __le32 bitmask_len; + __le32 pattern_id; +} __packed; + +struct wmi_wow_add_pattern_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 pattern_id; + __le32 pattern_type; +} __packed; + +struct wmi_wow_del_pattern_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 pattern_id; + __le32 pattern_type; +} __packed; + +enum wmi_tlv_pattern_type { + WOW_PATTERN_MIN = 0, + WOW_BITMAP_PATTERN = WOW_PATTERN_MIN, + WOW_IPV4_SYNC_PATTERN, + WOW_IPV6_SYNC_PATTERN, + WOW_WILD_CARD_PATTERN, + WOW_TIMER_PATTERN, + WOW_MAGIC_PATTERN, + WOW_IPV6_RA_PATTERN, + WOW_IOAC_PKT_PATTERN, + WOW_IOAC_TMR_PATTERN, + WOW_PATTERN_MAX +}; + +struct wmi_wow_add_del_event_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 is_add; + __le32 event_bitmap; +} __packed; + +struct wmi_wow_enable_cmd { + __le32 tlv_header; + __le32 enable; + __le32 pause_iface_config; + __le32 flags; +} __packed; + +struct wmi_wow_host_wakeup_cmd { + __le32 tlv_header; + __le32 reserved; +} __packed; + +struct wmi_wow_ev_param { + __le32 vdev_id; + __le32 flag; + __le32 wake_reason; + __le32 data_len; +} __packed; + +struct wmi_wow_ev_pg_fault_param { + __le32 len; + u8 data[]; +} __packed; + +struct wmi_wow_ev_arg { + enum wmi_wow_wake_reason wake_reason; +}; + +#define WMI_PNO_MAX_SCHED_SCAN_PLANS 2 +#define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT 7200 +#define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100 +#define WMI_PNO_MAX_NETW_CHANNELS 26 +#define WMI_PNO_MAX_NETW_CHANNELS_EX 60 
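+ +/* These limits are enforced by ath12k_wow_pno_check_and_convert(): a + * netdetect request with more match sets than WMI_PNO_MAX_SUPP_NETWORKS + * or more channels than WMI_PNO_MAX_NETW_CHANNELS_EX is rejected with + * -EINVAL. + */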
+#define WMI_PNO_MAX_SUPP_NETWORKS WLAN_SCAN_PARAMS_MAX_SSID +#define WMI_PNO_MAX_IE_LENGTH WLAN_SCAN_PARAMS_MAX_IE_LEN + +/* size based on the dot11 declaration without extra IEs as we will not carry those for PNO */ +#define WMI_PNO_MAX_PB_REQ_SIZE 450 + +#define WMI_PNO_24GHZ_DEFAULT_CH 1 +#define WMI_PNO_5GHZ_DEFAULT_CH 36 + +#define WMI_ACTIVE_MAX_CHANNEL_TIME 40 +#define WMI_PASSIVE_MAX_CHANNEL_TIME 110 + +/* SSID broadcast type */ +enum wmi_ssid_bcast_type { + BCAST_UNKNOWN = 0, + BCAST_NORMAL = 1, + BCAST_HIDDEN = 2, +}; + +#define WMI_NLO_MAX_SSIDS 16 +#define WMI_NLO_MAX_CHAN 48 + +#define WMI_NLO_CONFIG_STOP BIT(0) +#define WMI_NLO_CONFIG_START BIT(1) +#define WMI_NLO_CONFIG_RESET BIT(2) +#define WMI_NLO_CONFIG_SLOW_SCAN BIT(4) +#define WMI_NLO_CONFIG_FAST_SCAN BIT(5) +#define WMI_NLO_CONFIG_SSID_HIDE_EN BIT(6) + +/* This bit is used to indicate if EPNO or supplicant PNO is enabled. + * Only one of them can be enabled at a given time + */ +#define WMI_NLO_CONFIG_ENLO BIT(7) +#define WMI_NLO_CONFIG_SCAN_PASSIVE BIT(8) +#define WMI_NLO_CONFIG_ENLO_RESET BIT(9) +#define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ BIT(10) +#define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ BIT(11) +#define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ BIT(12) +#define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG BIT(13) + +struct wmi_nlo_ssid_params { + __le32 valid; + struct ath12k_wmi_ssid_params ssid; +} __packed; + +struct wmi_nlo_enc_params { + __le32 valid; + __le32 enc_type; +} __packed; + +struct wmi_nlo_auth_params { + __le32 valid; + __le32 auth_type; +} __packed; + +struct wmi_nlo_bcast_nw_params { + __le32 valid; + __le32 bcast_nw_type; +} __packed; + +struct wmi_nlo_rssi_params { + __le32 valid; + __le32 rssi; +} __packed; + +struct nlo_configured_params { + /* TLV tag and len;*/ + __le32 tlv_header; + struct wmi_nlo_ssid_params ssid; + struct wmi_nlo_enc_params enc_type; + struct wmi_nlo_auth_params auth_type; + struct wmi_nlo_rssi_params rssi_cond; + + /* indicates if the SSID is hidden or not */ + struct wmi_nlo_bcast_nw_params bcast_nw_type; +} __packed; + +struct wmi_network_type_arg { + struct cfg80211_ssid ssid; + u32 authentication; + u32 encryption; + u32 bcast_nw_type; + u8 channel_count; + u16 channels[WMI_PNO_MAX_NETW_CHANNELS_EX]; + s32 rssi_threshold; +}; + +struct wmi_pno_scan_req_arg { + u8 enable; + u8 vdev_id; + u8 uc_networks_count; + struct wmi_network_type_arg a_networks[WMI_PNO_MAX_SUPP_NETWORKS]; + u32 fast_scan_period; + u32 slow_scan_period; + u8 fast_scan_max_cycles; + + bool do_passive_scan; + + u32 delay_start_time; + u32 active_min_time; + u32 active_max_time; + u32 passive_min_time; + u32 passive_max_time; + + /* mac address randomization attributes */ + u32 enable_pno_scan_randomization; + u8 mac_addr[ETH_ALEN]; + u8 mac_addr_mask[ETH_ALEN]; +}; + +struct wmi_wow_nlo_config_cmd { + __le32 tlv_header; + __le32 flags; + __le32 vdev_id; + __le32 fast_scan_max_cycles; + __le32 active_dwell_time; + __le32 passive_dwell_time; + __le32 probe_bundle_size; + + /* ART = IRT */ + __le32 rest_time; + + /* max value that can be reached after scan_backoff_multiplier */ + __le32 max_rest_time; + + __le32 scan_backoff_multiplier; + __le32 fast_scan_period; + + /* specific to Windows */ + __le32 slow_scan_period; + + __le32 no_of_ssids; + + __le32 num_of_channels; + + /* NLO scan start delay time in milliseconds */ + __le32 delay_start_time; + + /* MAC Address to use in Probe Req as SA */ + struct ath12k_wmi_mac_addr_params mac_addr; + + /* Mask on which MAC has to be randomized */ + 
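/* Per the nl80211 scan-randomisation convention (assumed here to apply + * to the WMI mask as well): bits set in mac_mask are taken from mac_addr, + * cleared bits are randomised by the firmware. + */ + 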
struct ath12k_wmi_mac_addr_params mac_mask; + + /* IE bitmap to use in Probe Req */ + __le32 ie_bitmap[8]; + + /* Number of vendor OUIs. In the TLV vendor_oui[] */ + __le32 num_vendor_oui; + + /* Number of connected NLO band preferences */ + __le32 num_cnlo_band_pref; + + /* The TLVs will follow. + * nlo_configured_params nlo_list[]; + * u32 channel_list[num_of_channels]; + */ +} __packed; + +/* Definition of HW data filtering */ +enum hw_data_filter_type { + WMI_HW_DATA_FILTER_DROP_NON_ARP_BC = BIT(0), + WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC = BIT(1), +}; + +struct wmi_hw_data_filter_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 enable; + __le32 hw_filter_bitmap; +} __packed; + +struct wmi_hw_data_filter_arg { + u32 vdev_id; + bool enable; + u32 hw_filter_bitmap; +}; + +#define WMI_IPV6_UC_TYPE 0 +#define WMI_IPV6_AC_TYPE 1 + +#define WMI_IPV6_MAX_COUNT 16 +#define WMI_IPV4_MAX_COUNT 2 + +struct wmi_arp_ns_offload_arg { + u8 ipv4_addr[WMI_IPV4_MAX_COUNT][4]; + u32 ipv4_count; + u32 ipv6_count; + u8 ipv6_addr[WMI_IPV6_MAX_COUNT][16]; + u8 self_ipv6_addr[WMI_IPV6_MAX_COUNT][16]; + u8 ipv6_type[WMI_IPV6_MAX_COUNT]; + bool ipv6_valid[WMI_IPV6_MAX_COUNT]; + u8 mac_addr[ETH_ALEN]; +}; + +#define WMI_MAX_NS_OFFLOADS 2 +#define WMI_MAX_ARP_OFFLOADS 2 + +#define WMI_ARPOL_FLAGS_VALID BIT(0) +#define WMI_ARPOL_FLAGS_MAC_VALID BIT(1) +#define WMI_ARPOL_FLAGS_REMOTE_IP_VALID BIT(2) + +struct wmi_arp_offload_params { + __le32 tlv_header; + __le32 flags; + u8 target_ipaddr[4]; + u8 remote_ipaddr[4]; + struct ath12k_wmi_mac_addr_params target_mac; +} __packed; + +#define WMI_NSOL_FLAGS_VALID BIT(0) +#define WMI_NSOL_FLAGS_MAC_VALID BIT(1) +#define WMI_NSOL_FLAGS_REMOTE_IP_VALID BIT(2) +#define WMI_NSOL_FLAGS_IS_IPV6_ANYCAST BIT(3) + +#define WMI_NSOL_MAX_TARGET_IPS 2 + +struct wmi_ns_offload_params { + __le32 tlv_header; + __le32 flags; + u8 target_ipaddr[WMI_NSOL_MAX_TARGET_IPS][16]; + u8 solicitation_ipaddr[16]; + u8 remote_ipaddr[16]; + struct ath12k_wmi_mac_addr_params target_mac; +} __packed; + +struct wmi_set_arp_ns_offload_cmd { + __le32 tlv_header; + __le32 flags; + __le32 vdev_id; + __le32 num_ns_ext_tuples; + /* The TLVs follow: + * wmi_ns_offload_params ns[WMI_MAX_NS_OFFLOADS]; + * wmi_arp_offload_params arp[WMI_MAX_ARP_OFFLOADS]; + * wmi_ns_offload_params ns_ext[num_ns_ext_tuples]; + */ +} __packed; + +#define GTK_OFFLOAD_OPCODE_MASK 0xFF000000 +#define GTK_OFFLOAD_ENABLE_OPCODE 0x01000000 +#define GTK_OFFLOAD_DISABLE_OPCODE 0x02000000 +#define GTK_OFFLOAD_REQUEST_STATUS_OPCODE 0x04000000 + +#define GTK_OFFLOAD_KEK_BYTES 16 +#define GTK_OFFLOAD_KCK_BYTES 16 +#define GTK_REPLAY_COUNTER_BYTES 8 +#define WMI_MAX_KEY_LEN 32 +#define IGTK_PN_SIZE 6 + +struct wmi_gtk_offload_status_event { + __le32 vdev_id; + __le32 flags; + __le32 refresh_cnt; + __le64 replay_ctr; + u8 igtk_key_index; + u8 igtk_key_length; + u8 igtk_key_rsc[IGTK_PN_SIZE]; + u8 igtk_key[WMI_MAX_KEY_LEN]; + u8 gtk_key_index; + u8 gtk_key_length; + u8 gtk_key_rsc[GTK_REPLAY_COUNTER_BYTES]; + u8 gtk_key[WMI_MAX_KEY_LEN]; +} __packed; + +struct wmi_gtk_rekey_offload_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 flags; + u8 kek[GTK_OFFLOAD_KEK_BYTES]; + u8 kck[GTK_OFFLOAD_KCK_BYTES]; + u8 replay_ctr[GTK_REPLAY_COUNTER_BYTES]; +} __packed; + +struct wmi_sta_keepalive_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 enabled; + + /* WMI_STA_KEEPALIVE_METHOD_ */ + __le32 method; + + /* in seconds */ + __le32 interval; + + /* following this structure is the TLV for struct + * wmi_sta_keepalive_arp_resp_params + */ +} 
__packed; + +struct wmi_sta_keepalive_arp_resp_params { + __le32 tlv_header; + __le32 src_ip4_addr; + __le32 dest_ip4_addr; + struct ath12k_wmi_mac_addr_params dest_mac_addr; +} __packed; + +struct wmi_sta_keepalive_arg { + u32 vdev_id; + u32 enabled; + u32 method; + u32 interval; + u32 src_ip4_addr; + u32 dest_ip4_addr; + const u8 dest_mac_addr[ETH_ALEN]; +}; + +enum wmi_sta_keepalive_method { + WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1, + WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE = 2, + WMI_STA_KEEPALIVE_METHOD_ETHERNET_LOOPBACK = 3, + WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST = 4, + WMI_STA_KEEPALIVE_METHOD_MGMT_VENDOR_ACTION = 5, +}; + +#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30 +#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0 + void ath12k_wmi_init_qcn9274(struct ath12k_base *ab, struct ath12k_wmi_resource_config_arg *config); void ath12k_wmi_init_wcn7850(struct ath12k_base *ab, @@ -5054,4 +5612,28 @@ ath12k_wmi_mac_phy_get_hw_link_id(const struct ath12k_wmi_mac_phy_caps_params *p WMI_CAPS_PARAMS_HW_LINK_ID); } +int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar); +int ath12k_wmi_wow_enable(struct ath12k *ar); +int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id); +int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id, + const u8 *pattern, const u8 *mask, + int pattern_len, int pattern_offset); +int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id, + enum wmi_wow_wakeup_event event, + u32 enable); +int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id, + struct wmi_pno_scan_req_arg *pno_scan); +int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, + struct wmi_hw_data_filter_arg *arg); +int ath12k_wmi_arp_ns_offload(struct ath12k *ar, + struct ath12k_vif *arvif, + struct wmi_arp_ns_offload_arg *offload, + bool enable); +int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar, + struct ath12k_vif *arvif, bool enable); +int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar, + struct ath12k_vif *arvif); +int ath12k_wmi_sta_keepalive(struct ath12k *ar, + const struct wmi_sta_keepalive_arg *arg); + #endif diff --git a/drivers/net/wireless/ath/ath12k/wow.c b/drivers/net/wireless/ath/ath12k/wow.c new file mode 100644 index 000000000000..c5cba825a84a --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/wow.c @@ -0,0 +1,1026 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include <linux/delay.h> +#include <linux/inetdevice.h> +#include <net/addrconf.h> +#include <net/if_inet6.h> +#include <net/ipv6.h> + +#include "mac.h" + +#include <net/mac80211.h> +#include "core.h" +#include "hif.h" +#include "debug.h" +#include "wmi.h" +#include "wow.h" + +static const struct wiphy_wowlan_support ath12k_wowlan_support = { + .flags = WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_GTK_REKEY_FAILURE, + .pattern_min_len = WOW_MIN_PATTERN_SIZE, + .pattern_max_len = WOW_MAX_PATTERN_SIZE, + .max_pkt_offset = WOW_MAX_PKT_OFFSET, +}; + +static inline bool ath12k_wow_is_p2p_vdev(struct ath12k_vif *arvif) +{ + return (arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_DEVICE || + arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_CLIENT || + arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_GO); +} + +int ath12k_wow_enable(struct ath12k *ar) +{ + struct ath12k_base *ab = ar->ab; + int i, ret; + + clear_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags); + + /* The firmware might be busy and cannot enter WoW immediately. + * In that case the firmware notifies the host with an + * ATH12K_HTC_MSG_NACK_SUSPEND message, asking the host to try again + * later. Per the firmware team there could be up to 10 loops. + */ + for (i = 0; i < ATH12K_WOW_RETRY_NUM; i++) { + reinit_completion(&ab->htc_suspend); + + ret = ath12k_wmi_wow_enable(ar); + if (ret) { + ath12k_warn(ab, "failed to issue wow enable: %d\n", ret); + return ret; + } + + ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ); + if (ret == 0) { + ath12k_warn(ab, + "timed out while waiting for htc suspend completion\n"); + return -ETIMEDOUT; + } + + if (test_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags)) + /* success, suspend complete received */ + return 0; + + ath12k_warn(ab, "htc suspend not complete, retrying (try %d)\n", + i); + msleep(ATH12K_WOW_RETRY_WAIT_MS); + } + + ath12k_warn(ab, "htc suspend not complete, failing after %d tries\n", i); + + return -ETIMEDOUT; +} + +int ath12k_wow_wakeup(struct ath12k *ar) +{ + struct ath12k_base *ab = ar->ab; + int ret; + + reinit_completion(&ab->wow.wakeup_completed); + + ret = ath12k_wmi_wow_host_wakeup_ind(ar); + if (ret) { + ath12k_warn(ab, "failed to send wow wakeup indication: %d\n", + ret); + return ret; + } + + ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ); + if (ret == 0) { + ath12k_warn(ab, "timed out while waiting for wow wakeup completion\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int ath12k_wow_vif_cleanup(struct ath12k_vif *arvif) +{ + struct ath12k *ar = arvif->ar; + int i, ret; + + for (i = 0; i < WOW_EVENT_MAX; i++) { + ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0); + if (ret) { + ath12k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n", + wow_wakeup_event(i), arvif->vdev_id, ret); + return ret; + } + } + + for (i = 0; i < ar->wow.max_num_patterns; i++) { + ret = ath12k_wmi_wow_del_pattern(ar, arvif->vdev_id, i); + if (ret) { + ath12k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n", + i, arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static int ath12k_wow_cleanup(struct ath12k *ar) +{ + struct ath12k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + ret = ath12k_wow_vif_cleanup(arvif); + if (ret) { + ath12k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +/* 
Convert an 802.3 format to an 802.11 format. + * +------------+-----------+--------+----------------+ + * 802.3: |dest mac(6B)|src mac(6B)|type(2B)| body... | + * +------------+-----------+--------+----------------+ + * |__ |_______ |____________ |________ + * | | | | + * +--+------------+----+-----------+---------------+-----------+ + * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... | + * +--+------------+----+-----------+---------------+-----------+ + */ +static void +ath12k_wow_convert_8023_to_80211(struct ath12k *ar, + const struct cfg80211_pkt_pattern *eth_pattern, + struct ath12k_pkt_pattern *i80211_pattern) +{ + size_t r1042_eth_ofs = offsetof(struct rfc1042_hdr, eth_type); + size_t a1_ofs = offsetof(struct ieee80211_hdr_3addr, addr1); + size_t a3_ofs = offsetof(struct ieee80211_hdr_3addr, addr3); + size_t i80211_hdr_len = sizeof(struct ieee80211_hdr_3addr); + size_t prot_ofs = offsetof(struct ethhdr, h_proto); + size_t src_ofs = offsetof(struct ethhdr, h_source); + u8 eth_bytemask[WOW_MAX_PATTERN_SIZE] = {}; + const u8 *eth_pat = eth_pattern->pattern; + size_t eth_pat_len = eth_pattern->pattern_len; + size_t eth_pkt_ofs = eth_pattern->pkt_offset; + u8 *bytemask = i80211_pattern->bytemask; + u8 *pat = i80211_pattern->pattern; + size_t pat_len = 0; + size_t pkt_ofs = 0; + size_t delta; + int i; + + /* convert bitmask to bytemask */ + for (i = 0; i < eth_pat_len; i++) + if (eth_pattern->mask[i / 8] & BIT(i % 8)) + eth_bytemask[i] = 0xff; + + if (eth_pkt_ofs < ETH_ALEN) { + pkt_ofs = eth_pkt_ofs + a1_ofs; + + if (eth_pkt_ofs + eth_pat_len < ETH_ALEN) { + memcpy(pat, eth_pat, eth_pat_len); + memcpy(bytemask, eth_bytemask, eth_pat_len); + + pat_len = eth_pat_len; + } else if (eth_pkt_ofs + eth_pat_len < prot_ofs) { + memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs); + memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs); + + delta = eth_pkt_ofs + eth_pat_len - src_ofs; + memcpy(pat + a3_ofs - pkt_ofs, + eth_pat + ETH_ALEN - eth_pkt_ofs, + delta); + memcpy(bytemask + a3_ofs - pkt_ofs, + eth_bytemask + ETH_ALEN - eth_pkt_ofs, + delta); + + pat_len = a3_ofs - pkt_ofs + delta; + } else { + memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs); + memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs); + + memcpy(pat + a3_ofs - pkt_ofs, + eth_pat + ETH_ALEN - eth_pkt_ofs, + ETH_ALEN); + memcpy(bytemask + a3_ofs - pkt_ofs, + eth_bytemask + ETH_ALEN - eth_pkt_ofs, + ETH_ALEN); + + delta = eth_pkt_ofs + eth_pat_len - prot_ofs; + memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs, + eth_pat + prot_ofs - eth_pkt_ofs, + delta); + memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs, + eth_bytemask + prot_ofs - eth_pkt_ofs, + delta); + + pat_len = i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta; + } + } else if (eth_pkt_ofs < prot_ofs) { + pkt_ofs = eth_pkt_ofs - ETH_ALEN + a3_ofs; + + if (eth_pkt_ofs + eth_pat_len < prot_ofs) { + memcpy(pat, eth_pat, eth_pat_len); + memcpy(bytemask, eth_bytemask, eth_pat_len); + + pat_len = eth_pat_len; + } else { + memcpy(pat, eth_pat, prot_ofs - eth_pkt_ofs); + memcpy(bytemask, eth_bytemask, prot_ofs - eth_pkt_ofs); + + delta = eth_pkt_ofs + eth_pat_len - prot_ofs; + memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs, + eth_pat + prot_ofs - eth_pkt_ofs, + delta); + memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs, + eth_bytemask + prot_ofs - eth_pkt_ofs, + delta); + + pat_len = i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta; + } + } else { + pkt_ofs = eth_pkt_ofs - prot_ofs + i80211_hdr_len + r1042_eth_ofs; + + memcpy(pat, eth_pat, eth_pat_len); + memcpy(bytemask, eth_bytemask, eth_pat_len); + + pat_len = eth_pat_len; + } + + i80211_pattern->pattern_len = pat_len; + i80211_pattern->pkt_offset = pkt_ofs; +}
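+ +/* Worked example for the conversion above, assuming a 3-addr data frame: + * a pattern starting at ethernet offset 0 (destination MAC) keeps its + * bytes but moves to 802.11 offset 4 (addr1); a pattern starting at the + * ethertype (offset 12) lands after the 24-byte 802.11 header plus the + * 6 LLC/SNAP bytes that precede the ethertype, i.e. at offset 30. + */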
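+ +/* Example of the scan-plan mapping below (values assumed for illustration): + * plans {interval 10 s x 3 iterations, interval 60 s} become + * fast_scan_period = 10000 ms, fast_scan_max_cycles = 3 and + * slow_scan_period = 60000 ms. + */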
+static int ath12k_wow_vif_set_wakeups(struct ath12k_vif *arvif,
+				       struct cfg80211_wowlan *wowlan)
+{
+	const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
+	struct ath12k *ar = arvif->ar;
+	unsigned long wow_mask = 0;
+	int pattern_id = 0;
+	int ret, i, j;
+
+	/* Setup requested WOW features */
+	switch (arvif->vdev_type) {
+	case WMI_VDEV_TYPE_IBSS:
+		__set_bit(WOW_BEACON_EVENT, &wow_mask);
+		fallthrough;
+	case WMI_VDEV_TYPE_AP:
+		__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+		__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+		__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
+		__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
+		__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
+		__set_bit(WOW_HTT_EVENT, &wow_mask);
+		__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
+		break;
+	case WMI_VDEV_TYPE_STA:
+		if (wowlan->disconnect) {
+			__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+			__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+			__set_bit(WOW_BMISS_EVENT, &wow_mask);
+			__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
+		}
+
+		if (wowlan->magic_pkt)
+			__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
+
+		if (wowlan->nd_config) {
+			struct wmi_pno_scan_req_arg *pno;
+			int ret;
+
+			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+			if (!pno)
+				return -ENOMEM;
+
+			ar->nlo_enabled = true;
+
+			ret = ath12k_wow_pno_check_and_convert(ar, arvif->vdev_id,
+							       wowlan->nd_config, pno);
+			if (!ret) {
+				ath12k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
+				__set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
+			}
+
+			kfree(pno);
+		}
+		break;
+	default:
+		break;
+	}
+
+	for (i = 0; i < wowlan->n_patterns; i++) {
+		const struct cfg80211_pkt_pattern *eth_pattern = &patterns[i];
+		struct ath12k_pkt_pattern new_pattern = {};
+
+		if (WARN_ON(eth_pattern->pattern_len > WOW_MAX_PATTERN_SIZE))
+			return -EINVAL;
+
+		if (ar->ab->wow.wmi_conf_rx_decap_mode ==
+		    ATH12K_HW_TXRX_NATIVE_WIFI) {
+			ath12k_wow_convert_8023_to_80211(ar, eth_pattern,
+							 &new_pattern);
+
+			if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
+				return -EINVAL;
+		} else {
+			memcpy(new_pattern.pattern, eth_pattern->pattern,
+			       eth_pattern->pattern_len);
+
+			/* convert bitmask to bytemask; use j so the outer
+			 * pattern loop counter is not clobbered
+			 */
+			for (j = 0; j < eth_pattern->pattern_len; j++)
+				if (eth_pattern->mask[j / 8] & BIT(j % 8))
+					new_pattern.bytemask[j] = 0xff;
+
+			new_pattern.pattern_len = eth_pattern->pattern_len;
+			new_pattern.pkt_offset = eth_pattern->pkt_offset;
+		}
+
+		ret = ath12k_wmi_wow_add_pattern(ar, arvif->vdev_id,
+						 pattern_id,
+						 new_pattern.pattern,
+						 new_pattern.bytemask,
+						 new_pattern.pattern_len,
+						 new_pattern.pkt_offset);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
+				    pattern_id,
+				    arvif->vdev_id, ret);
+			return ret;
+		}
+
+		pattern_id++;
+		__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
+	}
+
+	for (i = 0; i < WOW_EVENT_MAX; i++) {
+		if (!test_bit(i, &wow_mask))
+			continue;
+		ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
+				    wow_wakeup_event(i), arvif->vdev_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ath12k_wow_set_wakeups(struct ath12k *ar,
+				  struct cfg80211_wowlan *wowlan)
+{
+	struct ath12k_vif *arvif;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		if (ath12k_wow_is_p2p_vdev(arvif))
+			continue;
+		ret = ath12k_wow_vif_set_wakeups(arvif, wowlan);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ath12k_wow_vdev_clean_nlo(struct ath12k *ar, u32 vdev_id)
+{
+	struct wmi_pno_scan_req_arg *pno;
+	int ret;
+
+	if (!ar->nlo_enabled)
+		return 0;
+
+	pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+	if (!pno)
+		return -ENOMEM;
+
+	pno->enable = 0;
+	ret = ath12k_wmi_wow_config_pno(ar, vdev_id, pno);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to disable PNO: %d", ret);
+		goto out;
+	}
+
+	ar->nlo_enabled = false;
+
+out:
+	kfree(pno);
+	return ret;
+}
+
+static int
ath12k_wow_vif_clean_nlo(struct ath12k_vif *arvif) +{ + struct ath12k *ar = arvif->ar; + + switch (arvif->vdev_type) { + case WMI_VDEV_TYPE_STA: + return ath12k_wow_vdev_clean_nlo(ar, arvif->vdev_id); + default: + return 0; + } +} + +static int ath12k_wow_nlo_cleanup(struct ath12k *ar) +{ + struct ath12k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (ath12k_wow_is_p2p_vdev(arvif)) + continue; + + ret = ath12k_wow_vif_clean_nlo(arvif); + if (ret) { + ath12k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static int ath12k_wow_set_hw_filter(struct ath12k *ar) +{ + struct wmi_hw_data_filter_arg arg; + struct ath12k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + continue; + + arg.vdev_id = arvif->vdev_id; + arg.enable = true; + arg.hw_filter_bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC; + ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg); + if (ret) { + ath12k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static int ath12k_wow_clear_hw_filter(struct ath12k *ar) +{ + struct wmi_hw_data_filter_arg arg; + struct ath12k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + continue; + + arg.vdev_id = arvif->vdev_id; + arg.enable = false; + arg.hw_filter_bitmap = 0; + ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg); + + if (ret) { + ath12k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static void ath12k_wow_generate_ns_mc_addr(struct ath12k_base *ab, + struct wmi_arp_ns_offload_arg *offload) +{ + int i; + + for (i = 0; i < offload->ipv6_count; i++) { + offload->self_ipv6_addr[i][0] = 0xff; + offload->self_ipv6_addr[i][1] = 0x02; + offload->self_ipv6_addr[i][11] = 0x01; + offload->self_ipv6_addr[i][12] = 0xff; + offload->self_ipv6_addr[i][13] = + offload->ipv6_addr[i][13]; + offload->self_ipv6_addr[i][14] = + offload->ipv6_addr[i][14]; + offload->self_ipv6_addr[i][15] = + offload->ipv6_addr[i][15]; + ath12k_dbg(ab, ATH12K_DBG_WOW, "NS solicited addr %pI6\n", + offload->self_ipv6_addr[i]); + } +} + +static void ath12k_wow_prepare_ns_offload(struct ath12k_vif *arvif, + struct wmi_arp_ns_offload_arg *offload) +{ + struct net_device *ndev = ieee80211_vif_to_wdev(arvif->vif)->netdev; + struct ath12k_base *ab = arvif->ar->ab; + struct inet6_ifaddr *ifa6; + struct ifacaddr6 *ifaca6; + struct inet6_dev *idev; + u32 count = 0, scope; + + if (!ndev) + return; + + idev = in6_dev_get(ndev); + if (!idev) + return; + + ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare ns offload\n"); + + read_lock_bh(&idev->lock); + + /* get unicast address */ + list_for_each_entry(ifa6, &idev->addr_list, if_list) { + if (count >= WMI_IPV6_MAX_COUNT) + goto unlock; + + if (ifa6->flags & IFA_F_DADFAILED) + continue; + + scope = ipv6_addr_src_scope(&ifa6->addr); + if (scope != IPV6_ADDR_SCOPE_LINKLOCAL && + scope != IPV6_ADDR_SCOPE_GLOBAL) { + ath12k_dbg(ab, ATH12K_DBG_WOW, + "Unsupported ipv6 scope: %d\n", scope); + continue; + } + + memcpy(offload->ipv6_addr[count], &ifa6->addr.s6_addr, + sizeof(ifa6->addr.s6_addr)); + offload->ipv6_type[count] = WMI_IPV6_UC_TYPE; + ath12k_dbg(ab, ATH12K_DBG_WOW, "mac 
count %d ipv6 uc %pI6 scope %d\n",
+			   count, offload->ipv6_addr[count],
+			   scope);
+		count++;
+	}
+
+	/* get anycast address */
+	rcu_read_lock();
+
+	for (ifaca6 = rcu_dereference(idev->ac_list); ifaca6;
+	     ifaca6 = rcu_dereference(ifaca6->aca_next)) {
+		if (count >= WMI_IPV6_MAX_COUNT) {
+			rcu_read_unlock();
+			goto unlock;
+		}
+
+		scope = ipv6_addr_src_scope(&ifaca6->aca_addr);
+		if (scope != IPV6_ADDR_SCOPE_LINKLOCAL &&
+		    scope != IPV6_ADDR_SCOPE_GLOBAL) {
+			ath12k_dbg(ab, ATH12K_DBG_WOW,
+				   "Unsupported ipv6 scope: %d\n", scope);
+			continue;
+		}
+
+		memcpy(offload->ipv6_addr[count], &ifaca6->aca_addr,
+		       sizeof(ifaca6->aca_addr));
+		offload->ipv6_type[count] = WMI_IPV6_AC_TYPE;
+		ath12k_dbg(ab, ATH12K_DBG_WOW, "mac count %d ipv6 ac %pI6 scope %d\n",
+			   count, offload->ipv6_addr[count],
+			   scope);
+		count++;
+	}
+
+	rcu_read_unlock();
+
+unlock:
+	read_unlock_bh(&idev->lock);
+
+	in6_dev_put(idev);
+
+	offload->ipv6_count = count;
+	ath12k_wow_generate_ns_mc_addr(ab, offload);
+}
+
+static void ath12k_wow_prepare_arp_offload(struct ath12k_vif *arvif,
+					   struct wmi_arp_ns_offload_arg *offload)
+{
+	struct ieee80211_vif *vif = arvif->vif;
+	struct ieee80211_vif_cfg vif_cfg = vif->cfg;
+	struct ath12k_base *ab = arvif->ar->ab;
+	u32 ipv4_cnt;
+
+	ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare arp offload\n");
+
+	ipv4_cnt = min(vif_cfg.arp_addr_cnt, WMI_IPV4_MAX_COUNT);
+	memcpy(offload->ipv4_addr, vif_cfg.arp_addr_list, ipv4_cnt * sizeof(u32));
+	offload->ipv4_count = ipv4_cnt;
+
+	ath12k_dbg(ab, ATH12K_DBG_WOW,
+		   "wow arp_addr_cnt %d vif->addr %pM, offload_addr %pI4\n",
+		   vif_cfg.arp_addr_cnt, vif->addr, offload->ipv4_addr);
+}
+
+static int ath12k_wow_arp_ns_offload(struct ath12k *ar, bool enable)
+{
+	struct wmi_arp_ns_offload_arg *offload;
+	struct ath12k_vif *arvif;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	offload = kmalloc(sizeof(*offload), GFP_KERNEL);
+	if (!offload)
+		return -ENOMEM;
+
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+			continue;
+
+		memset(offload, 0, sizeof(*offload));
+
+		memcpy(offload->mac_addr, arvif->vif->addr, ETH_ALEN);
+		ath12k_wow_prepare_ns_offload(arvif, offload);
+		ath12k_wow_prepare_arp_offload(arvif, offload);
+
+		ret = ath12k_wmi_arp_ns_offload(ar, arvif, offload, enable);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
+				    arvif->vdev_id, enable, ret);
+			/* don't leak the offload buffer on the error path */
+			kfree(offload);
+			return ret;
+		}
+	}
+
+	kfree(offload);
+
+	return 0;
+}
+
+static int ath12k_gtk_rekey_offload(struct ath12k *ar, bool enable)
+{
+	struct ath12k_vif *arvif;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		if (arvif->vdev_type != WMI_VDEV_TYPE_STA ||
+		    !arvif->is_up ||
+		    !arvif->rekey_data.enable_offload)
+			continue;
+
+		/* get rekey info before disabling rekey offload */
+		if (!enable) {
+			ret = ath12k_wmi_gtk_rekey_getinfo(ar, arvif);
+			if (ret) {
+				ath12k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
+					    arvif->vdev_id, ret);
+				return ret;
+			}
+		}
+
+		ret = ath12k_wmi_gtk_rekey_offload(ar, arvif, enable);
+
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to offload gtk rekey vdev %i: enable %d, ret %d\n",
+				    arvif->vdev_id, enable, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ath12k_wow_protocol_offload(struct ath12k *ar, bool enable)
+{
+	int ret;
+
+	ret = ath12k_wow_arp_ns_offload(ar, enable);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
+			    enable, ret);
+		return ret;
+	}
+
+	ret =
ath12k_gtk_rekey_offload(ar, enable); + if (ret) { + ath12k_warn(ar->ab, "failed to offload gtk rekey %d %d\n", + enable, ret); + return ret; + } + + return 0; +} + +static int ath12k_wow_set_keepalive(struct ath12k *ar, + enum wmi_sta_keepalive_method method, + u32 interval) +{ + struct ath12k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + ret = ath12k_mac_vif_set_keepalive(arvif, method, interval); + if (ret) + return ret; + } + + return 0; +} + +int ath12k_wow_op_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan) +{ + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar = ath12k_ah_to_ar(ah, 0); + int ret; + + mutex_lock(&ar->conf_mutex); + + ret = ath12k_wow_cleanup(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to clear wow wakeup events: %d\n", + ret); + goto exit; + } + + ret = ath12k_wow_set_wakeups(ar, wowlan); + if (ret) { + ath12k_warn(ar->ab, "failed to set wow wakeup events: %d\n", + ret); + goto cleanup; + } + + ret = ath12k_wow_protocol_offload(ar, true); + if (ret) { + ath12k_warn(ar->ab, "failed to set wow protocol offload events: %d\n", + ret); + goto cleanup; + } + + ret = ath12k_mac_wait_tx_complete(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to wait tx complete: %d\n", ret); + goto cleanup; + } + + ret = ath12k_wow_set_hw_filter(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to set hw filter: %d\n", + ret); + goto cleanup; + } + + ret = ath12k_wow_set_keepalive(ar, + WMI_STA_KEEPALIVE_METHOD_NULL_FRAME, + WMI_STA_KEEPALIVE_INTERVAL_DEFAULT); + if (ret) { + ath12k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret); + goto cleanup; + } + + ret = ath12k_wow_enable(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to start wow: %d\n", ret); + goto cleanup; + } + + ath12k_hif_irq_disable(ar->ab); + ath12k_hif_ce_irq_disable(ar->ab); + + ret = ath12k_hif_suspend(ar->ab); + if (ret) { + ath12k_warn(ar->ab, "failed to suspend hif: %d\n", ret); + goto wakeup; + } + + goto exit; + +wakeup: + ath12k_wow_wakeup(ar); + +cleanup: + ath12k_wow_cleanup(ar); + +exit: + mutex_unlock(&ar->conf_mutex); + return ret ? 
1 : 0; +} + +void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled) +{ + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar = ath12k_ah_to_ar(ah, 0); + + mutex_lock(&ar->conf_mutex); + device_set_wakeup_enable(ar->ab->dev, enabled); + mutex_unlock(&ar->conf_mutex); +} + +int ath12k_wow_op_resume(struct ieee80211_hw *hw) +{ + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar = ath12k_ah_to_ar(ah, 0); + int ret; + + mutex_lock(&ar->conf_mutex); + + ret = ath12k_hif_resume(ar->ab); + if (ret) { + ath12k_warn(ar->ab, "failed to resume hif: %d\n", ret); + goto exit; + } + + ath12k_hif_ce_irq_enable(ar->ab); + ath12k_hif_irq_enable(ar->ab); + + ret = ath12k_wow_wakeup(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret); + goto exit; + } + + ret = ath12k_wow_nlo_cleanup(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret); + goto exit; + } + + ret = ath12k_wow_clear_hw_filter(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to clear hw filter: %d\n", ret); + goto exit; + } + + ret = ath12k_wow_protocol_offload(ar, false); + if (ret) { + ath12k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n", + ret); + goto exit; + } + + ret = ath12k_wow_set_keepalive(ar, + WMI_STA_KEEPALIVE_METHOD_NULL_FRAME, + WMI_STA_KEEPALIVE_INTERVAL_DISABLE); + if (ret) { + ath12k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret); + goto exit; + } + +exit: + if (ret) { + switch (ah->state) { + case ATH12K_HW_STATE_ON: + ah->state = ATH12K_HW_STATE_RESTARTING; + ret = 1; + break; + case ATH12K_HW_STATE_OFF: + case ATH12K_HW_STATE_RESTARTING: + case ATH12K_HW_STATE_RESTARTED: + case ATH12K_HW_STATE_WEDGED: + ath12k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n", + ah->state); + ret = -EIO; + break; + } + } + + mutex_unlock(&ar->conf_mutex); + return ret; +} + +int ath12k_wow_init(struct ath12k *ar) +{ + if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map)) + return 0; + + ar->wow.wowlan_support = ath12k_wowlan_support; + + if (ar->ab->wow.wmi_conf_rx_decap_mode == ATH12K_HW_TXRX_NATIVE_WIFI) { + ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE; + ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE; + } + + if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) { + ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT; + ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS; + } + + ar->wow.max_num_patterns = ATH12K_WOW_PATTERNS; + ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns; + ar->ah->hw->wiphy->wowlan = &ar->wow.wowlan_support; + + device_set_wakeup_capable(ar->ab->dev, true); + + return 0; +} diff --git a/drivers/net/wireless/ath/ath12k/wow.h b/drivers/net/wireless/ath/ath12k/wow.h new file mode 100644 index 000000000000..af9be5fadcc3 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/wow.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef ATH12K_WOW_H +#define ATH12K_WOW_H + +#define ATH12K_WOW_RETRY_NUM 10 +#define ATH12K_WOW_RETRY_WAIT_MS 200 +#define ATH12K_WOW_PATTERNS 22 + +struct ath12k_wow { + u32 max_num_patterns; + struct completion wakeup_completed; + struct wiphy_wowlan_support wowlan_support; +}; + +struct ath12k_pkt_pattern { + u8 pattern[WOW_MAX_PATTERN_SIZE]; + u8 bytemask[WOW_MAX_PATTERN_SIZE]; + int pattern_len; + int pkt_offset; +}; + +struct rfc1042_hdr { + u8 llc_dsap; + u8 llc_ssap; + u8 llc_ctrl; + u8 snap_oui[3]; + __be16 eth_type; +} __packed; + +#ifdef CONFIG_PM + +int ath12k_wow_init(struct ath12k *ar); +int ath12k_wow_op_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan); +int ath12k_wow_op_resume(struct ieee80211_hw *hw); +void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled); +int ath12k_wow_enable(struct ath12k *ar); +int ath12k_wow_wakeup(struct ath12k *ar); + +#else + +static inline int ath12k_wow_init(struct ath12k *ar) +{ + return 0; +} + +static inline int ath12k_wow_enable(struct ath12k *ar) +{ + return 0; +} + +static inline int ath12k_wow_wakeup(struct ath12k *ar) +{ + return 0; +} +#endif /* CONFIG_PM */ +#endif /* ATH12K_WOW_H */ diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c index 4aec1fce1ae2..e22a6732a4c3 100644 --- a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c +++ b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c @@ -180,11 +180,10 @@ static struct libipw_txb *libipw_alloc_txb(int nr_frags, int txb_size, struct libipw_txb *txb; int i; - txb = kmalloc(struct_size(txb, fragments, nr_frags), gfp_mask); + txb = kzalloc(struct_size(txb, fragments, nr_frags), gfp_mask); if (!txb) return NULL; - memset(txb, 0, sizeof(struct libipw_txb)); txb->nr_frags = nr_frags; txb->frag_size = txb_size; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h index e71b3bc20253..855cd13a181e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h @@ -147,32 +147,34 @@ struct iwl_fw_ini_region_internal_buffer { * Configures parameters for region data collection * * @hdr: debug header - * @id: region id. Max id is &IWL_FW_INI_MAX_REGION_ID + * @id: region id. Max id is %IWL_FW_INI_MAX_REGION_ID * @type: region type. One of &enum iwl_fw_ini_region_type * @sub_type: region sub type * @sub_type_ver: region sub type version * @reserved: not in use * @name: region name * @dev_addr: device address configuration. Used by - * &IWL_FW_INI_REGION_DEVICE_MEMORY, &IWL_FW_INI_REGION_PERIPHERY_MAC, - * &IWL_FW_INI_REGION_PERIPHERY_PHY, &IWL_FW_INI_REGION_PERIPHERY_AUX, - * &IWL_FW_INI_REGION_PAGING, &IWL_FW_INI_REGION_CSR, - * &IWL_FW_INI_REGION_DRAM_IMR and &IWL_FW_INI_REGION_PCI_IOSF_CONFIG - * &IWL_FW_INI_REGION_DBGI_SRAM, &FW_TLV_DEBUG_REGION_TYPE_DBGI_SRAM, - * &IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP, + * %IWL_FW_INI_REGION_DEVICE_MEMORY, %IWL_FW_INI_REGION_PERIPHERY_MAC, + * %IWL_FW_INI_REGION_PERIPHERY_PHY, %IWL_FW_INI_REGION_PERIPHERY_AUX, + * %IWL_FW_INI_REGION_PAGING, %IWL_FW_INI_REGION_CSR, + * %IWL_FW_INI_REGION_DRAM_IMR and %IWL_FW_INI_REGION_PCI_IOSF_CONFIG + * %IWL_FW_INI_REGION_DBGI_SRAM, %FW_TLV_DEBUG_REGION_TYPE_DBGI_SRAM, + * %IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP, * @dev_addr_range: device address range configuration. Used by - * &IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE and - * &IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE - * @fifos: fifos configuration. 
Used by &IWL_FW_INI_REGION_TXF and - * &IWL_FW_INI_REGION_RXF + * %IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE and + * %IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE + * @fifos: fifos configuration. Used by %IWL_FW_INI_REGION_TXF and + * %IWL_FW_INI_REGION_RXF * @err_table: error table configuration. Used by - * IWL_FW_INI_REGION_LMAC_ERROR_TABLE and - * IWL_FW_INI_REGION_UMAC_ERROR_TABLE + * %IWL_FW_INI_REGION_LMAC_ERROR_TABLE and + * %IWL_FW_INI_REGION_UMAC_ERROR_TABLE * @internal_buffer: internal monitor buffer configuration. Used by - * &IWL_FW_INI_REGION_INTERNAL_BUFFER + * %IWL_FW_INI_REGION_INTERNAL_BUFFER + * @special_mem: special device memory region, used by + * %IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY * @dram_alloc_id: dram allocation id. One of &enum iwl_fw_ini_allocation_id. - * Used by &IWL_FW_INI_REGION_DRAM_BUFFER - * @tlv_mask: tlv collection mask. Used by &IWL_FW_INI_REGION_TLV + * Used by %IWL_FW_INI_REGION_DRAM_BUFFER + * @tlv_mask: tlv collection mask. Used by %IWL_FW_INI_REGION_TLV * @addrs: array of addresses attached to the end of the region tlv */ struct iwl_fw_ini_region_tlv { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h index 545826973a80..bcbbf8c4a297 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -310,6 +310,13 @@ struct iwl_ac_qos { * @filter_flags: combination of &enum iwl_mac_filter_flags * @qos_flags: from &enum iwl_mac_qos_flags * @ac: one iwl_mac_qos configuration for each AC + * @ap: AP specific config data, see &struct iwl_mac_data_ap + * @go: GO specific config data, see &struct iwl_mac_data_go + * @sta: BSS client specific config data, see &struct iwl_mac_data_sta + * @p2p_sta: P2P client specific config data, see &struct iwl_mac_data_p2p_sta + * @p2p_dev: P2P-device specific config data, see &struct iwl_mac_data_p2p_dev + * @pibss: Pseudo-IBSS specific data, unused; see struct iwl_mac_data_pibss + * @ibss: IBSS specific config data, see &struct iwl_mac_data_ibss */ struct iwl_mac_ctx_cmd { /* COMMON_INDEX_HDR_API_S_VER_1 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index 9ff5d7e538fd..d424d0126367 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -704,6 +704,8 @@ struct iwl_lari_config_change_cmd_v10 { * Each bit represents a country or region, and a band to activate * according to the BIOS definitions. * For LARI cmd version 11 - bits 0:4 are supported. + * For LARI cmd version 12 - bits 0:6 are supported and bits 7:31 are + * reserved. No need to mask out the reserved bits. * @force_disable_channels_bitmap: Bitmap of disabled bands/channels. 
* Each bit represents a set of channels in a specific band that should be * disabled @@ -731,9 +733,11 @@ struct iwl_lari_config_change_cmd { __le32 oem_11be_allow_bitmap; } __packed; /* LARI_CHANGE_CONF_CMD_S_VER_11 */ +/* LARI_CHANGE_CONF_CMD_S_VER_12 */ /* Activate UNII-1 (5.2GHz) for World Wide */ -#define ACTIVATE_5G2_IN_WW_MASK BIT(4) +#define ACTIVATE_5G2_IN_WW_MASK BIT(4) +#define CHAN_STATE_ACTIVE_BITMAP_CMD_V11 0x1F /** * struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h index f192e02e4ba8..4d8a12799c4d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h @@ -144,6 +144,7 @@ struct iwl_phy_context_cmd_v1 { * @rxchain_info: ??? * @sbb_bandwidth: 0 disabled, 1 - 40Mhz ... 4 - 320MHz * @sbb_ctrl_channel_loc: location of the control channel + * @puncture_mask: bitmap of punctured subchannels * @dsp_cfg_flags: set to 0 * @reserved: reserved to align to 64 bit */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h index 19eb6008fe4b..6e6a92d173cc 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -569,9 +569,12 @@ enum iwl_ppag_flags { * @v2: version 2 * version 3, 4, 5 and 6 are the same structure as v2, * but has a different format of the flags bitmap - * @flags: values from &enum iwl_ppag_flags - * @gain: table of antenna gain values per chain and sub-band - * @reserved: reserved + * @v1.flags: values from &enum iwl_ppag_flags + * @v1.gain: table of antenna gain values per chain and sub-band + * @v1.reserved: reserved + * @v2.flags: values from &enum iwl_ppag_flags + * @v2.gain: table of antenna gain values per chain and sub-band + * @v2.reserved: reserved */ union iwl_ppag_table_cmd { struct { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 8bd85ef90052..691c879cb90d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -710,7 +710,15 @@ struct iwl_rx_mpdu_desc { __le32 reorder_data; union { + /** + * @v1: version 1 of the remaining RX descriptor, + * see &struct iwl_rx_mpdu_desc_v1 + */ struct iwl_rx_mpdu_desc_v1 v1; + /** + * @v3: version 3 of the remaining RX descriptor, + * see &struct iwl_rx_mpdu_desc_v3 + */ struct iwl_rx_mpdu_desc_v3 v3; }; } __packed; /* RX_MPDU_RES_START_API_S_VER_3, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c index 5228b837a9ef..560a91998cc4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c @@ -497,6 +497,7 @@ static size_t iwl_get_lari_config_cmd_size(u8 cmd_ver) size_t cmd_size; switch (cmd_ver) { + case 12: case 11: cmd_size = sizeof(struct iwl_lari_config_change_cmd); break; @@ -563,6 +564,9 @@ int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt, if (!ret) { if (cmd_ver < 8) value &= ~ACTIVATE_5G2_IN_WW_MASK; + if (cmd_ver < 12) + value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V11; + cmd->chan_state_active_bitmap = cpu_to_le32(value); } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 9810a7f4a591..08d990ba8a79 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ 
b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -1240,12 +1240,6 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync, } fwrt->trans->dbg.restart_required = false; - IWL_DEBUG_FW(fwrt, "WRT: tp %d, reset_fw %d\n", - tp, dump_data.trig->reset_fw); - IWL_DEBUG_FW(fwrt, - "WRT: restart_required %d, last_tp_resetfw %d\n", - fwrt->trans->dbg.restart_required, - fwrt->trans->dbg.last_tp_resetfw); if (fwrt->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000) { @@ -1255,22 +1249,17 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync, IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) { fwrt->trans->dbg.restart_required = false; fwrt->trans->dbg.last_tp_resetfw = 0xFF; - IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n"); } else if (le32_to_cpu(dump_data.trig->reset_fw) == IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) { - IWL_DEBUG_FW(fwrt, "WRT: stop and reload firmware\n"); fwrt->trans->dbg.restart_required = true; } else if (le32_to_cpu(dump_data.trig->reset_fw) == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) { - IWL_DEBUG_FW(fwrt, - "WRT: stop only and no reload firmware\n"); fwrt->trans->dbg.restart_required = false; fwrt->trans->dbg.last_tp_resetfw = le32_to_cpu(dump_data.trig->reset_fw); } else if (le32_to_cpu(dump_data.trig->reset_fw) == IWL_FW_INI_RESET_FW_MODE_NOTHING) { - IWL_DEBUG_FW(fwrt, - "WRT: nothing need to be done after debug collection\n"); + /* nothing */ } else { IWL_ERR(fwrt, "WRT: wrong resetfw %d\n", le32_to_cpu(dump_data.trig->reset_fw)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index ecf94ec2c2b0..6148acbac6af 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -120,6 +120,7 @@ enum CMD_MODE { CMD_BLOCK_TXQS = BIT(3), CMD_SEND_IN_D3 = BIT(4), }; +#define CMD_MODE_BITS 5 #define DEF_CMD_PAYLOAD_SIZE 320 @@ -712,7 +713,9 @@ struct iwl_dma_ptr { struct iwl_cmd_meta { /* only for SYNC commands, iff the reply skb is wanted */ struct iwl_host_cmd *source; - u32 flags; + u32 flags: CMD_MODE_BITS; + /* sg_offset is valid if it is non-zero */ + u32 sg_offset: PAGE_SHIFT; u32 tbs; }; @@ -749,6 +752,7 @@ struct iwl_pcie_first_tb_buf { * @first_tb_dma: DMA address for the first_tb_bufs start * @entries: transmit entries (driver state) * @lock: queue lock + * @reclaim_lock: reclaim lock * @stuck_timer: timer that fires if queue gets stuck * @trans: pointer back to transport (for timer) * @need_update: indicates need to update read/write index @@ -791,6 +795,8 @@ struct iwl_txq { struct iwl_pcie_txq_entry *entries; /* lock for syncing changes on the queue */ spinlock_t lock; + /* lock to prevent concurrent reclaim */ + spinlock_t reclaim_lock; unsigned long frozen_expiry_remainder; struct timer_list stuck_timer; struct iwl_trans *trans; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c index 73527781f89a..a9929aa49913 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c @@ -11,6 +11,7 @@ HOW(BLOCKED_TPT) \ HOW(BLOCKED_FW) \ HOW(BLOCKED_NON_BSS) \ + HOW(BLOCKED_ROC) \ HOW(EXIT_MISSED_BEACON) \ HOW(EXIT_LOW_RSSI) \ HOW(EXIT_COEX) \ @@ -1033,15 +1034,17 @@ void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS))) return; - if (!(mvmvif->esr_disable_reason & reason)) { - IWL_DEBUG_INFO(mvm, - "Blocking EMLSR mode. 
reason = %s (0x%x)\n", - iwl_get_esr_state_string(reason), reason); - iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason); - } + if (mvmvif->esr_disable_reason & reason) + return; + + IWL_DEBUG_INFO(mvm, + "Blocking EMLSR mode. reason = %s (0x%x)\n", + iwl_get_esr_state_string(reason), reason); mvmvif->esr_disable_reason |= reason; + iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason); + iwl_mvm_exit_esr(mvm, vif, reason, link_to_keep); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index b45a0854a245..835a05b91833 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -59,13 +59,13 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { .num_different_channels = 2, .max_interfaces = 3, .limits = iwl_mvm_limits, - .n_limits = ARRAY_SIZE(iwl_mvm_limits) - 1, + .n_limits = ARRAY_SIZE(iwl_mvm_limits), }, { .num_different_channels = 1, .max_interfaces = 3, .limits = iwl_mvm_limits_ap, - .n_limits = ARRAY_SIZE(iwl_mvm_limits_ap) - 1, + .n_limits = ARRAY_SIZE(iwl_mvm_limits_ap), }, }; @@ -372,7 +372,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) if (mvm->mld_api_is_used && mvm->nvm_data->sku_cap_11be_enable && !iwlwifi_mod_params.disable_11ax && !iwlwifi_mod_params.disable_11be) { - hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT; + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO; /* we handle this already earlier, but need it for MLO */ ieee80211_hw_set(hw, HANDLES_QUIET_CSA); } @@ -383,12 +383,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) if (!mvm->mld_api_is_used) ieee80211_hw_set(hw, TIMING_BEACON_ONLY); - /* We should probably have this, but mac80211 - * currently doesn't support it for MLO. 
- */ - if (!(hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)) - ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP); - /* * On older devices, enabling TX A-MSDU occasionally leads to * something getting messed up, the command read from the FIFO @@ -2855,6 +2849,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (changes & BSS_CHANGED_ASSOC) { if (vif->cfg.assoc) { + mvmvif->session_prot_connection_loss = false; + /* clear statistics to get clean beacon counter */ iwl_mvm_request_statistics(mvm, true); for_each_mvm_vif_valid_link(mvmvif, i) @@ -4270,8 +4266,12 @@ void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_prep_tx_info *info) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + if (info->was_assoc && !mvmvif->session_prot_connection_loss) + return; + guard(mvm)(mvm); iwl_mvm_protect_assoc(mvm, vif, info->duration, info->link_id); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index ebf313e161f4..3c99396ad369 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -873,6 +873,8 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm, if (changes & BSS_CHANGED_ASSOC) { if (vif->cfg.assoc) { + mvmvif->session_prot_connection_loss = false; + /* clear statistics to get clean beacon counter */ iwl_mvm_request_statistics(mvm, true); iwl_mvm_sf_update(mvm, vif, false); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 362973fdeac0..22f48b66d79c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -450,6 +450,38 @@ struct iwl_mvm_esr_exit { * @unblock_esr_tpt_wk: work for unblocking EMLSR when tpt is high enough. * @roc_activity: currently running ROC activity for this vif (or * ROC_NUM_ACTIVITIES if no activity is running). + * @session_prot_connection_loss: the connection was lost due to session + * protection ending without receiving a beacon, so we need to now + * protect the deauth separately + * @ap_early_keys: The firmware cannot install keys before stations etc., + * but higher layers work differently, so we store the keys here for + * later installation. 
+ * @ap_sta: pointer to the AP STA data structure + * @csa_count: CSA counter (old CSA implementation w/o firmware) + * @csa_misbehave: CSA AP misbehaviour flag (old implementation) + * @csa_target_freq: CSA target channel frequency (old implementation) + * @csa_work: CSA work (old implementation) + * @dbgfs_bf: beamforming debugfs data + * @dbgfs_dir: debugfs directory for this vif + * @dbgfs_pm: power management debugfs data + * @dbgfs_quota_min: debugfs value for minimal quota + * @dbgfs_slink: debugfs symlink for this interface + * @ftm_unprotected: unprotected FTM debugfs override + * @hs_time_event_data: hotspot/AUX ROC time event data + * @mac_pwr_cmd: debugfs override for MAC power command + * @target_ipv6_addrs: IPv6 addresses on this interface for offload + * @num_target_ipv6_addrs: number of @target_ipv6_addrs + * @tentative_addrs: bitmap of tentative IPv6 addresses in @target_ipv6_addrs + * @rekey_data: rekeying data for WoWLAN GTK rekey offload + * @seqno: storage for seqno for older firmware D0/D3 transition + * @seqno_valid: indicates @seqno is valid + * @time_event_data: session protection time event data + * @tsf_id: the TSF resource ID assigned in firmware (for firmware needing that) + * @tx_key_idx: WEP transmit key index for D3 + * @uapsd_misbehaving_ap_addr: MLD address/BSSID of U-APSD misbehaving AP, to + * not use U-APSD on reconnection + * @uapsd_nonagg_detected_wk: worker for handling detection of no aggregation + * in U-APSD */ struct iwl_mvm_vif { struct iwl_mvm *mvm; @@ -463,6 +495,7 @@ struct iwl_mvm_vif { bool pm_enabled; bool monitor_active; bool esr_active; + bool session_prot_connection_loss; u8 low_latency: 6; u8 low_latency_actual: 1; @@ -735,24 +768,20 @@ struct iwl_mvm_tcm { * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer * @head_sn: reorder window head sn * @num_stored: number of mpdus stored in the buffer - * @buf_size: the reorder buffer size as set by the last addba request * @queue: queue of this reorder buffer * @last_amsdu: track last ASMDU SN for duplication detection * @last_sub_index: track ASMDU sub frame index for duplication detection * @valid: reordering is valid for this queue * @lock: protect reorder buffer internal state - * @mvm: mvm pointer, needed for frame timer context */ struct iwl_mvm_reorder_buffer { u16 head_sn; u16 num_stored; - u16 buf_size; int queue; u16 last_amsdu; u8 last_sub_index; bool valid; spinlock_t lock; - struct iwl_mvm *mvm; } ____cacheline_aligned_in_smp; /** @@ -774,6 +803,7 @@ __aligned(roundup_pow_of_two(sizeof(struct sk_buff_head))) * @tid: tid of the session * @baid: baid of the session * @timeout: the timeout set in the addba request + * @buf_size: the reorder buffer size as set by the last addba request * @entries_per_queue: # of buffers per queue, this actually gets * aligned up to avoid cache line sharing between queues * @last_rx: last rx jiffies, updated only if timeout passed from last update @@ -790,13 +820,14 @@ struct iwl_mvm_baid_data { u8 tid; u8 baid; u16 timeout; + u16 buf_size; u16 entries_per_queue; unsigned long last_rx; struct timer_list session_timer; struct iwl_mvm_baid_data __rcu **rcu_ptr; struct iwl_mvm *mvm; struct iwl_mvm_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES]; - struct iwl_mvm_reorder_buf_entry entries[]; + struct iwl_mvm_reorder_buf_entry entries[] ____cacheline_aligned_in_smp; }; static inline struct iwl_mvm_baid_data * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 
5370580f6210..1a210d0c22b3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -566,7 +566,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm, lockdep_assert_held(&reorder_buf->lock); while (ieee80211_sn_less(ssn, nssn)) { - int index = ssn % reorder_buf->buf_size; + int index = ssn % baid_data->buf_size; struct sk_buff_head *skb_list = &entries[index].frames; struct sk_buff *skb; @@ -617,7 +617,7 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue, spin_lock_bh(&reorder_buf->lock); iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf, ieee80211_sn_add(reorder_buf->head_sn, - reorder_buf->buf_size)); + ba_data->buf_size)); spin_unlock_bh(&reorder_buf->lock); out: @@ -839,7 +839,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, } /* put in reorder buffer */ - index = sn % buffer->buf_size; + index = sn % baid_data->buf_size; __skb_queue_tail(&entries[index].frames, skb); buffer->num_stored++; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index c57755e5ff5e..15e64d94d6ea 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -2743,7 +2743,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, */ WARN_ON(1); - for (j = 0; j < reorder_buf->buf_size; j++) + for (j = 0; j < data->buf_size; j++) __skb_queue_purge(&entries[j].frames); spin_unlock_bh(&reorder_buf->lock); @@ -2752,7 +2752,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, struct iwl_mvm_baid_data *data, - u16 ssn, u16 buf_size) + u16 ssn) { int i; @@ -2765,12 +2765,10 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, reorder_buf->num_stored = 0; reorder_buf->head_sn = ssn; - reorder_buf->buf_size = buf_size; spin_lock_init(&reorder_buf->lock); - reorder_buf->mvm = mvm; reorder_buf->queue = i; reorder_buf->valid = false; - for (j = 0; j < reorder_buf->buf_size; j++) + for (j = 0; j < data->buf_size; j++) __skb_queue_head_init(&entries[j].frames); } } @@ -2979,13 +2977,14 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, baid_data->mvm = mvm; baid_data->tid = tid; baid_data->sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1); + baid_data->buf_size = buf_size; mvm_sta->tid_to_baid[tid] = baid; if (timeout) mod_timer(&baid_data->session_timer, TU_TO_EXP_TIME(timeout * 2)); - iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size); + iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn); /* * protect the BA data with RCU to cover a case where our * internal RX sync mechanism will timeout (not that it's diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 2773dfa8baa9..a8c42ce3b630 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -222,6 +222,8 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_ASSOC_FAILED, NULL); + + mvmvif->session_prot_connection_loss = true; } iwl_mvm_connection_loss(mvm, vif, errmsg); @@ -1055,12 +1057,21 @@ void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif, u32 *duration_tu, u32 *delay) { - u32 dtim_interval = vif->bss_conf.dtim_period * - vif->bss_conf.beacon_int; + struct ieee80211_bss_conf *link_conf; + unsigned int link_id; + u32 dtim_interval = 0; *delay = AUX_ROC_MIN_DELAY; 
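	/* The loop below takes the largest dtim_period * beacon_int (in TU)
	 * across all active links; with no active link it is left at 0,
	 * which the WARN_ON(!dtim_interval) further down guards against.
	 */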
*duration_tu = MSEC_TO_TU(duration_ms); + rcu_read_lock(); + for_each_vif_active_link(vif, link_conf, link_id) { + dtim_interval = + max_t(u32, dtim_interval, + link_conf->dtim_period * link_conf->beacon_int); + } + rcu_read_unlock(); + /* * If we are associated we want the delay time to be at least one * dtim interval so that the FW can wait until after the DTIM and @@ -1069,8 +1080,10 @@ void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif, * Since we want to use almost a whole dtim interval we would also * like the delay to be for 2-3 dtim intervals, in case there are * other time events with higher priority. + * dtim_interval should never be 0, it can be 1 if we don't know it + * (we haven't heard any beacon yet). */ - if (vif->cfg.assoc) { + if (vif->cfg.assoc && !WARN_ON(!dtim_interval)) { *delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY); /* We cannot remain off-channel longer than the DTIM interval */ if (dtim_interval <= *duration_tu) { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index f2bb34270ccf..b59de4f80b4b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -603,6 +603,22 @@ struct iwl_tso_hdr_page { u8 *pos; }; +/* + * Note that we put this struct *last* in the page. By doing that, we ensure + * that no TB referencing this page can trigger the 32-bit boundary hardware + * bug. + */ +struct iwl_tso_page_info { + dma_addr_t dma_addr; + struct page *next; + refcount_t use_count; +}; + +#define IWL_TSO_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(struct iwl_tso_page_info)) +#define IWL_TSO_PAGE_INFO(addr) \ + ((struct iwl_tso_page_info *)(((unsigned long)addr & PAGE_MASK) + \ + IWL_TSO_PAGE_DATA_SIZE)) + int iwl_pcie_tx_init(struct iwl_trans *trans); void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr); int iwl_pcie_tx_stop(struct iwl_trans *trans); @@ -623,9 +639,23 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue); -struct iwl_tso_hdr_page *iwl_pcie_get_page_hdr(struct iwl_trans *trans, - size_t len, struct sk_buff *skb); -void iwl_pcie_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb); +dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, void *addr); +struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_cmd_meta *cmd_meta, + u8 **hdr, unsigned int hdr_room); + +void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_cmd_meta *cmd_meta); + +static inline dma_addr_t iwl_pcie_get_tso_page_phys(void *addr) +{ + dma_addr_t res; + + res = IWL_TSO_PAGE_INFO(addr)->dma_addr; + res += (unsigned long)addr & ~PAGE_MASK; + + return res; +} static inline dma_addr_t iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index b897de1b9226..2e780fb2da42 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -19,8 +19,10 @@ static struct page *get_workaround_page(struct iwl_trans *trans, struct sk_buff *skb) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_tso_page_info *info; struct page **page_ptr; struct page *ret; + dma_addr_t phys; page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs); @@ -28,8 +30,22 @@ 
static struct page *get_workaround_page(struct iwl_trans *trans, if (!ret) return NULL; + info = IWL_TSO_PAGE_INFO(page_address(ret)); + + /* Create a DMA mapping for the page */ + phys = dma_map_page_attrs(trans->dev, ret, 0, PAGE_SIZE, + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + if (unlikely(dma_mapping_error(trans->dev, phys))) { + __free_page(ret); + return NULL; + } + + /* Store physical address and set use count */ + info->dma_addr = phys; + refcount_set(&info->use_count, 1); + /* set the chaining pointer to the previous page if there */ - *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr; + info->next = *page_ptr; *page_ptr = ret; return ret; @@ -45,7 +61,8 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_tfh_tfd *tfd, dma_addr_t phys, void *virt, - u16 len, struct iwl_cmd_meta *meta) + u16 len, struct iwl_cmd_meta *meta, + bool unmap) { dma_addr_t oldphys = phys; struct page *page; @@ -76,7 +93,7 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, * a new mapping for it so the device will not fail. */ - if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) { + if (WARN_ON(len > IWL_TSO_PAGE_DATA_SIZE)) { ret = -ENOBUFS; goto unmap; } @@ -89,10 +106,27 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, memcpy(page_address(page), virt, len); - phys = dma_map_single(trans->dev, page_address(page), len, - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, phys))) - return -ENOMEM; + /* + * This is a bit odd, but performance does not matter here, what + * matters are the expectations of the calling code and TB cleanup + * function. + * + * As such, if unmap is set, then create another mapping for the TB + * entry as it will be unmapped later. On the other hand, if it is not + * set, then the TB entry will not be unmapped and instead we simply + * reference and sync the mapping that get_workaround_page() created. 
+ */ + if (unmap) { + phys = dma_map_single(trans->dev, page_address(page), len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, phys))) + return -ENOMEM; + } else { + phys = iwl_pcie_get_tso_page_phys(page_address(page)); + dma_sync_single_for_device(trans->dev, phys, len, + DMA_TO_DEVICE); + } + ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); if (ret < 0) { /* unmap the new allocation as single */ @@ -100,6 +134,7 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, meta = NULL; goto unmap; } + IWL_DEBUG_TX(trans, "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n", len, (unsigned long long)oldphys, @@ -107,6 +142,9 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, ret = 0; unmap: + if (!unmap) + goto trace; + if (meta) dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE); else @@ -119,7 +157,9 @@ trace: static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_tfh_tfd *tfd, int start_len, + struct iwl_tfh_tfd *tfd, + struct iwl_cmd_meta *out_meta, + int start_len, u8 hdr_len, struct iwl_device_tx_cmd *dev_cmd) { @@ -128,9 +168,10 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; unsigned int mss = skb_shinfo(skb)->gso_size; + dma_addr_t start_hdr_phys; u16 length, amsdu_pad; u8 *start_hdr; - struct iwl_tso_hdr_page *hdr_page; + struct sg_table *sgt; struct tso_t tso; trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), @@ -146,11 +187,11 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)); /* Our device supports 9 segments at most, it will fit in 1 page */ - hdr_page = iwl_pcie_get_page_hdr(trans, hdr_room, skb); - if (!hdr_page) + sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room); + if (!sgt) return -ENOMEM; - start_hdr = hdr_page->pos; + start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr); /* * Pull the ieee80211 header to be able to use TSO core, @@ -172,36 +213,34 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, unsigned int data_left = min_t(unsigned int, mss, total_len); unsigned int tb_len; dma_addr_t tb_phys; - u8 *subf_hdrs_start = hdr_page->pos; + u8 *pos_hdr = start_hdr; total_len -= data_left; - memset(hdr_page->pos, 0, amsdu_pad); - hdr_page->pos += amsdu_pad; + memset(pos_hdr, 0, amsdu_pad); + pos_hdr += amsdu_pad; amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + data_left)) & 0x3; - ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); - hdr_page->pos += ETH_ALEN; - ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); - hdr_page->pos += ETH_ALEN; + ether_addr_copy(pos_hdr, ieee80211_get_DA(hdr)); + pos_hdr += ETH_ALEN; + ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr)); + pos_hdr += ETH_ALEN; length = snap_ip_tcp_hdrlen + data_left; - *((__be16 *)hdr_page->pos) = cpu_to_be16(length); - hdr_page->pos += sizeof(length); + *((__be16 *)pos_hdr) = cpu_to_be16(length); + pos_hdr += sizeof(length); /* * This will copy the SNAP as well which will be considered * as MAC header. 
*/ - tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); + tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len); - hdr_page->pos += snap_ip_tcp_hdrlen; + pos_hdr += snap_ip_tcp_hdrlen; + + tb_len = pos_hdr - start_hdr; + tb_phys = iwl_pcie_get_tso_page_phys(start_hdr); - tb_len = hdr_page->pos - start_hdr; - tb_phys = dma_map_single(trans->dev, start_hdr, - tb_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) - goto out_err; /* * No need for _with_wa, this is from the TSO page and * we leave some space at the end of it so can't hit @@ -211,21 +250,24 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_phys, tb_len); /* add this subframe's headers' length to the tx_cmd */ - le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); + le16_add_cpu(&tx_cmd->len, tb_len); /* prepare the start_hdr for the next subframe */ - start_hdr = hdr_page->pos; + start_hdr = pos_hdr; /* put the payload */ while (data_left) { int ret; tb_len = min_t(unsigned int, tso.size, data_left); - tb_phys = dma_map_single(trans->dev, tso.data, - tb_len, DMA_TO_DEVICE); + tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, tso.data); + /* Not a real mapping error, use direct comparison */ + if (unlikely(tb_phys == DMA_MAPPING_ERROR)) + goto out_err; + ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, tso.data, - tb_len, NULL); + tb_len, NULL, false); if (ret) goto out_err; @@ -234,6 +276,9 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, } } + dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room, + DMA_TO_DEVICE); + /* re -add the WiFi header */ skb_push(skb, hdr_len); @@ -290,8 +335,8 @@ iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans, */ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len); - if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE, - hdr_len, dev_cmd)) + if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, out_meta, + len + IWL_FIRST_TB_SIZE, hdr_len, dev_cmd)) goto out_err; /* building the A-MSDU might have changed this data, memcpy it now */ @@ -323,7 +368,7 @@ static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans, fragsz, DMA_TO_DEVICE); ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, skb_frag_address(frag), - fragsz, out_meta); + fragsz, out_meta, true); if (ret) return ret; } @@ -397,7 +442,7 @@ iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans, tb2_len, DMA_TO_DEVICE); ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, skb->data + hdr_len, tb2_len, - NULL); + NULL, true); if (ret) goto out_err; } @@ -412,7 +457,8 @@ iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans, skb_headlen(frag), DMA_TO_DEVICE); ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, frag->data, - skb_headlen(frag), NULL); + skb_headlen(frag), NULL, + true); if (ret) goto out_err; if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta)) @@ -607,6 +653,10 @@ void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, return; } + /* TB1 is mapped directly, the rest is the TSO page and SG list. 
*/ + if (meta->sg_offset) + num_tbs = 2; + /* first TB is never freed - it's the bidirectional DMA data */ for (i = 1; i < num_tbs; i++) { if (meta->tbs & BIT(i)) @@ -722,7 +772,7 @@ int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, /* Set up first empty entry in queue's array of Tx/cmd buffers */ out_meta = &txq->entries[idx].meta; - out_meta->flags = 0; + memset(out_meta, 0, sizeof(*out_meta)); tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta); if (!tfd) { @@ -771,17 +821,19 @@ static void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; - spin_lock_bh(&txq->lock); + spin_lock_bh(&txq->reclaim_lock); + spin_lock(&txq->lock); while (txq->write_ptr != txq->read_ptr) { IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", txq_id, txq->read_ptr); if (txq_id != trans_pcie->txqs.cmd.q_id) { int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); + struct iwl_cmd_meta *cmd_meta = &txq->entries[idx].meta; struct sk_buff *skb = txq->entries[idx].skb; if (!WARN_ON_ONCE(!skb)) - iwl_pcie_free_tso_page(trans, skb); + iwl_pcie_free_tso_pages(trans, skb, cmd_meta); } iwl_txq_gen2_free_tfd(trans, txq); txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); @@ -793,7 +845,8 @@ static void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id) iwl_op_mode_free_skb(trans->op_mode, skb); } - spin_unlock_bh(&txq->lock); + spin_unlock(&txq->lock); + spin_unlock_bh(&txq->reclaim_lock); /* just in case - this queue may have been stopped */ iwl_trans_pcie_wake_queue(trans, txq); @@ -1250,7 +1303,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, out_cmd = txq->entries[idx].cmd; out_meta = &txq->entries[idx].meta; - /* re-initialize to NULL */ + /* re-initialize, this also marks the SG list as unused */ memset(out_meta, 0, sizeof(*out_meta)); if (cmd->flags & CMD_WANT_SKB) out_meta->source = cmd; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index f4452732417d..22d482ae53d9 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -209,7 +209,22 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) spin_unlock(&trans_pcie->reg_lock); } -void iwl_pcie_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb) +static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans, + struct page *page) +{ + struct iwl_tso_page_info *info = IWL_TSO_PAGE_INFO(page_address(page)); + + /* Decrease internal use count and unmap/free page if needed */ + if (refcount_dec_and_test(&info->use_count)) { + dma_unmap_page(trans->dev, info->dma_addr, PAGE_SIZE, + DMA_TO_DEVICE); + + __free_page(page); + } +} + +void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_cmd_meta *cmd_meta) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct page **page_ptr; @@ -220,11 +235,23 @@ void iwl_pcie_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb) *page_ptr = NULL; while (next) { + struct iwl_tso_page_info *info; struct page *tmp = next; - next = *(void **)((u8 *)page_address(next) + PAGE_SIZE - - sizeof(void *)); - __free_page(tmp); + info = IWL_TSO_PAGE_INFO(page_address(next)); + next = info->next; + + /* Unmap the scatter gather list that is on the last page */ + if (!next && cmd_meta->sg_offset) { + struct sg_table *sgt; + + sgt = (void *)((u8 *)page_address(tmp) + + 
cmd_meta->sg_offset); + + dma_unmap_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0); + } + + iwl_pcie_free_and_unmap_tso_page(trans, tmp); } } @@ -276,6 +303,10 @@ static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, return; } + /* TB1 is mapped directly, the rest is the TSO page and SG list. */ + if (meta->sg_offset) + num_tbs = 2; + /* first TB is never freed - it's the bidirectional DMA data */ for (i = 1; i < num_tbs; i++) { @@ -302,20 +333,21 @@ static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] * @trans: transport private data * @txq: tx queue + * @read_ptr: the TXQ read_ptr to free * * Does NOT advance any TFD circular buffer read/write indexes * Does NOT free the TFD itself (which is within circular buffer) */ -static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) +static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq, + int read_ptr) { /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and * idx is bounded by n_window */ - int rd_ptr = txq->read_ptr; - int idx = iwl_txq_get_cmd_index(txq, rd_ptr); + int idx = iwl_txq_get_cmd_index(txq, read_ptr); struct sk_buff *skb; - lockdep_assert_held(&txq->lock); + lockdep_assert_held(&txq->reclaim_lock); if (!txq->entries) return; @@ -325,10 +357,10 @@ static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) */ if (trans->trans_cfg->gen2) iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, - iwl_txq_get_tfd(trans, txq, rd_ptr)); + iwl_txq_get_tfd(trans, txq, read_ptr)); else iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, - txq, rd_ptr); + txq, read_ptr); /* free SKB */ skb = txq->entries[idx].skb; @@ -356,20 +388,23 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) return; } - spin_lock_bh(&txq->lock); + spin_lock_bh(&txq->reclaim_lock); + spin_lock(&txq->lock); while (txq->write_ptr != txq->read_ptr) { IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", txq_id, txq->read_ptr); if (txq_id != trans_pcie->txqs.cmd.q_id) { struct sk_buff *skb = txq->entries[txq->read_ptr].skb; + struct iwl_cmd_meta *cmd_meta = + &txq->entries[txq->read_ptr].meta; if (WARN_ON_ONCE(!skb)) continue; - iwl_pcie_free_tso_page(trans, skb); + iwl_pcie_free_tso_pages(trans, skb, cmd_meta); } - iwl_txq_free_tfd(trans, txq); + iwl_txq_free_tfd(trans, txq, txq->read_ptr); txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); if (txq->read_ptr == txq->write_ptr && @@ -383,7 +418,8 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) iwl_op_mode_free_skb(trans->op_mode, skb); } - spin_unlock_bh(&txq->lock); + spin_unlock(&txq->lock); + spin_unlock_bh(&txq->reclaim_lock); /* just in case - this queue may have been stopped */ iwl_trans_pcie_wake_queue(trans, txq); @@ -888,6 +924,7 @@ int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, return ret; spin_lock_init(&txq->lock); + spin_lock_init(&txq->reclaim_lock); if (cmd_queue) { static struct lock_class_key iwl_txq_cmd_queue_lock_class; @@ -1022,11 +1059,12 @@ static void iwl_txq_progress(struct iwl_txq *txq) mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); } -static inline bool iwl_txq_used(const struct iwl_txq *q, int i) +static inline bool iwl_txq_used(const struct iwl_txq *q, int i, + int read_ptr, int write_ptr) { int index = iwl_txq_get_cmd_index(q, i); - int r = iwl_txq_get_cmd_index(q, q->read_ptr); - int w = iwl_txq_get_cmd_index(q, q->write_ptr); + int r = iwl_txq_get_cmd_index(q, read_ptr); + int w = 
iwl_txq_get_cmd_index(q, write_ptr); return w >= r ? (index >= r && index < w) : @@ -1053,7 +1091,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) r = iwl_txq_get_cmd_index(txq, txq->read_ptr); if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size || - (!iwl_txq_used(txq, idx))) { + (!iwl_txq_used(txq, idx, txq->read_ptr, txq->write_ptr))) { WARN_ONCE(test_bit(txq_id, trans_pcie->txqs.queue_used), "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", __func__, txq_id, idx, @@ -1420,7 +1458,8 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, out_cmd = txq->entries[idx].cmd; out_meta = &txq->entries[idx].meta; - memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ + /* re-initialize, this also marks the SG list as unused */ + memset(out_meta, 0, sizeof(*out_meta)); if (cmd->flags & CMD_WANT_SKB) out_meta->source = cmd; @@ -1702,12 +1741,15 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, } #ifdef CONFIG_INET -struct iwl_tso_hdr_page *iwl_pcie_get_page_hdr(struct iwl_trans *trans, - size_t len, struct sk_buff *skb) +static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans, + size_t len, struct sk_buff *skb) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->txqs.tso_hdr_page); + struct iwl_tso_page_info *info; struct page **page_ptr; + dma_addr_t phys; + void *ret; page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs); @@ -1727,24 +1769,124 @@ struct iwl_tso_hdr_page *iwl_pcie_get_page_hdr(struct iwl_trans *trans, * * (see also get_workaround_page() in tx-gen2.c) */ - if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE - - sizeof(void *)) + if (((unsigned long)p->pos & ~PAGE_MASK) + len < IWL_TSO_PAGE_DATA_SIZE) { + info = IWL_TSO_PAGE_INFO(page_address(p->page)); goto out; + } /* We don't have enough room on this page, get a new one. */ - __free_page(p->page); + iwl_pcie_free_and_unmap_tso_page(trans, p->page); alloc: p->page = alloc_page(GFP_ATOMIC); if (!p->page) return NULL; p->pos = page_address(p->page); + + info = IWL_TSO_PAGE_INFO(page_address(p->page)); + /* set the chaining pointer to NULL */ - *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL; + info->next = NULL; + + /* Create a DMA mapping for the page */ + phys = dma_map_page_attrs(trans->dev, p->page, 0, PAGE_SIZE, + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + if (unlikely(dma_mapping_error(trans->dev, phys))) { + __free_page(p->page); + p->page = NULL; + + return NULL; + } + + /* Store physical address and set use count */ + info->dma_addr = phys; + refcount_set(&info->use_count, 1); out: *page_ptr = p->page; - get_page(p->page); - return p; + /* Return an internal reference for the caller */ + refcount_inc(&info->use_count); + ret = p->pos; + p->pos += len; + + return ret; +} + +/** + * iwl_pcie_get_sgt_tb_phys - Find TB address in mapped SG list + * @sgt: scatter gather table + * @addr: Virtual address + * + * Find the entry that includes the address for the given address and return + * correct physical address for the TB entry. 
+ *
+ * Returns: Address for TB entry
+ */
+dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, void *addr)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sgtable_dma_sg(sgt, sg, i) {
+		if (addr >= sg_virt(sg) &&
+		    (u8 *)addr < (u8 *)sg_virt(sg) + sg_dma_len(sg))
+			return sg_dma_address(sg) +
+			       ((unsigned long)addr - (unsigned long)sg_virt(sg));
+	}
+
+	WARN_ON_ONCE(1);
+
+	return DMA_MAPPING_ERROR;
+}
+
+/**
+ * iwl_pcie_prep_tso - Prepare TSO page and SKB for sending
+ * @trans: transport private data
+ * @skb: the SKB to map
+ * @cmd_meta: command meta to store the scatter list information for unmapping
+ * @hdr: output argument for TSO headers
+ * @hdr_room: requested length for TSO headers
+ *
+ * Allocate space for a scatter gather list and TSO headers and map the SKB
+ * using the scatter gather list. The SKB is unmapped when the page is
+ * freed at the end of the operation.
+ *
+ * Returns: newly allocated and mapped scatter gather table with its embedded
+ * scatterlist
+ */
+struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
+				   struct iwl_cmd_meta *cmd_meta,
+				   u8 **hdr, unsigned int hdr_room)
+{
+	struct sg_table *sgt;
+
+	if (WARN_ON_ONCE(skb_has_frag_list(skb)))
+		return NULL;
+
+	*hdr = iwl_pcie_get_page_hdr(trans,
+				     hdr_room + __alignof__(struct sg_table) +
+				     sizeof(struct sg_table) +
+				     (skb_shinfo(skb)->nr_frags + 1) *
+				     sizeof(struct scatterlist),
+				     skb);
+	if (!*hdr)
+		return NULL;
+
+	sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table));
+	sgt->sgl = (void *)(sgt + 1);
+
+	sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1);
+
+	sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
+	if (WARN_ON_ONCE(sgt->orig_nents <= 0))
+		return NULL;
+
+	/* And map the entire SKB */
+	if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0)
+		return NULL;
+
+	/* Store non-zero (i.e.
valid) offset for unmapping */ + cmd_meta->sg_offset = (unsigned long) sgt & ~PAGE_MASK; + + return sgt; } static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, @@ -1759,8 +1901,9 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; unsigned int mss = skb_shinfo(skb)->gso_size; u16 length, iv_len, amsdu_pad; - u8 *start_hdr; - struct iwl_tso_hdr_page *hdr_page; + dma_addr_t start_hdr_phys; + u8 *start_hdr, *pos_hdr; + struct sg_table *sgt; struct tso_t tso; /* if the packet is protected, then it must be CCMP or GCMP */ @@ -1783,13 +1926,14 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; /* Our device supports 9 segments at most, it will fit in 1 page */ - hdr_page = iwl_pcie_get_page_hdr(trans, hdr_room, skb); - if (!hdr_page) + sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room); + if (!sgt) return -ENOMEM; - start_hdr = hdr_page->pos; - memcpy(hdr_page->pos, skb->data + hdr_len, iv_len); - hdr_page->pos += iv_len; + start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr); + pos_hdr = start_hdr; + memcpy(pos_hdr, skb->data + hdr_len, iv_len); + pos_hdr += iv_len; /* * Pull the ieee80211 header + IV to be able to use TSO core, @@ -1812,45 +1956,43 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, min_t(unsigned int, mss, total_len); unsigned int hdr_tb_len; dma_addr_t hdr_tb_phys; - u8 *subf_hdrs_start = hdr_page->pos; + u8 *subf_hdrs_start = pos_hdr; total_len -= data_left; - memset(hdr_page->pos, 0, amsdu_pad); - hdr_page->pos += amsdu_pad; + memset(pos_hdr, 0, amsdu_pad); + pos_hdr += amsdu_pad; amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + data_left)) & 0x3; - ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); - hdr_page->pos += ETH_ALEN; - ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); - hdr_page->pos += ETH_ALEN; + ether_addr_copy(pos_hdr, ieee80211_get_DA(hdr)); + pos_hdr += ETH_ALEN; + ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr)); + pos_hdr += ETH_ALEN; length = snap_ip_tcp_hdrlen + data_left; - *((__be16 *)hdr_page->pos) = cpu_to_be16(length); - hdr_page->pos += sizeof(length); + *((__be16 *)pos_hdr) = cpu_to_be16(length); + pos_hdr += sizeof(length); /* * This will copy the SNAP as well which will be considered * as MAC header. 
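		 *
		 * For orientation (implied by the code below, not new
		 * behaviour), each A-MSDU subframe laid out in the header
		 * page looks like:
		 *
		 *	| pad | DA (6) | SA (6) | len (2) | SNAP/IP/TCP hdrs |
		 *
		 * with the payload itself added as separate TBs and pad
		 * chosen so every subframe starts 4-byte aligned.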
*/ - tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); + tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len); - hdr_page->pos += snap_ip_tcp_hdrlen; + pos_hdr += snap_ip_tcp_hdrlen; + + hdr_tb_len = pos_hdr - start_hdr; + hdr_tb_phys = iwl_pcie_get_tso_page_phys(start_hdr); - hdr_tb_len = hdr_page->pos - start_hdr; - hdr_tb_phys = dma_map_single(trans->dev, start_hdr, - hdr_tb_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) - return -EINVAL; iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, hdr_tb_len, false); trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, hdr_tb_phys, hdr_tb_len); /* add this subframe's headers' length to the tx_cmd */ - le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); + le16_add_cpu(&tx_cmd->len, pos_hdr - subf_hdrs_start); /* prepare the start_hdr for the next subframe */ - start_hdr = hdr_page->pos; + start_hdr = pos_hdr; /* put the payload */ while (data_left) { @@ -1858,9 +2000,9 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, data_left); dma_addr_t tb_phys; - tb_phys = dma_map_single(trans->dev, tso.data, - size, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, tso.data); + /* Not a real mapping error, use direct comparison */ + if (unlikely(tb_phys == DMA_MAPPING_ERROR)) return -EINVAL; iwl_pcie_txq_build_tfd(trans, txq, tb_phys, @@ -1873,6 +2015,9 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, } } + dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room, + DMA_TO_DEVICE); + /* re -add the WiFi header and IV */ skb_push(skb, hdr_len + iv_len); @@ -2027,7 +2172,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, /* Set up first empty entry in queue's array of Tx/cmd buffers */ out_meta = &txq->entries[txq->write_ptr].meta; - out_meta->flags = 0; + memset(out_meta, 0, sizeof(*out_meta)); /* * The second TB (tb1) points to the remainder of the TX command @@ -2144,12 +2289,12 @@ out_err: } static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, - struct iwl_txq *txq) + struct iwl_txq *txq, + int read_ptr) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr; int txq_id = txq->id; - int read_ptr = txq->read_ptr; u8 sta_id = 0; __le16 bc_ent; struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; @@ -2176,6 +2321,7 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; int tfd_num, read_ptr, last_to_free; + int txq_read_ptr, txq_write_ptr; /* This function is not meant to release cmd queue*/ if (WARN_ON(txq_id == trans_pcie->txqs.cmd.q_id)) @@ -2186,8 +2332,14 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, tfd_num = iwl_txq_get_cmd_index(txq, ssn); - spin_lock_bh(&txq->lock); - read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr); + spin_lock_bh(&txq->reclaim_lock); + + spin_lock(&txq->lock); + txq_read_ptr = txq->read_ptr; + txq_write_ptr = txq->write_ptr; + spin_unlock(&txq->lock); + + read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr); if (!test_bit(txq_id, trans_pcie->txqs.queue_used)) { IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", @@ -2199,19 +2351,19 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, goto out; 
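	/*
	 * Locking sketch (a restatement of the scheme introduced by this
	 * patch, using only names it adds): reclaim serializes against
	 * itself with reclaim_lock and takes txq->lock just long enough
	 * to snapshot the ring state, so the TX path is not blocked for
	 * the whole reclaim:
	 *
	 *	spin_lock_bh(&txq->reclaim_lock);
	 *	spin_lock(&txq->lock);
	 *	txq_read_ptr = txq->read_ptr;
	 *	txq_write_ptr = txq->write_ptr;
	 *	spin_unlock(&txq->lock);
	 *	... free TFDs/TSO pages using the snapshot ...
	 *	spin_lock(&txq->lock);
	 *	txq->read_ptr = txq_read_ptr;
	 */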
IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n",
-			   txq_id, read_ptr, txq->read_ptr, tfd_num, ssn);
+			   txq_id, read_ptr, txq_read_ptr, tfd_num, ssn);
 
 	/* Since we free until index _not_ inclusive, the one before index is
 	 * the last we will free. This one must be used */
 	last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
 
-	if (!iwl_txq_used(txq, last_to_free)) {
+	if (!iwl_txq_used(txq, last_to_free, txq_read_ptr, txq_write_ptr)) {
 		IWL_ERR(trans,
 			"%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
 			__func__, txq_id, last_to_free,
 			trans->trans_cfg->base_params->max_tfd_queue_size,
-			txq->write_ptr, txq->read_ptr);
+			txq_write_ptr, txq_read_ptr);
 
 		iwl_op_mode_time_point(trans->op_mode,
 				       IWL_FW_INI_TIME_POINT_FAKE_TX,
@@ -2224,26 +2376,31 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
 	for (;
 	     read_ptr != tfd_num;
-	     txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
-	     read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
+	     txq_read_ptr = iwl_txq_inc_wrap(trans, txq_read_ptr),
+	     read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr)) {
+		struct iwl_cmd_meta *cmd_meta = &txq->entries[read_ptr].meta;
 		struct sk_buff *skb = txq->entries[read_ptr].skb;
 
 		if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n",
-			      read_ptr, txq->read_ptr, txq_id))
+			      read_ptr, txq_read_ptr, txq_id))
 			continue;
 
-		iwl_pcie_free_tso_page(trans, skb);
+		iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
 
 		__skb_queue_tail(skbs, skb);
 
 		txq->entries[read_ptr].skb = NULL;
 
 		if (!trans->trans_cfg->gen2)
-			iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);
+			iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq,
+							txq_read_ptr);
 
-		iwl_txq_free_tfd(trans, txq);
+		iwl_txq_free_tfd(trans, txq, txq_read_ptr);
 	}
 
+	spin_lock(&txq->lock);
+	txq->read_ptr = txq_read_ptr;
+
 	iwl_txq_progress(txq);
 
 	if (iwl_txq_space(trans, txq) > txq->low_mark &&
@@ -2265,13 +2422,12 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 		txq->overflow_tx = true;
 
 		/*
-		 * This is tricky: we are in reclaim path which is non
-		 * re-entrant, so noone will try to take the access the
-		 * txq data from that path. We stopped tx, so we can't
-		 * have tx as well. Bottom line, we can unlock and re-lock
-		 * later.
+		 * This is tricky: we are in reclaim path and are holding
+		 * reclaim_lock, so no one will try to access the txq data
+		 * from that path. We stopped tx, so we can't have tx as well.
+		 * Bottom line, we can unlock and re-lock later.
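+		 *
+		 * Concretely: reclaim_lock stays held across the requeue of
+		 * the overflow frames below, and txq->lock can be dropped
+		 * because the TX path (iwl_trans_pcie_tx(), called below)
+		 * takes it again itself for each frame it re-enqueues.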
*/ - spin_unlock_bh(&txq->lock); + spin_unlock(&txq->lock); while ((skb = __skb_dequeue(&overflow_skbs))) { struct iwl_device_tx_cmd *dev_cmd_ptr; @@ -2290,12 +2446,13 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, if (iwl_txq_space(trans, txq) > txq->low_mark) iwl_trans_pcie_wake_queue(trans, txq); - spin_lock_bh(&txq->lock); + spin_lock(&txq->lock); txq->overflow_tx = false; } + spin_unlock(&txq->lock); out: - spin_unlock_bh(&txq->lock); + spin_unlock_bh(&txq->reclaim_lock); } /* Set wr_ptr of specific device and txq */ diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 175882485a19..c5164ae41b54 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1287,6 +1287,9 @@ mwifiex_get_priv_by_id(struct mwifiex_adapter *adapter, for (i = 0; i < adapter->priv_num; i++) { if (adapter->priv[i]) { + if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED) + continue; + if ((adapter->priv[i]->bss_num == bss_num) && (adapter->priv[i]->bss_type == bss_type)) break; diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index e8ba2e4e8484..bb291fe314fb 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -1125,6 +1125,11 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb, memcpy(status->chain_signal, mstat.chain_signal, sizeof(mstat.chain_signal)); + if (mstat.wcid) { + status->link_valid = mstat.wcid->link_valid; + status->link_id = mstat.wcid->link_id; + } + *sta = wcid_to_sta(mstat.wcid); *hw = mt76_phy_hw(dev, mstat.phy_idx); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 15f83b5adac7..4a58a78d5ed2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -349,6 +349,8 @@ struct mt76_wcid { u8 sta:1; u8 amsdu:1; u8 phy_idx:2; + u8 link_id:4; + bool link_valid; u8 rx_check_pn; u8 rx_key_pn[IEEE80211_NUM_TIDS + 1][6]; @@ -366,6 +368,8 @@ struct mt76_wcid { struct mt76_sta_stats stats; struct list_head poll_list; + + struct mt76_wcid *def_wcid; }; struct mt76_txq { @@ -1081,6 +1085,7 @@ bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val, void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs); void mt76_pci_disable_aspm(struct pci_dev *pdev); +bool mt76_pci_aspm_supported(struct pci_dev *pdev); static inline u16 mt76_chip(struct mt76_dev *dev) { @@ -1256,6 +1261,9 @@ wcid_to_sta(struct mt76_wcid *wcid) if (!wcid || !wcid->sta) return NULL; + if (wcid->def_wcid) + ptr = wcid->def_wcid; + return container_of(ptr, struct ieee80211_sta, drv_priv); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index c807bd8d928d..d50d967828be 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -842,6 +842,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif, { struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; struct sk_buff *skb, *sskb, *wskb = NULL; + struct ieee80211_link_sta *link_sta; struct mt7615_dev *dev = phy->dev; struct wtbl_req_hdr *wtbl_hdr; struct mt7615_sta *msta; @@ -849,6 +850,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif, int cmd, err; msta = sta ? 
(struct mt7615_sta *)sta->drv_priv : &mvif->sta; + link_sta = sta ? &sta->deflink : NULL; sskb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, &msta->wcid); @@ -861,8 +863,8 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif, else mvif->sta_added = true; } - mt76_connac_mcu_sta_basic_tlv(&dev->mt76, sskb, vif, sta, enable, - new_entry); + mt76_connac_mcu_sta_basic_tlv(&dev->mt76, sskb, vif, link_sta, + enable, new_entry); if (enable && sta) mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0, MT76_STA_INFO_STATE_ASSOC); @@ -1109,8 +1111,8 @@ mt7615_mcu_uni_add_dev(struct mt7615_phy *phy, struct ieee80211_vif *vif, { struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; - return mt76_connac_mcu_uni_add_dev(phy->mt76, vif, &mvif->sta.wcid, - enable); + return mt76_connac_mcu_uni_add_dev(phy->mt76, &vif->bss_conf, + &mvif->sta.wcid, enable); } static int diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index 162c57fb7954..4dce03ddbfa4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -370,7 +370,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_omac_tlv); void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ieee80211_link_sta *link_sta, bool enable, bool newly) { struct sta_rec_basic *basic; @@ -390,7 +390,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb, basic->conn_state = CONN_STATE_DISCONNECT; } - if (!sta) { + if (!link_sta) { basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC); if (vif->type == NL80211_IFTYPE_STATION && @@ -411,7 +411,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb, else conn_type = CONNECTION_INFRA_STA; basic->conn_type = cpu_to_le32(conn_type); - basic->aid = cpu_to_le16(sta->aid); + basic->aid = cpu_to_le16(link_sta->sta->aid); break; case NL80211_IFTYPE_STATION: if (vif->p2p && !is_mt7921(dev)) @@ -423,15 +423,15 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb, break; case NL80211_IFTYPE_ADHOC: basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC); - basic->aid = cpu_to_le16(sta->aid); + basic->aid = cpu_to_le16(link_sta->sta->aid); break; default: WARN_ON(1); break; } - memcpy(basic->peer_addr, sta->addr, ETH_ALEN); - basic->qos = sta->wme; + memcpy(basic->peer_addr, link_sta->addr, ETH_ALEN); + basic->qos = link_sta->sta->wme; } EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_basic_tlv); @@ -793,7 +793,8 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_he_tlv_v2); u8 mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif, - enum nl80211_band band, struct ieee80211_sta *sta) + enum nl80211_band band, + struct ieee80211_link_sta *link_sta) { struct ieee80211_sta_ht_cap *ht_cap; struct ieee80211_sta_vht_cap *vht_cap; @@ -801,11 +802,11 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif, const struct ieee80211_sta_eht_cap *eht_cap; u8 mode = 0; - if (sta) { - ht_cap = &sta->deflink.ht_cap; - vht_cap = &sta->deflink.vht_cap; - he_cap = &sta->deflink.he_cap; - eht_cap = &sta->deflink.eht_cap; + if (link_sta) { + ht_cap = &link_sta->ht_cap; + vht_cap = &link_sta->vht_cap; + he_cap = &link_sta->he_cap; + eht_cap = &link_sta->eht_cap; } else { struct ieee80211_supported_band *sband; @@ -911,7 +912,8 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff 
*skb, tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy)); phy = (struct sta_rec_phy *)tlv; - phy->phy_type = mt76_connac_get_phy_mode_v2(mphy, vif, band, sta); + phy->phy_type = mt76_connac_get_phy_mode_v2(mphy, vif, band, + &sta->deflink); phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates); phy->rcpi = rcpi; phy->ampdu = FIELD_PREP(IEEE80211_HT_AMPDU_PARM_FACTOR, @@ -1044,6 +1046,7 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy, struct mt76_sta_cmd_info *info) { struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv; + struct ieee80211_link_sta *link_sta; struct mt76_dev *dev = phy->dev; struct wtbl_req_hdr *wtbl_hdr; struct tlv *sta_wtbl; @@ -1053,9 +1056,11 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy, if (IS_ERR(skb)) return PTR_ERR(skb); + link_sta = info->sta ? &info->sta->deflink : NULL; if (info->sta || !info->offload_fw) - mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif, info->sta, - info->enable, info->newly); + mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif, + link_sta, info->enable, + info->newly); if (info->sta && info->enable) mt76_connac_mcu_sta_tlv(phy, skb, info->sta, info->vif, info->rcpi, @@ -1132,11 +1137,11 @@ void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb, EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ba_tlv); int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy, - struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, struct mt76_wcid *wcid, bool enable) { - struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct mt76_vif *mvif = (struct mt76_vif *)bss_conf->vif->drv_priv; struct mt76_dev *dev = phy->dev; struct { struct { @@ -1148,7 +1153,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy, __le16 tag; __le16 len; u8 active; - u8 pad; + u8 link_idx; /* not link_id */ u8 omac_addr[ETH_ALEN]; } __packed tlv; } dev_req = { @@ -1160,6 +1165,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy, .tag = cpu_to_le16(DEV_INFO_ACTIVE), .len = cpu_to_le16(sizeof(struct req_tlv)), .active = enable, + .link_idx = mvif->idx, }, }; struct { @@ -1182,12 +1188,13 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy, .bmc_tx_wlan_idx = cpu_to_le16(wcid->idx), .sta_idx = cpu_to_le16(wcid->idx), .conn_state = 1, + .link_idx = mvif->idx, }, }; int err, idx, cmd, len; void *data; - switch (vif->type) { + switch (bss_conf->vif->type) { case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_AP: @@ -1207,7 +1214,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy, idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx; basic_req.basic.hw_bss_idx = idx; - memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN); + memcpy(dev_req.tlv.omac_addr, bss_conf->addr, ETH_ALEN); cmd = enable ? MCU_UNI_CMD(DEV_INFO_UPDATE) : MCU_UNI_CMD(BSS_INFO_UPDATE); data = enable ? 
(void *)&dev_req : (void *)&basic_req; @@ -1305,7 +1312,8 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif, EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba); u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif, - enum nl80211_band band, struct ieee80211_sta *sta) + enum nl80211_band band, + struct ieee80211_link_sta *link_sta) { struct mt76_dev *dev = phy->dev; const struct ieee80211_sta_he_cap *he_cap; @@ -1316,10 +1324,10 @@ u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif, if (is_connac_v1(dev)) return 0x38; - if (sta) { - ht_cap = &sta->deflink.ht_cap; - vht_cap = &sta->deflink.vht_cap; - he_cap = &sta->deflink.he_cap; + if (link_sta) { + ht_cap = &link_sta->ht_cap; + vht_cap = &link_sta->vht_cap; + he_cap = &link_sta->he_cap; } else { struct ieee80211_supported_band *sband; diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h index 6873ce14d299..4242d436de26 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h @@ -545,6 +545,13 @@ struct sta_rec_muru { } mimo_ul; } __packed; +struct sta_rec_remove { + __le16 tag; + __le16 len; + u8 action; + u8 pad[3]; +} __packed; + struct sta_phy { u8 type; u8 flag; @@ -813,7 +820,10 @@ enum { STA_REC_HE_6G = 0x17, STA_REC_HE_V2 = 0x19, STA_REC_MLD = 0x20, + STA_REC_EHT_MLD = 0x21, STA_REC_EHT = 0x22, + STA_REC_MLD_OFF = 0x23, + STA_REC_REMOVE = 0x25, STA_REC_PN_INFO = 0x26, STA_REC_KEY_V3 = 0x27, STA_REC_HDRT = 0x28, @@ -1392,6 +1402,7 @@ enum { MT_NIC_CAP_WFDMA_REALLOC, MT_NIC_CAP_6G, MT_NIC_CAP_CHIP_CAP = 0x20, + MT_NIC_CAP_EML_CAP = 0x22, }; #define UNI_WOW_DETECT_TYPE_MAGIC BIT(0) @@ -1443,7 +1454,7 @@ struct mt76_connac_bss_basic_tlv { __le16 sta_idx; __le16 nonht_basic_phy; u8 phymode_ext; /* bit(0) AX_6G */ - u8 pad[1]; + u8 link_idx; } __packed; struct mt76_connac_bss_qos_tlv { @@ -1733,7 +1744,10 @@ enum mt76_sta_info_state { }; struct mt76_sta_cmd_info { - struct ieee80211_sta *sta; + union { + struct ieee80211_sta *sta; + struct ieee80211_link_sta *link_sta; + }; struct mt76_wcid *wcid; struct ieee80211_vif *vif; @@ -1883,8 +1897,8 @@ int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy); int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif); void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, bool enable, - bool newly); + struct ieee80211_link_sta *link_sta, + bool enable, bool newly); void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev, struct sk_buff *skb, struct ieee80211_vif *vif, struct ieee80211_sta *sta, void *sta_wtbl, @@ -1898,7 +1912,8 @@ int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev, struct mt76_wcid *wcid, int cmd); void mt76_connac_mcu_sta_he_tlv_v2(struct sk_buff *skb, struct ieee80211_sta *sta); u8 mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif, - enum nl80211_band band, struct ieee80211_sta *sta); + enum nl80211_band band, + struct ieee80211_link_sta *link_sta); int mt76_connac_mcu_wtbl_update_hdr_trans(struct mt76_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); @@ -1917,7 +1932,7 @@ void mt76_connac_mcu_sta_ba_tlv(struct sk_buff *skb, struct ieee80211_ampdu_params *params, bool enable, bool tx); int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy, - struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, struct mt76_wcid 
*wcid, bool enable); int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif, @@ -1992,7 +2007,8 @@ mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif); const struct ieee80211_sta_eht_cap * mt76_connac_get_eht_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif); u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif, - enum nl80211_band band, struct ieee80211_sta *sta); + enum nl80211_band band, + struct ieee80211_link_sta *sta); u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif, enum nl80211_band band); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 9599adf104b1..2185cd24e2e1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -1503,7 +1503,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev, ra->valid = true; ra->auto_rate = true; - ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, sta); + ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, &sta->deflink); ra->channel = chandef->chan->hw_value; ra->bw = sta->deflink.bandwidth; ra->phy.bw = sta->deflink.bandwidth; @@ -1656,11 +1656,13 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool enable) { struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct ieee80211_link_sta *link_sta; struct mt7915_sta *msta; struct sk_buff *skb; int ret; msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta; + link_sta = sta ? &sta->deflink : NULL; skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, &msta->wcid); @@ -1668,7 +1670,7 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif, return PTR_ERR(skb); /* starec basic */ - mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, sta, enable, + mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta, enable, !rcu_access_pointer(dev->mt76.wcid[msta->wcid.idx])); if (!enable) goto out; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c index 73e42ef42983..047106b65d2b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c @@ -39,6 +39,7 @@ static void mt7921_mac_sta_poll(struct mt792x_dev *dev) }; struct ieee80211_sta *sta; struct mt792x_sta *msta; + struct mt792x_link_sta *mlink; u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS]; LIST_HEAD(sta_poll_list); struct rate_info *rate; @@ -60,23 +61,25 @@ static void mt7921_mac_sta_poll(struct mt792x_dev *dev) spin_unlock_bh(&dev->mt76.sta_poll_lock); break; } - msta = list_first_entry(&sta_poll_list, - struct mt792x_sta, wcid.poll_list); - list_del_init(&msta->wcid.poll_list); + mlink = list_first_entry(&sta_poll_list, + struct mt792x_link_sta, + wcid.poll_list); + msta = container_of(mlink, struct mt792x_sta, deflink); + list_del_init(&mlink->wcid.poll_list); spin_unlock_bh(&dev->mt76.sta_poll_lock); - idx = msta->wcid.idx; + idx = mlink->wcid.idx; addr = mt7921_mac_wtbl_lmac_addr(idx, MT_WTBL_AC0_CTT_OFFSET); for (i = 0; i < IEEE80211_NUM_ACS; i++) { - u32 tx_last = msta->airtime_ac[i]; - u32 rx_last = msta->airtime_ac[i + 4]; + u32 tx_last = mlink->airtime_ac[i]; + u32 rx_last = mlink->airtime_ac[i + 4]; - msta->airtime_ac[i] = mt76_rr(dev, addr); - msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); + mlink->airtime_ac[i] = mt76_rr(dev, addr); + 
mlink->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); - tx_time[i] = msta->airtime_ac[i] - tx_last; - rx_time[i] = msta->airtime_ac[i + 4] - rx_last; + tx_time[i] = mlink->airtime_ac[i] - tx_last; + rx_time[i] = mlink->airtime_ac[i + 4] - rx_last; if ((tx_last | rx_last) & BIT(30)) clear = true; @@ -87,10 +90,10 @@ static void mt7921_mac_sta_poll(struct mt792x_dev *dev) if (clear) { mt7921_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); + memset(mlink->airtime_ac, 0, sizeof(mlink->airtime_ac)); } - if (!msta->wcid.sta) + if (!mlink->wcid.sta) continue; sta = container_of((void *)msta, struct ieee80211_sta, @@ -113,7 +116,7 @@ static void mt7921_mac_sta_poll(struct mt792x_dev *dev) * we need to make sure that flags match so polling GI * from per-sta counters directly. */ - rate = &msta->wcid.rate; + rate = &mlink->wcid.rate; addr = mt7921_mac_wtbl_lmac_addr(idx, MT_WTBL_TXRX_CAP_RATE_OFFSET); val = mt76_rr(dev, addr); @@ -154,10 +157,10 @@ static void mt7921_mac_sta_poll(struct mt792x_dev *dev) rssi[2] = to_rssi(GENMASK(23, 16), val); rssi[3] = to_rssi(GENMASK(31, 14), val); - msta->ack_signal = + mlink->ack_signal = mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi); - ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal); + ewma_avg_signal_add(&mlink->avg_ack_signal, -mlink->ack_signal); } } @@ -180,6 +183,7 @@ mt7921_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb) u32 rxd3 = le32_to_cpu(rxd[3]); u32 rxd4 = le32_to_cpu(rxd[4]); struct mt792x_sta *msta = NULL; + struct mt792x_link_sta *mlink; u16 seq_ctrl = 0; __le16 fc = 0; u8 mode = 0; @@ -210,10 +214,11 @@ mt7921_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb) status->wcid = mt792x_rx_get_wcid(dev, idx, unicast); if (status->wcid) { - msta = container_of(status->wcid, struct mt792x_sta, wcid); + mlink = container_of(status->wcid, struct mt792x_link_sta, wcid); + msta = container_of(mlink, struct mt792x_sta, deflink); spin_lock_bh(&dev->mt76.sta_poll_lock); - if (list_empty(&msta->wcid.poll_list)) - list_add_tail(&msta->wcid.poll_list, + if (list_empty(&mlink->wcid.poll_list)) + list_add_tail(&mlink->wcid.poll_list, &dev->mt76.sta_poll_list); spin_unlock_bh(&dev->mt76.sta_poll_lock); } @@ -444,7 +449,7 @@ mt7921_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb) void mt7921_mac_add_txs(struct mt792x_dev *dev, void *data) { - struct mt792x_sta *msta = NULL; + struct mt792x_link_sta *mlink; struct mt76_wcid *wcid; __le32 *txs_data = data; u16 wcidx; @@ -468,15 +473,15 @@ void mt7921_mac_add_txs(struct mt792x_dev *dev, void *data) if (!wcid) goto out; - msta = container_of(wcid, struct mt792x_sta, wcid); + mlink = container_of(wcid, struct mt792x_link_sta, wcid); mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data); if (!wcid->sta) goto out; spin_lock_bh(&dev->mt76.sta_poll_lock); - if (list_empty(&msta->wcid.poll_list)) - list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list); + if (list_empty(&mlink->wcid.poll_list)) + list_add_tail(&mlink->wcid.poll_list, &dev->mt76.sta_poll_list); spin_unlock_bh(&dev->mt76.sta_poll_lock); out: @@ -513,7 +518,7 @@ static void mt7921_mac_tx_free(struct mt792x_dev *dev, void *data, int len) * 1'b0: msdu_id with the same 'wcid pair' as above. 
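		 *
		 * In other words, a "pair" word identifies the (queue, wcid)
		 * the report refers to, and each following non-pair word
		 * releases msdu_ids for that same wcid.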
*/ if (info & MT_TX_FREE_PAIR) { - struct mt792x_sta *msta; + struct mt792x_link_sta *mlink; u16 idx; count++; @@ -523,10 +528,10 @@ static void mt7921_mac_tx_free(struct mt792x_dev *dev, void *data, int len) if (!sta) continue; - msta = container_of(wcid, struct mt792x_sta, wcid); + mlink = container_of(wcid, struct mt792x_link_sta, wcid); spin_lock_bh(&mdev->sta_poll_lock); - if (list_empty(&msta->wcid.poll_list)) - list_add_tail(&msta->wcid.poll_list, + if (list_empty(&mlink->wcid.poll_list)) + list_add_tail(&mlink->wcid.poll_list, &mdev->sta_poll_list); spin_unlock_bh(&mdev->sta_poll_lock); continue; @@ -641,11 +646,12 @@ mt7921_vif_connect_iter(void *priv, u8 *mac, if (vif->type == NL80211_IFTYPE_STATION) ieee80211_disconnect(vif, true); - mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true); + mt76_connac_mcu_uni_add_dev(&dev->mphy, &vif->bss_conf, + &mvif->sta.deflink.wcid, true); mt7921_mcu_set_tx(dev, vif); if (vif->type == NL80211_IFTYPE_AP) { - mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.wcid, + mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.deflink.wcid, true, NULL); mt7921_mcu_sta_update(dev, NULL, vif, true, MT76_STA_INFO_STATE_NONE); @@ -786,9 +792,9 @@ int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (sta) { struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; - if (time_after(jiffies, msta->last_txs + HZ / 4)) { + if (time_after(jiffies, msta->deflink.last_txs + HZ / 4)) { info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; - msta->last_txs = jiffies; + msta->deflink.last_txs = jiffies; } } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 4f30426afbb7..2e6268cb06c0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -295,40 +295,40 @@ mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) mt792x_mutex_acquire(dev); - mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask); - if (mvif->mt76.idx >= MT792x_MAX_INTERFACES) { + mvif->bss_conf.mt76.idx = __ffs64(~dev->mt76.vif_mask); + if (mvif->bss_conf.mt76.idx >= MT792x_MAX_INTERFACES) { ret = -ENOSPC; goto out; } - mvif->mt76.omac_idx = mvif->mt76.idx; + mvif->bss_conf.mt76.omac_idx = mvif->bss_conf.mt76.idx; mvif->phy = phy; - mvif->mt76.band_idx = 0; - mvif->mt76.wmm_idx = mvif->mt76.idx % MT76_CONNAC_MAX_WMM_SETS; + mvif->bss_conf.mt76.band_idx = 0; + mvif->bss_conf.mt76.wmm_idx = mvif->bss_conf.mt76.idx % MT76_CONNAC_MAX_WMM_SETS; - ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, - true); + ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, &vif->bss_conf, + &mvif->sta.deflink.wcid, true); if (ret) goto out; - dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx); - phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); + dev->mt76.vif_mask |= BIT_ULL(mvif->bss_conf.mt76.idx); + phy->omac_mask |= BIT_ULL(mvif->bss_conf.mt76.omac_idx); - idx = MT792x_WTBL_RESERVED - mvif->mt76.idx; + idx = MT792x_WTBL_RESERVED - mvif->bss_conf.mt76.idx; - INIT_LIST_HEAD(&mvif->sta.wcid.poll_list); - mvif->sta.wcid.idx = idx; - mvif->sta.wcid.phy_idx = mvif->mt76.band_idx; - mvif->sta.wcid.hw_key_idx = -1; - mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET; - mt76_wcid_init(&mvif->sta.wcid); + INIT_LIST_HEAD(&mvif->sta.deflink.wcid.poll_list); + mvif->sta.deflink.wcid.idx = idx; + mvif->sta.deflink.wcid.phy_idx = mvif->bss_conf.mt76.band_idx; + mvif->sta.deflink.wcid.hw_key_idx = -1; + mvif->sta.deflink.wcid.tx_info |= 
MT_WCID_TX_INFO_SET; + mt76_wcid_init(&mvif->sta.deflink.wcid); mt7921_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - ewma_rssi_init(&mvif->rssi); + ewma_rssi_init(&mvif->bss_conf.rssi); - rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid); + rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.deflink.wcid); if (vif->txq) { mtxq = (struct mt76_txq *)vif->txq->drv_priv; mtxq->wcid = idx; @@ -494,7 +494,7 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv : &mvif->sta; - struct mt76_wcid *wcid = &msta->wcid; + struct mt76_wcid *wcid = &msta->deflink.wcid; u8 *wcid_keyidx = &wcid->hw_key_idx; int idx = key->keyidx, err = 0; @@ -541,18 +541,18 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, } mt76_wcid_key_setup(&dev->mt76, wcid, key); - err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip, + err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->deflink.bip, key, MCU_UNI_CMD(STA_REC_UPDATE), - &msta->wcid, cmd); + &msta->deflink.wcid, cmd); if (err) goto out; if (key->cipher == WLAN_CIPHER_SUITE_WEP104 || key->cipher == WLAN_CIPHER_SUITE_WEP40) err = mt76_connac_mcu_add_key(&dev->mt76, vif, - &mvif->wep_sta->bip, + &mvif->wep_sta->deflink.bip, key, MCU_UNI_CMD(STA_REC_UPDATE), - &mvif->wep_sta->wcid, cmd); + &mvif->wep_sta->deflink.wcid, cmd); out: mt792x_mutex_release(dev); @@ -718,7 +718,7 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_ARP_FILTER) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - mt76_connac_mcu_update_arp_filter(&dev->mt76, &mvif->mt76, + mt76_connac_mcu_update_arp_filter(&dev->mt76, &mvif->bss_conf.mt76, info); } @@ -799,13 +799,13 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, if (idx < 0) return -ENOSPC; - INIT_LIST_HEAD(&msta->wcid.poll_list); + INIT_LIST_HEAD(&msta->deflink.wcid.poll_list); msta->vif = mvif; - msta->wcid.sta = 1; - msta->wcid.idx = idx; - msta->wcid.phy_idx = mvif->mt76.band_idx; - msta->wcid.tx_info |= MT_WCID_TX_INFO_SET; - msta->last_txs = jiffies; + msta->deflink.wcid.sta = 1; + msta->deflink.wcid.idx = idx; + msta->deflink.wcid.phy_idx = mvif->bss_conf.mt76.band_idx; + msta->deflink.wcid.tx_info |= MT_WCID_TX_INFO_SET; + msta->deflink.last_txs = jiffies; ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm); if (ret) @@ -840,14 +840,14 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, mt792x_mutex_acquire(dev); if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) - mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid, - true, mvif->mt76.ctx); + mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.deflink.wcid, + true, mvif->bss_conf.mt76.ctx); - ewma_avg_signal_init(&msta->avg_ack_signal); + ewma_avg_signal_init(&msta->deflink.avg_ack_signal); - mt7921_mac_wtbl_update(dev, msta->wcid.idx, + mt7921_mac_wtbl_update(dev, msta->deflink.wcid.idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); + memset(msta->deflink.airtime_ac, 0, sizeof(msta->deflink.airtime_ac)); mt7921_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC); @@ -861,27 +861,27 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; - 
mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); + mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->deflink.wcid); mt76_connac_pm_wake(&dev->mphy, &dev->pm); mt7921_mcu_sta_update(dev, sta, vif, false, MT76_STA_INFO_STATE_NONE); - mt7921_mac_wtbl_update(dev, msta->wcid.idx, + mt7921_mac_wtbl_update(dev, msta->deflink.wcid.idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); if (vif->type == NL80211_IFTYPE_STATION) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; mvif->wep_sta = NULL; - ewma_rssi_init(&mvif->rssi); + ewma_rssi_init(&mvif->bss_conf.rssi); if (!sta->tdls) mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, - &mvif->sta.wcid, false, - mvif->mt76.ctx); + &mvif->sta.deflink.wcid, false, + mvif->bss_conf.mt76.ctx); } spin_lock_bh(&dev->mt76.sta_poll_lock); - if (!list_empty(&msta->wcid.poll_list)) - list_del_init(&msta->wcid.poll_list); + if (!list_empty(&msta->deflink.wcid.poll_list)) + list_del_init(&msta->deflink.wcid.poll_list); spin_unlock_bh(&dev->mt76.sta_poll_lock); mt7921_regd_set_6ghz_power_type(vif, false); @@ -923,12 +923,12 @@ mt7921_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mt792x_mutex_acquire(dev); switch (action) { case IEEE80211_AMPDU_RX_START: - mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, + mt76_rx_aggr_start(&dev->mt76, &msta->deflink.wcid, tid, ssn, params->buf_size); mt7921_mcu_uni_rx_ba(dev, params, true); break; case IEEE80211_AMPDU_RX_STOP: - mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid); + mt76_rx_aggr_stop(&dev->mt76, &msta->deflink.wcid, tid); mt7921_mcu_uni_rx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_OPERATIONAL: @@ -939,16 +939,16 @@ mt7921_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: mtxq->aggr = false; - clear_bit(tid, &msta->wcid.ampdu_state); + clear_bit(tid, &msta->deflink.wcid.ampdu_state); mt7921_mcu_uni_tx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_START: - set_bit(tid, &msta->wcid.ampdu_state); + set_bit(tid, &msta->deflink.wcid.ampdu_state); ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; break; case IEEE80211_AMPDU_TX_STOP_CONT: mtxq->aggr = false; - clear_bit(tid, &msta->wcid.ampdu_state); + clear_bit(tid, &msta->deflink.wcid.ampdu_state); mt7921_mcu_uni_tx_ba(dev, params, false); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; @@ -1166,11 +1166,11 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw, mt792x_mutex_acquire(dev); if (enabled) - set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->deflink.wcid.flags); else - clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->deflink.wcid.flags); - mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, vif, &msta->wcid, + mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, vif, &msta->deflink.wcid, MCU_UNI_CMD(STA_REC_UPDATE)); mt792x_mutex_release(dev); @@ -1196,7 +1196,7 @@ static void mt7921_ipv6_addr_change(struct ieee80211_hw *hw, struct mt76_connac_arpns_tlv arpns; } req_hdr = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, }, .arpns = { .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND), @@ -1294,8 +1294,8 @@ mt7921_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mt792x_mutex_acquire(dev); - err = mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, - true, mvif->mt76.ctx); + err = mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.deflink.wcid, + true, mvif->bss_conf.mt76.ctx); if 
(err) goto out; @@ -1326,8 +1326,8 @@ mt7921_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (err) goto out; - mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, false, - mvif->mt76.ctx); + mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.deflink.wcid, false, + mvif->bss_conf.mt76.ctx); out: mt792x_mutex_release(dev); @@ -1346,32 +1346,27 @@ mt7921_remove_chanctx(struct ieee80211_hw *hw, { } -static void mt7921_ctx_iter(void *priv, u8 *mac, - struct ieee80211_vif *vif) -{ - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - struct ieee80211_chanctx_conf *ctx = priv; - - if (ctx != mvif->mt76.ctx) - return; - - if (vif->type == NL80211_IFTYPE_MONITOR) - mt7921_mcu_config_sniffer(mvif, ctx); - else - mt76_connac_mcu_uni_set_chctx(mvif->phy->mt76, &mvif->mt76, ctx); -} - static void mt7921_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx, u32 changed) { + struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv; struct mt792x_phy *phy = mt792x_hw_phy(hw); + struct ieee80211_vif *vif; + struct mt792x_vif *mvif; + + if (!mctx->bss_conf) + return; + + mvif = container_of(mctx->bss_conf, struct mt792x_vif, bss_conf); + vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv); mt792x_mutex_acquire(phy->dev); - ieee80211_iterate_active_interfaces(phy->mt76->hw, - IEEE80211_IFACE_ITER_ACTIVE, - mt7921_ctx_iter, ctx); + if (vif->type == NL80211_IFTYPE_MONITOR) + mt7921_mcu_config_sniffer(mvif, ctx); + else + mt76_connac_mcu_uni_set_chctx(mvif->phy->mt76, &mvif->bss_conf.mt76, ctx); mt792x_mutex_release(phy->dev); } @@ -1385,7 +1380,7 @@ static void mt7921_mgd_prepare_tx(struct ieee80211_hw *hw, jiffies_to_msecs(HZ); mt792x_mutex_acquire(dev); - mt7921_set_roc(mvif->phy, mvif, mvif->mt76.ctx->def.chan, duration, + mt7921_set_roc(mvif->phy, mvif, mvif->bss_conf.mt76.ctx->def.chan, duration, MT7921_ROC_REQ_JOIN); mt792x_mutex_release(dev); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c index bdd8b5f19b24..394fcd799345 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c @@ -105,7 +105,7 @@ mt7921_mcu_set_ipv6_ns_filter(struct mt76_dev *dev, struct mt76_connac_arpns_tlv arpns; } req = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, }, .arpns = { .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND), @@ -260,7 +260,7 @@ mt7921_mcu_rssi_monitor_iter(void *priv, u8 *mac, struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt76_connac_rssi_notify_event *event = priv; enum nl80211_cqm_rssi_threshold_event nl_event; - s32 rssi = le32_to_cpu(event->rssi[mvif->mt76.idx]); + s32 rssi = le32_to_cpu(event->rssi[mvif->bss_conf.mt76.idx]); if (!rssi) return; @@ -386,9 +386,9 @@ int mt7921_mcu_uni_tx_ba(struct mt792x_dev *dev, struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv; if (enable && !params->amsdu) - msta->wcid.amsdu = false; + msta->deflink.wcid.amsdu = false; - return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params, + return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->bss_conf.mt76, params, MCU_UNI_CMD(STA_REC_UPDATE), enable, true); } @@ -399,7 +399,7 @@ int mt7921_mcu_uni_rx_ba(struct mt792x_dev *dev, { struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv; - return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params, + return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->bss_conf.mt76, params, 
MCU_UNI_CMD(STA_REC_UPDATE), enable, false); } @@ -678,9 +678,9 @@ int mt7921_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif) u8 wmm_idx; u8 pad; } __packed req = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, .qos = vif->bss_conf.qos, - .wmm_idx = mvif->mt76.wmm_idx, + .wmm_idx = mvif->bss_conf.mt76.wmm_idx, }; struct mu_edca { u8 cw_min; @@ -701,15 +701,15 @@ int mt7921_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif) struct mu_edca edca[IEEE80211_NUM_ACS]; u8 pad3[32]; } __packed req_mu = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, .qos = vif->bss_conf.qos, - .wmm_idx = mvif->mt76.wmm_idx, + .wmm_idx = mvif->bss_conf.mt76.wmm_idx, }; static const int to_aci[] = { 1, 0, 2, 3 }; int ac, ret; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac]; + struct ieee80211_tx_queue_params *q = &mvif->bss_conf.queue_params[ac]; struct edca *e = &req.edca[to_aci[ac]]; e->aifs = cpu_to_le16(q->aifs); @@ -738,10 +738,10 @@ int mt7921_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif) struct ieee80211_he_mu_edca_param_ac_rec *q; struct mu_edca *e; - if (!mvif->queue_params[ac].mu_edca) + if (!mvif->bss_conf.queue_params[ac].mu_edca) break; - q = &mvif->queue_params[ac].mu_edca_param_rec; + q = &mvif->bss_conf.queue_params[ac].mu_edca_param_rec; e = &(req_mu.edca[to_aci[ac]]); e->cw_min = q->ecw_min_max & 0xf; @@ -790,7 +790,7 @@ int mt7921_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, .tokenid = token_id, .reqtype = type, .maxinterval = cpu_to_le32(duration), - .bss_idx = vif->mt76.idx, + .bss_idx = vif->bss_conf.mt76.idx, .control_channel = chan->hw_value, .bw = CMD_CBW_20MHZ, .bw_from_ap = CMD_CBW_20MHZ, @@ -842,7 +842,7 @@ int mt7921_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, .tag = cpu_to_le16(UNI_ROC_ABORT), .len = cpu_to_le16(sizeof(struct roc_abort_tlv)), .tokenid = token_id, - .bss_idx = vif->mt76.idx, + .bss_idx = vif->bss_conf.mt76.idx, .dbdcband = 0xff, /* auto*/ }, }; @@ -947,7 +947,7 @@ int mt7921_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif) } __packed ps; } __packed ps_req = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, }, .ps = { .tag = cpu_to_le16(UNI_BSS_INFO_PS), @@ -982,7 +982,7 @@ mt7921_mcu_uni_bss_bcnft(struct mt792x_dev *dev, struct ieee80211_vif *vif, } __packed bcnft; } __packed bcnft_req = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, }, .bcnft = { .tag = cpu_to_le16(UNI_BSS_INFO_BCNFT), @@ -1015,7 +1015,7 @@ mt7921_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif, u8 bmc_triggered_ac; u8 pad; } req = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, .aid = cpu_to_le16(vif->cfg.aid), .dtim_period = vif->bss_conf.dtim_period, .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int), @@ -1024,7 +1024,7 @@ mt7921_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif, u8 bss_idx; u8 pad[3]; } req_hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, }; int err; @@ -1042,7 +1042,7 @@ int mt7921_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta, enum mt76_sta_info_state state) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - int rssi = -ewma_rssi_read(&mvif->rssi); + int rssi = -ewma_rssi_read(&mvif->bss_conf.rssi); struct mt76_sta_cmd_info info = { .sta = sta, .vif = vif, @@ -1055,7 +1055,7 @@ int mt7921_mcu_sta_update(struct 
mt792x_dev *dev, struct ieee80211_sta *sta, struct mt792x_sta *msta; msta = sta ? (struct mt792x_sta *)sta->drv_priv : NULL; - info.wcid = msta ? &msta->wcid : &mvif->sta.wcid; + info.wcid = msta ? &msta->deflink.wcid : &mvif->sta.deflink.wcid; info.newly = msta ? state != MT76_STA_INFO_STATE_ASSOC : true; return mt76_connac_mcu_sta_cmd(&dev->mphy, &info); @@ -1190,7 +1190,7 @@ int mt7921_mcu_config_sniffer(struct mt792x_vif *vif, } __packed tlv; } __packed req = { .hdr = { - .band_idx = vif->mt76.band_idx, + .band_idx = vif->bss_conf.mt76.band_idx, }, .tlv = { .tag = cpu_to_le16(1), @@ -1251,7 +1251,7 @@ mt7921_mcu_uni_add_beacon_offload(struct mt792x_dev *dev, } __packed beacon_tlv; } req = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, }, .beacon_tlv = { .tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT), @@ -1460,7 +1460,7 @@ int mt7921_mcu_set_rssimonitor(struct mt792x_dev *dev, struct ieee80211_vif *vif .enable = vif->cfg.assoc, .cqm_rssi_high = vif->bss_conf.cqm_rssi_thold + vif->bss_conf.cqm_rssi_hyst, .cqm_rssi_low = vif->bss_conf.cqm_rssi_thold - vif->bss_conf.cqm_rssi_hyst, - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, }; return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(RSSI_MONITOR), diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c index e75e7b6d3aaf..a7430216a80d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c @@ -339,6 +339,9 @@ static int mt7921_pci_probe(struct pci_dev *pdev, bus_ops->rmw = mt7921_rmw; dev->mt76.bus = bus_ops; + if (!mt7921_disable_aspm && mt76_pci_aspm_supported(pdev)) + dev->aspm_supported = true; + ret = mt792xe_mcu_fw_pmctrl(dev); if (ret) goto err_free_dev; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c index 031ba9aaa4e2..2452b1a2d118 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c @@ -34,9 +34,9 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (sta) { struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; - if (time_after(jiffies, msta->last_txs + HZ / 4)) { + if (time_after(jiffies, msta->deflink.last_txs + HZ / 4)) { info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; - msta->last_txs = jiffies; + msta->deflink.last_txs = jiffies; } } diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c index c4cbc8976046..039949b344b9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c @@ -179,6 +179,12 @@ static void mt7925_init_work(struct work_struct *work) mt76_set_stream_caps(&dev->mphy, true); mt7925_set_stream_he_eht_caps(&dev->phy); + ret = mt7925_init_mlo_caps(&dev->phy); + if (ret) { + dev_err(dev->mt76.dev, "MLO init failed\n"); + return; + } + ret = mt76_register_device(&dev->mt76, true, mt76_rates, ARRAY_SIZE(mt76_rates)); if (ret) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c index c2460ef4993d..cf36750cf709 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c @@ -28,6 +28,7 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev) }; struct ieee80211_sta *sta; struct mt792x_sta *msta; + struct mt792x_link_sta *mlink; u32 tx_time[IEEE80211_NUM_ACS], 
rx_time[IEEE80211_NUM_ACS]; LIST_HEAD(sta_poll_list); struct rate_info *rate; @@ -46,24 +47,25 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev) if (list_empty(&sta_poll_list)) break; - msta = list_first_entry(&sta_poll_list, - struct mt792x_sta, wcid.poll_list); + mlink = list_first_entry(&sta_poll_list, + struct mt792x_link_sta, wcid.poll_list); + msta = container_of(mlink, struct mt792x_sta, deflink); spin_lock_bh(&dev->mt76.sta_poll_lock); - list_del_init(&msta->wcid.poll_list); + list_del_init(&mlink->wcid.poll_list); spin_unlock_bh(&dev->mt76.sta_poll_lock); - idx = msta->wcid.idx; + idx = mlink->wcid.idx; addr = mt7925_mac_wtbl_lmac_addr(dev, idx, MT_WTBL_AC0_CTT_OFFSET); for (i = 0; i < IEEE80211_NUM_ACS; i++) { - u32 tx_last = msta->airtime_ac[i]; - u32 rx_last = msta->airtime_ac[i + 4]; + u32 tx_last = mlink->airtime_ac[i]; + u32 rx_last = mlink->airtime_ac[i + 4]; - msta->airtime_ac[i] = mt76_rr(dev, addr); - msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); + mlink->airtime_ac[i] = mt76_rr(dev, addr); + mlink->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); - tx_time[i] = msta->airtime_ac[i] - tx_last; - rx_time[i] = msta->airtime_ac[i + 4] - rx_last; + tx_time[i] = mlink->airtime_ac[i] - tx_last; + rx_time[i] = mlink->airtime_ac[i + 4] - rx_last; if ((tx_last | rx_last) & BIT(30)) clear = true; @@ -74,10 +76,10 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev) if (clear) { mt7925_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); + memset(mlink->airtime_ac, 0, sizeof(mlink->airtime_ac)); } - if (!msta->wcid.sta) + if (!mlink->wcid.sta) continue; sta = container_of((void *)msta, struct ieee80211_sta, @@ -100,7 +102,7 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev) * we need to make sure that flags match so polling GI * from per-sta counters directly. */ - rate = &msta->wcid.rate; + rate = &mlink->wcid.rate; switch (rate->bw) { case RATE_INFO_BW_160: @@ -144,10 +146,10 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev) rssi[2] = to_rssi(GENMASK(23, 16), val); rssi[3] = to_rssi(GENMASK(31, 14), val); - msta->ack_signal = + mlink->ack_signal = mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi); - ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal); + ewma_avg_signal_add(&mlink->avg_ack_signal, -mlink->ack_signal); } } @@ -365,7 +367,7 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb) u32 rxd2 = le32_to_cpu(rxd[2]); u32 rxd3 = le32_to_cpu(rxd[3]); u32 rxd4 = le32_to_cpu(rxd[4]); - struct mt792x_sta *msta = NULL; + struct mt792x_link_sta *mlink; u8 mode = 0; /* , band_idx; */ u16 seq_ctrl = 0; __le16 fc = 0; @@ -393,10 +395,10 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb) status->wcid = mt792x_rx_get_wcid(dev, idx, unicast); if (status->wcid) { - msta = container_of(status->wcid, struct mt792x_sta, wcid); + mlink = container_of(status->wcid, struct mt792x_link_sta, wcid); spin_lock_bh(&dev->mt76.sta_poll_lock); - if (list_empty(&msta->wcid.poll_list)) - list_add_tail(&msta->wcid.poll_list, + if (list_empty(&mlink->wcid.poll_list)) + list_add_tail(&mlink->wcid.poll_list, &dev->mt76.sta_poll_list); spin_unlock_bh(&dev->mt76.sta_poll_lock); } @@ -738,8 +740,12 @@ mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi, BSS_CHANGED_BEACON_ENABLED)); bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP | BSS_CHANGED_FILS_DISCOVERY)); + struct mt792x_bss_conf *mconf; + + mconf = vif ? 
mt792x_vif_to_link((struct mt792x_vif *)vif->drv_priv, + wcid->link_id) : NULL; + mvif = mconf ? (struct mt76_vif *)&mconf->mt76 : NULL; - mvif = vif ? (struct mt76_vif *)vif->drv_priv : NULL; if (mvif) { omac_idx = mvif->omac_idx; wmm_idx = mvif->wmm_idx; @@ -800,8 +806,10 @@ mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi, txwi[5] = cpu_to_le32(val); - val = MT_TXD6_DIS_MAT | MT_TXD6_DAS | - FIELD_PREP(MT_TXD6_MSDU_CNT, 1); + val = MT_TXD6_DAS | FIELD_PREP(MT_TXD6_MSDU_CNT, 1); + if (!ieee80211_vif_is_mld(vif) || + (q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)) + val |= MT_TXD6_DIS_MAT; txwi[6] = cpu_to_le32(val); txwi[7] = 0; @@ -831,27 +839,53 @@ mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi, } EXPORT_SYMBOL_GPL(mt7925_mac_write_txwi); -static void mt7925_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi) +static void mt7925_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb, + struct mt76_wcid *wcid) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_link_sta *link_sta; + struct mt792x_link_sta *mlink; struct mt792x_sta *msta; + bool is_8023; u16 fc, tid; - u32 val; - if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)) + link_sta = rcu_dereference(sta->link[wcid->link_id]); + if (!link_sta) return; - tid = le32_get_bits(txwi[1], MT_TXD1_TID); - if (tid >= 6) /* skip VO queue */ + if (!sta || !(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he)) return; - val = le32_to_cpu(txwi[2]); - fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 | - FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4; + tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; + is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP; + + if (is_8023) { + fc = IEEE80211_FTYPE_DATA | + (sta->wme ? IEEE80211_STYPE_QOS_DATA : + IEEE80211_STYPE_DATA); + } else { + /* No need to get precise TID for Action/Management Frame, + * since it will not meet the following Frame Control + * condition anyway. 
+ */ + + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + + fc = le16_to_cpu(hdr->frame_control) & + (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE); + } + if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA))) return; msta = (struct mt792x_sta *)sta->drv_priv; - if (!test_and_set_bit(tid, &msta->wcid.ampdu_state)) + + if (sta->mlo && msta->deflink_id != IEEE80211_LINK_UNSPECIFIED) + mlink = rcu_dereference(msta->link[msta->deflink_id]); + else + mlink = &msta->deflink; + + if (!test_and_set_bit(tid, &mlink->wcid.ampdu_state)) ieee80211_start_tx_ba_session(sta, tid, 0); } @@ -991,7 +1025,7 @@ out_no_skb: void mt7925_mac_add_txs(struct mt792x_dev *dev, void *data) { - struct mt792x_sta *msta = NULL; + struct mt792x_link_sta *mlink = NULL; struct mt76_wcid *wcid; __le32 *txs_data = data; u16 wcidx; @@ -1015,15 +1049,15 @@ void mt7925_mac_add_txs(struct mt792x_dev *dev, void *data) if (!wcid) goto out; - msta = container_of(wcid, struct mt792x_sta, wcid); + mlink = container_of(wcid, struct mt792x_link_sta, wcid); mt7925_mac_add_txs_skb(dev, wcid, pid, txs_data); if (!wcid->sta) goto out; spin_lock_bh(&dev->mt76.sta_poll_lock); - if (list_empty(&msta->wcid.poll_list)) - list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list); + if (list_empty(&mlink->wcid.poll_list)) + list_add_tail(&mlink->wcid.poll_list, &dev->mt76.sta_poll_list); spin_unlock_bh(&dev->mt76.sta_poll_lock); out: @@ -1031,7 +1065,7 @@ out: } void mt7925_txwi_free(struct mt792x_dev *dev, struct mt76_txwi_cache *t, - struct ieee80211_sta *sta, bool clear_status, + struct ieee80211_sta *sta, struct mt76_wcid *wcid, struct list_head *free_list) { struct mt76_dev *mdev = &dev->mt76; @@ -1044,10 +1078,8 @@ void mt7925_txwi_free(struct mt792x_dev *dev, struct mt76_txwi_cache *t, txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t); if (sta) { - struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; - if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) - mt7925_tx_check_aggr(sta, txwi); + mt7925_tx_check_aggr(sta, t->skb, wcid); wcid_idx = wcid->idx; } else { @@ -1094,7 +1126,7 @@ mt7925_mac_tx_free(struct mt792x_dev *dev, void *data, int len) */ info = le32_to_cpu(*cur_info); if (info & MT_TXFREE_INFO_PAIR) { - struct mt792x_sta *msta; + struct mt792x_link_sta *mlink; u16 idx; idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info); @@ -1103,10 +1135,10 @@ mt7925_mac_tx_free(struct mt792x_dev *dev, void *data, int len) if (!sta) continue; - msta = container_of(wcid, struct mt792x_sta, wcid); + mlink = container_of(wcid, struct mt792x_link_sta, wcid); spin_lock_bh(&mdev->sta_poll_lock); - if (list_empty(&msta->wcid.poll_list)) - list_add_tail(&msta->wcid.poll_list, + if (list_empty(&mlink->wcid.poll_list)) + list_add_tail(&mlink->wcid.poll_list, &mdev->sta_poll_list); spin_unlock_bh(&mdev->sta_poll_lock); continue; @@ -1132,7 +1164,7 @@ mt7925_mac_tx_free(struct mt792x_dev *dev, void *data, int len) if (!txwi) continue; - mt7925_txwi_free(dev, txwi, sta, 0, &free_list); + mt7925_txwi_free(dev, txwi, sta, wcid, &free_list); } } @@ -1235,17 +1267,26 @@ mt7925_vif_connect_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + unsigned long valid = ieee80211_vif_is_mld(vif) ? 
+ mvif->valid_links : BIT(0); struct mt792x_dev *dev = mvif->phy->dev; struct ieee80211_hw *hw = mt76_hw(dev); + struct ieee80211_bss_conf *bss_conf; + int i; if (vif->type == NL80211_IFTYPE_STATION) ieee80211_disconnect(vif, true); - mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true); - mt7925_mcu_set_tx(dev, vif); + for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { + bss_conf = mt792x_vif_to_bss_conf(vif, i); + + mt76_connac_mcu_uni_add_dev(&dev->mphy, bss_conf, + &mvif->sta.deflink.wcid, true); + mt7925_mcu_set_tx(dev, bss_conf); + } if (vif->type == NL80211_IFTYPE_AP) { - mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.wcid, + mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.deflink.wcid, true, NULL); mt7925_mcu_sta_update(dev, NULL, vif, true, MT76_STA_INFO_STATE_NONE); @@ -1380,9 +1421,9 @@ int mt7925_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (sta) { struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; - if (time_after(jiffies, msta->last_txs + HZ / 4)) { + if (time_after(jiffies, msta->deflink.last_txs + HZ / 4)) { info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; - msta->last_txs = jiffies; + msta->deflink.last_txs = jiffies; } } @@ -1417,7 +1458,7 @@ void mt7925_usb_sdio_tx_complete_skb(struct mt76_dev *mdev, sta = wcid_to_sta(wcid); if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE))) - mt7925_tx_check_aggr(sta, txwi); + mt76_connac2_tx_check_aggr(sta, txwi); skb_pull(e->skb, headroom); mt76_tx_complete_skb(mdev, e->wcid, e->skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c index 6179798a8845..8c0768bf9343 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c @@ -236,6 +236,35 @@ mt7925_init_eht_caps(struct mt792x_phy *phy, enum nl80211_band band, eht_nss->bw._160.rx_tx_mcs13_max_nss = val; } +int mt7925_init_mlo_caps(struct mt792x_phy *phy) +{ + struct wiphy *wiphy = phy->mt76->hw->wiphy; + static const u8 ext_capa_sta[] = { + [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, + }; + static struct wiphy_iftype_ext_capab ext_capab[] = { + { + .iftype = NL80211_IFTYPE_STATION, + .extended_capabilities = ext_capa_sta, + .extended_capabilities_mask = ext_capa_sta, + .extended_capabilities_len = sizeof(ext_capa_sta), + }, + }; + + if (!(phy->chip_cap & MT792x_CHIP_CAP_MLO_EVT_EN)) + return 0; + + ext_capab[0].eml_capabilities = phy->eml_cap; + ext_capab[0].mld_capa_and_ops = + u16_encode_bits(1, IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS); + + wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO; + wiphy->iftype_ext_capab = ext_capab; + wiphy->num_iftype_ext_capab = ARRAY_SIZE(ext_capab); + + return 0; +} + static void __mt7925_set_stream_he_eht_caps(struct mt792x_phy *phy, struct ieee80211_supported_band *sband, @@ -317,62 +346,83 @@ static int mt7925_start(struct ieee80211_hw *hw) return err; } -static int -mt7925_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +static int mt7925_mac_link_bss_add(struct mt792x_dev *dev, + struct ieee80211_bss_conf *link_conf, + struct mt792x_link_sta *mlink) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - struct mt792x_dev *dev = mt792x_hw_dev(hw); - struct mt792x_phy *phy = mt792x_hw_phy(hw); + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); + struct ieee80211_vif *vif = link_conf->vif; + struct mt792x_vif *mvif = mconf->vif; struct mt76_txq *mtxq; int idx, ret = 0; - mt792x_mutex_acquire(dev); - - 
mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask); - if (mvif->mt76.idx >= MT792x_MAX_INTERFACES) { + mconf->mt76.idx = __ffs64(~dev->mt76.vif_mask); + if (mconf->mt76.idx >= MT792x_MAX_INTERFACES) { ret = -ENOSPC; goto out; } - mvif->mt76.omac_idx = mvif->mt76.idx; - mvif->phy = phy; - mvif->mt76.band_idx = 0; - mvif->mt76.wmm_idx = mvif->mt76.idx % MT76_CONNAC_MAX_WMM_SETS; + mconf->mt76.omac_idx = ieee80211_vif_is_mld(vif) ? + 0 : mconf->mt76.idx; + mconf->mt76.band_idx = 0xff; + mconf->mt76.wmm_idx = mconf->mt76.idx % MT76_CONNAC_MAX_WMM_SETS; - if (phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ) - mvif->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL + 4; + if (mvif->phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ) + mconf->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL + 4; else - mvif->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL; + mconf->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL; - ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, - true); + ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf, + &mlink->wcid, true); if (ret) goto out; - dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx); - phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); + dev->mt76.vif_mask |= BIT_ULL(mconf->mt76.idx); + mvif->phy->omac_mask |= BIT_ULL(mconf->mt76.omac_idx); - idx = MT792x_WTBL_RESERVED - mvif->mt76.idx; + idx = MT792x_WTBL_RESERVED - mconf->mt76.idx; - INIT_LIST_HEAD(&mvif->sta.wcid.poll_list); - mvif->sta.wcid.idx = idx; - mvif->sta.wcid.phy_idx = mvif->mt76.band_idx; - mvif->sta.wcid.hw_key_idx = -1; - mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET; - mvif->sta.vif = mvif; - mt76_wcid_init(&mvif->sta.wcid); + INIT_LIST_HEAD(&mlink->wcid.poll_list); + mlink->wcid.idx = idx; + mlink->wcid.phy_idx = mconf->mt76.band_idx; + mlink->wcid.hw_key_idx = -1; + mlink->wcid.tx_info |= MT_WCID_TX_INFO_SET; + mt76_wcid_init(&mlink->wcid); mt7925_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - ewma_rssi_init(&mvif->rssi); + ewma_rssi_init(&mconf->rssi); - rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid); + rcu_assign_pointer(dev->mt76.wcid[idx], &mlink->wcid); if (vif->txq) { mtxq = (struct mt76_txq *)vif->txq->drv_priv; mtxq->wcid = idx; } +out: + return ret; +} + +static int +mt7925_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); + int ret = 0; + + mt792x_mutex_acquire(dev); + + mvif->phy = phy; + mvif->bss_conf.vif = mvif; + mvif->sta.vif = mvif; + mvif->deflink_id = IEEE80211_LINK_UNSPECIFIED; + + ret = mt7925_mac_link_bss_add(dev, &vif->bss_conf, &mvif->sta.deflink); + if (ret < 0) + goto out; + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; out: mt792x_mutex_release(dev); @@ -386,7 +436,7 @@ static void mt7925_roc_iter(void *priv, u8 *mac, struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_phy *phy = priv; - mt7925_mcu_abort_roc(phy, mvif, phy->roc_token_id); + mt7925_mcu_abort_roc(phy, &mvif->bss_conf, phy->roc_token_id); } void mt7925_roc_work(struct work_struct *work) @@ -407,7 +457,8 @@ void mt7925_roc_work(struct work_struct *work) ieee80211_remain_on_channel_expired(phy->mt76->hw); } -static int mt7925_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif) +static int mt7925_abort_roc(struct mt792x_phy *phy, + struct mt792x_bss_conf *mconf) { int err = 0; @@ -416,14 +467,14 @@ static int mt7925_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif) 
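The signature change above is the pattern for the rest of these hunks: the ROC, key, beacon and power-save paths stop taking a struct mt792x_vif and instead take the struct mt792x_bss_conf of a single link, so each MLO link carries its own BSS index, WMM slot and channel context, while non-MLD interfaces keep using the embedded default link. A minimal sketch of the recurring iteration idiom, reusing the driver's own helpers; do_per_link() is a placeholder for any of the per-link MCU calls, not a driver or mac80211 function:

static void do_per_link(struct ieee80211_bss_conf *bss_conf);	/* placeholder */

static void vif_for_each_valid_link(struct ieee80211_vif *vif)
{
	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
	unsigned long valid = ieee80211_vif_is_mld(vif) ?
			      mvif->valid_links : BIT(0);
	unsigned int link_id;

	for_each_set_bit(link_id, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
		struct ieee80211_bss_conf *bss_conf =
			mt792x_vif_to_bss_conf(vif, link_id);

		do_per_link(bss_conf);	/* per-link MCU update goes here */
	}
}

Non-MLD interfaces collapse to BIT(0), i.e. the deflink, which is why the legacy single-link paths keep working unchanged.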
mt792x_mutex_acquire(phy->dev); if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) - err = mt7925_mcu_abort_roc(phy, vif, phy->roc_token_id); + err = mt7925_mcu_abort_roc(phy, mconf, phy->roc_token_id); mt792x_mutex_release(phy->dev); return err; } static int mt7925_set_roc(struct mt792x_phy *phy, - struct mt792x_vif *vif, + struct mt792x_bss_conf *mconf, struct ieee80211_channel *chan, int duration, enum mt7925_roc_req type) @@ -435,7 +486,7 @@ static int mt7925_set_roc(struct mt792x_phy *phy, phy->roc_grant = false; - err = mt7925_mcu_set_roc(phy, vif, chan, duration, type, + err = mt7925_mcu_set_roc(phy, mconf, chan, duration, type, ++phy->roc_token_id); if (err < 0) { clear_bit(MT76_STATE_ROC, &phy->mt76->state); @@ -443,7 +494,34 @@ static int mt7925_set_roc(struct mt792x_phy *phy, } if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, 4 * HZ)) { - mt7925_mcu_abort_roc(phy, vif, phy->roc_token_id); + mt7925_mcu_abort_roc(phy, mconf, phy->roc_token_id); + clear_bit(MT76_STATE_ROC, &phy->mt76->state); + err = -ETIMEDOUT; + } + +out: + return err; +} + +static int mt7925_set_mlo_roc(struct mt792x_phy *phy, + struct mt792x_bss_conf *mconf, + u16 sel_links) +{ + int err; + + if (WARN_ON_ONCE(test_and_set_bit(MT76_STATE_ROC, &phy->mt76->state))) + return -EBUSY; + + phy->roc_grant = false; + + err = mt7925_mcu_set_mlo_roc(mconf, sel_links, 5, ++phy->roc_token_id); + if (err < 0) { + clear_bit(MT76_STATE_ROC, &phy->mt76->state); + goto out; + } + + if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, 4 * HZ)) { + mt7925_mcu_abort_roc(phy, mconf, phy->roc_token_id); clear_bit(MT76_STATE_ROC, &phy->mt76->state); err = -ETIMEDOUT; } @@ -463,7 +541,8 @@ static int mt7925_remain_on_channel(struct ieee80211_hw *hw, int err; mt792x_mutex_acquire(phy->dev); - err = mt7925_set_roc(phy, mvif, chan, duration, MT7925_ROC_REQ_ROC); + err = mt7925_set_roc(phy, &mvif->bss_conf, + chan, duration, MT7925_ROC_REQ_ROC); mt792x_mutex_release(phy->dev); return err; @@ -475,30 +554,31 @@ static int mt7925_cancel_remain_on_channel(struct ieee80211_hw *hw, struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_phy *phy = mt792x_hw_phy(hw); - return mt7925_abort_roc(phy, mvif); + return mt7925_abort_roc(phy, &mvif->bss_conf); } -static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, - struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) +static int mt7925_set_link_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, int link_id) { struct mt792x_dev *dev = mt792x_hw_dev(hw); struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv : &mvif->sta; - struct mt76_wcid *wcid = &msta->wcid; - u8 *wcid_keyidx = &wcid->hw_key_idx; + struct ieee80211_bss_conf *link_conf; + struct ieee80211_link_sta *link_sta; int idx = key->keyidx, err = 0; - - /* The hardware does not support per-STA RX GTK, fallback - * to software mode for these. - */ - if ((vif->type == NL80211_IFTYPE_ADHOC || - vif->type == NL80211_IFTYPE_MESH_POINT) && - (key->cipher == WLAN_CIPHER_SUITE_TKIP || - key->cipher == WLAN_CIPHER_SUITE_CCMP) && - !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) - return -EOPNOTSUPP; + struct mt792x_link_sta *mlink; + struct mt792x_bss_conf *mconf; + struct mt76_wcid *wcid; + u8 *wcid_keyidx; + + link_conf = mt792x_vif_to_bss_conf(vif, link_id); + link_sta = sta ? 
mt792x_sta_to_link_sta(vif, sta, link_id) : NULL; + mconf = mt792x_vif_to_link(mvif, link_id); + mlink = mt792x_sta_to_link(msta, link_id); + wcid = &mlink->wcid; + wcid_keyidx = &wcid->hw_key_idx; /* fall back to sw encryption for unsupported ciphers */ switch (key->cipher) { @@ -522,13 +602,12 @@ static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return -EOPNOTSUPP; } - mt792x_mutex_acquire(dev); - - if (cmd == SET_KEY && !mvif->mt76.cipher) { + if (cmd == SET_KEY && !mconf->mt76.cipher) { struct mt792x_phy *phy = mt792x_hw_phy(hw); - mvif->mt76.cipher = mt7925_mcu_get_cipher(key->cipher); - mt7925_mcu_add_bss_info(phy, mvif->mt76.ctx, vif, sta, true); + mconf->mt76.cipher = mt7925_mcu_get_cipher(key->cipher); + mt7925_mcu_add_bss_info(phy, mconf->mt76.ctx, link_conf, + link_sta, true); } if (cmd == SET_KEY) @@ -541,20 +620,59 @@ static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, mt76_wcid_key_setup(&dev->mt76, wcid, cmd == SET_KEY ? key : NULL); - err = mt7925_mcu_add_key(&dev->mt76, vif, &msta->bip, + err = mt7925_mcu_add_key(&dev->mt76, vif, &mlink->bip, key, MCU_UNI_CMD(STA_REC_UPDATE), - &msta->wcid, cmd); + &mlink->wcid, cmd, msta); if (err) goto out; if (key->cipher == WLAN_CIPHER_SUITE_WEP104 || key->cipher == WLAN_CIPHER_SUITE_WEP40) - err = mt7925_mcu_add_key(&dev->mt76, vif, &mvif->wep_sta->bip, + err = mt7925_mcu_add_key(&dev->mt76, vif, &mvif->wep_sta->deflink.bip, key, MCU_WMWA_UNI_CMD(STA_REC_UPDATE), - &mvif->wep_sta->wcid, cmd); - + &mvif->wep_sta->deflink.wcid, cmd, msta); out: + return err; +} + +static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv : + &mvif->sta; + int err; + + /* The hardware does not support per-STA RX GTK, fallback + * to software mode for these. + */ + if ((vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_MESH_POINT) && + (key->cipher == WLAN_CIPHER_SUITE_TKIP || + key->cipher == WLAN_CIPHER_SUITE_CCMP) && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return -EOPNOTSUPP; + + mt792x_mutex_acquire(dev); + + if (ieee80211_vif_is_mld(vif)) { + unsigned int link_id; + unsigned long add; + + add = key->link_id != -1 ? 
BIT(key->link_id) : msta->valid_links; + + for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { + err = mt7925_set_link_key(hw, cmd, vif, sta, key, link_id); + if (err < 0) + break; + } + } else { + err = mt7925_set_link_key(hw, cmd, vif, sta, key, vif->bss_conf.link_id); + } + mt792x_mutex_release(dev); return err; @@ -695,166 +813,388 @@ mt7925_get_rates_table(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return mvif->basic_rates_idx; } -static void mt7925_bss_info_changed(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *info, - u64 changed) +static int mt7925_mac_link_sta_add(struct mt76_dev *mdev, + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta) { - struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; - struct mt792x_phy *phy = mt792x_hw_phy(hw); - struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct ieee80211_bss_conf *link_conf; + struct mt792x_bss_conf *mconf; + u8 link_id = link_sta->link_id; + struct mt792x_link_sta *mlink; + struct mt792x_sta *msta; + int ret, idx; - mt792x_mutex_acquire(dev); + msta = (struct mt792x_sta *)link_sta->sta->drv_priv; + mlink = mt792x_sta_to_link(msta, link_id); - if (changed & BSS_CHANGED_ERP_SLOT) { - int slottime = info->use_short_slot ? 9 : 20; + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT792x_WTBL_STA - 1); + if (idx < 0) + return -ENOSPC; - if (slottime != phy->slottime) { - phy->slottime = slottime; - mt7925_mcu_set_timing(phy, vif); - } - } + mconf = mt792x_vif_to_link(mvif, link_id); + INIT_LIST_HEAD(&mlink->wcid.poll_list); + mlink->wcid.sta = 1; + mlink->wcid.idx = idx; + mlink->wcid.phy_idx = mconf->mt76.band_idx; + mlink->wcid.tx_info |= MT_WCID_TX_INFO_SET; + mlink->last_txs = jiffies; + mlink->wcid.link_id = link_sta->link_id; + mlink->wcid.link_valid = !!link_sta->sta->valid_links; - if (changed & BSS_CHANGED_MCAST_RATE) - mvif->mcast_rates_idx = - mt7925_get_rates_table(hw, vif, false, true); + ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm); + if (ret) + return ret; - if (changed & BSS_CHANGED_BASIC_RATES) - mvif->basic_rates_idx = - mt7925_get_rates_table(hw, vif, false, false); + mt7925_mac_wtbl_update(dev, idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - if (changed & (BSS_CHANGED_BEACON | - BSS_CHANGED_BEACON_ENABLED)) { - mvif->beacon_rates_idx = - mt7925_get_rates_table(hw, vif, true, false); + link_conf = mt792x_vif_to_bss_conf(vif, link_id); - mt7925_mcu_uni_add_beacon_offload(dev, hw, vif, - info->enable_beacon); + /* should update bss info before STA add */ + if (vif->type == NL80211_IFTYPE_STATION && !link_sta->sta->tdls) + mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, + link_conf, link_sta, false); + + if (ieee80211_vif_is_mld(vif) && + link_sta == mlink->pri_link) { + ret = mt7925_mcu_sta_update(dev, link_sta, vif, true, + MT76_STA_INFO_STATE_NONE); + if (ret) + return ret; + } else if (ieee80211_vif_is_mld(vif) && + link_sta != mlink->pri_link) { + ret = mt7925_mcu_sta_update(dev, mlink->pri_link, vif, + true, MT76_STA_INFO_STATE_ASSOC); + if (ret) + return ret; + + ret = mt7925_mcu_sta_update(dev, link_sta, vif, true, + MT76_STA_INFO_STATE_ASSOC); + if (ret) + return ret; + } else { + ret = mt7925_mcu_sta_update(dev, link_sta, vif, true, + MT76_STA_INFO_STATE_NONE); + if (ret) + return ret; } - /* ensure that enable txcmd_mode after bss_info */ - if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED)) - 
mt7925_mcu_set_tx(dev, vif); + mt76_connac_power_save_sched(&dev->mphy, &dev->pm); - if (changed & BSS_CHANGED_PS) - mt7925_mcu_uni_bss_ps(dev, vif); + return 0; +} - if (changed & BSS_CHANGED_ASSOC) { - mt7925_mcu_sta_update(dev, NULL, vif, true, - MT76_STA_INFO_STATE_ASSOC); - mt7925_mcu_set_beacon_filter(dev, vif, vif->cfg.assoc); - } +static int +mt7925_mac_sta_add_links(struct mt792x_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, unsigned long new_links) +{ + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct mt76_wcid *wcid; + unsigned int link_id; + int err = 0; - if (changed & BSS_CHANGED_ARP_FILTER) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + for_each_set_bit(link_id, &new_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_link_sta *link_sta; + struct mt792x_link_sta *mlink; + + if (msta->deflink_id == IEEE80211_LINK_UNSPECIFIED) { + mlink = &msta->deflink; + msta->deflink_id = link_id; + } else { + mlink = devm_kzalloc(dev->mt76.dev, sizeof(*mlink), GFP_KERNEL); + if (!mlink) { + err = -ENOMEM; + break; + } - mt7925_mcu_update_arp_filter(&dev->mt76, &mvif->mt76, info); + wcid = &mlink->wcid; + ewma_signal_init(&wcid->rssi); + rcu_assign_pointer(dev->mt76.wcid[wcid->idx], wcid); + mt76_wcid_init(wcid); + ewma_avg_signal_init(&mlink->avg_ack_signal); + memset(mlink->airtime_ac, 0, + sizeof(msta->deflink.airtime_ac)); + } + + msta->valid_links |= BIT(link_id); + rcu_assign_pointer(msta->link[link_id], mlink); + mlink->sta = msta; + mlink->pri_link = &sta->deflink; + mlink->wcid.def_wcid = &msta->deflink.wcid; + + link_sta = mt792x_sta_to_link_sta(vif, sta, link_id); + mt7925_mac_link_sta_add(&dev->mt76, vif, link_sta); } - mt792x_mutex_release(dev); + return err; } int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); - struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - int ret, idx; - - idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT792x_WTBL_STA - 1); - if (idx < 0) - return -ENOSPC; + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + int err; - INIT_LIST_HEAD(&msta->wcid.poll_list); msta->vif = mvif; - msta->wcid.sta = 1; - msta->wcid.idx = idx; - msta->wcid.phy_idx = mvif->mt76.band_idx; - msta->wcid.tx_info |= MT_WCID_TX_INFO_SET; - msta->last_txs = jiffies; - - ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm); - if (ret) - return ret; if (vif->type == NL80211_IFTYPE_STATION) mvif->wep_sta = msta; - mt7925_mac_wtbl_update(dev, idx, - MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + if (ieee80211_vif_is_mld(vif)) { + msta->deflink_id = IEEE80211_LINK_UNSPECIFIED; - /* should update bss info before STA add */ - if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) - mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, sta, - false); + err = mt7925_mac_sta_add_links(dev, vif, sta, sta->valid_links); + } else { + err = mt7925_mac_link_sta_add(mdev, vif, &sta->deflink); + } - ret = mt7925_mcu_sta_update(dev, sta, vif, true, - MT76_STA_INFO_STATE_NONE); - if (ret) - return ret; + return err; +} +EXPORT_SYMBOL_GPL(mt7925_mac_sta_add); - mt76_connac_power_save_sched(&dev->mphy, &dev->pm); +static u16 +mt7925_mac_select_links(struct mt76_dev *mdev, struct ieee80211_vif *vif) +{ + unsigned long usable_links = ieee80211_vif_usable_links(vif); + struct { + u8 link_id; + enum nl80211_band band; + } data[IEEE80211_MLD_MAX_NUM_LINKS]; 
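/* data[] caches a (link_id, band) pair for every usable link; the
 * selection loop further down pairs the first active link with the
 * first usable link on a different band, and falls back to the active
 * link alone when every usable link shares a band. This matches the
 * at-most-two-simultaneous-links MLO setup advertised in
 * mt7925_init_mlo_caps().
 */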
+ u8 link_id, i, j, n_data = 0; + u16 sel_links = 0; + + if (!ieee80211_vif_is_mld(vif)) + return 0; + + if (vif->active_links == usable_links) + return vif->active_links; + + rcu_read_lock(); + for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_bss_conf *link_conf = + rcu_dereference(vif->link_conf[link_id]); + + if (WARN_ON_ONCE(!link_conf)) + continue; - return 0; + data[n_data].link_id = link_id; + data[n_data].band = link_conf->chanreq.oper.chan->band; + n_data++; + } + rcu_read_unlock(); + + for (i = 0; i < n_data; i++) { + if (!(BIT(data[i].link_id) & vif->active_links)) + continue; + + sel_links = BIT(data[i].link_id); + + for (j = 0; j < n_data; j++) { + if (data[i].band != data[j].band) { + sel_links |= BIT(data[j].link_id); + break; + } + } + + break; + } + + return sel_links; } -EXPORT_SYMBOL_GPL(mt7925_mac_sta_add); -void mt7925_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +static void +mt7925_mac_set_links(struct mt76_dev *mdev, struct ieee80211_vif *vif) { struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); - struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct ieee80211_bss_conf *link_conf = + mt792x_vif_to_bss_conf(vif, mvif->deflink_id); + struct cfg80211_chan_def *chandef = &link_conf->chanreq.oper; + enum nl80211_band band = chandef->chan->band, secondary_band; + + u16 sel_links = mt7925_mac_select_links(mdev, vif); + u8 secondary_link_id = __ffs(~BIT(mvif->deflink_id) & sel_links); + + if (!ieee80211_vif_is_mld(vif) || hweight16(sel_links) < 2) + return; + + link_conf = mt792x_vif_to_bss_conf(vif, secondary_link_id); + secondary_band = link_conf->chanreq.oper.chan->band; + + if (band == NL80211_BAND_2GHZ || + (band == NL80211_BAND_5GHZ && secondary_band == NL80211_BAND_6GHZ)) { + mt7925_abort_roc(mvif->phy, &mvif->bss_conf); + + mt792x_mutex_acquire(dev); + + mt7925_set_mlo_roc(mvif->phy, &mvif->bss_conf, sel_links); + + mt792x_mutex_release(dev); + } + + ieee80211_set_active_links_async(vif, sel_links); +} + +static void mt7925_mac_link_sta_assoc(struct mt76_dev *mdev, + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct ieee80211_bss_conf *link_conf; + struct mt792x_link_sta *mlink; + struct mt792x_sta *msta; + + msta = (struct mt792x_sta *)link_sta->sta->drv_priv; + mlink = mt792x_sta_to_link(msta, link_sta->link_id); mt792x_mutex_acquire(dev); - if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) - mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, sta, - true); + if (ieee80211_vif_is_mld(vif)) { + link_conf = mt792x_vif_to_bss_conf(vif, msta->deflink_id); + } else { + link_conf = mt792x_vif_to_bss_conf(vif, vif->bss_conf.link_id); + } + + if (vif->type == NL80211_IFTYPE_STATION && !link_sta->sta->tdls) { + struct mt792x_bss_conf *mconf; - ewma_avg_signal_init(&msta->avg_ack_signal); + mconf = mt792x_link_conf_to_mconf(link_conf); + mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, + link_conf, link_sta, true); + } + + ewma_avg_signal_init(&mlink->avg_ack_signal); - mt7925_mac_wtbl_update(dev, msta->wcid.idx, + mt7925_mac_wtbl_update(dev, mlink->wcid.idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); + memset(mlink->airtime_ac, 0, sizeof(mlink->airtime_ac)); - mt7925_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC); + 
mt7925_mcu_sta_update(dev, link_sta, vif, true, MT76_STA_INFO_STATE_ASSOC); mt792x_mutex_release(dev); } + +void mt7925_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + if (ieee80211_vif_is_mld(vif)) { + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct ieee80211_link_sta *link_sta; + + link_sta = mt792x_sta_to_link_sta(vif, sta, msta->deflink_id); + + mt7925_mac_set_links(mdev, vif); + + mt7925_mac_link_sta_assoc(mdev, vif, link_sta); + } else { + mt7925_mac_link_sta_assoc(mdev, vif, &sta->deflink); + } +} EXPORT_SYMBOL_GPL(mt7925_mac_sta_assoc); -void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev, + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta) { struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); - struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct ieee80211_bss_conf *link_conf; + u8 link_id = link_sta->link_id; + struct mt792x_link_sta *mlink; + struct mt792x_sta *msta; - mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); + msta = (struct mt792x_sta *)link_sta->sta->drv_priv; + mlink = mt792x_sta_to_link(msta, link_id); + + mt76_connac_free_pending_tx_skbs(&dev->pm, &mlink->wcid); mt76_connac_pm_wake(&dev->mphy, &dev->pm); - mt7925_mcu_sta_update(dev, sta, vif, false, MT76_STA_INFO_STATE_NONE); - mt7925_mac_wtbl_update(dev, msta->wcid.idx, + mt7925_mcu_sta_update(dev, link_sta, vif, false, + MT76_STA_INFO_STATE_NONE); + mt7925_mac_wtbl_update(dev, mlink->wcid.idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - if (vif->type == NL80211_IFTYPE_STATION) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + link_conf = mt792x_vif_to_bss_conf(vif, link_id); - mvif->wep_sta = NULL; - ewma_rssi_init(&mvif->rssi); - if (!sta->tdls) - mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, sta, - false); + if (vif->type == NL80211_IFTYPE_STATION && !link_sta->sta->tdls) { + struct mt792x_bss_conf *mconf; + + mconf = mt792x_link_conf_to_mconf(link_conf); + mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, link_conf, + link_sta, false); } spin_lock_bh(&mdev->sta_poll_lock); - if (!list_empty(&msta->wcid.poll_list)) - list_del_init(&msta->wcid.poll_list); + if (!list_empty(&mlink->wcid.poll_list)) + list_del_init(&mlink->wcid.poll_list); spin_unlock_bh(&mdev->sta_poll_lock); mt76_connac_power_save_sched(&dev->mphy, &dev->pm); } + +static int +mt7925_mac_sta_remove_links(struct mt792x_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, unsigned long old_links) +{ + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct mt76_dev *mdev = &dev->mt76; + struct mt76_wcid *wcid; + unsigned int link_id; + + for_each_set_bit(link_id, &old_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_link_sta *link_sta; + struct mt792x_link_sta *mlink; + + link_sta = mt792x_sta_to_link_sta(vif, sta, link_id); + if (!link_sta) + continue; + + mlink = mt792x_sta_to_link(msta, link_id); + if (!mlink) + continue; + + mt7925_mac_link_sta_remove(&dev->mt76, vif, link_sta); + + wcid = &mlink->wcid; + rcu_assign_pointer(msta->link[link_id], NULL); + msta->valid_links &= ~BIT(link_id); + mlink->sta = NULL; + mlink->pri_link = NULL; + + if (link_sta != mlink->pri_link) { + mt76_wcid_cleanup(mdev, wcid); + mt76_wcid_mask_clear(mdev->wcid_mask, wcid->idx); + mt76_wcid_mask_clear(mdev->wcid_phy_mask, wcid->idx); + } + + if (msta->deflink_id == 
link_id) + msta->deflink_id = IEEE80211_LINK_UNSPECIFIED; + } + + return 0; +} + +void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + unsigned long rem; + + rem = ieee80211_vif_is_mld(vif) ? msta->valid_links : BIT(0); + + mt7925_mac_sta_remove_links(dev, vif, sta, rem); + + if (vif->type == NL80211_IFTYPE_STATION) { + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + + mvif->wep_sta = NULL; + ewma_rssi_init(&mvif->bss_conf.rssi); + } +} EXPORT_SYMBOL_GPL(mt7925_mac_sta_remove); static int mt7925_set_rts_threshold(struct ieee80211_hw *hw, u32 val) @@ -890,12 +1230,12 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mt792x_mutex_acquire(dev); switch (action) { case IEEE80211_AMPDU_RX_START: - mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, + mt76_rx_aggr_start(&dev->mt76, &msta->deflink.wcid, tid, ssn, params->buf_size); mt7925_mcu_uni_rx_ba(dev, params, true); break; case IEEE80211_AMPDU_RX_STOP: - mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid); + mt76_rx_aggr_stop(&dev->mt76, &msta->deflink.wcid, tid); mt7925_mcu_uni_rx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_OPERATIONAL: @@ -906,16 +1246,16 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: mtxq->aggr = false; - clear_bit(tid, &msta->wcid.ampdu_state); + clear_bit(tid, &msta->deflink.wcid.ampdu_state); mt7925_mcu_uni_tx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_START: - set_bit(tid, &msta->wcid.ampdu_state); + set_bit(tid, &msta->deflink.wcid.ampdu_state); ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; break; case IEEE80211_AMPDU_TX_STOP_CONT: mtxq->aggr = false; - clear_bit(tid, &msta->wcid.ampdu_state); + clear_bit(tid, &msta->deflink.wcid.ampdu_state); mt7925_mcu_uni_tx_ba(dev, params, false); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; @@ -1156,27 +1496,38 @@ static void mt7925_sta_set_decap_offload(struct ieee80211_hw *hw, bool enabled) { struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_dev *dev = mt792x_hw_dev(hw); + unsigned long valid = mvif->valid_links; + u8 i; mt792x_mutex_acquire(dev); - if (enabled) - set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); - else - clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + valid = ieee80211_vif_is_mld(vif) ? 
mvif->valid_links : BIT(0); + + for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { + struct mt792x_link_sta *mlink; - mt7925_mcu_wtbl_update_hdr_trans(dev, vif, sta); + mlink = mt792x_sta_to_link(msta, i); + + if (enabled) + set_bit(MT_WCID_FLAG_HDR_TRANS, &mlink->wcid.flags); + else + clear_bit(MT_WCID_FLAG_HDR_TRANS, &mlink->wcid.flags); + + mt7925_mcu_wtbl_update_hdr_trans(dev, vif, sta, i); + } mt792x_mutex_release(dev); } #if IS_ENABLED(CONFIG_IPV6) -static void mt7925_ipv6_addr_change(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct inet6_dev *idev) +static void __mt7925_ipv6_addr_change(struct ieee80211_hw *hw, + struct ieee80211_bss_conf *link_conf, + struct inet6_dev *idev) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - struct mt792x_dev *dev = mvif->phy->dev; + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); + struct mt792x_dev *dev = mt792x_hw_dev(hw); struct inet6_ifaddr *ifa; struct sk_buff *skb; u8 idx = 0; @@ -1190,7 +1541,7 @@ static void mt7925_ipv6_addr_change(struct ieee80211_hw *hw, struct in6_addr ns_addrs[IEEE80211_BSS_ARP_ADDR_LIST_LEN]; } req_hdr = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mconf->mt76.idx, }, .arpns = { .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND), @@ -1225,6 +1576,23 @@ static void mt7925_ipv6_addr_change(struct ieee80211_hw *hw, ieee80211_queue_work(dev->mt76.hw, &dev->ipv6_ns_work); } + +static void mt7925_ipv6_addr_change(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct inet6_dev *idev) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + unsigned long valid = ieee80211_vif_is_mld(vif) ? + mvif->valid_links : BIT(0); + struct ieee80211_bss_conf *bss_conf; + int i; + + for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { + bss_conf = mt792x_vif_to_bss_conf(vif, i); + __mt7925_ipv6_addr_change(hw, bss_conf, idev); + } +} + #endif int mt7925_set_tx_sar_pwr(struct ieee80211_hw *hw, @@ -1280,6 +1648,7 @@ mt7925_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct ieee80211_tx_queue_params *params) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_bss_conf *mconf = mt792x_vif_to_link(mvif, link_id); static const u8 mq_to_aci[] = { [IEEE80211_AC_VO] = 3, [IEEE80211_AC_VI] = 2, @@ -1288,7 +1657,7 @@ mt7925_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, }; /* firmware uses access class index */ - mvif->queue_params[mq_to_aci[queue]] = *params; + mconf->queue_params[mq_to_aci[queue]] = *params; return 0; } @@ -1303,12 +1672,12 @@ mt7925_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mt792x_mutex_acquire(dev); - err = mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, NULL, - true); + err = mt7925_mcu_add_bss_info(&dev->phy, mvif->bss_conf.mt76.ctx, + link_conf, NULL, true); if (err) goto out; - err = mt7925_mcu_set_bss_pm(dev, vif, true); + err = mt7925_mcu_set_bss_pm(dev, link_conf, true); if (err) goto out; @@ -1330,12 +1699,12 @@ mt7925_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mt792x_mutex_acquire(dev); - err = mt7925_mcu_set_bss_pm(dev, vif, false); + err = mt7925_mcu_set_bss_pm(dev, link_conf, false); if (err) goto out; - mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, NULL, - false); + mt7925_mcu_add_bss_info(&dev->phy, mvif->bss_conf.mt76.ctx, link_conf, + NULL, false); out: mt792x_mutex_release(dev); @@ -1354,34 +1723,52 @@ mt7925_remove_chanctx(struct ieee80211_hw *hw, { } -static void mt7925_ctx_iter(void *priv, u8 *mac, - struct 
ieee80211_vif *vif) +static void +mt7925_change_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx, + u32 changed) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - struct ieee80211_chanctx_conf *ctx = priv; + struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv; + struct mt792x_phy *phy = mt792x_hw_phy(hw); + struct mt792x_bss_conf *mconf; + struct ieee80211_vif *vif; + struct mt792x_vif *mvif; - if (ctx != mvif->mt76.ctx) + if (!mctx->bss_conf) return; + mconf = mctx->bss_conf; + mvif = mconf->vif; + vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv); + + mt792x_mutex_acquire(phy->dev); if (vif->type == NL80211_IFTYPE_MONITOR) { mt7925_mcu_set_sniffer(mvif->phy->dev, vif, true); mt7925_mcu_config_sniffer(mvif, ctx); } else { - mt7925_mcu_set_chctx(mvif->phy->mt76, &mvif->mt76, ctx); - } -} + if (ieee80211_vif_is_mld(vif)) { + unsigned long valid = mvif->valid_links; + u8 i; -static void -mt7925_change_chanctx(struct ieee80211_hw *hw, - struct ieee80211_chanctx_conf *ctx, - u32 changed) -{ - struct mt792x_phy *phy = mt792x_hw_phy(hw); + for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { + mconf = mt792x_vif_to_link(mvif, i); + if (mconf && mconf->mt76.ctx == ctx) + break; + } + + } else { + mconf = &mvif->bss_conf; + } + + if (mconf) { + struct ieee80211_bss_conf *link_conf; + + link_conf = mt792x_vif_to_bss_conf(vif, mconf->link_id); + mt7925_mcu_set_chctx(mvif->phy->mt76, &mconf->mt76, + link_conf, ctx); + } + } - mt792x_mutex_acquire(phy->dev); - ieee80211_iterate_active_interfaces(phy->mt76->hw, - IEEE80211_IFACE_ITER_ACTIVE, - mt7925_ctx_iter, ctx); mt792x_mutex_release(phy->dev); } @@ -1395,7 +1782,8 @@ static void mt7925_mgd_prepare_tx(struct ieee80211_hw *hw, jiffies_to_msecs(HZ); mt792x_mutex_acquire(dev); - mt7925_set_roc(mvif->phy, mvif, mvif->mt76.ctx->def.chan, duration, + mt7925_set_roc(mvif->phy, &mvif->bss_conf, + mvif->bss_conf.mt76.ctx->def.chan, duration, MT7925_ROC_REQ_JOIN); mt792x_mutex_release(dev); } @@ -1406,7 +1794,285 @@ static void mt7925_mgd_complete_tx(struct ieee80211_hw *hw, { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - mt7925_abort_roc(mvif->phy, mvif); + mt7925_abort_roc(mvif->phy, &mvif->bss_conf); +} + +static void mt7925_vif_cfg_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u64 changed) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + unsigned long valid = ieee80211_vif_is_mld(vif) ? 
+ mvif->valid_links : BIT(0); + struct ieee80211_bss_conf *bss_conf; + int i; + + mt792x_mutex_acquire(dev); + + if (changed & BSS_CHANGED_ASSOC) { + mt7925_mcu_sta_update(dev, NULL, vif, true, + MT76_STA_INFO_STATE_ASSOC); + mt7925_mcu_set_beacon_filter(dev, vif, vif->cfg.assoc); + } + + if (changed & BSS_CHANGED_ARP_FILTER) { + for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { + bss_conf = mt792x_vif_to_bss_conf(vif, i); + mt7925_mcu_update_arp_filter(&dev->mt76, bss_conf); + } + } + + if (changed & BSS_CHANGED_PS) { + for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { + bss_conf = mt792x_vif_to_bss_conf(vif, i); + mt7925_mcu_uni_bss_ps(dev, bss_conf); + } + } + + mt792x_mutex_release(dev); +} + +static void mt7925_link_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u64 changed) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_phy *phy = mt792x_hw_phy(hw); + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_bss_conf *mconf; + + mconf = mt792x_vif_to_link(mvif, info->link_id); + + mt792x_mutex_acquire(dev); + + if (changed & BSS_CHANGED_ERP_SLOT) { + int slottime = info->use_short_slot ? 9 : 20; + + if (slottime != phy->slottime) { + phy->slottime = slottime; + mt7925_mcu_set_timing(phy, info); + } + } + + if (changed & BSS_CHANGED_MCAST_RATE) + mconf->mt76.mcast_rates_idx = + mt7925_get_rates_table(hw, vif, false, true); + + if (changed & BSS_CHANGED_BASIC_RATES) + mconf->mt76.basic_rates_idx = + mt7925_get_rates_table(hw, vif, false, false); + + if (changed & (BSS_CHANGED_BEACON | + BSS_CHANGED_BEACON_ENABLED)) { + mconf->mt76.beacon_rates_idx = + mt7925_get_rates_table(hw, vif, true, false); + + mt7925_mcu_uni_add_beacon_offload(dev, hw, vif, + info->enable_beacon); + } + + /* ensure that enable txcmd_mode after bss_info */ + if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED)) + mt7925_mcu_set_tx(dev, info); + + mt792x_mutex_release(dev); +} + +static int +mt7925_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u16 old_links, u16 new_links, + struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]) +{ + struct mt792x_bss_conf *mconfs[IEEE80211_MLD_MAX_NUM_LINKS] = {}, *mconf; + struct mt792x_link_sta *mlinks[IEEE80211_MLD_MAX_NUM_LINKS] = {}, *mlink; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + unsigned long add = new_links & ~old_links; + unsigned long rem = old_links & ~new_links; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); + struct ieee80211_bss_conf *link_conf; + unsigned int link_id; + int err; + + if (old_links == new_links) + return 0; + + mt792x_mutex_acquire(dev); + + for_each_set_bit(link_id, &rem, IEEE80211_MLD_MAX_NUM_LINKS) { + mconf = mt792x_vif_to_link(mvif, link_id); + mlink = mt792x_sta_to_link(&mvif->sta, link_id); + + if (!mconf || !mlink) + continue; + + if (mconf != &mvif->bss_conf) { + mt792x_mac_link_bss_remove(dev, mconf, mlink); + devm_kfree(dev->mt76.dev, mconf); + devm_kfree(dev->mt76.dev, mlink); + } + + rcu_assign_pointer(mvif->link_conf[link_id], NULL); + rcu_assign_pointer(mvif->sta.link[link_id], NULL); + } + + for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { + if (!old_links) { + mvif->deflink_id = link_id; + mconf = &mvif->bss_conf; + mlink = &mvif->sta.deflink; + } else { + mconf = devm_kzalloc(dev->mt76.dev, sizeof(*mconf), + GFP_KERNEL); + mlink = devm_kzalloc(dev->mt76.dev, sizeof(*mlink), + GFP_KERNEL); + } + + 
mconfs[link_id] = mconf; + mlinks[link_id] = mlink; + mconf->link_id = link_id; + mconf->vif = mvif; + mlink->wcid.link_id = link_id; + mlink->wcid.link_valid = !!vif->valid_links; + mlink->wcid.def_wcid = &mvif->sta.deflink.wcid; + } + + if (hweight16(mvif->valid_links) == 0) + mt792x_mac_link_bss_remove(dev, &mvif->bss_conf, + &mvif->sta.deflink); + + for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { + mconf = mconfs[link_id]; + mlink = mlinks[link_id]; + link_conf = mt792x_vif_to_bss_conf(vif, link_id); + + rcu_assign_pointer(mvif->link_conf[link_id], mconf); + rcu_assign_pointer(mvif->sta.link[link_id], mlink); + + err = mt7925_mac_link_bss_add(dev, link_conf, mlink); + if (err < 0) + goto free; + + if (mconf != &mvif->bss_conf) { + err = mt7925_set_mlo_roc(phy, &mvif->bss_conf, + vif->active_links); + if (err < 0) + goto free; + } + } + + mvif->valid_links = new_links; + + mt792x_mutex_release(dev); + + return 0; + +free: + for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { + rcu_assign_pointer(mvif->link_conf[link_id], NULL); + rcu_assign_pointer(mvif->sta.link[link_id], NULL); + + if (mconf != &mvif->bss_conf) + devm_kfree(dev->mt76.dev, mconfs[link_id]); + if (mlink != &mvif->sta.deflink) + devm_kfree(dev->mt76.dev, mlinks[link_id]); + } + + mt792x_mutex_release(dev); + + return err; +} + +static int +mt7925_change_sta_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 old_links, u16 new_links) +{ + unsigned long add = new_links & ~old_links; + unsigned long rem = old_links & ~new_links; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + int err = 0; + + if (old_links == new_links) + return 0; + + mt792x_mutex_acquire(dev); + + err = mt7925_mac_sta_remove_links(dev, vif, sta, rem); + if (err < 0) + goto out; + + err = mt7925_mac_sta_add_links(dev, vif, sta, add); + if (err < 0) + goto out; + +out: + mt792x_mutex_release(dev); + + return err; +} + +static int mt7925_assign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx) +{ + struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct ieee80211_bss_conf *pri_link_conf; + struct mt792x_bss_conf *mconf; + + mutex_lock(&dev->mt76.mutex); + + if (ieee80211_vif_is_mld(vif)) { + mconf = mt792x_vif_to_link(mvif, link_conf->link_id); + pri_link_conf = mt792x_vif_to_bss_conf(vif, mvif->deflink_id); + + if (vif->type == NL80211_IFTYPE_STATION && + mconf == &mvif->bss_conf) + mt7925_mcu_add_bss_info(&dev->phy, NULL, pri_link_conf, + NULL, true); + } else { + mconf = &mvif->bss_conf; + } + + mconf->mt76.ctx = ctx; + mctx->bss_conf = mconf; + mutex_unlock(&dev->mt76.mutex); + + return 0; +} + +static void mt7925_unassign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx) +{ + struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct ieee80211_bss_conf *pri_link_conf; + struct mt792x_bss_conf *mconf; + + mutex_lock(&dev->mt76.mutex); + + if (ieee80211_vif_is_mld(vif)) { + mconf = mt792x_vif_to_link(mvif, link_conf->link_id); + pri_link_conf = mt792x_vif_to_bss_conf(vif, mvif->deflink_id); + + if (vif->type == NL80211_IFTYPE_STATION && + mconf == 
&mvif->bss_conf) + mt7925_mcu_add_bss_info(&dev->phy, NULL, pri_link_conf, + NULL, false); + } else { + mconf = &mvif->bss_conf; + } + + mctx->bss_conf = NULL; + mconf->mt76.ctx = NULL; + mutex_unlock(&dev->mt76.mutex); } const struct ieee80211_ops mt7925_ops = { @@ -1418,7 +2084,6 @@ const struct ieee80211_ops mt7925_ops = { .config = mt7925_config, .conf_tx = mt7925_conf_tx, .configure_filter = mt7925_configure_filter, - .bss_info_changed = mt7925_bss_info_changed, .start_ap = mt7925_start_ap, .stop_ap = mt7925_stop_ap, .sta_state = mt76_sta_state, @@ -1462,10 +2127,14 @@ const struct ieee80211_ops mt7925_ops = { .add_chanctx = mt7925_add_chanctx, .remove_chanctx = mt7925_remove_chanctx, .change_chanctx = mt7925_change_chanctx, - .assign_vif_chanctx = mt792x_assign_vif_chanctx, - .unassign_vif_chanctx = mt792x_unassign_vif_chanctx, + .assign_vif_chanctx = mt7925_assign_vif_chanctx, + .unassign_vif_chanctx = mt7925_unassign_vif_chanctx, .mgd_prepare_tx = mt7925_mgd_prepare_tx, .mgd_complete_tx = mt7925_mgd_complete_tx, + .vif_cfg_changed = mt7925_vif_cfg_changed, + .link_info_changed = mt7925_link_info_changed, + .change_vif_links = mt7925_change_vif_links, + .change_sta_links = mt7925_change_sta_links, }; EXPORT_SYMBOL_GPL(mt7925_ops); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c index 652a9accc43c..9dc22fbe25d3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c @@ -121,11 +121,12 @@ int mt7925_mcu_regval(struct mt792x_dev *dev, u32 regidx, u32 *val, bool set) EXPORT_SYMBOL_GPL(mt7925_mcu_regval); int mt7925_mcu_update_arp_filter(struct mt76_dev *dev, - struct mt76_vif *vif, - struct ieee80211_bss_conf *info) + struct ieee80211_bss_conf *link_conf) { - struct ieee80211_vif *mvif = container_of(info, struct ieee80211_vif, - bss_conf); + struct ieee80211_vif *mvif = container_of((void *)link_conf->vif, + struct ieee80211_vif, + drv_priv); + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); struct sk_buff *skb; int i, len = min_t(int, mvif->cfg.arp_addr_cnt, IEEE80211_BSS_ARP_ADDR_LIST_LEN); @@ -137,7 +138,7 @@ int mt7925_mcu_update_arp_filter(struct mt76_dev *dev, struct mt7925_arpns_tlv arp; } req = { .hdr = { - .bss_idx = vif->idx, + .bss_idx = mconf->mt76.idx, }, .arp = { .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP), @@ -308,22 +309,23 @@ mt7925_mcu_roc_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; struct mt7925_roc_grant_tlv *grant = priv; + if (ieee80211_vif_is_mld(vif) && vif->type == NL80211_IFTYPE_STATION) + return; + if (mvif->idx != grant->bss_idx) return; mvif->band_idx = grant->dbdcband; } -static void -mt7925_mcu_uni_roc_event(struct mt792x_dev *dev, struct sk_buff *skb) +static void mt7925_mcu_roc_handle_grant(struct mt792x_dev *dev, + struct tlv *tlv) { struct ieee80211_hw *hw = dev->mt76.hw; struct mt7925_roc_grant_tlv *grant; - struct mt7925_mcu_rxd *rxd; int duration; - rxd = (struct mt7925_mcu_rxd *)skb->data; - grant = (struct mt7925_roc_grant_tlv *)(rxd->tlv + 4); + grant = (struct mt7925_roc_grant_tlv *)tlv; /* should never happen */ WARN_ON_ONCE((le16_to_cpu(grant->tag) != UNI_EVENT_ROC_GRANT)); @@ -342,6 +344,29 @@ mt7925_mcu_uni_roc_event(struct mt792x_dev *dev, struct sk_buff *skb) } static void +mt7925_mcu_uni_roc_event(struct mt792x_dev *dev, struct sk_buff *skb) +{ + struct tlv *tlv; + int i = 0; + + skb_pull(skb, sizeof(struct mt7925_mcu_rxd) + 4); + 
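/* The event payload left after pulling the rxd header (plus four extra
 * bytes) is a chain of TLVs whose __le16 len field covers the whole
 * element, so advancing the cursor by le16_to_cpu(tlv->len) lands on
 * the next tag; unrecognized tags are simply skipped. The walk trusts
 * the firmware-provided len to be non-zero.
 */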
+ while (i < skb->len) { + tlv = (struct tlv *)(skb->data + i); + + switch (le16_to_cpu(tlv->tag)) { + case UNI_EVENT_ROC_GRANT: + mt7925_mcu_roc_handle_grant(dev, tlv); + break; + case UNI_EVENT_ROC_GRANT_SUB_LINK: + break; + } + + i += le16_to_cpu(tlv->len); + } +} + +static void mt7925_mcu_scan_event(struct mt792x_dev *dev, struct sk_buff *skb) { struct mt76_phy *mphy = &dev->mt76.phy; @@ -544,9 +569,9 @@ int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev, struct mt792x_vif *mvif = msta->vif; if (enable && !params->amsdu) - msta->wcid.amsdu = false; + msta->deflink.wcid.amsdu = false; - return mt7925_mcu_sta_ba(&dev->mt76, &mvif->mt76, params, + return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params, enable, true); } @@ -557,7 +582,7 @@ int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev, struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv; struct mt792x_vif *mvif = msta->vif; - return mt7925_mcu_sta_ba(&dev->mt76, &mvif->mt76, params, + return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params, enable, false); } @@ -726,6 +751,20 @@ mt7925_mcu_parse_phy_cap(struct mt792x_dev *dev, char *data) dev->has_eht = cap->eht; } +static void +mt7925_mcu_parse_eml_cap(struct mt792x_dev *dev, char *data) +{ + struct mt7925_mcu_eml_cap { + u8 rsv[4]; + __le16 eml_cap; + u8 rsv2[6]; + } __packed * cap; + + cap = (struct mt7925_mcu_eml_cap *)data; + + dev->phy.eml_cap = le16_to_cpu(cap->eml_cap); +} + static int mt7925_mcu_get_nic_capability(struct mt792x_dev *dev) { @@ -780,6 +819,12 @@ mt7925_mcu_get_nic_capability(struct mt792x_dev *dev) case MT_NIC_CAP_PHY: mt7925_mcu_parse_phy_cap(dev, tlv->data); break; + case MT_NIC_CAP_CHIP_CAP: + memcpy(&dev->phy.chip_cap, (void *)skb->data, sizeof(u64)); + break; + case MT_NIC_CAP_EML_CAP: + mt7925_mcu_parse_eml_cap(dev, tlv->data); + break; default: break; } @@ -848,7 +893,7 @@ EXPORT_SYMBOL_GPL(mt7925_run_firmware); static void mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) + struct ieee80211_link_sta *link_sta) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct sta_rec_hdr_trans *hdr_trans; @@ -864,10 +909,15 @@ mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb, else hdr_trans->from_ds = true; - if (sta) - wcid = (struct mt76_wcid *)sta->drv_priv; - else - wcid = &mvif->sta.wcid; + if (link_sta) { + struct mt792x_sta *msta = (struct mt792x_sta *)link_sta->sta->drv_priv; + struct mt792x_link_sta *mlink; + + mlink = mt792x_sta_to_link(msta, link_sta->link_id); + wcid = &mlink->wcid; + } else { + wcid = &mvif->sta.deflink.wcid; + } if (!wcid) return; @@ -881,27 +931,36 @@ mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb, int mt7925_mcu_wtbl_update_hdr_trans(struct mt792x_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) + struct ieee80211_sta *sta, + int link_id) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct ieee80211_link_sta *link_sta = sta ? &sta->deflink : NULL; + struct mt792x_link_sta *mlink; + struct mt792x_bss_conf *mconf; struct mt792x_sta *msta; struct sk_buff *skb; msta = sta ? 
(struct mt792x_sta *)sta->drv_priv : &mvif->sta; - skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, - &msta->wcid, + mlink = mt792x_sta_to_link(msta, link_id); + link_sta = mt792x_sta_to_link_sta(vif, sta, link_id); + mconf = mt792x_vif_to_link(mvif, link_id); + + skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mconf->mt76, + &mlink->wcid, MT7925_STA_UPDATE_MAX_SIZE); if (IS_ERR(skb)) return PTR_ERR(skb); /* starec hdr trans */ - mt7925_mcu_sta_hdr_trans_tlv(skb, vif, sta); + mt7925_mcu_sta_hdr_trans_tlv(skb, vif, link_sta); return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true); } -int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif) +int mt7925_mcu_set_tx(struct mt792x_dev *dev, + struct ieee80211_bss_conf *bss_conf) { #define MCU_EDCA_AC_PARAM 0 #define WMM_AIFS_SET BIT(0) @@ -910,12 +969,12 @@ int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif) #define WMM_TXOP_SET BIT(3) #define WMM_PARAM_SET (WMM_AIFS_SET | WMM_CW_MIN_SET | \ WMM_CW_MAX_SET | WMM_TXOP_SET) - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(bss_conf); struct { u8 bss_idx; u8 __rsv[3]; } __packed hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mconf->mt76.idx, }; struct sk_buff *skb; int len = sizeof(hdr) + IEEE80211_NUM_ACS * sizeof(struct edca); @@ -928,7 +987,7 @@ int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif) skb_put_data(skb, &hdr, sizeof(hdr)); for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac]; + struct ieee80211_tx_queue_params *q = &mconf->queue_params[ac]; struct edca *e; struct tlv *tlv; @@ -960,11 +1019,12 @@ mt7925_mcu_sta_key_tlv(struct mt76_wcid *wcid, struct mt76_connac_sta_key_conf *sta_key_conf, struct sk_buff *skb, struct ieee80211_key_conf *key, - enum set_key_cmd cmd) + enum set_key_cmd cmd, + struct mt792x_sta *msta) { - struct mt792x_sta *msta = container_of(wcid, struct mt792x_sta, wcid); - struct sta_rec_sec_uni *sec; struct mt792x_vif *mvif = msta->vif; + struct mt792x_bss_conf *mconf = mt792x_vif_to_link(mvif, wcid->link_id); + struct sta_rec_sec_uni *sec; struct ieee80211_sta *sta; struct ieee80211_vif *vif; struct tlv *tlv; @@ -976,17 +1036,27 @@ mt7925_mcu_sta_key_tlv(struct mt76_wcid *wcid, tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V3, sizeof(*sec)); sec = (struct sta_rec_sec_uni *)tlv; - sec->bss_idx = mvif->mt76.idx; + sec->bss_idx = mconf->mt76.idx; sec->is_authenticator = 0; - sec->mgmt_prot = 0; + sec->mgmt_prot = 1; /* only used in MLO mode */ sec->wlan_idx = (u8)wcid->idx; if (sta) { + struct ieee80211_link_sta *link_sta; + sec->tx_key = 1; sec->key_type = 1; - memcpy(sec->peer_addr, sta->addr, ETH_ALEN); + link_sta = mt792x_sta_to_link_sta(vif, sta, wcid->link_id); + + if (link_sta) + memcpy(sec->peer_addr, link_sta->addr, ETH_ALEN); } else { - memcpy(sec->peer_addr, vif->bss_conf.bssid, ETH_ALEN); + struct ieee80211_bss_conf *link_conf; + + link_conf = mt792x_vif_to_bss_conf(vif, wcid->link_id); + + if (link_conf) + memcpy(sec->peer_addr, link_conf->bssid, ETH_ALEN); } if (cmd == SET_KEY) { @@ -1031,25 +1101,121 @@ mt7925_mcu_sta_key_tlv(struct mt76_wcid *wcid, int mt7925_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif, struct mt76_connac_sta_key_conf *sta_key_conf, struct ieee80211_key_conf *key, int mcu_cmd, - struct mt76_wcid *wcid, enum set_key_cmd cmd) + struct mt76_wcid *wcid, enum set_key_cmd cmd, + struct 
mt792x_sta *msta) { - struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_bss_conf *mconf = mt792x_vif_to_link(mvif, wcid->link_id); struct sk_buff *skb; int ret; - skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid, + skb = __mt76_connac_mcu_alloc_sta_req(dev, &mconf->mt76, wcid, MT7925_STA_UPDATE_MAX_SIZE); if (IS_ERR(skb)) return PTR_ERR(skb); - ret = mt7925_mcu_sta_key_tlv(wcid, sta_key_conf, skb, key, cmd); + ret = mt7925_mcu_sta_key_tlv(wcid, sta_key_conf, skb, key, cmd, msta); if (ret) return ret; return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true); } -int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, +int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links, + int duration, u8 token_id) +{ + struct mt792x_vif *mvif = mconf->vif; + struct ieee80211_vif *vif = container_of((void *)mvif, + struct ieee80211_vif, drv_priv); + struct ieee80211_bss_conf *link_conf; + struct ieee80211_channel *chan; + const u8 ch_band[] = { + [NL80211_BAND_2GHZ] = 1, + [NL80211_BAND_5GHZ] = 2, + [NL80211_BAND_6GHZ] = 3, + }; + enum mt7925_roc_req type; + int center_ch, i = 0; + bool is_AG_band = false; + struct { + u8 id; + u8 bss_idx; + u16 tag; + struct mt792x_bss_conf *mconf; + struct ieee80211_channel *chan; + } links[2]; + + struct { + struct { + u8 rsv[4]; + } __packed hdr; + struct roc_acquire_tlv roc[2]; + } __packed req; + + if (!mconf || hweight16(vif->valid_links) < 2 || + hweight16(sel_links) != 2) + return -EPERM; + + for (i = 0; i < ARRAY_SIZE(links); i++) { + links[i].id = i ? __ffs(~BIT(mconf->link_id) & sel_links) : + mconf->link_id; + link_conf = mt792x_vif_to_bss_conf(vif, links[i].id); + if (WARN_ON_ONCE(!link_conf)) + return -EPERM; + + links[i].chan = link_conf->chanreq.oper.chan; + if (WARN_ON_ONCE(!links[i].chan)) + return -EPERM; + + links[i].mconf = mt792x_vif_to_link(mvif, links[i].id); + links[i].tag = links[i].id == mconf->link_id ? + UNI_ROC_ACQUIRE : UNI_ROC_SUB_LINK; + + is_AG_band |= links[i].chan->band == NL80211_BAND_2GHZ; + } + + if (vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP) + type = is_AG_band ? MT7925_ROC_REQ_MLSR_AG : + MT7925_ROC_REQ_MLSR_AA; + else + type = MT7925_ROC_REQ_JOIN; + + for (i = 0; i < ARRAY_SIZE(links) && i < hweight16(vif->active_links); i++) { + if (WARN_ON_ONCE(!links[i].mconf || !links[i].chan)) + continue; + + chan = links[i].chan; + center_ch = ieee80211_frequency_to_channel(chan->center_freq); + req.roc[i].len = cpu_to_le16(sizeof(struct roc_acquire_tlv)); + req.roc[i].tag = cpu_to_le16(links[i].tag); + req.roc[i].tokenid = token_id; + req.roc[i].reqtype = type; + req.roc[i].maxinterval = cpu_to_le32(duration); + req.roc[i].bss_idx = links[i].mconf->mt76.idx; + req.roc[i].control_channel = chan->hw_value; + req.roc[i].bw = CMD_CBW_20MHZ; + req.roc[i].bw_from_ap = CMD_CBW_20MHZ; + req.roc[i].center_chan = center_ch; + req.roc[i].center_chan_from_ap = center_ch; + + /* STR : 0xfe indicates BAND_ALL with enabling DBDC + * EMLSR : 0xff indicates (BAND_AUTO) without DBDC + */ + req.roc[i].dbdcband = type == MT7925_ROC_REQ_JOIN ? 
0xfe : 0xff; + + if (chan->hw_value < center_ch) + req.roc[i].sco = 1; /* SCA */ + else if (chan->hw_value > center_ch) + req.roc[i].sco = 3; /* SCB */ + + req.roc[i].band = ch_band[chan->band]; + } + + return mt76_mcu_send_msg(&mvif->phy->dev->mt76, MCU_UNI_CMD(ROC), + &req, sizeof(req), false); +} + +int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf, struct ieee80211_channel *chan, int duration, enum mt7925_roc_req type, u8 token_id) { @@ -1059,25 +1225,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, struct { u8 rsv[4]; } __packed hdr; - struct roc_acquire_tlv { - __le16 tag; - __le16 len; - u8 bss_idx; - u8 tokenid; - u8 control_channel; - u8 sco; - u8 band; - u8 bw; - u8 center_chan; - u8 center_chan2; - u8 bw_from_ap; - u8 center_chan_from_ap; - u8 center_chan2_from_ap; - u8 reqtype; - __le32 maxinterval; - u8 dbdcband; - u8 rsv[3]; - } __packed roc; + struct roc_acquire_tlv roc; } __packed req = { .roc = { .tag = cpu_to_le16(UNI_ROC_ACQUIRE), @@ -1085,7 +1233,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, .tokenid = token_id, .reqtype = type, .maxinterval = cpu_to_le32(duration), - .bss_idx = vif->mt76.idx, + .bss_idx = mconf->mt76.idx, .control_channel = chan->hw_value, .bw = CMD_CBW_20MHZ, .bw_from_ap = CMD_CBW_20MHZ, @@ -1116,7 +1264,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, &req, sizeof(req), false); } -int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, +int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf, u8 token_id) { struct mt792x_dev *dev = phy->dev; @@ -1137,7 +1285,7 @@ int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, .tag = cpu_to_le16(UNI_ROC_ABORT), .len = cpu_to_le16(sizeof(struct roc_abort_tlv)), .tokenid = token_id, - .bss_idx = vif->mt76.idx, + .bss_idx = mconf->mt76.idx, .dbdcband = 0xff, /* auto*/ }, }; @@ -1146,80 +1294,6 @@ int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, &req, sizeof(req), false); } -int mt7925_mcu_set_chan_info(struct mt792x_phy *phy, u16 tag) -{ - static const u8 ch_band[] = { - [NL80211_BAND_2GHZ] = 0, - [NL80211_BAND_5GHZ] = 1, - [NL80211_BAND_6GHZ] = 2, - }; - struct mt792x_dev *dev = phy->dev; - struct cfg80211_chan_def *chandef = &phy->mt76->chandef; - int freq1 = chandef->center_freq1; - u8 band_idx = chandef->chan->band != NL80211_BAND_2GHZ; - struct { - /* fixed field */ - u8 __rsv[4]; - - __le16 tag; - __le16 len; - u8 control_ch; - u8 center_ch; - u8 bw; - u8 tx_path_num; - u8 rx_path; /* mask or num */ - u8 switch_reason; - u8 band_idx; - u8 center_ch2; /* for 80+80 only */ - __le16 cac_case; - u8 channel_band; - u8 rsv0; - __le32 outband_freq; - u8 txpower_drop; - u8 ap_bw; - u8 ap_center_ch; - u8 rsv1[53]; - } __packed req = { - .tag = cpu_to_le16(tag), - .len = cpu_to_le16(sizeof(req) - 4), - .control_ch = chandef->chan->hw_value, - .center_ch = ieee80211_frequency_to_channel(freq1), - .bw = mt76_connac_chan_bw(chandef), - .tx_path_num = hweight8(phy->mt76->antenna_mask), - .rx_path = phy->mt76->antenna_mask, - .band_idx = band_idx, - .channel_band = ch_band[chandef->chan->band], - }; - - if (chandef->chan->band == NL80211_BAND_6GHZ) - req.channel_band = 2; - else - req.channel_band = chandef->chan->band; - - if (tag == UNI_CHANNEL_RX_PATH || - dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR) - req.switch_reason = CH_SWITCH_NORMAL; - else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) - req.switch_reason 
= CH_SWITCH_SCAN_BYPASS_DPD; - else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef, - NL80211_IFTYPE_AP)) - req.switch_reason = CH_SWITCH_DFS; - else - req.switch_reason = CH_SWITCH_NORMAL; - - if (tag == UNI_CHANNEL_SWITCH) - req.rx_path = hweight8(req.rx_path); - - if (chandef->width == NL80211_CHAN_WIDTH_80P80) { - int freq2 = chandef->center_freq2; - - req.center_ch2 = ieee80211_frequency_to_channel(freq2); - } - - return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(CHANNEL_SWITCH), - &req, sizeof(req), true); -} - int mt7925_mcu_set_eeprom(struct mt792x_dev *dev) { struct { @@ -1242,9 +1316,10 @@ int mt7925_mcu_set_eeprom(struct mt792x_dev *dev) } EXPORT_SYMBOL_GPL(mt7925_mcu_set_eeprom); -int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif) +int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, + struct ieee80211_bss_conf *link_conf) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); struct { struct { u8 bss_idx; @@ -1263,16 +1338,16 @@ int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif) } __packed ps; } __packed ps_req = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mconf->mt76.idx, }, .ps = { .tag = cpu_to_le16(UNI_BSS_INFO_PS), .len = cpu_to_le16(sizeof(struct ps_tlv)), - .ps_state = vif->cfg.ps ? 2 : 0, + .ps_state = link_conf->vif->cfg.ps ? 2 : 0, }, }; - if (vif->type != NL80211_IFTYPE_STATION) + if (link_conf->vif->type != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE), @@ -1280,10 +1355,10 @@ int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif) } static int -mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev, struct ieee80211_vif *vif, - bool enable) +mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev, + struct ieee80211_bss_conf *link_conf, bool enable) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); struct { struct { u8 bss_idx; @@ -1300,17 +1375,17 @@ mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev, struct ieee80211_vif *vif, } __packed bcnft; } __packed bcnft_req = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mconf->mt76.idx, }, .bcnft = { .tag = cpu_to_le16(UNI_BSS_INFO_BCNFT), .len = cpu_to_le16(sizeof(struct bcnft_tlv)), - .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int), - .dtim_period = vif->bss_conf.dtim_period, + .bcn_interval = cpu_to_le16(link_conf->beacon_int), + .dtim_period = link_conf->dtim_period, }, }; - if (vif->type != NL80211_IFTYPE_STATION) + if (link_conf->vif->type != NL80211_IFTYPE_STATION) return 0; return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE), @@ -1318,10 +1393,11 @@ mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev, struct ieee80211_vif *vif, } int -mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif, +mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, + struct ieee80211_bss_conf *link_conf, bool enable) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); struct { struct { u8 bss_idx; @@ -1338,13 +1414,13 @@ mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif, } __packed enable; } req = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mconf->mt76.idx, }, .enable = { .tag = cpu_to_le16(UNI_BSS_INFO_BCNFT), .len = cpu_to_le16(sizeof(struct bcnft_tlv)), - .dtim_period = 
vif->bss_conf.dtim_period, - .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int), + .dtim_period = link_conf->dtim_period, + .bcn_interval = cpu_to_le16(link_conf->beacon_int), }, }; struct { @@ -1358,7 +1434,7 @@ mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif, } __packed disable; } req1 = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mconf->mt76.idx, }, .disable = { .tag = cpu_to_le16(UNI_BSS_INFO_PM_DISABLE), @@ -1377,42 +1453,43 @@ mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif, } static void -mt7925_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +mt7925_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta) { - if (!sta->deflink.he_cap.has_he) + if (!link_sta->he_cap.has_he) return; - mt76_connac_mcu_sta_he_tlv_v2(skb, sta); + mt76_connac_mcu_sta_he_tlv_v2(skb, link_sta->sta); } static void -mt7925_mcu_sta_he_6g_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +mt7925_mcu_sta_he_6g_tlv(struct sk_buff *skb, + struct ieee80211_link_sta *link_sta) { struct sta_rec_he_6g_capa *he_6g; struct tlv *tlv; - if (!sta->deflink.he_6ghz_capa.capa) + if (!link_sta->he_6ghz_capa.capa) return; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_6G, sizeof(*he_6g)); he_6g = (struct sta_rec_he_6g_capa *)tlv; - he_6g->capa = sta->deflink.he_6ghz_capa.capa; + he_6g->capa = link_sta->he_6ghz_capa.capa; } static void -mt7925_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +mt7925_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta) { struct ieee80211_eht_mcs_nss_supp *mcs_map; struct ieee80211_eht_cap_elem_fixed *elem; struct sta_rec_eht *eht; struct tlv *tlv; - if (!sta->deflink.eht_cap.has_eht) + if (!link_sta->eht_cap.has_eht) return; - mcs_map = &sta->deflink.eht_cap.eht_mcs_nss_supp; - elem = &sta->deflink.eht_cap.eht_cap_elem; + mcs_map = &link_sta->eht_cap.eht_mcs_nss_supp; + elem = &link_sta->eht_cap.eht_cap_elem; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_EHT, sizeof(*eht)); @@ -1422,50 +1499,52 @@ mt7925_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) eht->phy_cap = cpu_to_le64(*(u64 *)elem->phy_cap_info); eht->phy_cap_ext = cpu_to_le64(elem->phy_cap_info[8]); - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20) memcpy(eht->mcs_map_bw20, &mcs_map->only_20mhz, sizeof(eht->mcs_map_bw20)); memcpy(eht->mcs_map_bw80, &mcs_map->bw._80, sizeof(eht->mcs_map_bw80)); memcpy(eht->mcs_map_bw160, &mcs_map->bw._160, sizeof(eht->mcs_map_bw160)); } static void -mt7925_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +mt7925_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta) { struct sta_rec_ht *ht; struct tlv *tlv; - if (!sta->deflink.ht_cap.ht_supported) + if (!link_sta->ht_cap.ht_supported) return; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht)); ht = (struct sta_rec_ht *)tlv; - ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap); + ht->ht_cap = cpu_to_le16(link_sta->ht_cap.cap); } static void -mt7925_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +mt7925_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta) { struct sta_rec_vht *vht; struct tlv *tlv; /* For 6G band, this tlv is necessary to let hw work normally */ - if (!sta->deflink.he_6ghz_capa.capa && !sta->deflink.vht_cap.vht_supported) + if (!link_sta->he_6ghz_capa.capa && !link_sta->vht_cap.vht_supported) return; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht)); 
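/* Editor's note -- a minimal sketch, not part of this patch: every
 * mt7925_mcu_sta_*_tlv() helper in this series follows the pattern used
 * here for the VHT record. mt76_connac_mcu_add_tlv() reserves the record
 * in the skb and fills in the common (tag, len) header, roughly:
 *
 *	static struct tlv *add_tlv_sketch(struct sk_buff *skb,
 *					  u16 tag, u16 len)
 *	{
 *		struct tlv *ptlv = skb_put_zero(skb, len);
 *
 *		ptlv->tag = cpu_to_le16(tag);
 *		ptlv->len = cpu_to_le16(len);
 *		return ptlv;
 *	}
 *
 * ("add_tlv_sketch" is a hypothetical name; the real helper also keeps
 * the TLV count in the sta_req header up to date.) The caller then casts
 * the returned header to its record type -- sta_rec_vht below -- whose
 * layout starts with the same __le16 tag/len pair, and fills the body,
 * which is why sizeof(*vht) passed above must cover the whole record,
 * header included.
 */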
vht = (struct sta_rec_vht *)tlv; - vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap); - vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map; - vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map; + vht->vht_cap = cpu_to_le32(link_sta->vht_cap.cap); + vht->vht_rx_mcs_map = link_sta->vht_cap.vht_mcs.rx_mcs_map; + vht->vht_tx_mcs_map = link_sta->vht_cap.vht_mcs.tx_mcs_map; } static void mt7925_mcu_sta_amsdu_tlv(struct sk_buff *skb, - struct ieee80211_vif *vif, struct ieee80211_sta *sta) + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta) { - struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct mt792x_sta *msta = (struct mt792x_sta *)link_sta->sta->drv_priv; + struct mt792x_link_sta *mlink; struct sta_rec_amsdu *amsdu; struct tlv *tlv; @@ -1473,16 +1552,18 @@ mt7925_mcu_sta_amsdu_tlv(struct sk_buff *skb, vif->type != NL80211_IFTYPE_AP) return; - if (!sta->deflink.agg.max_amsdu_len) + if (!link_sta->agg.max_amsdu_len) return; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu)); amsdu = (struct sta_rec_amsdu *)tlv; amsdu->max_amsdu_num = 8; amsdu->amsdu_en = true; - msta->wcid.amsdu = true; - switch (sta->deflink.agg.max_amsdu_len) { + mlink = mt792x_sta_to_link(msta, link_sta->link_id); + mlink->wcid.amsdu = true; + + switch (link_sta->agg.max_amsdu_len) { case IEEE80211_MAX_MPDU_LEN_VHT_11454: amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; @@ -1499,34 +1580,44 @@ mt7925_mcu_sta_amsdu_tlv(struct sk_buff *skb, static void mt7925_mcu_sta_phy_tlv(struct sk_buff *skb, - struct ieee80211_vif *vif, struct ieee80211_sta *sta) + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - struct cfg80211_chan_def *chandef = &mvif->mt76.ctx->def; + struct ieee80211_bss_conf *link_conf; + struct cfg80211_chan_def *chandef; + struct mt792x_bss_conf *mconf; struct sta_rec_phy *phy; struct tlv *tlv; u8 af = 0, mm = 0; + link_conf = mt792x_vif_to_bss_conf(vif, link_sta->link_id); + mconf = mt792x_vif_to_link(mvif, link_sta->link_id); + chandef = mconf->mt76.ctx ? 
&mconf->mt76.ctx->def : + &link_conf->chanreq.oper; + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy)); phy = (struct sta_rec_phy *)tlv; - phy->phy_type = mt76_connac_get_phy_mode_v2(mvif->phy->mt76, vif, chandef->chan->band, sta); - phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates); - if (sta->deflink.ht_cap.ht_supported) { - af = sta->deflink.ht_cap.ampdu_factor; - mm = sta->deflink.ht_cap.ampdu_density; + phy->phy_type = mt76_connac_get_phy_mode_v2(mvif->phy->mt76, vif, + chandef->chan->band, + link_sta); + phy->basic_rate = cpu_to_le16((u16)link_conf->basic_rates); + if (link_sta->ht_cap.ht_supported) { + af = link_sta->ht_cap.ampdu_factor; + mm = link_sta->ht_cap.ampdu_density; } - if (sta->deflink.vht_cap.vht_supported) { + if (link_sta->vht_cap.vht_supported) { u8 vht_af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK, - sta->deflink.vht_cap.cap); + link_sta->vht_cap.cap); af = max_t(u8, af, vht_af); } - if (sta->deflink.he_6ghz_capa.capa) { - af = le16_get_bits(sta->deflink.he_6ghz_capa.capa, + if (link_sta->he_6ghz_capa.capa) { + af = le16_get_bits(link_sta->he_6ghz_capa.capa, IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); - mm = le16_get_bits(sta->deflink.he_6ghz_capa.capa, + mm = le16_get_bits(link_sta->he_6ghz_capa.capa, IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); } @@ -1537,7 +1628,7 @@ mt7925_mcu_sta_phy_tlv(struct sk_buff *skb, static void mt7925_mcu_sta_state_v2_tlv(struct mt76_phy *mphy, struct sk_buff *skb, - struct ieee80211_sta *sta, + struct ieee80211_link_sta *link_sta, struct ieee80211_vif *vif, u8 rcpi, u8 sta_state) { @@ -1557,28 +1648,37 @@ mt7925_mcu_sta_state_v2_tlv(struct mt76_phy *mphy, struct sk_buff *skb, state = (struct sta_rec_state_v2 *)tlv; state->state = sta_state; - if (sta->deflink.vht_cap.vht_supported) { - state->vht_opmode = sta->deflink.bandwidth; - state->vht_opmode |= sta->deflink.rx_nss << + if (link_sta->vht_cap.vht_supported) { + state->vht_opmode = link_sta->bandwidth; + state->vht_opmode |= link_sta->rx_nss << IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT; } } static void mt7925_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, - struct ieee80211_vif *vif, struct ieee80211_sta *sta) + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - struct cfg80211_chan_def *chandef = &mvif->mt76.ctx->def; - enum nl80211_band band = chandef->chan->band; + struct ieee80211_bss_conf *link_conf; + struct cfg80211_chan_def *chandef; struct sta_rec_ra_info *ra_info; + struct mt792x_bss_conf *mconf; + enum nl80211_band band; struct tlv *tlv; u16 supp_rates; + link_conf = mt792x_vif_to_bss_conf(vif, link_sta->link_id); + mconf = mt792x_vif_to_link(mvif, link_sta->link_id); + chandef = mconf->mt76.ctx ? 
&mconf->mt76.ctx->def : + &link_conf->chanreq.oper; + band = chandef->chan->band; + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info)); ra_info = (struct sta_rec_ra_info *)tlv; - supp_rates = sta->deflink.supp_rates[band]; + supp_rates = link_sta->supp_rates[band]; if (band == NL80211_BAND_2GHZ) supp_rates = FIELD_PREP(RA_LEGACY_OFDM, supp_rates >> 4) | FIELD_PREP(RA_LEGACY_CCK, supp_rates & 0xf); @@ -1587,29 +1687,80 @@ mt7925_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, ra_info->legacy = cpu_to_le16(supp_rates); - if (sta->deflink.ht_cap.ht_supported) + if (link_sta->ht_cap.ht_supported) memcpy(ra_info->rx_mcs_bitmask, - sta->deflink.ht_cap.mcs.rx_mask, + link_sta->ht_cap.mcs.rx_mask, HT_MCS_MASK_NUM); } static void +mt7925_mcu_sta_eht_mld_tlv(struct sk_buff *skb, + struct ieee80211_vif *vif, struct ieee80211_sta *sta) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct wiphy *wiphy = mvif->phy->mt76->hw->wiphy; + const struct wiphy_iftype_ext_capab *ext_capa; + struct sta_rec_eht_mld *eht_mld; + struct tlv *tlv; + u16 eml_cap; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_EHT_MLD, sizeof(*eht_mld)); + eht_mld = (struct sta_rec_eht_mld *)tlv; + eht_mld->mld_type = 0xff; + + if (!ieee80211_vif_is_mld(vif)) + return; + + ext_capa = cfg80211_get_iftype_ext_capa(wiphy, + ieee80211_vif_type_p2p(vif)); + if (!ext_capa) + return; + + eml_cap = (vif->cfg.eml_cap & (IEEE80211_EML_CAP_EMLSR_SUPP | + IEEE80211_EML_CAP_TRANSITION_TIMEOUT)) | + (ext_capa->eml_capabilities & (IEEE80211_EML_CAP_EMLSR_PADDING_DELAY | + IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY)); + + if (eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP) { + eht_mld->eml_cap[0] = u16_get_bits(eml_cap, GENMASK(7, 0)); + eht_mld->eml_cap[1] = u16_get_bits(eml_cap, GENMASK(15, 8)); + } else { + eht_mld->str_cap[0] = BIT(1); + } +} + +static void mt7925_mcu_sta_mld_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + unsigned long valid = mvif->valid_links; + struct mt792x_bss_conf *mconf; + struct mt792x_link_sta *mlink; struct sta_rec_mld *mld; struct tlv *tlv; + int i, cnt = 0; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MLD, sizeof(*mld)); mld = (struct sta_rec_mld *)tlv; - memcpy(mld->mac_addr, vif->addr, ETH_ALEN); - mld->primary_id = cpu_to_le16(wcid->idx); - mld->wlan_id = cpu_to_le16(wcid->idx); + memcpy(mld->mac_addr, sta->addr, ETH_ALEN); + mld->primary_id = cpu_to_le16(msta->deflink.wcid.idx); + mld->wlan_id = cpu_to_le16(msta->deflink.wcid.idx); + mld->link_num = min_t(u8, hweight16(mvif->valid_links), 2); + + for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { + if (cnt == mld->link_num) + break; + + mconf = mt792x_vif_to_link(mvif, i); + mlink = mt792x_sta_to_link(msta, i); + mld->link[cnt].wlan_id = cpu_to_le16(mlink->wcid.idx); + mld->link[cnt++].bss_idx = mconf->mt76.idx; - /* TODO: 0 means deflink only, add secondary link(1) later */ - mld->link_num = !!(hweight8(vif->active_links) > 1); - WARN_ON_ONCE(mld->link_num); + if (mlink != &msta->deflink) + mld->secondary_id = cpu_to_le16(mlink->wcid.idx); + } } static int @@ -1625,39 +1776,106 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy, if (IS_ERR(skb)) return PTR_ERR(skb); - if (info->sta || !info->offload_fw) - mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif, info->sta, + if (info->link_sta) + mt76_connac_mcu_sta_basic_tlv(dev, 
skb, info->vif, + info->link_sta, info->enable, info->newly); - if (info->sta && info->enable) { - mt7925_mcu_sta_phy_tlv(skb, info->vif, info->sta); - mt7925_mcu_sta_ht_tlv(skb, info->sta); - mt7925_mcu_sta_vht_tlv(skb, info->sta); - mt76_connac_mcu_sta_uapsd(skb, info->vif, info->sta); - mt7925_mcu_sta_amsdu_tlv(skb, info->vif, info->sta); - mt7925_mcu_sta_he_tlv(skb, info->sta); - mt7925_mcu_sta_he_6g_tlv(skb, info->sta); - mt7925_mcu_sta_eht_tlv(skb, info->sta); - mt7925_mcu_sta_rate_ctrl_tlv(skb, info->vif, info->sta); - mt7925_mcu_sta_state_v2_tlv(phy, skb, info->sta, + if (info->link_sta && info->enable) { + mt7925_mcu_sta_phy_tlv(skb, info->vif, info->link_sta); + mt7925_mcu_sta_ht_tlv(skb, info->link_sta); + mt7925_mcu_sta_vht_tlv(skb, info->link_sta); + mt76_connac_mcu_sta_uapsd(skb, info->vif, info->link_sta->sta); + mt7925_mcu_sta_amsdu_tlv(skb, info->vif, info->link_sta); + mt7925_mcu_sta_he_tlv(skb, info->link_sta); + mt7925_mcu_sta_he_6g_tlv(skb, info->link_sta); + mt7925_mcu_sta_eht_tlv(skb, info->link_sta); + mt7925_mcu_sta_rate_ctrl_tlv(skb, info->vif, + info->link_sta); + mt7925_mcu_sta_state_v2_tlv(phy, skb, info->link_sta, info->vif, info->rcpi, info->state); - mt7925_mcu_sta_mld_tlv(skb, info->vif, info->sta); + mt7925_mcu_sta_mld_tlv(skb, info->vif, info->link_sta->sta); } if (info->enable) - mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->sta); + mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta); + + return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true); +} + +static void +mt7925_mcu_sta_remove_tlv(struct sk_buff *skb) +{ + struct sta_rec_remove *rem; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, 0x25, sizeof(*rem)); + rem = (struct sta_rec_remove *)tlv; + rem->action = 0; +} + +static int +mt7925_mcu_mlo_sta_cmd(struct mt76_phy *phy, + struct mt76_sta_cmd_info *info) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)info->vif->drv_priv; + struct mt76_dev *dev = phy->dev; + struct mt792x_bss_conf *mconf; + struct sk_buff *skb; + + mconf = mt792x_vif_to_link(mvif, info->wcid->link_id); + + skb = __mt76_connac_mcu_alloc_sta_req(dev, &mconf->mt76, info->wcid, + MT7925_STA_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + if (info->enable) + mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif, + info->link_sta, + info->enable, info->newly); + + if (info->enable && info->link_sta) { + mt7925_mcu_sta_phy_tlv(skb, info->vif, info->link_sta); + mt7925_mcu_sta_ht_tlv(skb, info->link_sta); + mt7925_mcu_sta_vht_tlv(skb, info->link_sta); + mt76_connac_mcu_sta_uapsd(skb, info->vif, info->link_sta->sta); + mt7925_mcu_sta_amsdu_tlv(skb, info->vif, info->link_sta); + mt7925_mcu_sta_he_tlv(skb, info->link_sta); + mt7925_mcu_sta_he_6g_tlv(skb, info->link_sta); + mt7925_mcu_sta_eht_tlv(skb, info->link_sta); + mt7925_mcu_sta_rate_ctrl_tlv(skb, info->vif, + info->link_sta); + mt7925_mcu_sta_state_v2_tlv(phy, skb, info->link_sta, + info->vif, info->rcpi, + info->state); + + if (info->state != MT76_STA_INFO_STATE_NONE) { + mt7925_mcu_sta_mld_tlv(skb, info->vif, info->link_sta->sta); + mt7925_mcu_sta_eht_mld_tlv(skb, info->vif, info->link_sta->sta); + } + + mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta); + } + + if (!info->enable) { + mt7925_mcu_sta_remove_tlv(skb); + mt76_connac_mcu_add_tlv(skb, STA_REC_MLD_OFF, + sizeof(struct tlv)); + } return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true); } -int mt7925_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta, +int mt7925_mcu_sta_update(struct mt792x_dev *dev, + struct 
ieee80211_link_sta *link_sta, struct ieee80211_vif *vif, bool enable, enum mt76_sta_info_state state) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - int rssi = -ewma_rssi_read(&mvif->rssi); + int rssi = -ewma_rssi_read(&mvif->bss_conf.rssi); struct mt76_sta_cmd_info info = { - .sta = sta, + .link_sta = link_sta, .vif = vif, .enable = enable, .cmd = MCU_UNI_CMD(STA_REC_UPDATE), @@ -1666,12 +1884,22 @@ int mt7925_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta, .rcpi = to_rcpi(rssi), }; struct mt792x_sta *msta; + struct mt792x_link_sta *mlink; + int err; + + if (link_sta) { + msta = (struct mt792x_sta *)link_sta->sta->drv_priv; + mlink = mt792x_sta_to_link(msta, link_sta->link_id); + } + info.wcid = link_sta ? &mlink->wcid : &mvif->sta.deflink.wcid; + info.newly = link_sta ? state != MT76_STA_INFO_STATE_ASSOC : true; - msta = sta ? (struct mt792x_sta *)sta->drv_priv : NULL; - info.wcid = msta ? &msta->wcid : &mvif->sta.wcid; - info.newly = msta ? state != MT76_STA_INFO_STATE_ASSOC : true; + if (ieee80211_vif_is_mld(vif)) + err = mt7925_mcu_mlo_sta_cmd(&dev->mphy, &info); + else + err = mt7925_mcu_sta_cmd(&dev->mphy, &info); - return mt7925_mcu_sta_cmd(&dev->mphy, &info); + return err; } int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev, @@ -1680,21 +1908,32 @@ int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev, { #define MT7925_FIF_BIT_CLR BIT(1) #define MT7925_FIF_BIT_SET BIT(0) + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + unsigned long valid = ieee80211_vif_is_mld(vif) ? + mvif->valid_links : BIT(0); + struct ieee80211_bss_conf *bss_conf; int err = 0; + int i; if (enable) { - err = mt7925_mcu_uni_bss_bcnft(dev, vif, true); - if (err) - return err; + for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { + bss_conf = mt792x_vif_to_bss_conf(vif, i); + err = mt7925_mcu_uni_bss_bcnft(dev, bss_conf, true); + if (err < 0) + return err; + } return mt7925_mcu_set_rxfilter(dev, 0, MT7925_FIF_BIT_SET, MT_WF_RFCR_DROP_OTHER_BEACON); } - err = mt7925_mcu_set_bss_pm(dev, vif, false); - if (err) - return err; + for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { + bss_conf = mt792x_vif_to_bss_conf(vif, i); + err = mt7925_mcu_set_bss_pm(dev, bss_conf, false); + if (err) + return err; + } return mt7925_mcu_set_rxfilter(dev, 0, MT7925_FIF_BIT_CLR, @@ -1746,7 +1985,7 @@ int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif, } __packed enable; } __packed req = { .hdr = { - .band_idx = mvif->mt76.band_idx, + .band_idx = mvif->bss_conf.mt76.band_idx, }, .enable = { .tag = cpu_to_le16(UNI_SNIFFER_ENABLE), @@ -1805,7 +2044,7 @@ int mt7925_mcu_config_sniffer(struct mt792x_vif *vif, } __packed tlv; } __packed req = { .hdr = { - .band_idx = vif->mt76.band_idx, + .band_idx = vif->bss_conf.mt76.band_idx, }, .tlv = { .tag = cpu_to_le16(UNI_SNIFFER_CONFIG), @@ -1866,7 +2105,7 @@ mt7925_mcu_uni_add_beacon_offload(struct mt792x_dev *dev, } __packed beacon_tlv; } req = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, }, .beacon_tlv = { .tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT), @@ -1918,83 +2157,59 @@ mt7925_mcu_uni_add_beacon_offload(struct mt792x_dev *dev, &req, sizeof(req), true); } -int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif, - struct ieee80211_chanctx_conf *ctx) +static +void mt7925_mcu_bss_rlm_tlv(struct sk_buff *skb, struct mt76_phy *phy, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx) { - struct cfg80211_chan_def *chandef = ctx 
? &ctx->def : &phy->chandef; + struct cfg80211_chan_def *chandef = ctx ? &ctx->def : + &link_conf->chanreq.oper; int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2; enum nl80211_band band = chandef->chan->band; - struct mt76_dev *mdev = phy->dev; - struct { - struct { - u8 bss_idx; - u8 pad[3]; - } __packed hdr; - struct rlm_tlv { - __le16 tag; - __le16 len; - u8 control_channel; - u8 center_chan; - u8 center_chan2; - u8 bw; - u8 tx_streams; - u8 rx_streams; - u8 ht_op_info; - u8 sco; - u8 band; - u8 pad[3]; - } __packed rlm; - } __packed rlm_req = { - .hdr = { - .bss_idx = mvif->idx, - }, - .rlm = { - .tag = cpu_to_le16(UNI_BSS_INFO_RLM), - .len = cpu_to_le16(sizeof(struct rlm_tlv)), - .control_channel = chandef->chan->hw_value, - .center_chan = ieee80211_frequency_to_channel(freq1), - .center_chan2 = ieee80211_frequency_to_channel(freq2), - .tx_streams = hweight8(phy->antenna_mask), - .ht_op_info = 4, /* set HT 40M allowed */ - .rx_streams = hweight8(phy->antenna_mask), - .band = band, - }, - }; + struct bss_rlm_tlv *req; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_RLM, sizeof(*req)); + req = (struct bss_rlm_tlv *)tlv; + req->control_channel = chandef->chan->hw_value, + req->center_chan = ieee80211_frequency_to_channel(freq1), + req->center_chan2 = ieee80211_frequency_to_channel(freq2), + req->tx_streams = hweight8(phy->antenna_mask), + req->ht_op_info = 4, /* set HT 40M allowed */ + req->rx_streams = hweight8(phy->antenna_mask), + req->band = band; switch (chandef->width) { case NL80211_CHAN_WIDTH_40: - rlm_req.rlm.bw = CMD_CBW_40MHZ; + req->bw = CMD_CBW_40MHZ; break; case NL80211_CHAN_WIDTH_80: - rlm_req.rlm.bw = CMD_CBW_80MHZ; + req->bw = CMD_CBW_80MHZ; break; case NL80211_CHAN_WIDTH_80P80: - rlm_req.rlm.bw = CMD_CBW_8080MHZ; + req->bw = CMD_CBW_8080MHZ; break; case NL80211_CHAN_WIDTH_160: - rlm_req.rlm.bw = CMD_CBW_160MHZ; + req->bw = CMD_CBW_160MHZ; break; case NL80211_CHAN_WIDTH_5: - rlm_req.rlm.bw = CMD_CBW_5MHZ; + req->bw = CMD_CBW_5MHZ; break; case NL80211_CHAN_WIDTH_10: - rlm_req.rlm.bw = CMD_CBW_10MHZ; + req->bw = CMD_CBW_10MHZ; break; case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: default: - rlm_req.rlm.bw = CMD_CBW_20MHZ; - rlm_req.rlm.ht_op_info = 0; + req->bw = CMD_CBW_20MHZ; + req->ht_op_info = 0; break; } - if (rlm_req.rlm.control_channel < rlm_req.rlm.center_chan) - rlm_req.rlm.sco = 1; /* SCA */ - else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan) - rlm_req.rlm.sco = 3; /* SCB */ - - return mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE), &rlm_req, - sizeof(rlm_req), true); + if (req->control_channel < req->center_chan) + req->sco = 1; /* SCA */ + else if (req->control_channel > req->center_chan) + req->sco = 3; /* SCB */ } static struct sk_buff * @@ -2014,18 +2229,36 @@ __mt7925_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif *mvif, int len) return skb; } +int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx) +{ + struct sk_buff *skb; + + skb = __mt7925_mcu_alloc_bss_req(phy->dev, mvif, + MT7925_BSS_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + mt7925_mcu_bss_rlm_tlv(skb, phy, link_conf, ctx); + + return mt76_mcu_skb_send_msg(phy->dev, skb, + MCU_UNI_CMD(BSS_INFO_UPDATE), true); +} + static u8 mt7925_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif, - enum nl80211_band band, struct ieee80211_sta *sta) + enum nl80211_band band, + struct ieee80211_link_sta 
*link_sta) { struct ieee80211_he_6ghz_capa *he_6ghz_capa; const struct ieee80211_sta_eht_cap *eht_cap; __le16 capa = 0; u8 mode = 0; - if (sta) { - he_6ghz_capa = &sta->deflink.he_6ghz_capa; - eht_cap = &sta->deflink.eht_cap; + if (link_sta) { + he_6ghz_capa = &link_sta->he_6ghz_capa; + eht_cap = &link_sta->eht_cap; } else { struct ieee80211_supported_band *sband; @@ -2061,18 +2294,19 @@ mt7925_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif, static void mt7925_mcu_bss_basic_tlv(struct sk_buff *skb, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_link_sta *link_sta, struct ieee80211_chanctx_conf *ctx, struct mt76_phy *phy, u16 wlan_idx, bool enable) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv : - &mvif->sta; - struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef; + struct ieee80211_vif *vif = link_conf->vif; + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); + struct cfg80211_chan_def *chandef = ctx ? &ctx->def : + &link_conf->chanreq.oper; enum nl80211_band band = chandef->chan->band; struct mt76_connac_bss_basic_tlv *basic_req; + struct mt792x_link_sta *mlink; struct tlv *tlv; int conn_type; u8 idx; @@ -2080,26 +2314,39 @@ mt7925_mcu_bss_basic_tlv(struct sk_buff *skb, tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_BASIC, sizeof(*basic_req)); basic_req = (struct mt76_connac_bss_basic_tlv *)tlv; - idx = mvif->mt76.omac_idx > EXT_BSSID_START ? HW_BSSID_0 : - mvif->mt76.omac_idx; + idx = mconf->mt76.omac_idx > EXT_BSSID_START ? HW_BSSID_0 : + mconf->mt76.omac_idx; basic_req->hw_bss_idx = idx; - basic_req->phymode_ext = mt7925_get_phy_mode_ext(phy, vif, band, sta); + basic_req->phymode_ext = mt7925_get_phy_mode_ext(phy, vif, band, + link_sta); if (band == NL80211_BAND_2GHZ) basic_req->nonht_basic_phy = cpu_to_le16(PHY_TYPE_ERP_INDEX); else basic_req->nonht_basic_phy = cpu_to_le16(PHY_TYPE_OFDM_INDEX); - memcpy(basic_req->bssid, vif->bss_conf.bssid, ETH_ALEN); - basic_req->phymode = mt76_connac_get_phy_mode(phy, vif, band, sta); - basic_req->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int); - basic_req->dtim_period = vif->bss_conf.dtim_period; + memcpy(basic_req->bssid, link_conf->bssid, ETH_ALEN); + basic_req->phymode = mt76_connac_get_phy_mode(phy, vif, band, link_sta); + basic_req->bcn_interval = cpu_to_le16(link_conf->beacon_int); + basic_req->dtim_period = link_conf->dtim_period; basic_req->bmc_tx_wlan_idx = cpu_to_le16(wlan_idx); - basic_req->sta_idx = cpu_to_le16(msta->wcid.idx); - basic_req->omac_idx = mvif->mt76.omac_idx; - basic_req->band_idx = mvif->mt76.band_idx; - basic_req->wmm_idx = mvif->mt76.wmm_idx; + basic_req->link_idx = mconf->mt76.idx; + + if (link_sta) { + struct mt792x_sta *msta; + + msta = (struct mt792x_sta *)link_sta->sta->drv_priv; + mlink = mt792x_sta_to_link(msta, link_sta->link_id); + + } else { + mlink = &mconf->vif->sta.deflink; + } + + basic_req->sta_idx = cpu_to_le16(mlink->wcid.idx); + basic_req->omac_idx = mconf->mt76.omac_idx; + basic_req->band_idx = mconf->mt76.band_idx; + basic_req->wmm_idx = mconf->mt76.wmm_idx; basic_req->conn_state = !enable; switch (vif->type) { @@ -2131,9 +2378,11 @@ mt7925_mcu_bss_basic_tlv(struct sk_buff *skb, } static void -mt7925_mcu_bss_sec_tlv(struct sk_buff *skb, struct ieee80211_vif *vif) +mt7925_mcu_bss_sec_tlv(struct sk_buff *skb, + struct ieee80211_bss_conf *link_conf) { - struct mt76_vif *mvif = (struct 
mt76_vif *)vif->drv_priv; + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); + struct mt76_vif *mvif = &mconf->mt76; struct bss_sec_tlv { __le16 tag; __le16 len; @@ -2178,12 +2427,13 @@ mt7925_mcu_bss_sec_tlv(struct sk_buff *skb, struct ieee80211_vif *vif) static void mt7925_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt792x_phy *phy, struct ieee80211_chanctx_conf *ctx, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) + struct ieee80211_bss_conf *link_conf) { - struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->mt76->chandef; - struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct cfg80211_chan_def *chandef = ctx ? &ctx->def : + &link_conf->chanreq.oper; + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); enum nl80211_band band = chandef->chan->band; + struct mt76_vif *mvif = &mconf->mt76; struct bss_rate_tlv *bmc; struct tlv *tlv; u8 idx = mvif->mcast_rates_idx ? @@ -2205,39 +2455,44 @@ mt7925_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt792x_phy *phy, static void mt7925_mcu_bss_mld_tlv(struct sk_buff *skb, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) + struct ieee80211_bss_conf *link_conf) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - bool is_mld = ieee80211_vif_is_mld(vif); + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); + struct mt792x_vif *mvif = (struct mt792x_vif *)link_conf->vif->drv_priv; struct bss_mld_tlv *mld; struct tlv *tlv; + bool is_mld; + + is_mld = ieee80211_vif_is_mld(link_conf->vif) || + (hweight16(mvif->valid_links) > 1); tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_MLD, sizeof(*mld)); mld = (struct bss_mld_tlv *)tlv; - mld->link_id = sta ? (is_mld ? vif->bss_conf.link_id : 0) : 0xff; - mld->group_mld_id = is_mld ? mvif->mt76.idx : 0xff; - mld->own_mld_id = mvif->mt76.idx + 32; + mld->link_id = is_mld ? link_conf->link_id : 0xff; + /* apply the index of the primary link */ + mld->group_mld_id = is_mld ? 
mvif->bss_conf.mt76.idx : 0xff; + mld->own_mld_id = mconf->mt76.idx + 32; mld->remap_idx = 0xff; + mld->eml_enable = !!(link_conf->vif->cfg.eml_cap & + IEEE80211_EML_CAP_EMLSR_SUPP); - if (sta) - memcpy(mld->mac_addr, sta->addr, ETH_ALEN); + memcpy(mld->mac_addr, link_conf->addr, ETH_ALEN); } static void -mt7925_mcu_bss_qos_tlv(struct sk_buff *skb, struct ieee80211_vif *vif) +mt7925_mcu_bss_qos_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf) { struct mt76_connac_bss_qos_tlv *qos; struct tlv *tlv; tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_QBSS, sizeof(*qos)); qos = (struct mt76_connac_bss_qos_tlv *)tlv; - qos->qos = vif->bss_conf.qos; + qos->qos = link_conf->qos; } static void -mt7925_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, +mt7925_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf, struct mt792x_phy *phy) { #define DEFAULT_HE_PE_DURATION 4 @@ -2246,16 +2501,16 @@ mt7925_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, struct bss_info_uni_he *he; struct tlv *tlv; - cap = mt76_connac_get_he_phy_cap(phy->mt76, vif); + cap = mt76_connac_get_he_phy_cap(phy->mt76, link_conf->vif); tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_HE_BASIC, sizeof(*he)); he = (struct bss_info_uni_he *)tlv; - he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext; + he->he_pe_duration = link_conf->htc_trig_based_pkt_ext; if (!he->he_pe_duration) he->he_pe_duration = DEFAULT_HE_PE_DURATION; - he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th); + he->he_rts_thres = cpu_to_le16(link_conf->frame_time_rts_th); if (!he->he_rts_thres) he->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES); @@ -2265,7 +2520,7 @@ mt7925_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, } static void -mt7925_mcu_bss_color_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, +mt7925_mcu_bss_color_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf, bool enable) { struct bss_info_uni_bss_color *color; @@ -2275,15 +2530,16 @@ mt7925_mcu_bss_color_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, color = (struct bss_info_uni_bss_color *)tlv; color->enable = enable ? - vif->bss_conf.he_bss_color.enabled : 0; + link_conf->he_bss_color.enabled : 0; color->bss_color = enable ? 
- vif->bss_conf.he_bss_color.color : 0; + link_conf->he_bss_color.color : 0; } static void -mt7925_mcu_bss_ifs_tlv(struct sk_buff *skb, struct ieee80211_vif *vif) +mt7925_mcu_bss_ifs_tlv(struct sk_buff *skb, + struct ieee80211_bss_conf *link_conf) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)link_conf->vif->drv_priv; struct mt792x_phy *phy = mvif->phy; struct bss_ifs_time_tlv *ifs_time; struct tlv *tlv; @@ -2295,18 +2551,18 @@ mt7925_mcu_bss_ifs_tlv(struct sk_buff *skb, struct ieee80211_vif *vif) } int mt7925_mcu_set_timing(struct mt792x_phy *phy, - struct ieee80211_vif *vif) + struct ieee80211_bss_conf *link_conf) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); struct mt792x_dev *dev = phy->dev; struct sk_buff *skb; - skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76, + skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mconf->mt76, MT7925_BSS_UPDATE_MAX_SIZE); if (IS_ERR(skb)) return PTR_ERR(skb); - mt7925_mcu_bss_ifs_tlv(skb, vif); + mt7925_mcu_bss_ifs_tlv(skb, link_conf); return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_UNI_CMD(BSS_INFO_UPDATE), true); @@ -2314,41 +2570,42 @@ int mt7925_mcu_set_timing(struct mt792x_phy *phy, int mt7925_mcu_add_bss_info(struct mt792x_phy *phy, struct ieee80211_chanctx_conf *ctx, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_link_sta *link_sta, int enable) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)link_conf->vif->drv_priv; + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); struct mt792x_dev *dev = phy->dev; + struct mt792x_link_sta *mlink_bc; struct sk_buff *skb; - int err; - skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76, + skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mconf->mt76, MT7925_BSS_UPDATE_MAX_SIZE); if (IS_ERR(skb)) return PTR_ERR(skb); - /* bss_basic must be first */ - mt7925_mcu_bss_basic_tlv(skb, vif, sta, ctx, phy->mt76, - mvif->sta.wcid.idx, enable); - mt7925_mcu_bss_sec_tlv(skb, vif); + mlink_bc = mt792x_sta_to_link(&mvif->sta, mconf->link_id); - mt7925_mcu_bss_bmc_tlv(skb, phy, ctx, vif, sta); - mt7925_mcu_bss_qos_tlv(skb, vif); - mt7925_mcu_bss_mld_tlv(skb, vif, sta); - mt7925_mcu_bss_ifs_tlv(skb, vif); + /* bss_basic must be first */ + mt7925_mcu_bss_basic_tlv(skb, link_conf, link_sta, ctx, phy->mt76, + mlink_bc->wcid.idx, enable); + mt7925_mcu_bss_sec_tlv(skb, link_conf); + mt7925_mcu_bss_bmc_tlv(skb, phy, ctx, link_conf); + mt7925_mcu_bss_qos_tlv(skb, link_conf); + mt7925_mcu_bss_mld_tlv(skb, link_conf); + mt7925_mcu_bss_ifs_tlv(skb, link_conf); - if (vif->bss_conf.he_support) { - mt7925_mcu_bss_he_tlv(skb, vif, phy); - mt7925_mcu_bss_color_tlv(skb, vif, enable); + if (link_conf->he_support) { + mt7925_mcu_bss_he_tlv(skb, link_conf, phy); + mt7925_mcu_bss_color_tlv(skb, link_conf, enable); } - err = mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_UNI_CMD(BSS_INFO_UPDATE), true); - if (err < 0) - return err; + if (enable) + mt7925_mcu_bss_rlm_tlv(skb, phy->mt76, link_conf, ctx); - return mt7925_mcu_set_chctx(phy->mt76, &mvif->mt76, ctx); + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_UNI_CMD(BSS_INFO_UPDATE), true); } int mt7925_mcu_set_dbdc(struct mt76_phy *phy) diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h index 
b8315a89f4a9..ac53bdc99332 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h @@ -366,7 +366,10 @@ struct bss_mld_tlv { u8 mac_addr[ETH_ALEN]; u8 remap_idx; u8 link_id; - u8 __rsv[2]; + u8 eml_enable; + u8 max_link_num; + u8 hybrid_mode; + u8 __rsv[3]; } __packed; struct sta_rec_ba_uni { @@ -440,6 +443,17 @@ struct sta_rec_mld { } __packed link[2]; } __packed; +struct sta_rec_eht_mld { + __le16 tag; + __le16 len; + u8 nsep; + u8 mld_type; + u8 __rsv1[1]; + u8 str_cap[3]; + u8 eml_cap[3]; + u8 __rsv2[3]; +} __packed; + struct bss_ifs_time_tlv { __le16 tag; __le16 len; @@ -456,6 +470,21 @@ struct bss_ifs_time_tlv { __le16 eifs_cck_time; } __packed; +struct bss_rlm_tlv { + __le16 tag; + __le16 len; + u8 control_channel; + u8 center_chan; + u8 center_chan2; + u8 bw; + u8 tx_streams; + u8 rx_streams; + u8 ht_op_info; + u8 sco; + u8 band; + u8 pad[3]; +} __packed; + #define MT7925_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \ sizeof(struct sta_rec_basic) + \ sizeof(struct sta_rec_bf) + \ @@ -474,7 +503,8 @@ struct bss_ifs_time_tlv { sizeof(struct sta_rec_eht) + \ sizeof(struct sta_rec_hdr_trans) + \ sizeof(struct sta_rec_mld) + \ - sizeof(struct tlv)) + sizeof(struct tlv) * 2 + \ + sizeof(struct sta_rec_remove)) #define MT7925_BSS_UPDATE_MAX_SIZE (sizeof(struct bss_req_hdr) + \ sizeof(struct mt76_connac_bss_basic_tlv) + \ @@ -484,6 +514,7 @@ struct bss_ifs_time_tlv { sizeof(struct bss_info_uni_he) + \ sizeof(struct bss_info_uni_bss_color) + \ sizeof(struct bss_ifs_time_tlv) + \ + sizeof(struct bss_rlm_tlv) + \ sizeof(struct tlv)) #define MT_CONNAC3_SKU_POWER_LIMIT 449 @@ -538,6 +569,26 @@ struct mt7925_wow_pattern_tlv { u8 rsv[7]; } __packed; +struct roc_acquire_tlv { + __le16 tag; + __le16 len; + u8 bss_idx; + u8 tokenid; + u8 control_channel; + u8 sco; + u8 band; + u8 bw; + u8 center_chan; + u8 center_chan2; + u8 bw_from_ap; + u8 center_chan_from_ap; + u8 center_chan2_from_ap; + u8 reqtype; + __le32 maxinterval; + u8 dbdcband; + u8 rsv[3]; +} __packed; + static inline enum connac3_mcu_cipher_type mt7925_mcu_get_cipher(int cipher) { @@ -578,18 +629,18 @@ int mt7925_mcu_sched_scan_enable(struct mt76_phy *phy, bool enable); int mt7925_mcu_add_bss_info(struct mt792x_phy *phy, struct ieee80211_chanctx_conf *ctx, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_link_sta *link_sta, int enable); int mt7925_mcu_set_timing(struct mt792x_phy *phy, - struct ieee80211_vif *vif); + struct ieee80211_bss_conf *link_conf); int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable); int mt7925_mcu_set_channel_domain(struct mt76_phy *phy); int mt7925_mcu_set_radio_en(struct mt792x_phy *phy, bool enable); int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif, + struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx); int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy); int mt7925_mcu_update_arp_filter(struct mt76_dev *dev, - struct mt76_vif *vif, - struct ieee80211_bss_conf *info); + struct ieee80211_bss_conf *link_conf); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h index 8a4a71f6bcb6..669f3a079d04 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h @@ -30,17 +30,22 @@ enum { UNI_ROC_ACQUIRE, UNI_ROC_ABORT, + UNI_ROC_SUB_LINK = 3, UNI_ROC_NUM }; enum mt7925_roc_req { MT7925_ROC_REQ_JOIN, 
MT7925_ROC_REQ_ROC, + MT7925_ROC_REQ_SUB_LINK, + MT7925_ROC_REQ_MLSR_AG = 10, + MT7925_ROC_REQ_MLSR_AA, MT7925_ROC_REQ_NUM }; enum { UNI_EVENT_ROC_GRANT = 0, + UNI_EVENT_ROC_GRANT_SUB_LINK = 4, UNI_EVENT_ROC_TAG_NUM }; @@ -192,13 +197,15 @@ int __mt7925_start(struct mt792x_phy *phy); int mt7925_register_device(struct mt792x_dev *dev); void mt7925_unregister_device(struct mt792x_dev *dev); int mt7925_run_firmware(struct mt792x_dev *dev); -int mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif, +int mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, + struct ieee80211_bss_conf *link_conf, bool enable); -int mt7925_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta, +int mt7925_mcu_sta_update(struct mt792x_dev *dev, + struct ieee80211_link_sta *link_sta, struct ieee80211_vif *vif, bool enable, enum mt76_sta_info_state state); int mt7925_mcu_set_chan_info(struct mt792x_phy *phy, u16 tag); -int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif); +int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_bss_conf *bss_conf); int mt7925_mcu_set_eeprom(struct mt792x_dev *dev); int mt7925_mcu_get_rx_rate(struct mt792x_phy *phy, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct rate_info *rate); @@ -228,6 +235,7 @@ void mt7925_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, struct sk_buff *skb, u32 *info); void mt7925_stats_work(struct work_struct *work); void mt7925_set_stream_he_eht_caps(struct mt792x_phy *phy); +int mt7925_init_mlo_caps(struct mt792x_phy *phy); int mt7925_init_debugfs(struct mt792x_dev *dev); int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev, @@ -241,7 +249,8 @@ int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev, bool enable); void mt7925_scan_work(struct work_struct *work); void mt7925_roc_work(struct work_struct *work); -int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif); +int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, + struct ieee80211_bss_conf *link_conf); void mt7925_coredump_work(struct work_struct *work); int mt7925_get_txpwr_info(struct mt792x_dev *dev, u8 band_idx, struct mt7925_txpwr *txpwr); @@ -252,7 +261,7 @@ void mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi, struct ieee80211_key_conf *key, int pid, enum mt76_txq_id qid, u32 changed); void mt7925_txwi_free(struct mt792x_dev *dev, struct mt76_txwi_cache *t, - struct ieee80211_sta *sta, bool clear_status, + struct ieee80211_sta *sta, struct mt76_wcid *wcid, struct list_head *free_list); int mt7925_mcu_parse_response(struct mt76_dev *mdev, int cmd, struct sk_buff *skb, int seq); @@ -291,20 +300,24 @@ int mt7925_set_tx_sar_pwr(struct ieee80211_hw *hw, int mt7925_mcu_regval(struct mt792x_dev *dev, u32 regidx, u32 *val, bool set); int mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2, enum environment_cap env_cap); -int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, +int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links, + int duration, u8 token_id); +int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf, struct ieee80211_channel *chan, int duration, enum mt7925_roc_req type, u8 token_id); -int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, +int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf, u8 token_id); int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb, int cmd, int *wait_seq); int mt7925_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif, struct 
mt76_connac_sta_key_conf *sta_key_conf, struct ieee80211_key_conf *key, int mcu_cmd, - struct mt76_wcid *wcid, enum set_key_cmd cmd); + struct mt76_wcid *wcid, enum set_key_cmd cmd, + struct mt792x_sta *msta); int mt7925_mcu_set_rts_thresh(struct mt792x_phy *phy, u32 val); int mt7925_mcu_wtbl_update_hdr_trans(struct mt792x_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); + struct ieee80211_sta *sta, + int link_id); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c index 577574fb7a1e..6e4f4e78c350 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c @@ -373,6 +373,9 @@ static int mt7925_pci_probe(struct pci_dev *pdev, bus_ops->rmw = mt7925_rmw; dev->mt76.bus = bus_ops; + if (!mt7925_disable_aspm && mt76_pci_aspm_supported(pdev)) + dev->aspm_supported = true; + ret = __mt792x_mcu_fw_pmctrl(dev); if (ret) goto err_free_dev; diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c index 9fca887977d2..faedbf766d1a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c @@ -34,9 +34,9 @@ int mt7925e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (sta) { struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; - if (time_after(jiffies, msta->last_txs + HZ / 4)) { + if (time_after(jiffies, msta->deflink.last_txs + HZ / 4)) { info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; - msta->last_txs = jiffies; + msta->deflink.last_txs = jiffies; } } @@ -60,7 +60,7 @@ void mt7925_tx_token_put(struct mt792x_dev *dev) spin_lock_bh(&dev->mt76.token_lock); idr_for_each_entry(&dev->mt76.token, txwi, id) { - mt7925_txwi_free(dev, txwi, NULL, false, NULL); + mt7925_txwi_free(dev, txwi, NULL, NULL, NULL); dev->mt76.token_count--; } spin_unlock_bh(&dev->mt76.token_lock); diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h index cf14a38c5e72..7fa74d59cc48 100644 --- a/drivers/net/wireless/mediatek/mt76/mt792x.h +++ b/drivers/net/wireless/mediatek/mt76/mt792x.h @@ -27,6 +27,7 @@ #define MT792x_CHIP_CAP_CLC_EVT_EN BIT(0) #define MT792x_CHIP_CAP_RSSI_NOTIFY_EVT_EN BIT(1) +#define MT792x_CHIP_CAP_MLO_EVT_EN BIT(2) /* NOTE: used to map mt76_rates. 
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
index cf14a38c5e72..7fa74d59cc48 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
@@ -27,6 +27,7 @@
 #define MT792x_CHIP_CAP_CLC_EVT_EN		BIT(0)
 #define MT792x_CHIP_CAP_RSSI_NOTIFY_EVT_EN	BIT(1)
+#define MT792x_CHIP_CAP_MLO_EVT_EN		BIT(2)
 
 /* NOTE: used to map mt76_rates. idx may change if firmware expands table */
 #define MT792x_BASIC_RATES_TBL	11
@@ -81,11 +82,9 @@ enum mt792x_reg_power_type {
 
 DECLARE_EWMA(avg_signal, 10, 8)
 
-struct mt792x_sta {
+struct mt792x_link_sta {
 	struct mt76_wcid wcid; /* must be first */
 
-	struct mt792x_vif *vif;
-
 	u32 airtime_ac[8];
 
 	int ack_signal;
@@ -94,21 +93,46 @@ struct mt792x_sta {
 	unsigned long last_txs;
 
 	struct mt76_connac_sta_key_conf bip;
+
+	struct mt792x_sta *sta;
+
+	struct ieee80211_link_sta *pri_link;
+};
+
+struct mt792x_sta {
+	struct mt792x_link_sta deflink; /* must be first */
+	struct mt792x_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+
+	struct mt792x_vif *vif;
+
+	u16 valid_links;
+	u8 deflink_id;
 };
 
 DECLARE_EWMA(rssi, 10, 8);
 
-struct mt792x_vif {
+struct mt792x_chanctx {
+	struct mt792x_bss_conf *bss_conf;
+};
+
+struct mt792x_bss_conf {
 	struct mt76_vif mt76; /* must be first */
+	struct mt792x_vif *vif;
+	struct ewma_rssi rssi;
+	struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+	unsigned int link_id;
+};
+
+struct mt792x_vif {
+	struct mt792x_bss_conf bss_conf; /* must be first */
+	struct mt792x_bss_conf __rcu *link_conf[IEEE80211_MLD_MAX_NUM_LINKS];
 
 	struct mt792x_sta sta;
 	struct mt792x_sta *wep_sta;
 
 	struct mt792x_phy *phy;
-
-	struct ewma_rssi rssi;
-
-	struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+	u16 valid_links;
+	u8 deflink_id;
 };
 
 struct mt792x_phy {
@@ -140,6 +164,7 @@ struct mt792x_phy {
 #endif
 	void *clc[MT792x_CLC_MAX_NUM];
 	u64 chip_cap;
+	u16 eml_cap;
 
 	struct work_struct roc_work;
 	struct timer_list roc_timer;
@@ -190,6 +215,7 @@ struct mt792x_dev {
 	bool fw_assert:1;
 	bool has_eht:1;
 	bool regd_in_progress:1;
+	bool aspm_supported:1;
 	wait_queue_head_t wait;
 
 	struct work_struct init_work;
@@ -211,6 +237,66 @@ struct mt792x_dev {
 	u32 backup_l2;
 };
 
+static inline struct mt792x_bss_conf *
+mt792x_vif_to_link(struct mt792x_vif *mvif, u8 link_id)
+{
+	struct ieee80211_vif *vif;
+
+	vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
+
+	if (!ieee80211_vif_is_mld(vif) ||
+	    link_id >= IEEE80211_LINK_UNSPECIFIED)
+		return &mvif->bss_conf;
+
+	return rcu_dereference_protected(mvif->link_conf[link_id],
+		lockdep_is_held(&mvif->phy->dev->mt76.mutex));
+}
+
+static inline struct mt792x_link_sta *
+mt792x_sta_to_link(struct mt792x_sta *msta, u8 link_id)
+{
+	struct ieee80211_vif *vif;
+
+	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+
+	if (!ieee80211_vif_is_mld(vif) ||
+	    link_id >= IEEE80211_LINK_UNSPECIFIED)
+		return &msta->deflink;
+
+	return rcu_dereference_protected(msta->link[link_id],
+		lockdep_is_held(&msta->vif->phy->dev->mt76.mutex));
+}
+
+static inline struct mt792x_bss_conf *
+mt792x_link_conf_to_mconf(struct ieee80211_bss_conf *link_conf)
+{
+	struct ieee80211_vif *vif = link_conf->vif;
+	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+
+	return mt792x_vif_to_link(mvif, link_conf->link_id);
+}
+
+static inline struct ieee80211_bss_conf *
+mt792x_vif_to_bss_conf(struct ieee80211_vif *vif, unsigned int link_id)
+{
+	if (!ieee80211_vif_is_mld(vif) ||
+	    link_id >= IEEE80211_LINK_UNSPECIFIED)
+		return &vif->bss_conf;
+
+	return link_conf_dereference_protected(vif, link_id);
+}
+
+static inline struct ieee80211_link_sta *
+mt792x_sta_to_link_sta(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+		       unsigned int link_id)
+{
+	if (!ieee80211_vif_is_mld(vif) ||
+	    link_id >= IEEE80211_LINK_UNSPECIFIED)
+		return &sta->deflink;
+
+	return link_sta_dereference_protected(sta, link_id);
+}
+
 static inline struct mt792x_dev *
 mt792x_hw_dev(struct ieee80211_hw *hw)
 {
@@ -325,6 +411,9 @@ mt792x_get_mac80211_ops(struct device *dev,
 int mt792x_init_wcid(struct mt792x_dev *dev);
 int mt792x_mcu_drv_pmctrl(struct mt792x_dev *dev);
 int mt792x_mcu_fw_pmctrl(struct mt792x_dev *dev);
+void mt792x_mac_link_bss_remove(struct mt792x_dev *dev,
+				struct mt792x_bss_conf *mconf,
+				struct mt792x_link_sta *mlink);
 
 static inline char *mt792x_ram_name(struct mt792x_dev *dev)
 {
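The helpers added to mt792x.h encode one rule: a non-MLD interface, or an unspecified link id, resolves to the embedded default link (deflink), while real MLO links live behind __rcu pointers that may only be dereferenced with the driver mutex held. A self-contained sketch of the same pattern, with invented names and a local link limit standing in for the mac80211 constant:

    #include <linux/mutex.h>
    #include <linux/rcupdate.h>

    #define EX_MAX_LINKS 15 /* stand-in for IEEE80211_MLD_MAX_NUM_LINKS */

    struct ex_link {
            int id;
    };

    struct ex_sta {
            struct ex_link deflink;        /* always valid, non-MLO case */
            struct ex_link __rcu *link[EX_MAX_LINKS];
            struct mutex lock;             /* protects link[] updates */
            bool is_mld;
    };

    static struct ex_link *ex_sta_to_link(struct ex_sta *sta,
                                          unsigned int link_id)
    {
            if (!sta->is_mld || link_id >= EX_MAX_LINKS)
                    return &sta->deflink;

            /* updater-side lookup: caller must hold sta->lock */
            return rcu_dereference_protected(sta->link[link_id],
                                             lockdep_is_held(&sta->lock));
    }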
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
index 4adca99eb9b8..78fe37c2e07b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
@@ -59,20 +59,42 @@ void mt792x_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_vif *vif = info->control.vif;
 	struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+	u8 link_id;
 	int qid;
 
 	if (control->sta) {
+		struct mt792x_link_sta *mlink;
 		struct mt792x_sta *sta;
-
+		link_id = u32_get_bits(info->control.flags,
+				       IEEE80211_TX_CTRL_MLO_LINK);
 		sta = (struct mt792x_sta *)control->sta->drv_priv;
-		wcid = &sta->wcid;
+		mlink = mt792x_sta_to_link(sta, link_id);
+		wcid = &mlink->wcid;
 	}
 
 	if (vif && !control->sta) {
 		struct mt792x_vif *mvif;
 
 		mvif = (struct mt792x_vif *)vif->drv_priv;
-		wcid = &mvif->sta.wcid;
+		wcid = &mvif->sta.deflink.wcid;
+	}
+
+	if (vif && control->sta && ieee80211_vif_is_mld(vif)) {
+		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+		struct ieee80211_link_sta *link_sta;
+		struct ieee80211_bss_conf *conf;
+
+		link_id = wcid->link_id;
+		rcu_read_lock();
+		conf = rcu_dereference(vif->link_conf[link_id]);
+		memcpy(hdr->addr2, conf->addr, ETH_ALEN);
+
+		link_sta = rcu_dereference(control->sta->link[link_id]);
+		memcpy(hdr->addr1, link_sta->addr, ETH_ALEN);
+
+		if (vif->type == NL80211_IFTYPE_STATION)
+			memcpy(hdr->addr3, conf->bssid, ETH_ALEN);
+		rcu_read_unlock();
 	}
 
 	if (mt76_connac_pm_ref(mphy, &dev->pm)) {
@@ -113,31 +135,47 @@ void mt792x_stop(struct ieee80211_hw *hw, bool suspend)
 }
 EXPORT_SYMBOL_GPL(mt792x_stop);
 
+void mt792x_mac_link_bss_remove(struct mt792x_dev *dev,
+				struct mt792x_bss_conf *mconf,
+				struct mt792x_link_sta *mlink)
+{
+	struct ieee80211_vif *vif = container_of((void *)mconf->vif,
+						 struct ieee80211_vif, drv_priv);
+	struct ieee80211_bss_conf *link_conf;
+	int idx = mlink->wcid.idx;
+
+	link_conf = mt792x_vif_to_bss_conf(vif, mconf->link_id);
+
+	mt76_connac_free_pending_tx_skbs(&dev->pm, &mlink->wcid);
+	mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf, &mlink->wcid, false);
+
+	rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+
+	dev->mt76.vif_mask &= ~BIT_ULL(mconf->mt76.idx);
+	mconf->vif->phy->omac_mask &= ~BIT_ULL(mconf->mt76.omac_idx);
+
+	spin_lock_bh(&dev->mt76.sta_poll_lock);
+	if (!list_empty(&mlink->wcid.poll_list))
+		list_del_init(&mlink->wcid.poll_list);
+	spin_unlock_bh(&dev->mt76.sta_poll_lock);
+
+	mt76_wcid_cleanup(&dev->mt76, &mlink->wcid);
+}
+EXPORT_SYMBOL_GPL(mt792x_mac_link_bss_remove);
+
 void mt792x_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 {
 	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
-	struct mt792x_sta *msta = &mvif->sta;
 	struct mt792x_dev *dev = mt792x_hw_dev(hw);
-	struct mt792x_phy *phy = mt792x_hw_phy(hw);
-	int idx = msta->wcid.idx;
+	struct mt792x_bss_conf *mconf;
 
 	mt792x_mutex_acquire(dev);
-	mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
-	mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, false);
 
-	rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+	mconf = mt792x_link_conf_to_mconf(&vif->bss_conf);
+	mt792x_mac_link_bss_remove(dev, mconf, &mvif->sta.deflink);
 
-	dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx);
-	phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
 	mt792x_mutex_release(dev);
-
-	spin_lock_bh(&dev->mt76.sta_poll_lock);
-	if (!list_empty(&msta->wcid.poll_list))
-		list_del_init(&msta->wcid.poll_list);
-	spin_unlock_bh(&dev->mt76.sta_poll_lock);
-
-	mt76_wcid_cleanup(&dev->mt76, &msta->wcid);
 }
 EXPORT_SYMBOL_GPL(mt792x_remove_interface);
 
@@ -149,7 +187,7 @@ int mt792x_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 	/* no need to update right away, we'll get BSS_CHANGED_QOS */
 	queue = mt76_connac_lmac_mapping(queue);
-	mvif->queue_params[queue] = *params;
+	mvif->bss_conf.queue_params[queue] = *params;
 
 	return 0;
 }
@@ -178,7 +216,7 @@ u64 mt792x_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 {
 	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
 	struct mt792x_dev *dev = mt792x_hw_dev(hw);
-	u8 omac_idx = mvif->mt76.omac_idx;
+	u8 omac_idx = mvif->bss_conf.mt76.omac_idx;
 	union {
 		u64 t64;
 		u32 t32[2];
@@ -204,7 +242,7 @@ void mt792x_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 {
 	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
 	struct mt792x_dev *dev = mt792x_hw_dev(hw);
-	u8 omac_idx = mvif->mt76.omac_idx;
+	u8 omac_idx = mvif->bss_conf.mt76.omac_idx;
 	union {
 		u64 t64;
 		u32 t32[2];
@@ -261,11 +299,13 @@ int mt792x_assign_vif_chanctx(struct ieee80211_hw *hw,
 			      struct ieee80211_bss_conf *link_conf,
 			      struct ieee80211_chanctx_conf *ctx)
 {
+	struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv;
 	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
 	struct mt792x_dev *dev = mt792x_hw_dev(hw);
 
 	mutex_lock(&dev->mt76.mutex);
-	mvif->mt76.ctx = ctx;
+	mvif->bss_conf.mt76.ctx = ctx;
+	mctx->bss_conf = &mvif->bss_conf;
 	mutex_unlock(&dev->mt76.mutex);
 
 	return 0;
@@ -277,11 +317,13 @@ void mt792x_unassign_vif_chanctx(struct ieee80211_hw *hw,
 				 struct ieee80211_bss_conf *link_conf,
 				 struct ieee80211_chanctx_conf *ctx)
 {
+	struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv;
 	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
 	struct mt792x_dev *dev = mt792x_hw_dev(hw);
 
 	mutex_lock(&dev->mt76.mutex);
-	mvif->mt76.ctx = NULL;
+	mctx->bss_conf = NULL;
+	mvif->bss_conf.mt76.ctx = NULL;
 	mutex_unlock(&dev->mt76.mutex);
 }
 EXPORT_SYMBOL_GPL(mt792x_unassign_vif_chanctx);
@@ -405,10 +447,10 @@ mt792x_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
 	struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
 	struct mt76_ethtool_worker_info *wi = wi_data;
 
-	if (msta->vif->mt76.idx != wi->idx)
+	if (msta->vif->bss_conf.mt76.idx != wi->idx)
 		return;
 
-	mt76_ethtool_worker(wi, &msta->wcid.stats, true);
+	mt76_ethtool_worker(wi, &msta->deflink.wcid.stats, true);
 }
 
 void mt792x_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -421,7 +463,7 @@ void mt792x_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 	struct mt76_mib_stats *mib = &phy->mib;
 	struct mt76_ethtool_worker_info wi = {
 		.data = data,
-		.idx = mvif->mt76.idx,
+		.idx = mvif->bss_conf.mt76.idx,
 	};
 	int i, ei = 0;
 
@@ -487,7 +529,7 @@ void mt792x_sta_statistics(struct ieee80211_hw *hw,
 			   struct station_info *sinfo)
 {
 	struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
-	struct rate_info *txrate = &msta->wcid.rate;
+	struct rate_info *txrate = &msta->deflink.wcid.rate;
 
 	if (!txrate->legacy && !txrate->flags)
 		return;
@@ -502,19 +544,19 @@
 		sinfo->txrate.he_dcm = txrate->he_dcm;
 		sinfo->txrate.he_ru_alloc = txrate->he_ru_alloc;
 	}
-	sinfo->tx_failed = msta->wcid.stats.tx_failed;
+	sinfo->tx_failed = msta->deflink.wcid.stats.tx_failed;
 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
 
-	sinfo->tx_retries = msta->wcid.stats.tx_retries;
+	sinfo->tx_retries = msta->deflink.wcid.stats.tx_retries;
 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
 
 	sinfo->txrate.flags = txrate->flags;
 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
 
-	sinfo->ack_signal = (s8)msta->ack_signal;
+	sinfo->ack_signal = (s8)msta->deflink.ack_signal;
 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL);
 
-	sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->avg_ack_signal);
+	sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->deflink.avg_ack_signal);
 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
 }
 EXPORT_SYMBOL_GPL(mt792x_sta_statistics);
@@ -556,6 +598,7 @@ int mt792x_init_wiphy(struct ieee80211_hw *hw)
 
 	hw->sta_data_size = sizeof(struct mt792x_sta);
 	hw->vif_data_size = sizeof(struct mt792x_vif);
+	hw->chanctx_data_size = sizeof(struct mt792x_chanctx);
 
 	if (dev->fw_features & MT792x_FW_CAP_CNM) {
 		wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
@@ -766,6 +809,10 @@ int __mt792xe_mcu_drv_pmctrl(struct mt792x_dev *dev)
 	for (i = 0; i < MT792x_DRV_OWN_RETRY_COUNT; i++) {
 		mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_CLR_OWN);
+
+		if (dev->aspm_supported)
+			usleep_range(2000, 3000);
+
 		if (mt76_poll_msec_tick(dev, MT_CONN_ON_LPCTL,
					PCIE_LPCR_HOST_OWN_SYNC, 0, 50, 1))
 			break;
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_mac.c b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
index eb29434abee1..106273935b26 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
@@ -138,6 +138,7 @@ EXPORT_SYMBOL_GPL(mt792x_mac_update_mib_stats);
 struct mt76_wcid *mt792x_rx_get_wcid(struct mt792x_dev *dev, u16 idx,
				     bool unicast)
 {
+	struct mt792x_link_sta *link;
 	struct mt792x_sta *sta;
 	struct mt76_wcid *wcid;
 
@@ -151,11 +152,12 @@ struct mt76_wcid *mt792x_rx_get_wcid(struct mt792x_dev *dev, u16 idx,
 	if (!wcid->sta)
 		return NULL;
 
-	sta = container_of(wcid, struct mt792x_sta, wcid);
+	link = container_of(wcid, struct mt792x_link_sta, wcid);
+	sta = container_of(link, struct mt792x_sta, deflink);
 	if (!sta->vif)
 		return NULL;
 
-	return &sta->vif->sta.wcid;
+	return &sta->vif->sta.deflink.wcid;
 }
 EXPORT_SYMBOL_GPL(mt792x_rx_get_wcid);
 
@@ -173,7 +175,7 @@ mt792x_mac_rssi_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
 	if (!ether_addr_equal(vif->addr, hdr->addr1))
 		return;
 
-	ewma_rssi_add(&mvif->rssi, -status->signal);
+	ewma_rssi_add(&mvif->bss_conf.rssi, -status->signal);
 }
 
 void mt792x_mac_assoc_rssi(struct mt792x_dev *dev, struct sk_buff *skb)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index 2c8578677800..2e4fa9f48dfb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -2002,7 +2002,7 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
 	ra->valid = true;
 	ra->auto_rate = true;
-	ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, sta);
+	ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, &sta->deflink);
 	ra->channel = chandef->chan->hw_value;
 	ra->bw = (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320) ?
		 CMD_CBW_320MHZ : sta->deflink.bandwidth;
@@ -2157,11 +2157,13 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, bool enable, bool newly)
 {
 	struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+	struct ieee80211_link_sta *link_sta;
 	struct mt7996_sta *msta;
 	struct sk_buff *skb;
 	int ret;
 
 	msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->sta;
+	link_sta = sta ? &sta->deflink : NULL;
 	skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
					      &msta->wcid,
@@ -2170,7 +2172,8 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
 		return PTR_ERR(skb);
 
 	/* starec basic */
-	mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, sta, enable, newly);
+	mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta,
				      enable, newly);
 	if (!enable)
 		goto out;
diff --git a/drivers/net/wireless/mediatek/mt76/pci.c b/drivers/net/wireless/mediatek/mt76/pci.c
index 4c1c159fbb62..b5031ca7f73f 100644
--- a/drivers/net/wireless/mediatek/mt76/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/pci.c
@@ -45,3 +45,26 @@ void mt76_pci_disable_aspm(struct pci_dev *pdev)
			       aspm_conf);
 }
 EXPORT_SYMBOL_GPL(mt76_pci_disable_aspm);
+
+bool mt76_pci_aspm_supported(struct pci_dev *pdev)
+{
+	struct pci_dev *parent = pdev->bus->self;
+	u16 aspm_conf, parent_aspm_conf = 0;
+	bool result = true;
+
+	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &aspm_conf);
+	aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+	if (parent) {
+		pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
+					  &parent_aspm_conf);
+		parent_aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+	}
+
+	if (!aspm_conf && (!parent || !parent_aspm_conf)) {
+		/* aspm already disabled */
+		result = false;
+	}
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(mt76_pci_aspm_supported);
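Note on mt76_pci_aspm_supported() above: PCI_EXP_LNKCTL_ASPMC covers the L0s/L1 enable bits of the PCIe Link Control register, and the helper reports ASPM as active if either the endpoint or its upstream bridge still has them set; mt7925_pci_probe() then uses the result to decide whether the firmware-ownership handshake in __mt792xe_mcu_drv_pmctrl() needs the extra 2-3 ms settle delay. A hedged sketch of the single-device check:

    #include <linux/pci.h>

    static bool example_dev_aspm_enabled(struct pci_dev *pdev)
    {
            u16 lnkctl;

            /* PCI_EXP_LNKCTL_ASPMC is the L0s/L1 entry enable field */
            pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl);
            return lnkctl & PCI_EXP_LNKCTL_ASPMC;
    }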
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/8188f.c b/drivers/net/wireless/realtek/rtl8xxxu/8188f.c
index bd5a0603b4a2..3abf14d7044f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/8188f.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8188f.c
@@ -697,9 +697,14 @@ static void rtl8188fu_init_statistics(struct rtl8xxxu_priv *priv)
 	rtl8xxxu_write32(priv, REG_OFDM0_FA_RSTC, val32);
 }
 
+#define TX_POWER_INDEX_MAX 0x3F
+#define TX_POWER_INDEX_DEFAULT_CCK 0x22
+#define TX_POWER_INDEX_DEFAULT_HT40 0x27
+
 static int rtl8188fu_parse_efuse(struct rtl8xxxu_priv *priv)
 {
 	struct rtl8188fu_efuse *efuse = &priv->efuse_wifi.efuse8188fu;
+	int i;
 
 	if (efuse->rtl_id != cpu_to_le16(0x8129))
 		return -EINVAL;
@@ -713,6 +718,16 @@ static int rtl8188fu_parse_efuse(struct rtl8xxxu_priv *priv)
 	       efuse->tx_power_index_A.ht40_base,
 	       sizeof(efuse->tx_power_index_A.ht40_base));
 
+	for (i = 0; i < ARRAY_SIZE(priv->cck_tx_power_index_A); i++) {
+		if (priv->cck_tx_power_index_A[i] > TX_POWER_INDEX_MAX)
+			priv->cck_tx_power_index_A[i] = TX_POWER_INDEX_DEFAULT_CCK;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(priv->ht40_1s_tx_power_index_A); i++) {
+		if (priv->ht40_1s_tx_power_index_A[i] > TX_POWER_INDEX_MAX)
+			priv->ht40_1s_tx_power_index_A[i] = TX_POWER_INDEX_DEFAULT_HT40;
+	}
+
 	priv->ofdm_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.a;
 	priv->ht20_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.b;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
index 3685dbefc9bd..043fa364e701 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
@@ -6679,7 +6679,6 @@ static void rtl8xxxu_switch_ports(struct rtl8xxxu_priv *priv)
 	u8 macid[ETH_ALEN], bssid[ETH_ALEN], macid_1[ETH_ALEN], bssid_1[ETH_ALEN];
 	u8 msr, bcn_ctrl, bcn_ctrl_1, atimwnd[2], atimwnd_1[2];
 	struct rtl8xxxu_vif *rtlvif;
-	struct ieee80211_vif *vif;
 	u8 tsftr[8], tsftr_1[8];
 	int i;
 
@@ -6744,10 +6743,7 @@ static void rtl8xxxu_switch_ports(struct rtl8xxxu_priv *priv)
 	/* write bcn ctl */
 	rtl8xxxu_write8(priv, REG_BEACON_CTRL, bcn_ctrl_1);
 	rtl8xxxu_write8(priv, REG_BEACON_CTRL_1, bcn_ctrl);
-
-	vif = priv->vifs[0];
-	priv->vifs[0] = priv->vifs[1];
-	priv->vifs[1] = vif;
+	swap(priv->vifs[0], priv->vifs[1]);
 
 	/* priv->vifs[0] is NULL here, based on how this function is currently
	 * called from rtl8xxxu_add_interface.
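The rtl8xxxu_switch_ports() cleanup above replaces an open-coded three-assignment exchange with the kernel's swap() macro (historically reachable via <linux/kernel.h>, defined in <linux/minmax.h> in recent trees), which also lets the temporary ieee80211_vif pointer go away. Illustrative use:

    #include <linux/minmax.h>

    static void example_swap_ports(void *ports[2])
    {
            /* expands to a type-checked exchange with a hidden temporary */
            swap(ports[0], ports[1]);
    }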
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index 37bb59fa8bfa..35875cda30fc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -27,7 +27,7 @@ static void rtl88e_init_aspm_vars(struct ieee80211_hw *hw)
	 * 2 - Enable ASPM with Clock Req,
	 * 3 - Alwyas Enable ASPM with Clock Req,
	 * 4 - Always Enable ASPM without Clock Req.
-	 * set defult to RTL8192CE:3 RTL8192E:2
+	 * set default to RTL8192CE:3 RTL8192E:2
	 */
	rtlpci->const_pci_aspm = 3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index e20f2bec45c4..ce7c28d9c874 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -31,7 +31,7 @@ static void rtl92c_init_aspm_vars(struct ieee80211_hw *hw)
	 * 2 - Enable ASPM with Clock Req,
	 * 3 - Alwyas Enable ASPM with Clock Req,
	 * 4 - Always Enable ASPM without Clock Req.
-	 * set defult to RTL8192CE:3 RTL8192E:2
+	 * set default to RTL8192CE:3 RTL8192E:2
	 *
	 */
	rtlpci->const_pci_aspm = 3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index f5ce4889523e..e36e4aeb9a95 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -32,7 +32,7 @@ static void rtl92d_init_aspm_vars(struct ieee80211_hw *hw)
	 * 2 - Enable ASPM with Clock Req,
	 * 3 - Alwyas Enable ASPM with Clock Req,
	 * 4 - Always Enable ASPM without Clock Req.
-	 * set defult to RTL8192CE:3 RTL8192E:2
+	 * set default to RTL8192CE:3 RTL8192E:2
	 *
	 */
	rtlpci->const_pci_aspm = 3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index 7bde20fdbeab..162e734d5b08 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -31,7 +31,7 @@ static void rtl92ee_init_aspm_vars(struct ieee80211_hw *hw)
	 * 2 - Enable ASPM with Clock Req,
	 * 3 - Alwyas Enable ASPM with Clock Req,
	 * 4 - Always Enable ASPM without Clock Req.
-	 * set defult to RTL8192CE:3 RTL8192E:2
+	 * set default to RTL8192CE:3 RTL8192E:2
	 */
	rtlpci->const_pci_aspm = 3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index 675bdd32feb1..bbf8ff63dced 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -27,7 +27,7 @@ static void rtl92s_init_aspm_vars(struct ieee80211_hw *hw)
	 * 2 - Enable ASPM with Clock Req,
	 * 3 - Alwyas Enable ASPM with Clock Req,
	 * 4 - Always Enable ASPM without Clock Req.
-	 * set defult to RTL8192CE:3 RTL8192E:2
+	 * set default to RTL8192CE:3 RTL8192E:2
	 *
	 */
	rtlpci->const_pci_aspm = 2;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index dd7505e2f22c..1b144fbd4d26 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -33,7 +33,7 @@ static void rtl8723e_init_aspm_vars(struct ieee80211_hw *hw)
	 * 2 - Enable ASPM with Clock Req,
	 * 3 - Alwyas Enable ASPM with Clock Req,
	 * 4 - Always Enable ASPM without Clock Req.
-	 * set defult to RTL8192CE:3 RTL8192E:2
+	 * set default to RTL8192CE:3 RTL8192E:2
	 */
	rtlpci->const_pci_aspm = 3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 162c34f0e9b7..0a92d0325098 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -32,7 +32,7 @@ static void rtl8723be_init_aspm_vars(struct ieee80211_hw *hw)
	 * 2 - Enable ASPM with Clock Req,
	 * 3 - Alwyas Enable ASPM with Clock Req,
	 * 4 - Always Enable ASPM without Clock Req.
-	 * set defult to RTL8192CE:3 RTL8192E:2
+	 * set default to RTL8192CE:3 RTL8192E:2
	 */
	rtlpci->const_pci_aspm = 3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 7b911695db33..a65503c5ae5a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -30,7 +30,7 @@ static void rtl8821ae_init_aspm_vars(struct ieee80211_hw *hw)
	 * 2 - Enable ASPM with Clock Req,
	 * 3 - Alwyas Enable ASPM with Clock Req,
	 * 4 - Always Enable ASPM without Clock Req.
-	 * set defult to RTL8192CE:3 RTL8192E:2
+	 * set default to RTL8192CE:3 RTL8192E:2
	 */
	rtlpci->const_pci_aspm = 3;
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 0dba8aae7716..564f5988ee82 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -1201,6 +1201,15 @@ static int __priority_queue_cfg(struct rtw_dev *rtwdev,
 	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
 	rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
 	rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
+
+	if (rtwdev->hci.type == RTW_HCI_TYPE_USB) {
+		rtw_write8_mask(rtwdev, REG_AUTO_LLT_V1, BIT_MASK_BLK_DESC_NUM,
				chip->usb_tx_agg_desc_num);
+
+		rtw_write8(rtwdev, REG_AUTO_LLT_V1 + 3, chip->usb_tx_agg_desc_num);
+		rtw_write8_set(rtwdev, REG_TXDMA_OFFSET_CHK + 1, BIT(1));
+	}
+
 	rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);
 
 	if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 49894331f7b4..49a3fd4fb7dc 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -1197,6 +1197,8 @@ struct rtw_chip_info {
 	u16 fw_fifo_addr[RTW_FW_FIFO_MAX];
 	const struct rtw_fwcd_segs *fwcd_segs;
 
+	u8 usb_tx_agg_desc_num;
+
 	u8 default_1ss_tx_path;
 
 	bool path_div_supported;
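The rtw88 hunks above add a per-chip usb_tx_agg_desc_num and program it into bits [7:4] of REG_AUTO_LLT_V1 through BIT_MASK_BLK_DESC_NUM. A minimal sketch of such a masked read-modify-write on a register value, with an illustrative field name:

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    #define EX_BLK_DESC_NUM GENMASK(7, 4)  /* 4-bit field in the register */

    static u8 example_set_blk_desc_num(u8 regval, u8 desc_num)
    {
            regval &= ~EX_BLK_DESC_NUM;                      /* clear field */
            regval |= FIELD_PREP(EX_BLK_DESC_NUM, desc_num); /* insert value */
            return regval;
    }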
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index b122f226924b..02ef9a77316b 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -270,6 +270,7 @@
 #define BIT_MASK_BCN_HEAD_1_V1	0xfff
 #define REG_AUTO_LLT_V1		0x0208
 #define BIT_AUTO_INIT_LLT_V1	BIT(0)
+#define BIT_MASK_BLK_DESC_NUM	GENMASK(7, 4)
 #define REG_DWBCN0_CTRL		0x0208
 #define BIT_BCN_VALID		BIT(16)
 #define REG_TXDMA_OFFSET_CHK	0x020C
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b.c b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
index 8919f9e11f03..222608de33cd 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8703b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
@@ -2013,6 +2013,7 @@ const struct rtw_chip_info rtw8703b_hw_spec = {
 	.tx_stbc = false,
 	.max_power_index = 0x3f,
 	.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+	.usb_tx_agg_desc_num = 1, /* Not sure if this chip has USB interface */
 
 	.path_div_supported = false,
 	.ht_supported = true,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
index f8df4c84d39f..3fba4054d45f 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
@@ -2171,6 +2171,7 @@ const struct rtw_chip_info rtw8723d_hw_spec = {
 	.band = RTW_BAND_2G,
 	.page_size = TX_PAGE_SIZE,
 	.dig_min = 0x20,
+	.usb_tx_agg_desc_num = 1,
 	.ht_supported = true,
 	.vht_supported = false,
 	.lps_deep_mode_supported = 0,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index fe5d8e188350..526e8de77b3e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -2008,6 +2008,7 @@ const struct rtw_chip_info rtw8821c_hw_spec = {
 	.band = RTW_BAND_2G | RTW_BAND_5G,
 	.page_size = TX_PAGE_SIZE,
 	.dig_min = 0x1c,
+	.usb_tx_agg_desc_num = 3,
 	.ht_supported = true,
 	.vht_supported = true,
 	.lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK),
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 3017a9760da8..2456ff242818 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -2548,6 +2548,7 @@ const struct rtw_chip_info rtw8822b_hw_spec = {
 	.band = RTW_BAND_2G | RTW_BAND_5G,
 	.page_size = TX_PAGE_SIZE,
 	.dig_min = 0x1c,
+	.usb_tx_agg_desc_num = 3,
 	.ht_supported = true,
 	.vht_supported = true,
 	.lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK),
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index cd965edc29ce..62376d1cca22 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -5366,6 +5366,7 @@ const struct rtw_chip_info rtw8822c_hw_spec = {
 	.band = RTW_BAND_2G | RTW_BAND_5G,
 	.page_size = TX_PAGE_SIZE,
 	.dig_min = 0x20,
+	.usb_tx_agg_desc_num = 3,
 	.default_1ss_tx_path = BB_PATH_A,
 	.path_div_supported = true,
 	.ht_supported = true,
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index d204d138afe2..a55ca5a24227 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -379,7 +379,9 @@ static bool rtw_usb_tx_agg_skb(struct rtw_usb *rtwusb, struct sk_buff_head *list
 
 		skb_iter = skb_peek(list);
 
-		if (skb_iter && skb_iter->len + skb_head->len <= RTW_USB_MAX_XMITBUF_SZ)
+		if (skb_iter &&
+		    skb_iter->len + skb_head->len <= RTW_USB_MAX_XMITBUF_SZ &&
+		    agg_num < rtwdev->chip->usb_tx_agg_desc_num)
 			__skb_unlink(skb_iter, list);
 		else
 			skb_iter = NULL;
@@ -740,7 +742,6 @@ static struct rtw_hci_ops rtw_usb_ops = {
 static int rtw_usb_init_rx(struct rtw_dev *rtwdev)
 {
 	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
-	int i;
 
 	rtwusb->rxwq = create_singlethread_workqueue("rtw88_usb: rx wq");
 	if (!rtwusb->rxwq) {
@@ -752,13 +753,19 @@ static int rtw_usb_init_rx(struct rtw_dev *rtwdev)
 
 	INIT_WORK(&rtwusb->rx_work, rtw_usb_rx_handler);
 
+	return 0;
+}
+
+static void rtw_usb_setup_rx(struct rtw_dev *rtwdev)
+{
+	struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+	int i;
+
 	for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
 		struct rx_usb_ctrl_block *rxcb = &rtwusb->rx_cb[i];
 
 		rtw_usb_rx_resubmit(rtwusb, rxcb);
 	}
-
-	return 0;
 }
 
 static void rtw_usb_deinit_rx(struct rtw_dev *rtwdev)
@@ -895,6 +902,8 @@ int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
 		goto err_destroy_rxwq;
 	}
 
+	rtw_usb_setup_rx(rtwdev);
+
 	return 0;
 
 err_destroy_rxwq:
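Two things happen in the usb.c hunks above: the aggregation loop now also stops once the per-chip descriptor budget is reached, and URB submission is split out of rtw_usb_init_rx() into rtw_usb_setup_rx() so RX only starts at the end of probe, after everything else is ready. A hedged sketch of the bounded aggregation count, using invented names:

    #include <linux/skbuff.h>

    static unsigned int example_count_aggregable(struct sk_buff_head *list,
                                                 unsigned int max_bytes,
                                                 unsigned int max_descs)
    {
            unsigned int bytes = 0, n = 0;
            struct sk_buff *skb;

            skb_queue_walk(list, skb) {
                    /* stop on buffer overflow or descriptor budget */
                    if (bytes + skb->len > max_bytes || n >= max_descs)
                            break;
                    bytes += skb->len;
                    n++;
            }

            return n;
    }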
diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig
index eaea4eaeb361..3c9f864805b1 100644
--- a/drivers/net/wireless/realtek/rtw89/Kconfig
+++ b/drivers/net/wireless/realtek/rtw89/Kconfig
@@ -22,6 +22,9 @@ config RTW89_8851B
 config RTW89_8852A
 	tristate
 
+config RTW89_8852B_COMMON
+	tristate
+
 config RTW89_8852B
 	tristate
 
@@ -59,6 +62,7 @@ config RTW89_8852BE
 	select RTW89_CORE
 	select RTW89_PCI
 	select RTW89_8852B
+	select RTW89_8852B_COMMON
 	help
 	  Select this option will enable support for 8852BE chipset
 
diff --git a/drivers/net/wireless/realtek/rtw89/Makefile b/drivers/net/wireless/realtek/rtw89/Makefile
index 86a553fb0136..1f1050a7a89d 100644
--- a/drivers/net/wireless/realtek/rtw89/Makefile
+++ b/drivers/net/wireless/realtek/rtw89/Makefile
@@ -17,7 +17,8 @@ rtw89_core-y += core.o \
		ps.o \
		chan.o \
		ser.o \
-		acpi.o
+		acpi.o \
+		util.o
 
 rtw89_core-$(CONFIG_PM) += wow.o
 
@@ -39,6 +40,9 @@ rtw89_8852a-objs := rtw8852a.o \
 obj-$(CONFIG_RTW89_8852AE) += rtw89_8852ae.o
 rtw89_8852ae-objs := rtw8852ae.o
 
+obj-$(CONFIG_RTW89_8852B_COMMON) += rtw89_8852b_common.o
+rtw89_8852b_common-objs := rtw8852b_common.o
+
 obj-$(CONFIG_RTW89_8852B) += rtw89_8852b.o
 rtw89_8852b-objs := rtw8852b.o \
		    rtw8852b_table.o \
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index 35291efbbae9..4557c6e035a9 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -523,6 +523,7 @@ static u8 rtw89_get_addr_cam_entry_size(struct rtw89_dev *rtwdev)
 	case RTL8852A:
 	case RTL8852B:
 	case RTL8851B:
+	case RTL8852BT:
 		return ADDR_CAM_ENT_SIZE;
 	default:
 		return ADDR_CAM_ENT_SHORT_SIZE;
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index 3b1997223cc5..7f90d93dcdc0 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -141,6 +141,28 @@ bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev,
 	return band_changed;
 }
 
+int rtw89_iterate_entity_chan(struct rtw89_dev *rtwdev,
+			      int (*iterator)(const struct rtw89_chan *chan,
					      void *data),
+			      void *data)
+{
+	struct rtw89_hal *hal = &rtwdev->hal;
+	const struct rtw89_chan *chan;
+	int ret;
+	u8 idx;
+
+	lockdep_assert_held(&rtwdev->mutex);
+
+	for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_SUB_ENTITY) {
+		chan = rtw89_chan_get(rtwdev, idx);
+		ret = iterator(chan, data);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static void __rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
					  enum rtw89_sub_entity_idx idx,
					  const struct cfg80211_chan_def *chandef,
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
index ffa412f281f3..5278ff8c513b 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.h
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -78,6 +78,10 @@ void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan,
 bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev,
			      enum rtw89_sub_entity_idx idx,
			      const struct rtw89_chan *new);
+int rtw89_iterate_entity_chan(struct rtw89_dev *rtwdev,
+			      int (*iterator)(const struct rtw89_chan *chan,
					      void *data),
+			      void *data);
 void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
				 enum rtw89_sub_entity_idx idx,
				 const struct cfg80211_chan_def *chandef);
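rtw89_iterate_entity_chan() above follows a common iterator-plus-callback shape: under the held mutex, walk every bit set in the entity map and stop at the first callback error. A self-contained sketch of the control flow:

    #include <linux/bitops.h>

    #define EX_NUM_ENTITIES 2

    static int example_iterate(const unsigned long *entity_map,
                               int (*iterator)(unsigned int idx, void *data),
                               void *data)
    {
            unsigned int idx;
            int ret;

            for_each_set_bit(idx, entity_map, EX_NUM_ENTITIES) {
                    ret = iterator(idx, data);
                    if (ret)
                            return ret; /* propagate the first failure */
            }

            return 0;
    }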
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index c443b39ab3c6..24929ef534e0 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -91,7 +91,7 @@ static const struct rtw89_btc_fbtc_slot s_def[] = {
 	[CXST_BLK]	= __DEF_FBTC_SLOT(500, 0x55555555, SLOT_MIX),
 	[CXST_E2G]	= __DEF_FBTC_SLOT(0, 0xea5a5a5a, SLOT_MIX),
 	[CXST_E5G]	= __DEF_FBTC_SLOT(0, 0xffffffff, SLOT_ISO),
-	[CXST_EBT]	= __DEF_FBTC_SLOT(0, 0xe5555555, SLOT_MIX),
+	[CXST_EBT]	= __DEF_FBTC_SLOT(5, 0xe5555555, SLOT_MIX),
 	[CXST_ENULL]	= __DEF_FBTC_SLOT(0, 0xaaaaaaaa, SLOT_ISO),
 	[CXST_WLK]	= __DEF_FBTC_SLOT(250, 0xea5a5a5a, SLOT_MIX),
 	[CXST_W1FDD]	= __DEF_FBTC_SLOT(50, 0xffffffff, SLOT_ISO),
@@ -228,6 +228,7 @@ static u32 chip_id_to_bt_rom_code_id(u32 id)
 	case RTL8852A:
 	case RTL8852B:
 	case RTL8852C:
+	case RTL8852BT:
 		return 0x8852;
 	case RTL8851B:
 		return 0x8851;
@@ -3616,6 +3617,7 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
 	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
 	u8 type, null_role;
 	u32 tbl_w1, tbl_b1, tbl_b4;
+	u16 dur_2;
 
 	type = FIELD_GET(BTC_CXP_MASK, policy_type);
 
@@ -3726,7 +3728,21 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
 		if (hid->exist || hfp->exist)
 			tbl_w1 = cxtbl[16];
 
+		dur_2 = dm->e2g_slot_limit;
+
 		switch (policy_type) {
+		case BTC_CXP_OFFE_2GBWISOB: /* for normal-case */
+			_slot_set(btc, CXST_E2G, 0, tbl_w1, SLOT_ISO);
+			_slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur,
				     s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
+			_slot_set_dur(btc, CXST_EBT, dur_2);
+			break;
+		case BTC_CXP_OFFE_2GISOB: /* for bt no-link */
+			_slot_set(btc, CXST_E2G, 0, cxtbl[1], SLOT_ISO);
+			_slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur,
				     s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
+			_slot_set_dur(btc, CXST_EBT, dur_2);
+			break;
 		case BTC_CXP_OFFE_DEF:
 			_slot_set_le(btc, CXST_E2G, s_def[CXST_E2G].dur,
				     s_def[CXST_E2G].cxtbl, s_def[CXST_E2G].cxtype);
@@ -3746,6 +3762,15 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
 			_slot_set_le(btc, CXST_ENULL, s_def[CXST_ENULL].dur,
				     s_def[CXST_ENULL].cxtbl, s_def[CXST_ENULL].cxtype);
 			break;
+		case BTC_CXP_OFFE_2GBWMIXB:
+			_slot_set(btc, CXST_E2G, 0, 0x55555555, SLOT_MIX);
+			_slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur,
				     cpu_to_le32(0x55555555), s_def[CXST_EBT].cxtype);
+			break;
+		case BTC_CXP_OFFE_WL: /* for 4-way */
+			_slot_set(btc, CXST_E2G, 0, cxtbl[1], SLOT_MIX);
+			_slot_set(btc, CXST_EBT, 0, cxtbl[1], SLOT_MIX);
+			break;
 		default:
 			break;
 		}
@@ -9514,7 +9539,7 @@ static void _get_gnt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_coex_gnt *gnt
 	u32 val, status;
 
 	if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
-	    chip->chip_id == RTL8851B) {
+	    chip->chip_id == RTL8851B || chip->chip_id == RTL8852BT) {
 		rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val);
 		rtw89_mac_read_lte(rtwdev, R_AX_GNT_VAL, &status);
 
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index 3e99b63a7995..7019f7d482a8 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -1921,7 +1921,8 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
 		return;
 
 	if (ieee80211_is_beacon(hdr->frame_control)) {
-		if (vif->type == NL80211_IFTYPE_STATION) {
+		if (vif->type == NL80211_IFTYPE_STATION &&
+		    !test_bit(RTW89_FLAG_WOWLAN, rtwdev->flags)) {
 			rtw89_vif_sync_bcn_tsf(rtwvif, hdr, skb->len);
 			rtw89_fw_h2c_rssi_offload(rtwdev, phy_ppdu);
 		}
@@ -3375,8 +3376,12 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
 	if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
 		/* for station mode, assign the mac_id from itself */
 		rtwsta->mac_id = rtwvif->mac_id;
-		/* must do rtw89_reg_6ghz_power_recalc() before rfk channel */
-		rtw89_reg_6ghz_power_recalc(rtwdev, rtwvif, true);
+
+		/* must do rtw89_reg_6ghz_recalc() before rfk channel */
+		ret = rtw89_reg_6ghz_recalc(rtwdev, rtwvif, true);
+		if (ret)
+			return ret;
+
 		rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
					 BTC_ROLE_MSTS_STA_CONN_START);
 		rtw89_chip_rfk_channel(rtwdev);
@@ -3564,7 +3569,7 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
 	int ret;
 
 	if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
-		rtw89_reg_6ghz_power_recalc(rtwdev, rtwvif, false);
+		rtw89_reg_6ghz_recalc(rtwdev, rtwvif, false);
 		rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
					 BTC_ROLE_MSTS_STA_DIS_CONN);
 	} else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
@@ -4054,15 +4059,15 @@ void rtw89_core_update_beacon_work(struct work_struct *work)
 int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond)
 {
 	struct completion *cmpl = &wait->completion;
-	unsigned long timeout;
+	unsigned long time_left;
 	unsigned int cur;
 
 	cur = atomic_cmpxchg(&wait->cond, RTW89_WAIT_COND_IDLE, cond);
 	if (cur != RTW89_WAIT_COND_IDLE)
 		return -EBUSY;
 
-	timeout = wait_for_completion_timeout(cmpl, RTW89_WAIT_FOR_COND_TIMEOUT);
-	if (timeout == 0) {
+	time_left = wait_for_completion_timeout(cmpl, RTW89_WAIT_FOR_COND_TIMEOUT);
+	if (time_left == 0) {
 		atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE);
 		return -ETIMEDOUT;
 	}
@@ -4382,7 +4387,7 @@ static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev)
 
 	rtwdev->hal.cv = cv;
 
-	if (chip->chip_id == RTL8852B || chip->chip_id == RTL8851B) {
+	if (rtw89_is_rtl885xb(rtwdev)) {
 		ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_CV, &val);
 		if (ret)
 			return;
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 68221f0b156e..11fa003a9788 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -132,6 +132,7 @@ enum rtw89_hci_type {
 enum rtw89_core_chip_id {
 	RTL8852A,
 	RTL8852B,
+	RTL8852BT,
 	RTL8852C,
 	RTL8851B,
 	RTL8922A,
@@ -745,6 +746,14 @@ enum rtw89_reg_6ghz_power {
 	RTW89_REG_6GHZ_POWER_DFLT = RTW89_REG_6GHZ_POWER_VLP,
 };
 
+#define RTW89_MIN_VALID_POWER_CONSTRAINT (-10) /* unit: dBm */
+
+/* calculate based on ieee80211 Transmit Power Envelope */
+struct rtw89_reg_6ghz_tpe {
+	bool valid;
+	s8 constraint; /* unit: dBm */
+};
+
 enum rtw89_fw_pkt_ofld_type {
 	RTW89_PKT_OFLD_TYPE_PROBE_RSP = 0,
 	RTW89_PKT_OFLD_TYPE_PS_POLL = 1,
@@ -1329,6 +1338,7 @@ struct rtw89_btc_wl_smap {
 	u32 scan: 1;
 	u32 connecting: 1;
 	u32 roaming: 1;
+	u32 dbccing: 1;
 	u32 transacting: 1;
 	u32 _4way: 1;
 	u32 rf_off: 1;
@@ -3395,6 +3405,7 @@ struct rtw89_vif {
 	bool chanctx_assigned; /* only valid when running with chanctx_ops */
 	enum rtw89_sub_entity_idx sub_entity_idx;
 	enum rtw89_reg_6ghz_power reg_6ghz_power;
+	struct rtw89_reg_6ghz_tpe reg_6ghz_tpe;
 
 	u8 mac_id;
 	u8 port;
@@ -4235,7 +4246,7 @@ struct rtw89_chip_info {
 	const u32 *c2h_regs;
 	struct rtw89_reg_def c2h_counter_reg;
 	const struct rtw89_page_regs *page_regs;
-	u32 wow_reason_reg;
+	const u32 *wow_reason_reg;
 	bool cfo_src_fd;
 	bool cfo_hw_comp;
 	const struct rtw89_reg_def *dcfo_comp;
@@ -4345,6 +4356,7 @@ enum rtw89_fw_feature {
 	RTW89_FW_FEATURE_NO_LPS_PG,
 	RTW89_FW_FEATURE_BEACON_FILTER,
 	RTW89_FW_FEATURE_MACID_PAUSE_SLEEP,
+	RTW89_FW_FEATURE_WOW_REASON_V1,
 };
 
 struct rtw89_fw_suit {
@@ -4683,7 +4695,12 @@ struct rtw89_dack_info {
 	bool msbk_timeout[RTW89_DACK_PATH_NR];
 };
 
-#define RTW89_RFK_CHS_NR 3
+enum rtw89_rfk_chs_nrs {
+	__RTW89_RFK_CHS_NR_V0 = 2,
+	__RTW89_RFK_CHS_NR_V1 = 3,
+
+	RTW89_RFK_CHS_NR = __RTW89_RFK_CHS_NR_V1,
+};
 
 struct rtw89_rfk_mcc_info {
 	u8 ch[RTW89_RFK_CHS_NR];
@@ -4762,6 +4779,8 @@ struct rtw89_dpk_info {
 	u8 cur_idx[RTW89_DPK_RF_PATH];
 	u8 cur_k_set;
 	struct rtw89_dpk_bkup_para bp[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
+	u8 max_dpk_txagc[RTW89_DPK_RF_PATH];
+	u32 dpk_order[RTW89_DPK_RF_PATH];
 };
 
 struct rtw89_fem_info {
@@ -4936,6 +4955,7 @@ struct rtw89_regd {
 struct rtw89_regulatory_info {
 	const struct rtw89_regd *regd;
 	enum rtw89_reg_6ghz_power reg_6ghz_power;
+	struct rtw89_reg_6ghz_tpe reg_6ghz_tpe;
 	DECLARE_BITMAP(block_unii4, RTW89_REGD_MAX_COUNTRY_NUM);
 	DECLARE_BITMAP(block_6ghz, RTW89_REGD_MAX_COUNTRY_NUM);
 	DECLARE_BITMAP(block_6ghz_sp, RTW89_REGD_MAX_COUNTRY_NUM);
@@ -6420,6 +6440,16 @@ static inline bool rtw89_is_mlo_1_1(struct rtw89_dev *rtwdev)
 	}
 }
 
+static inline bool rtw89_is_rtl885xb(struct rtw89_dev *rtwdev)
+{
+	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+
+	if (chip_id == RTL8852B || chip_id == RTL8851B || chip_id == RTL8852BT)
+		return true;
+
+	return false;
+}
+
 int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel);
 int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
@@ -6520,8 +6550,8 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			   const u8 *mac_addr, bool hw_scan);
 void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
			      struct ieee80211_vif *vif, bool hw_scan);
-void rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev,
-				 struct rtw89_vif *rtwvif, bool active);
+int rtw89_reg_6ghz_recalc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+			  bool active);
 void rtw89_core_update_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
 void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg event);
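In the core.c hunk above, rtw89_wait_for_cond() pairs atomic_cmpxchg() (claim the single outstanding-request slot or bail out with -EBUSY) with wait_for_completion_timeout(); the timeout/time_left rename matches what that function actually returns: remaining jiffies, or 0 on timeout. A condensed sketch of the pattern:

    #include <linux/atomic.h>
    #include <linux/completion.h>
    #include <linux/errno.h>

    #define EX_COND_IDLE 0

    static int example_wait(atomic_t *cond, struct completion *cmpl,
                            unsigned int want, unsigned long timeout)
    {
            unsigned long time_left;

            if (atomic_cmpxchg(cond, EX_COND_IDLE, want) != EX_COND_IDLE)
                    return -EBUSY; /* another request is in flight */

            time_left = wait_for_completion_timeout(cmpl, timeout);
            if (!time_left) {
                    atomic_set(cond, EX_COND_IDLE); /* release the slot */
                    return -ETIMEDOUT;
            }

            return 0;
    }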
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 210f192ac9ec..9e1353cce9cc 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -818,6 +818,28 @@ static const struct dbgfs_txpwr_table *dbgfs_txpwr_tables[RTW89_CHIP_GEN_NUM] =
 	[RTW89_CHIP_BE] = &dbgfs_txpwr_table_be,
 };
 
+static
+void rtw89_debug_priv_txpwr_table_get_regd(struct seq_file *m,
+					   struct rtw89_dev *rtwdev,
+					   const struct rtw89_chan *chan)
+{
+	const struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+	const struct rtw89_reg_6ghz_tpe *tpe6 = &regulatory->reg_6ghz_tpe;
+
+	seq_printf(m, "[Chanctx] band %u, ch %u, bw %u\n",
		   chan->band_type, chan->channel, chan->band_width);
+
+	seq_puts(m, "[Regulatory] ");
+	__print_regd(m, rtwdev, chan);
+
+	if (chan->band_type == RTW89_BAND_6G) {
+		seq_printf(m, "[reg6_pwr_type] %u\n", regulatory->reg_6ghz_power);
+
+		if (tpe6->valid)
+			seq_printf(m, "[TPE] %d dBm\n", tpe6->constraint);
+	}
+}
+
 static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v)
 {
 	struct rtw89_debugfs_priv *debugfs_priv = m->private;
@@ -831,8 +853,7 @@ static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v)
 	rtw89_leave_ps_mode(rtwdev);
 	chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
 
-	seq_puts(m, "[Regulatory] ");
-	__print_regd(m, rtwdev, chan);
+	rtw89_debug_priv_txpwr_table_get_regd(m, rtwdev, chan);
 
 	seq_puts(m, "[SAR]\n");
 	rtw89_print_sar(m, rtwdev, chan->freq);
@@ -2996,7 +3017,7 @@ static bool is_dbg_port_valid(struct rtw89_dev *rtwdev, u32 sel)
	    sel >= RTW89_DBG_PORT_SEL_PCIE_TXDMA &&
	    sel <= RTW89_DBG_PORT_SEL_PCIE_MISC2)
 		return false;
-	if (rtwdev->chip->chip_id == RTL8852B &&
+	if (rtw89_is_rtl885xb(rtwdev) &&
	    sel >= RTW89_DBG_PORT_SEL_PTCL_C1 &&
	    sel <= RTW89_DBG_PORT_SEL_TXTF_INFOH_C1)
 		return false;
@@ -3531,7 +3552,7 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
 	case RX_ENC_HE:
 		seq_printf(m, "HE %dSS MCS-%d GI:%s", status->nss, status->rate_idx,
			   status->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ?
-			   he_gi_str[rate->he_gi] : "N/A");
+			   he_gi_str[status->he_gi] : "N/A");
 		break;
 	case RX_ENC_EHT:
 		seq_printf(m, "EHT %dSS MCS-%d GI:%s", status->nss, status->rate_idx,
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index be39a8468d32..fbe08c162b93 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -13,22 +13,20 @@
 #include "ps.h"
 #include "reg.h"
 #include "util.h"
+#include "wow.h"
 
 struct rtw89_eapol_2_of_2 {
-	struct ieee80211_hdr_3addr hdr;
 	u8 gtkbody[14];
 	u8 key_des_ver;
 	u8 rsvd[92];
-} __packed __aligned(2);
+} __packed;
 
 struct rtw89_sa_query {
-	struct ieee80211_hdr_3addr hdr;
 	u8 category;
 	u8 action;
-} __packed __aligned(2);
+} __packed;
 
 struct rtw89_arp_rsp {
-	struct ieee80211_hdr_3addr addr;
 	u8 llc_hdr[sizeof(rfc1042_header)];
 	__be16 llc_type;
 	struct arphdr arp_hdr;
 	u8 sender_hw[ETH_ALEN];
 	__be32 sender_ip;
 	u8 target_hw[ETH_ALEN];
 	__be32 target_ip;
-} __packed __aligned(2);
+} __packed;
 
 static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};
 
@@ -462,7 +460,7 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
 	const u8 *mfw = firmware->data;
 	u32 mfw_len = firmware->size;
 	const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
-	const struct rtw89_mfw_info *mfw_info;
+	const struct rtw89_mfw_info *mfw_info = NULL, *tmp;
 	int i;
 
 	if (mfw_hdr->sig != RTW89_MFW_SIG) {
@@ -476,15 +474,27 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
 	}
 
 	for (i = 0; i < mfw_hdr->fw_nr; i++) {
-		mfw_info = &mfw_hdr->info[i];
-		if (mfw_info->type == type) {
-			if (mfw_info->cv == rtwdev->hal.cv && !mfw_info->mp)
-				goto found;
-			if (type == RTW89_FW_LOGFMT)
-				goto found;
+		tmp = &mfw_hdr->info[i];
+		if (tmp->type != type)
+			continue;
+
+		if (type == RTW89_FW_LOGFMT) {
+			mfw_info = tmp;
+			goto found;
+		}
+
+		/* Version order of WiFi firmware in firmware file are not in order,
		 * pass all firmware to find the equal or less but closest version.
		 */
+		if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) {
+			if (!mfw_info || mfw_info->cv < tmp->cv)
+				mfw_info = tmp;
 		}
 	}
 
+	if (mfw_info)
+		goto found;
+
 	if (!nowarn)
 		rtw89_err(rtwdev, "no suitable firmware found\n");
 	return -ENOENT;
@@ -606,10 +616,16 @@ int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev,
 	struct rtw89_hal *hal = &rtwdev->hal;
 	struct rtw89_fw_suit *fw_suit;
 
-	if (hal->cv != elm->u.bbmcu.cv)
+	/* Version of BB MCU is in decreasing order in firmware file, so take
	 * first equal or less version, which is equal or less but closest version.
	 */
+	if (hal->cv < elm->u.bbmcu.cv)
 		return 1; /* ignore this element */
 
 	fw_suit = rtw89_fw_suit_get(rtwdev, type);
+	if (fw_suit->data)
+		return 1; /* ignore this element (a firmware is taken already) */
+
 	fw_suit->data = elm->u.bbmcu.contents;
 	fw_suit->size = le32_to_cpu(elm->size);
 
@@ -659,10 +675,12 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
 	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
 	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
 	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
+	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1),
 	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
 	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
 	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
 	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
+	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1),
 };
 
 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -2179,8 +2197,10 @@ static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev,
				  0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03};
 	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+	u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
 	struct rtw89_eapol_2_of_2 *eapol_pkt;
+	struct ieee80211_hdr_3addr *hdr;
 	struct sk_buff *skb;
 	u8 key_des_ver;
 
@@ -2193,17 +2213,21 @@ static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev,
 	else
 		key_des_ver = 0;
 
-	skb = dev_alloc_skb(sizeof(*eapol_pkt));
+	skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt));
 	if (!skb)
 		return NULL;
 
+	hdr = skb_put_zero(skb, sizeof(*hdr));
+	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
					 IEEE80211_FCTL_TODS |
					 IEEE80211_FCTL_PROTECTED);
+	ether_addr_copy(hdr->addr1, bss_conf->bssid);
+	ether_addr_copy(hdr->addr2, vif->addr);
+	ether_addr_copy(hdr->addr3, bss_conf->bssid);
+
+	skb_put_zero(skb, sec_hdr_len);
+
 	eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt));
-	eapol_pkt->hdr.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
						   IEEE80211_FCTL_TODS |
						   IEEE80211_FCTL_PROTECTED);
-	ether_addr_copy(eapol_pkt->hdr.addr1, bss_conf->bssid);
-	ether_addr_copy(eapol_pkt->hdr.addr2, vif->addr);
-	ether_addr_copy(eapol_pkt->hdr.addr3, bss_conf->bssid);
 	memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody));
 	eapol_pkt->key_des_ver = key_des_ver;
 
@@ -2215,20 +2239,26 @@ static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev,
 {
 	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+	u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
+	struct ieee80211_hdr_3addr *hdr;
 	struct rtw89_sa_query *sa_query;
 	struct sk_buff *skb;
 
-	skb = dev_alloc_skb(sizeof(*sa_query));
+	skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query));
 	if (!skb)
 		return NULL;
 
+	hdr = skb_put_zero(skb, sizeof(*hdr));
+	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					 IEEE80211_STYPE_ACTION |
					 IEEE80211_FCTL_PROTECTED);
+	ether_addr_copy(hdr->addr1, bss_conf->bssid);
+	ether_addr_copy(hdr->addr2, vif->addr);
+	ether_addr_copy(hdr->addr3, bss_conf->bssid);
+
+	skb_put_zero(skb, sec_hdr_len);
+
 	sa_query = skb_put_zero(skb, sizeof(*sa_query));
-	sa_query->hdr.frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
						  IEEE80211_STYPE_ACTION |
						  IEEE80211_FCTL_PROTECTED);
-	ether_addr_copy(sa_query->hdr.addr1, bss_conf->bssid);
-	ether_addr_copy(sa_query->hdr.addr2, vif->addr);
-	ether_addr_copy(sa_query->hdr.addr3, bss_conf->bssid);
 	sa_query->category = WLAN_CATEGORY_SA_QUERY;
 	sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE;
 
@@ -2238,17 +2268,19 @@ static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev,
 static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev,
					      struct rtw89_vif *rtwvif)
 {
+	u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+	struct ieee80211_hdr_3addr *hdr;
 	struct rtw89_arp_rsp *arp_skb;
 	struct arphdr *arp_hdr;
 	struct sk_buff *skb;
 	__le16 fc;
 
-	skb = dev_alloc_skb(sizeof(struct rtw89_arp_rsp));
+	skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb));
 	if (!skb)
 		return NULL;
 
-	arp_skb = skb_put_zero(skb, sizeof(*arp_skb));
+	hdr = skb_put_zero(skb, sizeof(*hdr));
 
 	if (rtw_wow->ptk_alg)
 		fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS |
@@ -2256,11 +2288,14 @@ static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev,
 	else
 		fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS);
 
-	arp_skb->addr.frame_control = fc;
-	ether_addr_copy(arp_skb->addr.addr1, rtwvif->bssid);
-	ether_addr_copy(arp_skb->addr.addr2, rtwvif->mac_addr);
-	ether_addr_copy(arp_skb->addr.addr3, rtwvif->bssid);
+	hdr->frame_control = fc;
+	ether_addr_copy(hdr->addr1, rtwvif->bssid);
+	ether_addr_copy(hdr->addr2, rtwvif->mac_addr);
+	ether_addr_copy(hdr->addr3, rtwvif->bssid);
+
+	skb_put_zero(skb, sec_hdr_len);
 
+	arp_skb = skb_put_zero(skb, sizeof(*arp_skb));
 	memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header));
 	arp_skb->llc_type = htons(ETH_P_ARP);
 
@@ -2461,6 +2496,7 @@ int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
 	struct rtw89_h2c_lps_ch_info *h2c;
 	u32 len = sizeof(*h2c);
 	struct sk_buff *skb;
+	u32 done;
 	int ret;
 
 	if (chip->chip_gen != RTW89_CHIP_BE)
@@ -2484,12 +2520,18 @@ int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
			      H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len);
 
+	rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0);
 	ret = rtw89_h2c_tx(rtwdev, skb, false);
 	if (ret) {
 		rtw89_err(rtwdev, "failed to send h2c\n");
 		goto fail;
 	}
 
+	ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
				true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT);
+	if (ret)
+		rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n");
+
 	return 0;
 fail:
 	dev_kfree_skb_any(skb);
@@ -6732,10 +6774,8 @@ int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
 	skb_put(skb, len);
 	h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data;
 
-	if (!enable) {
-		skb_put_zero(skb, sizeof(*gtk_info));
+	if (!enable)
 		goto hdr;
-	}
 
 	ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
					   RTW89_PKT_OFLD_TYPE_EAPOL_KEY,
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 01fea0b004d3..c3b4324c621c 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -4659,4 +4659,10 @@ const struct rtw89_rfe_parms *
 rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
			    const struct rtw89_rfe_parms *init);
 
+enum rtw89_wow_wakeup_ver {
+	RTW89_WOW_REASON_V0,
+	RTW89_WOW_REASON_V1,
+	RTW89_WOW_REASON_NUM,
+};
+
 #endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index 824ece03d92d..e2399796aeb1 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -1568,6 +1568,8 @@ static int dmac_func_en_ax(struct rtw89_dev *rtwdev)
		 B_AX_DLE_CPUIO_CLK_EN | B_AX_PKT_IN_CLK_EN |
		 B_AX_STA_SCH_CLK_EN | B_AX_TXPKT_CTRL_CLK_EN |
		 B_AX_WD_RLS_CLK_EN | B_AX_BBRPT_CLK_EN);
+	if (chip_id == RTL8852BT)
+		val32 |= B_AX_AXIDMA_CLK_EN;
 	rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val32);
 
 	return 0;
@@ -1577,7 +1579,7 @@ static int chip_func_en_ax(struct rtw89_dev *rtwdev)
 {
 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
 
-	if (chip_id == RTL8852A || chip_id == RTL8852B)
+	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
 		rtw89_write32_set(rtwdev, R_AX_SPS_DIG_ON_CTRL0,
				  B_AX_OCP_L1_MASK);
 
@@ -2146,8 +2148,8 @@ int rtw89_mac_preload_init(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx,
 {
 	const struct rtw89_chip_info *chip = rtwdev->chip;
 
-	if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
-	    chip->chip_id == RTL8851B || !is_qta_poh(rtwdev))
+	if (chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev) ||
+	    !is_qta_poh(rtwdev))
 		return 0;
 
 	return preload_init_set(rtwdev, mac_idx, mode);
@@ -2183,8 +2185,7 @@ static void _patch_ss2f_path(struct rtw89_dev *rtwdev)
 {
 	const struct rtw89_chip_info *chip = rtwdev->chip;
 
-	if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
-	    chip->chip_id == RTL8851B)
+	if (chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
 		return;
 
 	rtw89_write32_mask(rtwdev, R_AX_SS2FINFO_PATH, B_AX_SS_DEST_QUEUE_MASK,
@@ -2360,7 +2361,7 @@ static int scheduler_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
 		rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK,
				   SIFS_MACTXEN_T1);
 
-	if (rtwdev->chip->chip_id == RTL8852B || rtwdev->chip->chip_id == RTL8851B) {
+	if (rtw89_is_rtl885xb(rtwdev)) {
 		reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_SCH_EXT_CTRL, mac_idx);
 		rtw89_write32_set(rtwdev, reg, B_AX_PORT_RST_TSF_ADV);
 	}
@@ -2588,7 +2589,9 @@ static int trxptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
 	case RTL8852A:
 		sifs = WMAC_SPEC_SIFS_OFDM_52A;
 		break;
+	case RTL8851B:
 	case RTL8852B:
+	case RTL8852BT:
 		sifs = WMAC_SPEC_SIFS_OFDM_52B;
 		break;
 	default:
@@ -2632,6 +2635,7 @@ static int rmac_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
 #define RX_MAX_LEN_UNIT 512
 #define PLD_RLS_MAX_PG 127
 #define RX_SPEC_MAX_LEN (11454 + RX_MAX_LEN_UNIT)
+	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
 	int ret;
 	u32 reg, rx_max_len, rx_qta;
 	u16 val;
@@ -2652,6 +2656,8 @@ static int rmac_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
			       B_AX_RX_DLK_DATA_TIME_MASK);
 	val = u16_replace_bits(val, TRXCFG_RMAC_CCA_TO,
			       B_AX_RX_DLK_CCA_TIME_MASK);
+	if (chip_id == RTL8852BT)
+		val |= B_AX_RX_DLK_RST_EN;
 	rtw89_write16(rtwdev, reg, val);
 
 	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RCR, mac_idx);
@@ -2668,8 +2674,7 @@ static int rmac_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
 	rx_max_len /= RX_MAX_LEN_UNIT;
 	rtw89_write32_mask(rtwdev, reg, B_AX_RX_MPDU_MAX_LEN_MASK, rx_max_len);
 
-	if (rtwdev->chip->chip_id == RTL8852A &&
-	    rtwdev->hal.cv == CHIP_CBV) {
+	if (chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV) {
 		rtw89_write16_mask(rtwdev,
				   rtw89_mac_reg_by_idx(rtwdev, R_AX_DLK_PROTECT_CTL, mac_idx),
				   B_AX_RX_DLK_CCA_TIME_MASK, 0);
@@ -2700,7 +2705,7 @@ static int cmac_com_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
 	val = u32_replace_bits(val, 0, B_AX_TXSC_80M_MASK);
 	rtw89_write32(rtwdev, reg, val);
 
-	if (chip_id == RTL8852A || chip_id == RTL8852B) {
+	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
 		reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_RRSR1, mac_idx);
 		rtw89_write32_mask(rtwdev, reg, B_AX_RRSR_RATE_EN_MASK, RRSR_OFDM_CCK_EN);
 	}
@@ -2766,11 +2771,10 @@ static int ptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
 
 static int cmac_dma_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
 {
-	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
 	u32 reg;
 	int ret;
 
-	if (chip_id != RTL8852B)
+	if (!rtw89_is_rtl885xb(rtwdev))
 		return 0;
 
 	ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
@@ -3587,13 +3591,11 @@ static int enable_imr_ax(struct rtw89_dev *rtwdev, u8 mac_idx,
 
 static void err_imr_ctrl_ax(struct rtw89_dev *rtwdev, bool en)
 {
-	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
-
 	rtw89_write32(rtwdev, R_AX_DMAC_ERR_IMR,
		      en ? DMAC_ERR_IMR_EN : DMAC_ERR_IMR_DIS);
 	rtw89_write32(rtwdev, R_AX_CMAC_ERR_IMR,
		      en ? CMAC0_ERR_IMR_EN : CMAC0_ERR_IMR_DIS);
-	if (chip_id != RTL8852B && rtwdev->mac.dle_info.c1_rx_qta)
+	if (!rtw89_is_rtl885xb(rtwdev) && rtwdev->mac.dle_info.c1_rx_qta)
 		rtw89_write32(rtwdev, R_AX_CMAC_ERR_IMR_C1,
			      en ? CMAC1_ERR_IMR_EN : CMAC1_ERR_IMR_DIS);
 }
@@ -3719,10 +3721,9 @@ static int rtw89_mac_feat_init(struct rtw89_dev *rtwdev)
 
 static void rtw89_disable_fw_watchdog(struct rtw89_dev *rtwdev)
 {
-	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
 	u32 val32;
 
-	if (chip_id == RTL8852B || chip_id == RTL8851B) {
+	if (rtw89_is_rtl885xb(rtwdev)) {
 		rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_APB_WRAP_EN);
 		rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_APB_WRAP_EN);
 		return;
@@ -3818,7 +3819,7 @@ static void rtw89_mac_dmac_func_pre_en_ax(struct rtw89_dev *rtwdev)
 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
 	u32 val;
 
-	if (chip_id == RTL8851B)
+	if (chip_id == RTL8851B || chip_id == RTL8852BT)
 		val = B_AX_DISPATCHER_CLK_EN | B_AX_AXIDMA_CLK_EN;
 	else
 		val = B_AX_DISPATCHER_CLK_EN;
@@ -5201,6 +5202,46 @@ rtw89_mac_c2h_mrc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32
 	case RTW89_MAC_MRC_DEL_SCH_OK:
 		func = H2C_FUNC_DEL_MRC;
 		break;
+	case RTW89_MAC_MRC_EMPTY_SCH_FAIL:
+		rtw89_debug(rtwdev, RTW89_DBG_CHAN,
			    "MRC C2H STS RPT: empty sch fail\n");
+		return;
+	case RTW89_MAC_MRC_ROLE_NOT_EXIST_FAIL:
+		rtw89_debug(rtwdev, RTW89_DBG_CHAN,
			    "MRC C2H STS RPT: role not exist fail\n");
+		return;
+	case RTW89_MAC_MRC_DATA_NOT_FOUND_FAIL:
+		rtw89_debug(rtwdev, RTW89_DBG_CHAN,
			    "MRC C2H STS RPT: data not found fail\n");
+		return;
+	case RTW89_MAC_MRC_GET_NEXT_SLOT_FAIL:
+		rtw89_debug(rtwdev, RTW89_DBG_CHAN,
			    "MRC C2H STS RPT: get next slot fail\n");
+		return;
+	case RTW89_MAC_MRC_ALT_ROLE_FAIL:
+		rtw89_debug(rtwdev, RTW89_DBG_CHAN,
			    "MRC C2H STS RPT: alt role fail\n");
+		return;
+	case RTW89_MAC_MRC_ADD_PSTIMER_FAIL:
+		rtw89_debug(rtwdev, RTW89_DBG_CHAN,
			    "MRC C2H STS RPT: add ps timer fail\n");
+		return;
+	case RTW89_MAC_MRC_MALLOC_FAIL:
+		rtw89_debug(rtwdev, RTW89_DBG_CHAN,
			    "MRC C2H STS RPT: malloc fail\n");
+		return;
+	case RTW89_MAC_MRC_SWITCH_CH_FAIL:
+		rtw89_debug(rtwdev, RTW89_DBG_CHAN,
			    "MRC C2H STS RPT: switch ch fail\n");
+		return;
+	case RTW89_MAC_MRC_TXNULL0_FAIL:
+		rtw89_debug(rtwdev, RTW89_DBG_CHAN,
			    "MRC C2H STS RPT: tx null-0 fail\n");
+		return;
+	case RTW89_MAC_MRC_PORT_FUNC_EN_FAIL:
+		rtw89_debug(rtwdev, RTW89_DBG_CHAN,
			    "MRC C2H STS RPT: port func en fail\n");
+		return;
 	default:
 		rtw89_debug(rtwdev, RTW89_DBG_CHAN,
			    "invalid MRC C2H STS RPT: status %d\n", status);
@@ -5463,18 +5504,19 @@ void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop)
 int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex)
 {
+	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
 	u8 val;
 	u16 val16;
 	u32 val32;
 	int ret;
 
 	rtw89_write8_set(rtwdev, R_AX_GPIO_MUXCFG, B_AX_ENBT);
-	if (rtwdev->chip->chip_id != RTL8851B)
+	if (chip_id != RTL8851B && chip_id != RTL8852BT)
 		rtw89_write8_set(rtwdev, R_AX_BTC_FUNC_EN, B_AX_PTA_WL_TX_EN);
 	rtw89_write8_set(rtwdev, R_AX_BT_COEX_CFG_2 + 1, B_AX_GNT_BT_POLARITY >> 8);
 	rtw89_write8_set(rtwdev, R_AX_CSR_MODE, B_AX_STATIS_BT_EN | B_AX_WL_ACT_MSK);
 	rtw89_write8_set(rtwdev, R_AX_CSR_MODE + 2, B_AX_BT_CNT_RST >> 16);
-	if (rtwdev->chip->chip_id != RTL8851B)
+	if (chip_id != RTL8851B && chip_id != RTL8852BT)
 		rtw89_write8_clr(rtwdev, R_AX_TRXPTCL_RESP_0 + 3, B_AX_RSP_CHK_BTCCA >> 24);
 
 	val16 = rtw89_read16(rtwdev, R_AX_CCA_CFG_0);
@@ -5757,8 +5799,7 @@ bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev)
 
 	if (chip->chip_id == RTL8852C || chip->chip_id == RTL8922A)
 		return false;
-	else if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
		 chip->chip_id == RTL8851B)
+	else if (chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
 		val = rtw89_read8_mask(rtwdev, R_AX_SYS_SDIO_CTRL + 3,
				       B_AX_LTE_MUX_CTRL_PATH >> 24);
 
@@ -6319,9 +6360,30 @@ int rtw89_mac_ptk_drop_by_band_and_wait(struct rtw89_dev *rtwdev,
 	return ret;
 }
 
+int rtw89_mac_cpu_io_rx(struct rtw89_dev *rtwdev, bool wow_enable)
+{
+	struct rtw89_mac_h2c_info h2c_info = {};
+	struct rtw89_mac_c2h_info c2h_info = {};
+	u32 ret;
+
+	h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_WOW_CPUIO_RX_CTRL;
+	h2c_info.content_len = sizeof(h2c_info.u.hdr);
+	h2c_info.u.hdr.w0 = u32_encode_bits(wow_enable, RTW89_H2CREG_WOW_CPUIO_RX_CTRL_EN);
+
+	ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info);
+	if (ret)
+		return ret;
+
+	if (c2h_info.id != RTW89_FWCMD_C2HREG_FUNC_WOW_CPUIO_RX_ACK)
+		ret = -EINVAL;
+
+	return ret;
+}
+
 static int rtw89_wow_config_mac_ax(struct rtw89_dev *rtwdev, bool enable_wow)
 {
 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+	const struct rtw89_chip_info *chip = rtwdev->chip;
 	int ret;
 
 	if (enable_wow) {
@@ -6332,12 +6394,19 @@ static int rtw89_wow_config_mac_ax(struct rtw89_dev *rtwdev, bool enable_wow)
 		}
 
 		rtw89_write32_set(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP);
+		rtw89_mac_cpu_io_rx(rtwdev, enable_wow);
 		rtw89_write32_clr(rtwdev, mac->rx_fltr, B_AX_SNIFFER_MODE);
 		rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
 		rtw89_write32(rtwdev, R_AX_ACTION_FWD0, 0);
 		rtw89_write32(rtwdev, R_AX_ACTION_FWD1, 0);
 		rtw89_write32(rtwdev, R_AX_TF_FWD, 0);
 		rtw89_write32(rtwdev, R_AX_HW_RPT_FWD, 0);
+
+		if (chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
+			rtw89_write8(rtwdev, R_BE_DBG_WOW_READY, WOWLAN_NOT_READY);
+		else
+			rtw89_write32_set(rtwdev, R_AX_DBG_WOW,
					  B_AX_DBG_WOW_CPU_IO_RX_EN);
 	} else {
 		ret = rtw89_mac_resize_ple_rx_quota(rtwdev, false);
 		if (ret) {
@@ -6345,6 +6414,7 @@ static int rtw89_wow_config_mac_ax(struct rtw89_dev *rtwdev, bool enable_wow)
 			return ret;
 		}
 
+		rtw89_mac_cpu_io_rx(rtwdev, enable_wow);
 		rtw89_write32_clr(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP);
 		rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
 		rtw89_write32(rtwdev, R_AX_ACTION_FWD0, TRXCFG_MPDU_PROC_ACT_FRWD);
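Editorial aside on the MRC status handling above: the ten new switch cases differ only in the logged string, so a lookup table would express the same mapping more compactly should the list keep growing. A hedged alternative, not what the patch does:

    #include <linux/kernel.h>

    static const char * const ex_mrc_fail_msg[] = {
            "empty sch fail",       /* status 16 */
            "role not exist fail",  /* 17 */
            "data not found fail",  /* 18 */
            "get next slot fail",   /* 19 */
            "alt role fail",        /* 20 */
            "add ps timer fail",    /* 21 */
            "malloc fail",          /* 22 */
            "switch ch fail",       /* 23 */
            "tx null-0 fail",       /* 24 */
            "port func en fail",    /* 25 */
    };

    static const char *ex_mrc_fail_str(unsigned int status)
    {
            if (status < 16 || status - 16 >= ARRAY_SIZE(ex_mrc_fail_msg))
                    return NULL;
            return ex_mrc_fail_msg[status - 16];
    }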
b/drivers/net/wireless/realtek/rtw89/mac.h index a580cb719233..d5895516b3ed 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -466,6 +466,16 @@ enum rtw89_mac_mrc_status { RTW89_MAC_MRC_START_SCH_OK = 0, RTW89_MAC_MRC_STOP_SCH_OK = 1, RTW89_MAC_MRC_DEL_SCH_OK = 2, + RTW89_MAC_MRC_EMPTY_SCH_FAIL = 16, + RTW89_MAC_MRC_ROLE_NOT_EXIST_FAIL = 17, + RTW89_MAC_MRC_DATA_NOT_FOUND_FAIL = 18, + RTW89_MAC_MRC_GET_NEXT_SLOT_FAIL = 19, + RTW89_MAC_MRC_ALT_ROLE_FAIL = 20, + RTW89_MAC_MRC_ADD_PSTIMER_FAIL = 21, + RTW89_MAC_MRC_MALLOC_FAIL = 22, + RTW89_MAC_MRC_SWITCH_CH_FAIL = 23, + RTW89_MAC_MRC_TXNULL0_FAIL = 24, + RTW89_MAC_MRC_PORT_FUNC_EN_FAIL = 25, }; struct rtw89_mac_ax_coex { @@ -1446,5 +1456,6 @@ int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mod int rtw89_mac_get_dle_rsvd_qt_cfg(struct rtw89_dev *rtwdev, enum rtw89_mac_dle_rsvd_qt_type type, struct rtw89_mac_dle_rsvd_qt_cfg *cfg); +int rtw89_mac_cpu_io_rx(struct rtw89_dev *rtwdev, bool wow_enable); #endif diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c index 722d09e9fbb5..1508693032cb 100644 --- a/drivers/net/wireless/realtek/rtw89/mac80211.c +++ b/drivers/net/wireless/realtek/rtw89/mac80211.c @@ -487,6 +487,9 @@ static void rtw89_ops_link_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_CQM) rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true); + if (changed & BSS_CHANGED_TPE) + rtw89_reg_6ghz_recalc(rtwdev, rtwvif, true); + mutex_unlock(&rtwdev->mutex); } diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c index 934bdf3b398f..f212b67771d5 100644 --- a/drivers/net/wireless/realtek/rtw89/mac_be.c +++ b/drivers/net/wireless/realtek/rtw89/mac_be.c @@ -2312,26 +2312,6 @@ static void rtw89_mac_dump_qta_lost_be(struct rtw89_dev *rtwdev) dump_err_status_dispatcher_be(rtwdev); } -static int rtw89_mac_cpu_io_rx(struct rtw89_dev *rtwdev, bool wow_enable) -{ - struct rtw89_mac_h2c_info h2c_info = {}; - struct rtw89_mac_c2h_info c2h_info = {}; - u32 ret; - - h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_WOW_CPUIO_RX_CTRL; - h2c_info.content_len = sizeof(h2c_info.u.hdr); - h2c_info.u.hdr.w0 = u32_encode_bits(wow_enable, RTW89_H2CREG_WOW_CPUIO_RX_CTRL_EN); - - ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info); - if (ret) - return ret; - - if (c2h_info.id != RTW89_FWCMD_C2HREG_FUNC_WOW_CPUIO_RX_ACK) - ret = -EINVAL; - - return ret; -} - static int rtw89_wow_config_mac_be(struct rtw89_dev *rtwdev, bool enable_wow) { if (enable_wow) { diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 36cb0e351eea..02afeb3acce4 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -183,14 +183,17 @@ static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev, static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev, struct sk_buff *skb) { - struct rtw89_pci_rxbd_info *rxbd_info; struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); + struct rtw89_pci_rxbd_info *rxbd_info; + __le32 info; rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data; - rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS); - rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS); - rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE); - rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG); + info = rxbd_info->dword; + + rx_info->fs 
= le32_get_bits(info, RTW89_PCI_RXBD_FS); + rx_info->ls = le32_get_bits(info, RTW89_PCI_RXBD_LS); + rx_info->len = le32_get_bits(info, RTW89_PCI_RXBD_WRITE_SIZE); + rx_info->tag = le32_get_bits(info, RTW89_PCI_RXBD_TAG); } static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev, @@ -1298,10 +1301,12 @@ u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev, dma_addr_t dma, u8 *add_info_nr) { struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr; + __le16 option; txaddr_info->length = cpu_to_le16(total_len); - txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | - RTW89_PCI_ADDR_NUM(1)); + option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | RTW89_PCI_ADDR_NUM(1)); + option |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_ADDR_HIGH_MASK); + txaddr_info->option = option; txaddr_info->dma = cpu_to_le32(dma); *add_info_nr = 1; @@ -1328,6 +1333,8 @@ u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev, length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) | FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) | FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0); + length_option |= u16_encode_bits(upper_32_bits(dma), + B_PCIADDR_HIGH_SEL_V1_MASK); txaddr_info->length_opt = cpu_to_le16(length_option); txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma)); txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma)); @@ -1418,6 +1425,7 @@ static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev, struct sk_buff *skb = tx_req->skb; struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); dma_addr_t dma; + __le16 opt; txdesc = skb_push(skb, txdesc_size); memset(txdesc, 0, txdesc_size); @@ -1430,7 +1438,9 @@ static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev, } tx_data->dma = dma; - txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS); + opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS); + opt |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_TXBD_OPT_DMA_HI); + txbd->opt = opt; txbd->length = cpu_to_le16(skb->len); txbd->dma = cpu_to_le32(tx_data->dma); skb_queue_tail(&rtwpci->h2c_queue, skb); @@ -1446,6 +1456,7 @@ static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) { struct rtw89_pci_tx_wd *txwd; + __le16 opt; int ret; /* FWCMD queue doesn't have wd pages. 
Instead, it submits the CMD @@ -1470,7 +1481,9 @@ static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev, list_add_tail(&txwd->list, &tx_ring->busy_pages); - txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS); + opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS); + opt |= le16_encode_bits(upper_32_bits(txwd->paddr), RTW89_PCI_TXBD_OPT_DMA_HI); + txbd->opt = opt; txbd->length = cpu_to_le16(txwd->len); txbd->dma = cpu_to_le32(txwd->paddr); @@ -1569,6 +1582,25 @@ const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = { }; EXPORT_SYMBOL(rtw89_bd_ram_table_single); +static void rtw89_pci_init_wp_16sel(struct rtw89_dev *rtwdev) +{ + const struct rtw89_pci_info *info = rtwdev->pci_info; + u32 addr = info->wp_sel_addr; + u32 val; + int i; + + if (!info->wp_sel_addr) + return; + + for (i = 0; i < 16; i += 4) { + val = u32_encode_bits(i + 0, MASKBYTE0) | + u32_encode_bits(i + 1, MASKBYTE1) | + u32_encode_bits(i + 2, MASKBYTE2) | + u32_encode_bits(i + 3, MASKBYTE3); + rtw89_write32(rtwdev, addr + i, val); + } +} + static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev) { struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; @@ -1607,6 +1639,7 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev) rtw89_write32(rtwdev, addr_bdram, val32); } rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); + rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma)); } for (i = 0; i < RTW89_RXCH_NUM; i++) { @@ -1626,10 +1659,13 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev) rtw89_write16(rtwdev, addr_num, bd_ring->len); rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); + rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma)); if (info->rx_ring_eq_is_full) rtw89_write16(rtwdev, addr_idx, bd_ring->wp); } + + rtw89_pci_init_wp_16sel(rtwdev); } static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev, @@ -2039,7 +2075,7 @@ static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr, if (!ret) return 0; - if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) ret = rtw89_dbi_write8(rtwdev, addr, data); return ret; @@ -2057,7 +2093,7 @@ static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr, if (!ret) return 0; - if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) ret = rtw89_dbi_read8(rtwdev, addr, value); return ret; @@ -2137,10 +2173,9 @@ __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev) { - enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; int ret; - if (chip_id != RTL8852B && chip_id != RTL8851B) + if (!rtw89_is_rtl885xb(rtwdev)) return 0; ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK, @@ -2150,14 +2185,13 @@ static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev) static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en) { - enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; enum rtw89_pcie_phy phy_rate; u16 val16, mgn_set, div_set, tar; u8 val8, bdr_ori; bool l1_flag = false; int ret = 0; - if (chip_id != RTL8852B && chip_id != RTL8851B) + if (!rtw89_is_rtl885xb(rtwdev)) return 0; ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8); @@ -2398,7 +2432,7 @@ static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev) { enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; - if (chip_id != RTL8852A 
&& chip_id != RTL8852B && chip_id != RTL8851B) + if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev)) return; rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN); @@ -2428,7 +2462,7 @@ static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev) { enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; - if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B) + if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev)) return; rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN); @@ -2438,7 +2472,7 @@ static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) { enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; - if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, @@ -2451,9 +2485,7 @@ static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev) { - enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; - - if (chip_id != RTL8852B && chip_id != RTL8851B) + if (!rtw89_is_rtl885xb(rtwdev)) return 0; return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK, @@ -2715,7 +2747,7 @@ static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) B_AX_PCIE_RX_APPLEN_MASK, 0); } - if (chip_id == RTL8852A || chip_id == RTL8852B) { + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst); rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst); } else if (chip_id == RTL8852C) { @@ -2723,7 +2755,7 @@ static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst); } - if (chip_id == RTL8852A || chip_id == RTL8852B) { + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { if (tag_mode == MAC_AX_TAG_SGL) { val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) & ~B_AX_LATENCY_CONTROL; @@ -2738,7 +2770,7 @@ static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask, info->multi_tag_num); - if (chip_id == RTL8852A || chip_id == RTL8852B) { + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE, wd_dma_idle_intvl); rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT, @@ -2951,7 +2983,7 @@ static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev) /* ltr sw trigger */ rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT); } - if (chip_id == RTL8852A || chip_id == RTL8852B) { + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { /* ADDR info 8-byte mode */ rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, B_AX_HOST_ADDR_INFO_8B_SEL); @@ -2994,6 +3026,27 @@ static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev, pci_disable_device(pdev); } +static void rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (!rtwpci->enable_dac) + return; + + switch (chip->chip_id) { + case RTL8852A: + case RTL8852B: + case RTL8851B: + case RTL8852BT: + break; + default: + return; + } + + rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, RTW89_PCIE_BIT_EN_64BITS); +} + static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, struct 
pci_dev *pdev) { @@ -3008,16 +3061,17 @@ static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, goto err; } - ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (ret) { - rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n"); - goto err_release_regions; - } - - ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (ret) { - rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n"); - goto err_release_regions; + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36)); + if (!ret) { + rtwpci->enable_dac = true; + rtw89_pci_cfg_dac(rtwdev); + } else { + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + rtw89_err(rtwdev, + "failed to set dma and consistent mask to 32/36-bit\n"); + goto err_release_regions; + } } resource_len = pci_resource_len(pdev, bar_id); @@ -3168,6 +3222,7 @@ static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev, memset(rx_bd, 0, sizeof(*rx_bd)); rx_bd->buf_size = cpu_to_le16(buf_sz); rx_bd->dma = cpu_to_le32(dma); + rx_bd->opt = le16_encode_bits(upper_32_bits(dma), RTW89_PCI_RXBD_OPT_DMA_HI); rx_info->dma = dma; return 0; @@ -3760,7 +3815,7 @@ static void rtw89_pci_clkreq_set_ax(struct rtw89_dev *rtwdev, bool enable) if (ret) rtw89_err(rtwdev, "failed to set CLKREQ Delay\n"); - if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { if (enable) ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, @@ -3812,7 +3867,7 @@ static void rtw89_pci_aspm_set_ax(struct rtw89_dev *rtwdev, bool enable) if (ret) rtw89_warn(rtwdev, "failed to read ASPM Delay\n"); - if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { if (enable) ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, @@ -3911,7 +3966,7 @@ static void rtw89_pci_l1ss_set_ax(struct rtw89_dev *rtwdev, bool enable) enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; int ret; - if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { if (enable) ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_TIMER_CTRL, @@ -4108,7 +4163,7 @@ static int __maybe_unused rtw89_pci_suspend(struct device *dev) rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); - if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, @@ -4142,7 +4197,7 @@ static int __maybe_unused rtw89_pci_resume(struct device *dev) rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); - if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, @@ -4155,6 +4210,7 @@ static int __maybe_unused rtw89_pci_resume(struct device *dev) } rtw89_pci_l2_hci_ldo(rtwdev); rtw89_pci_disable_eq(rtwdev); + rtw89_pci_cfg_dac(rtwdev); rtw89_pci_filter_out(rtwdev); rtw89_pci_link_cfg(rtwdev); rtw89_pci_l1ss_cfg(rtwdev); 
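The pci.c hunks above change two related things: probe now requests a 36-bit DMA mask and only falls back to the old 32-bit mask if the host rejects it, and bits [35:32] of each descriptor address are folded into the 16-bit option word of the TX/RX buffer descriptors. A minimal sketch of that pattern, assuming only core kernel DMA/bitfield helpers; the example_* names are illustrative, not driver symbols:

#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Field layout mirrors the GENMASK(13, 6) high-address fields in the
 * pci.h hunk below.
 */
#define EXAMPLE_ADDR_HIGH_MASK GENMASK(13, 6)

static int example_setup_dma(struct pci_dev *pdev, bool *enable_dac)
{
	/* Prefer 36-bit addressing; fall back to 32-bit if unsupported. */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36))) {
		*enable_dac = true;
		return 0;
	}

	*enable_dac = false;
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}

static __le16 example_encode_addr_high(dma_addr_t dma)
{
	/* With a 32-bit mask upper_32_bits() is zero, so this encoding is
	 * a no-op on the fallback path and the descriptor stays compatible.
	 */
	return le16_encode_bits(upper_32_bits(dma), EXAMPLE_ADDR_HIGH_MASK);
}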
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h index 7666753ae983..48c3ab735db2 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.h +++ b/drivers/net/wireless/realtek/rtw89/pci.h @@ -724,6 +724,11 @@ #define B_AX_CH11_BUSY BIT(1) #define B_AX_CH10_BUSY BIT(0) +#define R_AX_WP_ADDR_H_SEL0_3 0x1334 +#define R_AX_WP_ADDR_H_SEL4_7 0x1338 +#define R_AX_WP_ADDR_H_SEL8_11 0x133C +#define R_AX_WP_ADDR_H_SEL12_15 0x1340 + #define R_BE_HAXI_DMA_STOP1 0xB010 #define B_BE_STOP_WPDMA BIT(31) #define B_BE_STOP_CH14 BIT(14) @@ -823,6 +828,11 @@ #define R_BE_RPQ0_RXBD_DESA_L_V1 0xB308 #define R_BE_RPQ0_RXBD_DESA_H_V1 0xB30C +#define R_BE_WP_ADDR_H_SEL0_3_V1 0xB420 +#define R_BE_WP_ADDR_H_SEL4_7_V1 0xB424 +#define R_BE_WP_ADDR_H_SEL8_11_V1 0xB428 +#define R_BE_WP_ADDR_H_SEL12_15_V1 0xB42C + /* Configure */ #define R_AX_PCIE_INIT_CFG2 0x1004 #define B_AX_WD_ITVL_IDLE GENMASK(27, 24) @@ -1055,6 +1065,7 @@ #define RTW89_PCIE_TIMER_CTRL 0x0718 #define RTW89_PCIE_BIT_L1SUB BIT(5) #define RTW89_PCIE_L1_CTRL 0x0719 +#define RTW89_PCIE_BIT_EN_64BITS BIT(5) #define RTW89_PCIE_BIT_CLK BIT(4) #define RTW89_PCIE_BIT_L1 BIT(3) #define RTW89_PCIE_CLK_CTRL 0x0725 @@ -1304,6 +1315,7 @@ struct rtw89_pci_info { u32 rpwm_addr; u32 cpwm_addr; u32 mit_addr; + u32 wp_sel_addr; u32 tx_dma_ch_mask; const struct rtw89_pci_bd_idx_addr *bd_idx_addr_low_power; const struct rtw89_pci_ch_dma_addr_set *dma_addr_set; @@ -1330,11 +1342,11 @@ struct rtw89_pci_rx_info { u32 fs:1, ls:1, tag:13, len:14; }; -#define RTW89_PCI_TXBD_OPTION_LS BIT(14) - struct rtw89_pci_tx_bd_32 { __le16 length; - __le16 option; + __le16 opt; +#define RTW89_PCI_TXBD_OPT_LS BIT(14) +#define RTW89_PCI_TXBD_OPT_DMA_HI GENMASK(13, 6) __le32 dma; } __packed; @@ -1349,7 +1361,7 @@ struct rtw89_pci_tx_wp_info { #define RTW89_PCI_ADDR_MSDU_LS BIT(15) #define RTW89_PCI_ADDR_LS BIT(14) -#define RTW89_PCI_ADDR_HIGH(a) (((a) << 6) & GENMASK(13, 6)) +#define RTW89_PCI_ADDR_HIGH_MASK GENMASK(13, 6) #define RTW89_PCI_ADDR_NUM(x) ((x) & GENMASK(5, 0)) struct rtw89_pci_tx_addr_info_32 { @@ -1386,7 +1398,8 @@ struct rtw89_pci_rpp_fmt { struct rtw89_pci_rx_bd_32 { __le16 buf_size; - __le16 rsvd; + __le16 opt; +#define RTW89_PCI_RXBD_OPT_DMA_HI GENMASK(13, 6) __le32 dma; } __packed; @@ -1475,6 +1488,7 @@ struct rtw89_pci { bool running; bool low_power; bool under_recovery; + bool enable_dac; struct rtw89_pci_tx_ring tx_rings[RTW89_TXCH_NUM]; struct rtw89_pci_rx_ring rx_rings[RTW89_RXCH_NUM]; struct sk_buff_head h2c_queue; diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index a82b4c56a6f4..ad11d1414874 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -2,6 +2,7 @@ /* Copyright(c) 2019-2020 Realtek Corporation */ +#include "chan.h" #include "coex.h" #include "debug.h" #include "fw.h" @@ -1676,7 +1677,7 @@ static void rtw89_phy_preinit_rf_nctl_ax(struct rtw89_dev *rtwdev) rtw89_phy_write32_set(rtwdev, R_P0_PATH_RST, 0x8000000); if (chip->chip_id != RTL8851B) rtw89_phy_write32_set(rtwdev, R_P1_PATH_RST, 0x8000000); - if (chip->chip_id == RTL8852B) + if (chip->chip_id == RTL8852B || chip->chip_id == RTL8852BT) rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x2); /* check 0x8080 */ @@ -1847,6 +1848,36 @@ static s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf) return txpwr_rf >> (chip->txpwr_factor_rf - chip->txpwr_factor_mac); } +static s8 rtw89_phy_txpwr_dbm_to_mac(struct rtw89_dev *rtwdev, s8 dbm) +{ + const struct 
rtw89_chip_info *chip = rtwdev->chip;
+
+	return clamp_t(s16, dbm << chip->txpwr_factor_mac, -64, 63);
+}
+
+static s8 rtw89_phy_txpwr_dbm_without_tolerance(s8 dbm)
+{
+	const u8 tssi_deviation_point = 0;
+	const u8 tssi_max_deviation = 2;
+
+	if (dbm <= tssi_deviation_point)
+		dbm -= tssi_max_deviation;
+
+	return dbm;
+}
+
+static s8 rtw89_phy_get_tpe_constraint(struct rtw89_dev *rtwdev, u8 band)
+{
+	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+	const struct rtw89_reg_6ghz_tpe *tpe = &regulatory->reg_6ghz_tpe;
+	s8 cstr = S8_MAX;
+
+	if (band == RTW89_BAND_6G && tpe->valid)
+		cstr = rtw89_phy_txpwr_dbm_without_tolerance(tpe->constraint);
+
+	return rtw89_phy_txpwr_dbm_to_mac(rtwdev, cstr);
+}
+
 s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw,
 			       const struct rtw89_rate_desc *rate_desc)
 {
@@ -1921,6 +1952,7 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
 	u8 regd = rtw89_regd_get(rtwdev, band);
 	u8 reg6 = regulatory->reg_6ghz_power;
 	s8 lmt = 0, sar;
+	s8 cstr;
 	switch (band) {
 	case RTW89_BAND_2G:
@@ -1953,8 +1985,9 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
 	lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt);
 	sar = rtw89_query_sar(rtwdev, freq);
+	cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);
-	return min(lmt, sar);
+	return min3(lmt, sar, cstr);
 }
 EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit);
@@ -2178,6 +2211,7 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
 	u8 regd = rtw89_regd_get(rtwdev, band);
 	u8 reg6 = regulatory->reg_6ghz_power;
 	s8 lmt_ru = 0, sar;
+	s8 cstr;
 	switch (band) {
 	case RTW89_BAND_2G:
@@ -2210,8 +2244,9 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
 	lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru);
 	sar = rtw89_query_sar(rtwdev, freq);
+	cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);
-	return min(lmt_ru, sar);
+	return min3(lmt_ru, sar, cstr);
 }
 static void
@@ -5969,6 +6004,74 @@ void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif
 				 vif->cfg.aid, phy_idx);
 }
+static bool rfk_chan_validate_desc(const struct rtw89_rfk_chan_desc *desc)
+{
+	return desc->ch != 0;
+}
+
+static bool rfk_chan_is_equivalent(const struct rtw89_rfk_chan_desc *desc,
+				   const struct rtw89_chan *chan)
+{
+	if (!rfk_chan_validate_desc(desc))
+		return false;
+
+	if (desc->ch != chan->channel)
+		return false;
+
+	if (desc->has_band && desc->band != chan->band_type)
+		return false;
+
+	if (desc->has_bw && desc->bw != chan->band_width)
+		return false;
+
+	return true;
+}
+
+struct rfk_chan_iter_data {
+	const struct rtw89_rfk_chan_desc desc;
+	unsigned int found;
+};
+
+static int rfk_chan_iter_search(const struct rtw89_chan *chan, void *data)
+{
+	struct rfk_chan_iter_data *iter_data = data;
+
+	if (rfk_chan_is_equivalent(&iter_data->desc, chan))
+		iter_data->found++;
+
+	return 0;
+}
+
+u8 rtw89_rfk_chan_lookup(struct rtw89_dev *rtwdev,
+			 const struct rtw89_rfk_chan_desc *desc, u8 desc_nr,
+			 const struct rtw89_chan *target_chan)
+{
+	int sel = -1;
+	u8 i;
+
+	for (i = 0; i < desc_nr; i++) {
+		struct rfk_chan_iter_data iter_data = {
+			.desc = desc[i],
+		};
+
+		if (rfk_chan_is_equivalent(&desc[i], target_chan))
+			return i;
+
+		rtw89_iterate_entity_chan(rtwdev, rfk_chan_iter_search, &iter_data);
+		if (!iter_data.found && sel == -1)
+			sel = i;
+	}
+
+	if (sel == -1) {
+		rtw89_debug(rtwdev, RTW89_DBG_RFK,
+			    "no idle rfk entry; force replace the first\n");
+		sel = 0;
+	}
+
+	return sel;
+}
+EXPORT_SYMBOL(rtw89_rfk_chan_lookup);
+
 static void
_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index 082231ebbee5..d8df553b9cb0 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -129,6 +129,7 @@
 #define EDCCA_HL_DIFF_NORMAL 8
 #define RSSI_UNIT_CONVER 110
 #define EDCCA_UNIT_CONVER 128
+#define EDCCA_PWROFST_DEFAULT 18
 enum rtw89_phy_c2h_ra_func {
 	RTW89_PHY_C2H_FUNC_STS_RPT,
@@ -714,6 +715,19 @@ enum rtw89_phy_gain_band_be rtw89_subband_to_gain_band_be(enum rtw89_subband sub
 	}
 }
+struct rtw89_rfk_chan_desc {
+	/* desc is valid iff ch is non-zero */
+	u8 ch;
+
+	/* To avoid extending old chip code every time, each new
+	 * field must be defined along with a bool flag in a positive way.
+	 */
+	bool has_band;
+	u8 band;
+	bool has_bw;
+	u8 bw;
+};
+
 enum rtw89_rfk_flag {
 	RTW89_RFK_F_WRF = 0,
 	RTW89_RFK_F_WM = 1,
@@ -949,5 +963,8 @@ enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
 					   enum rtw89_phy_idx phy_idx);
 enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
 					 enum rtw89_phy_idx phy_idx);
+u8 rtw89_rfk_chan_lookup(struct rtw89_dev *rtwdev,
+			 const struct rtw89_rfk_chan_desc *desc, u8 desc_nr,
+			 const struct rtw89_chan *target_chan);
 #endif
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 01cbd0312102..7df36f3bff0b 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -311,6 +311,9 @@
 #define B_AX_S1_LDO2PWRCUT_F BIT(23)
 #define B_AX_S0_LDO_VSEL_F_MASK GENMASK(22, 21)
+#define R_AX_DBG_WOW 0x0504
+#define B_AX_DBG_WOW_CPU_IO_RX_EN BIT(8)
+
 #define R_AX_SEC_CTRL 0x0C00
 #define B_AX_SEC_IDMEM_SIZE_CONFIG_MASK GENMASK(17, 16)
@@ -3169,6 +3172,8 @@
 #define R_AX_DLK_PROTECT_CTL_C1 0xEE02
 #define B_AX_RX_DLK_CCA_TIME_MASK GENMASK(15, 8)
 #define B_AX_RX_DLK_DATA_TIME_MASK GENMASK(7, 4)
+#define B_AX_RX_DLK_RST_EN BIT(1)
+#define B_AX_RX_DLK_INT_EN BIT(0)
 #define R_AX_PLCP_HDR_FLTR 0xCE04
 #define R_AX_PLCP_HDR_FLTR_C1 0xEE04
@@ -4313,6 +4318,8 @@
 #define R_BE_WLCPU_PORT_PC 0x03FC
+#define R_BE_DBG_WOW 0x0504
+
 #define R_BE_DCPU_PLATFORM_ENABLE 0x0888
 #define B_BE_DCPU_SYM_DPLT_MEM_MUX_EN BIT(10)
 #define B_BE_DCPU_WARM_EN BIT(9)
@@ -7811,6 +7818,8 @@
 #define B_UPD_P0_EN BIT(31)
 #define R_EMLSR 0x0044
 #define B_EMLSR_PARM GENMASK(27, 12)
+#define R_CHK_LPS_STAT 0x0058
+#define B_CHK_LPS_STAT BIT(0)
 #define R_SPOOF_CG 0x00B4
 #define B_SPOOF_CG_EN BIT(17)
 #define R_CHINFO_SEG 0x00B4
@@ -7827,6 +7836,7 @@
 #define B_ANAPAR_PW15_H2 GENMASK(27, 26)
 #define R_ANAPAR 0x032C
 #define B_ANAPAR_15 GENMASK(31, 16)
+#define B_ANAPAR_EN1 BIT(31)
 #define B_ANAPAR_ADCCLK BIT(30)
 #define B_ANAPAR_FLTRST BIT(22)
 #define B_ANAPAR_CRXBB GENMASK(18, 16)
@@ -7868,10 +7878,12 @@
 #define R_RXCCA_BE1 0x0520
 #define B_RXCCA_BE1_DIS BIT(0)
 #define R_UPD_CLK_ADC 0x0700
+#define B_UPD_GEN_ON BIT(27)
 #define B_UPD_CLK_ADC_VAL GENMASK(26, 25)
 #define B_UPD_CLK_ADC_ON BIT(24)
 #define B_ENABLE_CCK BIT(5)
 #define R_RSTB_ASYNC 0x0704
+#define B_RSTB_ASYNC_BW80 GENMASK(9, 8)
 #define B_RSTB_ASYNC_ALL BIT(1)
 #define R_P0_ANT_SW 0x0728
 #define B_P0_HW_ANTSW_DIS_BY_GNT_BT BIT(12)
@@ -7935,6 +7947,8 @@
 #define B_MEASUREMENT_TRIG_MSK BIT(2)
 #define B_CCX_TRIG_OPT_MSK BIT(1)
 #define B_CCX_EN_MSK BIT(0)
+#define R_FAHM 0x0C1C
+#define B_RXTD_CKEN BIT(2)
 #define R_IFS_COUNTER 0x0C28
 #define B_IFS_CLM_PERIOD_MSK GENMASK(31, 16)
 #define B_IFS_CLM_COUNTER_UNIT_MSK GENMASK(15, 14)
@@
-7968,6 +7982,7 @@ #define B_IQK_DPK_RST BIT(0) #define R_TX_COLLISION_T2R_ST 0x0C70 #define B_TX_COLLISION_T2R_ST_M GENMASK(25, 20) +#define B_TXRX_FORCE_VAL GENMASK(9, 0) #define R_TXGATING 0x0C74 #define B_TXGATING_EN BIT(4) #define R_TXRFC 0x0C7C @@ -8028,6 +8043,7 @@ #define B_P0_RFMODE_FTM_RX GENMASK(11, 0) #define R_P0_NRBW 0x12B8 #define B_P0_NRBW_DBG BIT(30) +#define B_P0_NRBW_RSTB BIT(28) #define R_S0_RXDC 0x12D4 #define B_S0_RXDC_I GENMASK(25, 16) #define B_S0_RXDC_Q GENMASK(31, 26) @@ -8109,6 +8125,8 @@ #define R_S0_ADDCK 0x1E00 #define B_S0_ADDCK_I GENMASK(9, 0) #define B_S0_ADDCK_Q GENMASK(19, 10) +#define R_TXCKEN_FORCE 0x2008 +#define B_TXCKEN_FORCE_ALL GENMASK(24, 0) #define R_EDCCA_RPT_SEL 0x20CC #define B_EDCCA_RPT_SEL_MSK GENMASK(2, 0) #define R_ADC_FIFO 0x20fc @@ -8264,6 +8282,7 @@ #define R_DCFO_COMP_S0 0x448C #define B_DCFO_COMP_S0_MSK GENMASK(11, 0) #define R_DCFO_WEIGHT 0x4490 +#define B_DAC_CLK_IDX BIT(31) #define B_DCFO_WEIGHT_MSK GENMASK(27, 24) #define R_DCFO_OPT 0x4494 #define B_DCFO_OPT_EN BIT(29) @@ -8379,6 +8398,7 @@ #define B_CDD_EVM_CHK_EN BIT(0) #define R_PATH0_BAND_SEL_V1 0x4738 #define B_PATH0_BAND_SEL_MSK_V1 BIT(17) +#define B_PATH0_BAND_NRBW_EN_V1 BIT(16) #define R_PATH0_BT_SHARE_V1 0x4738 #define B_PATH0_BT_SHARE_V1 BIT(19) #define R_PATH0_BTG_PATH_V1 0x4738 @@ -8422,6 +8442,7 @@ #define B_PATH1_G_TIA1_LNA6_OP1DB_V1 GENMASK(15, 8) #define R_PATH1_BAND_SEL_V1 0x4AA4 #define B_PATH1_BAND_SEL_MSK_V1 BIT(17) +#define B_PATH1_BAND_NRBW_EN_V1 BIT(16) #define R_PATH1_BT_SHARE_V1 0x4AA4 #define B_PATH1_BT_SHARE_V1 BIT(19) #define R_PATH1_BTG_PATH_V1 0x4AA4 @@ -8442,6 +8463,8 @@ #define B_SEG0R_PD_SPATIAL_REUSE_EN_MSK_V1 BIT(30) #define B_SEG0R_PD_SPATIAL_REUSE_EN_MSK BIT(29) #define B_SEG0R_PD_LOWER_BOUND_MSK GENMASK(10, 6) +#define R_PWOFST 0x488C +#define B_PWOFST GENMASK(21, 17) #define R_2P4G_BAND 0x4970 #define B_2P4G_BAND_SEL BIT(1) #define R_FC0_BW 0x4974 @@ -8622,6 +8645,8 @@ #define B_P0_TMETER GENMASK(15, 10) #define B_P0_TMETER_DIS BIT(16) #define B_P0_TMETER_TRK BIT(24) +#define R_P0_ADCFF_EN 0x58C8 +#define B_P0_ADCFF_EN BIT(24) #define R_P1_TSSIC 0x7814 #define B_P1_TSSIC_BYPASS BIT(11) #define R_P0_TSSI_TRK 0x5818 @@ -8633,7 +8658,9 @@ #define B_P0_TSSI_EN BIT(31) #define B_P0_TSSI_AVG GENMASK(15, 12) #define R_P0_RFCTM 0x5864 +#define B_P0_CLKG_FORCE GENMASK(31, 30) #define B_P0_RFCTM_EN BIT(29) +#define B_P0_GOT_TXRX GENMASK(28, 27) #define B_P0_RFCTM_VAL GENMASK(25, 20) #define R_P0_RFCTM_RDY BIT(26) #define R_P0_TRSW 0x5868 @@ -8666,12 +8693,14 @@ #define B_P0_RFM_BT_EN BIT(5) #define B_P0_RFM_OUT GENMASK(4, 0) #define R_P0_PATH_RST 0x58AC +#define B_P0_PATH_RST BIT(27) #define R_P0_TXDPD 0x58D4 #define B_P0_TXDPD GENMASK(31, 28) #define R_P0_TXPW_RSTB 0x58DC #define B_P0_TXPW_RSTB_MANON BIT(30) #define B_P0_TXPW_RSTB_TSSI BIT(31) #define R_P0_TSSI_MV_AVG 0x58E4 +#define B_P0_TXPW_RSTB GENMASK(28, 27) #define B_P0_TSSI_MV_MIX GENMASK(19, 11) #define B_P0_TSSI_MV_AVG GENMASK(13, 11) #define B_P0_TSSI_MV_CLR BIT(14) @@ -8796,6 +8825,10 @@ #define B_P1_TSSI_ALIM2 GENMASK(29, 0) #define R_P1_TSSI_ADC_CLK 0x766c #define B_P1_TSSI_ADC_CLK GENMASK(17, 16) +#define R_P1_TXAGC_TH 0x7800 +#define B_P1_TXAGC_MAXMIN GENMASK(15, 0) +#define R_P1_TXPW_FORCE 0x780C +#define B_P1_TXPW_RDY BIT(15) #define R_P1_TSSIC 0x7814 #define B_P1_TSSIC_BYPASS BIT(11) #define R_P1_TMETER 0x7810 @@ -8811,14 +8844,20 @@ #define B_P1_TSSI_EN BIT(31) #define B_P1_TSSI_AVG GENMASK(15, 12) #define R_P1_RFCTM 0x7864 +#define B_P1_CLKG_FORCE GENMASK(31, 30) +#define 
B_P1_GOT_TXRX GENMASK(28, 27) #define R_P1_RFCTM_RDY BIT(26) #define B_P1_RFCTM_VAL GENMASK(25, 20) #define B_P1_RFCTM_DEL GENMASK(19, 11) #define R_P1_PATH_RST 0x78AC +#define B_P1_PATH_RST BIT(27) +#define R_P1_ADCFF_EN 0x78C8 +#define B_P1_ADCFF_EN BIT(24) #define R_P1_TXPW_RSTB 0x78DC #define B_P1_TXPW_RSTB_MANON BIT(30) #define B_P1_TXPW_RSTB_TSSI BIT(31) #define R_P1_TSSI_MV_AVG 0x78E4 +#define B_P1_TXPW_RSTB GENMASK(28, 27) #define B_P1_TSSI_MV_MIX GENMASK(19, 11) #define B_P1_TSSI_MV_AVG GENMASK(13, 11) #define B_P1_TSSI_MV_CLR BIT(14) @@ -9003,6 +9042,7 @@ #define R_IQRSN 0x8220 #define B_IQRSN_K1 BIT(28) #define B_IQRSN_K2 BIT(16) +#define R_DPD_CH0B 0x82BC #define R_RXCFIR_P0C0 0x8D40 #define R_RXCFIR_P0C1 0x8D84 #define R_RXCFIR_P0C2 0x8DC8 @@ -9036,15 +9076,18 @@ #define B_IQKINF2_FCNT GENMASK(23, 16) #define B_IQKINF2_KCNT GENMASK(15, 8) #define B_IQKINF2_NCTLV GENMASK(7, 0) +#define R_RFK_ST 0xBFF8 #define R_DCOF0 0xC000 #define B_DCOF0_RST BIT(17) #define B_DCOF0_V GENMASK(4, 1) #define R_DCOF1 0xC004 +#define B_DCOF1_VAL GENMASK(31, 20) #define B_DCOF1_RST BIT(17) #define B_DCOF1_S BIT(0) #define R_DCOF8 0xC020 #define B_DCOF8_V GENMASK(4, 1) #define R_DCOF9 0xC024 +#define B_DCOF9_VAL GENMASK(31, 20) #define B_DCOF9_RST BIT(17) #define R_DACK_S0P0 0xC040 #define B_DACK_S0P0_OK BIT(31) @@ -9095,6 +9138,7 @@ #define R_ADCMOD 0xC0E8 #define B_ADCMOD_LP GENMASK(31, 16) #define R_DCIM 0xC0EC +#define B_DCIM_RC GENMASK(23, 16) #define B_DCIM_FR GENMASK(14, 13) #define R_ADDCK0D 0xC0F0 #define B_ADDCK0D_VAL2 GENMASK(31, 26) @@ -9117,11 +9161,18 @@ #define B_ADDCKR0_DC GENMASK(15, 4) #define B_ADDCKR0_A1 GENMASK(9, 0) #define R_DACK10 0xC100 +#define B_DACK10_RST BIT(17) #define B_DACK10 GENMASK(4, 1) #define R_DACK1_K 0xc104 +#define B_DACK1_VAL GENMASK(31, 20) +#define B_DACK1_RST BIT(17) #define B_DACK1_EN BIT(0) #define R_DACK11 0xC120 #define B_DACK11 GENMASK(4, 1) +#define R_DACK2_K 0xC124 +#define B_DACK2_VAL GENMASK(31, 20) +#define B_DACK2_RST BIT(17) +#define B_DACK2_EN BIT(0) #define R_DACK_S1P0 0xC140 #define B_DACK_S1P0_OK BIT(31) #define R_DACK_BIAS10 0xC148 @@ -9170,6 +9221,11 @@ #define B_DACKN0_V GENMASK(21, 14) #define R_DACKN1_CTL 0xC224 #define B_DACKN1_V GENMASK(21, 14) +#define B_DACKN1_ON BIT(0) +#define R_DACKN2_CTL 0xC238 +#define B_DACKN2_ON BIT(0) +#define R_DACKN3_CTL 0xC24C +#define B_DACKN3_ON BIT(0) #define R_GAIN_MAP0 0xE44C #define B_GAIN_MAP0_EN BIT(0) #define R_GAIN_MAP1 0xE54C diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c index 1a133914f673..a251b0e3b16e 100644 --- a/drivers/net/wireless/realtek/rtw89/regd.c +++ b/drivers/net/wireless/realtek/rtw89/regd.c @@ -714,7 +714,154 @@ exit: mutex_unlock(&rtwdev->mutex); } -static void __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev) +/* Maximum Transmit Power field (@raw) can be EIRP or PSD. + * Both units are 0.5 dB-based. Return a constraint in dB. 
+ */
+static s8 tpe_get_constraint(s8 raw)
+{
+	const u8 hw_deviation = 3; /* unit: 0.5 dB */
+	const u8 antenna_gain = 10; /* unit: 0.5 dB */
+	const u8 array_gain = 6; /* unit: 0.5 dB */
+	const u8 offset = hw_deviation + antenna_gain + array_gain;
+
+	return (raw - offset) / 2;
+}
+
+static void tpe_intersect_constraint(struct rtw89_reg_6ghz_tpe *tpe, s8 cstr)
+{
+	if (tpe->valid) {
+		tpe->constraint = min(tpe->constraint, cstr);
+		return;
+	}
+
+	tpe->constraint = cstr;
+	tpe->valid = true;
+}
+
+static void tpe_deal_with_eirp(struct rtw89_reg_6ghz_tpe *tpe,
+			       const struct ieee80211_parsed_tpe_eirp *eirp)
+{
+	unsigned int i;
+	s8 cstr;
+
+	if (!eirp->valid)
+		return;
+
+	for (i = 0; i < eirp->count; i++) {
+		cstr = tpe_get_constraint(eirp->power[i]);
+		tpe_intersect_constraint(tpe, cstr);
+	}
+}
+
+static s8 tpe_convert_psd_to_eirp(s8 psd)
+{
+	static const unsigned int mlog20 = 1301;
+
+	return psd + 10 * mlog20 / 1000;
+}
+
+static void tpe_deal_with_psd(struct rtw89_reg_6ghz_tpe *tpe,
+			      const struct ieee80211_parsed_tpe_psd *psd)
+{
+	unsigned int i;
+	s8 cstr_psd;
+	s8 cstr;
+
+	if (!psd->valid)
+		return;
+
+	for (i = 0; i < psd->count; i++) {
+		cstr_psd = tpe_get_constraint(psd->power[i]);
+		cstr = tpe_convert_psd_to_eirp(cstr_psd);
+		tpe_intersect_constraint(tpe, cstr);
+	}
+}
+
+static void rtw89_calculate_tpe(struct rtw89_dev *rtwdev,
+				struct rtw89_reg_6ghz_tpe *result_tpe,
+				const struct ieee80211_parsed_tpe *parsed_tpe)
+{
+	static const u8 category = IEEE80211_TPE_CAT_6GHZ_DEFAULT;
+
+	tpe_deal_with_eirp(result_tpe, &parsed_tpe->max_local[category]);
+	tpe_deal_with_eirp(result_tpe, &parsed_tpe->max_reg_client[category]);
+	tpe_deal_with_psd(result_tpe, &parsed_tpe->psd_local[category]);
+	tpe_deal_with_psd(result_tpe, &parsed_tpe->psd_reg_client[category]);
+}
+
+static bool __rtw89_reg_6ghz_tpe_recalc(struct rtw89_dev *rtwdev)
+{
+	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+	struct rtw89_reg_6ghz_tpe new = {};
+	struct rtw89_vif *rtwvif;
+	bool changed = false;
+
+	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+		const struct rtw89_reg_6ghz_tpe *tmp;
+		const struct rtw89_chan *chan;
+
+		chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx);
+		if (chan->band_type != RTW89_BAND_6G)
+			continue;
+
+		tmp = &rtwvif->reg_6ghz_tpe;
+		if (!tmp->valid)
+			continue;
+
+		tpe_intersect_constraint(&new, tmp->constraint);
+	}
+
+	if (memcmp(&regulatory->reg_6ghz_tpe, &new,
+		   sizeof(regulatory->reg_6ghz_tpe)) != 0)
+		changed = true;
+
+	if (changed) {
+		if (new.valid)
+			rtw89_debug(rtwdev, RTW89_DBG_REGD,
+				    "recalc 6 GHz reg TPE to %d dBm\n",
+				    new.constraint);
+		else
+			rtw89_debug(rtwdev, RTW89_DBG_REGD,
+				    "recalc 6 GHz reg TPE to none\n");
+
+		regulatory->reg_6ghz_tpe = new;
+	}
+
+	return changed;
+}
+
+static int rtw89_reg_6ghz_tpe_recalc(struct rtw89_dev *rtwdev,
+				     struct rtw89_vif *rtwvif, bool active,
+				     unsigned int *changed)
+{
+	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+	struct rtw89_reg_6ghz_tpe *tpe = &rtwvif->reg_6ghz_tpe;
+
+	memset(tpe, 0, sizeof(*tpe));
+
+	if (!active || rtwvif->reg_6ghz_power != RTW89_REG_6GHZ_POWER_STD)
+		goto bottom;
+
+	rtw89_calculate_tpe(rtwdev, tpe, &bss_conf->tpe);
+	if (!tpe->valid)
+		goto bottom;
+
+	if (tpe->constraint < RTW89_MIN_VALID_POWER_CONSTRAINT) {
+		rtw89_err(rtwdev,
+			  "%s: constraint %d dBm is less than min valid val\n",
+			  __func__, tpe->constraint);
+
+		tpe->valid = false;
+		return -EINVAL;
+	}
+
+bottom:
+	*changed += __rtw89_reg_6ghz_tpe_recalc(rtwdev);
+
return 0; +} + +static bool __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev) { struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; const struct rtw89_regd *regd = regulatory->regd; @@ -751,23 +898,21 @@ static void __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev) } if (regulatory->reg_6ghz_power == sel) - return; + return false; rtw89_debug(rtwdev, RTW89_DBG_REGD, "recalc 6 GHz reg power type to %d\n", sel); regulatory->reg_6ghz_power = sel; - - rtw89_core_set_chip_txpwr(rtwdev); + return true; } -void rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev, - struct rtw89_vif *rtwvif, bool active) +static int rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, bool active, + unsigned int *changed) { struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); - lockdep_assert_held(&rtwdev->mutex); - if (active) { switch (vif->bss_conf.power_type) { case IEEE80211_REG_VLP_AP: @@ -787,5 +932,32 @@ void rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev, rtwvif->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT; } - __rtw89_reg_6ghz_power_recalc(rtwdev); + *changed += __rtw89_reg_6ghz_power_recalc(rtwdev); + return 0; +} + +int rtw89_reg_6ghz_recalc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + bool active) +{ + unsigned int changed = 0; + int ret; + + lockdep_assert_held(&rtwdev->mutex); + + /* The result of reg_6ghz_tpe may depend on reg_6ghz_power type, + * so must do reg_6ghz_tpe_recalc() after reg_6ghz_power_recalc(). + */ + + ret = rtw89_reg_6ghz_power_recalc(rtwdev, rtwvif, active, &changed); + if (ret) + return ret; + + ret = rtw89_reg_6ghz_tpe_recalc(rtwdev, rtwvif, active, &changed); + if (ret) + return ret; + + if (changed) + rtw89_core_set_chip_txpwr(rtwdev); + + return 0; } diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c index 78e276f321c2..40cf84a79c46 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c @@ -105,6 +105,10 @@ static const u32 rtw8851b_c2h_regs[RTW89_C2HREG_MAX] = { R_AX_C2HREG_DATA3 }; +static const u32 rtw8851b_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = { + R_AX_C2HREG_DATA3 + 3, R_AX_C2HREG_DATA3 + 3, +}; + static const struct rtw89_page_regs rtw8851b_page_regs = { .hci_fc_ctrl = R_AX_HCI_FC_CTRL, .ch_page_ctrl = R_AX_CH_PAGE_CTRL, @@ -2509,7 +2513,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = { .c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8}, .c2h_regs = rtw8851b_c2h_regs, .page_regs = &rtw8851b_page_regs, - .wow_reason_reg = R_AX_C2HREG_DATA3 + 3, + .wow_reason_reg = rtw8851b_wow_wakeup_regs, .cfo_src_fd = true, .cfo_hw_comp = true, .dcfo_comp = &rtw8851b_dcfo_comp, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851be.c b/drivers/net/wireless/realtek/rtw89/rtw8851be.c index ec3629d95fda..d334924faec8 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8851be.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8851be.c @@ -46,6 +46,7 @@ static const struct rtw89_pci_info rtw8851b_pci_info = { .rpwm_addr = R_AX_PCIE_HRPWM, .cpwm_addr = R_AX_CPWM, .mit_addr = R_AX_INT_MIT_RX, + .wp_sel_addr = 0, .tx_dma_ch_mask = BIT(RTW89_TXCH_ACH4) | BIT(RTW89_TXCH_ACH5) | BIT(RTW89_TXCH_ACH6) | BIT(RTW89_TXCH_ACH7) | BIT(RTW89_TXCH_CH10) | BIT(RTW89_TXCH_CH11), diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index aebbceea93f8..08e148328c62 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ 
b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -398,6 +398,10 @@ static const u32 rtw8852a_c2h_regs[RTW89_C2HREG_MAX] = { R_AX_C2HREG_DATA3 }; +static const u32 rtw8852a_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = { + R_AX_C2HREG_DATA3 + 3, R_AX_C2HREG_DATA3 + 3, +}; + static const struct rtw89_page_regs rtw8852a_page_regs = { .hci_fc_ctrl = R_AX_HCI_FC_CTRL, .ch_page_ctrl = R_AX_CH_PAGE_CTRL, @@ -2225,7 +2229,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .c2h_regs = rtw8852a_c2h_regs, .c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8}, .page_regs = &rtw8852a_page_regs, - .wow_reason_reg = R_AX_C2HREG_DATA3 + 3, + .wow_reason_reg = rtw8852a_wow_wakeup_regs, .cfo_src_fd = false, .cfo_hw_comp = false, .dcfo_comp = &rtw8852a_dcfo_comp, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c index fdee5dd4ba14..9a675e2193bc 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c @@ -46,6 +46,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = { .rpwm_addr = R_AX_PCIE_HRPWM, .cpwm_addr = R_AX_CPWM, .mit_addr = R_AX_INT_MIT_RX, + .wp_sel_addr = 0, .tx_dma_ch_mask = 0, .bd_idx_addr_low_power = NULL, .dma_addr_set = &rtw89_pci_ch_dma_addr_set, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c index 517ad1a763fa..a22847a311ad 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c @@ -8,6 +8,7 @@ #include "phy.h" #include "reg.h" #include "rtw8852b.h" +#include "rtw8852b_common.h" #include "rtw8852b_rfk.h" #include "rtw8852b_table.h" #include "txrx.h" @@ -65,167 +66,6 @@ static const struct rtw89_dle_mem rtw8852b_dle_mem_pcie[] = { NULL}, }; -static const struct rtw89_reg3_def rtw8852b_pmac_ht20_mcs7_tbl[] = { - {0x4580, 0x0000ffff, 0x0}, - {0x4580, 0xffff0000, 0x0}, - {0x4584, 0x0000ffff, 0x0}, - {0x4584, 0xffff0000, 0x0}, - {0x4580, 0x0000ffff, 0x1}, - {0x4578, 0x00ffffff, 0x2018b}, - {0x4570, 0x03ffffff, 0x7}, - {0x4574, 0x03ffffff, 0x32407}, - {0x45b8, 0x00000010, 0x0}, - {0x45b8, 0x00000100, 0x0}, - {0x45b8, 0x00000080, 0x0}, - {0x45b8, 0x00000008, 0x0}, - {0x45a0, 0x0000ff00, 0x0}, - {0x45a0, 0xff000000, 0x1}, - {0x45a4, 0x0000ff00, 0x2}, - {0x45a4, 0xff000000, 0x3}, - {0x45b8, 0x00000020, 0x0}, - {0x4568, 0xe0000000, 0x0}, - {0x45b8, 0x00000002, 0x1}, - {0x456c, 0xe0000000, 0x0}, - {0x45b4, 0x00006000, 0x0}, - {0x45b4, 0x00001800, 0x1}, - {0x45b8, 0x00000040, 0x0}, - {0x45b8, 0x00000004, 0x0}, - {0x45b8, 0x00000200, 0x0}, - {0x4598, 0xf8000000, 0x0}, - {0x45b8, 0x00100000, 0x0}, - {0x45a8, 0x00000fc0, 0x0}, - {0x45b8, 0x00200000, 0x0}, - {0x45b0, 0x00000038, 0x0}, - {0x45b0, 0x000001c0, 0x0}, - {0x45a0, 0x000000ff, 0x0}, - {0x45b8, 0x00400000, 0x0}, - {0x4590, 0x000007ff, 0x0}, - {0x45b0, 0x00000e00, 0x0}, - {0x45ac, 0x0000001f, 0x0}, - {0x45b8, 0x00800000, 0x0}, - {0x45a8, 0x0003f000, 0x0}, - {0x45b8, 0x01000000, 0x0}, - {0x45b0, 0x00007000, 0x0}, - {0x45b0, 0x00038000, 0x0}, - {0x45a0, 0x00ff0000, 0x0}, - {0x45b8, 0x02000000, 0x0}, - {0x4590, 0x003ff800, 0x0}, - {0x45b0, 0x001c0000, 0x0}, - {0x45ac, 0x000003e0, 0x0}, - {0x45b8, 0x04000000, 0x0}, - {0x45a8, 0x00fc0000, 0x0}, - {0x45b8, 0x08000000, 0x0}, - {0x45b0, 0x00e00000, 0x0}, - {0x45b0, 0x07000000, 0x0}, - {0x45a4, 0x000000ff, 0x0}, - {0x45b8, 0x10000000, 0x0}, - {0x4594, 0x000007ff, 0x0}, - {0x45b0, 0x38000000, 0x0}, - {0x45ac, 0x00007c00, 0x0}, - {0x45b8, 0x20000000, 0x0}, 
- {0x45a8, 0x3f000000, 0x0}, - {0x45b8, 0x40000000, 0x0}, - {0x45b4, 0x00000007, 0x0}, - {0x45b4, 0x00000038, 0x0}, - {0x45a4, 0x00ff0000, 0x0}, - {0x45b8, 0x80000000, 0x0}, - {0x4594, 0x003ff800, 0x0}, - {0x45b4, 0x000001c0, 0x0}, - {0x4598, 0xf8000000, 0x0}, - {0x45b8, 0x00100000, 0x0}, - {0x45a8, 0x00000fc0, 0x7}, - {0x45b8, 0x00200000, 0x0}, - {0x45b0, 0x00000038, 0x0}, - {0x45b0, 0x000001c0, 0x0}, - {0x45a0, 0x000000ff, 0x0}, - {0x45b4, 0x06000000, 0x0}, - {0x45b0, 0x00000007, 0x0}, - {0x45b8, 0x00080000, 0x0}, - {0x45a8, 0x0000003f, 0x0}, - {0x457c, 0xffe00000, 0x1}, - {0x4530, 0xffffffff, 0x0}, - {0x4588, 0x00003fff, 0x0}, - {0x4598, 0x000001ff, 0x0}, - {0x4534, 0xffffffff, 0x0}, - {0x4538, 0xffffffff, 0x0}, - {0x453c, 0xffffffff, 0x0}, - {0x4588, 0x0fffc000, 0x0}, - {0x4598, 0x0003fe00, 0x0}, - {0x4540, 0xffffffff, 0x0}, - {0x4544, 0xffffffff, 0x0}, - {0x4548, 0xffffffff, 0x0}, - {0x458c, 0x00003fff, 0x0}, - {0x4598, 0x07fc0000, 0x0}, - {0x454c, 0xffffffff, 0x0}, - {0x4550, 0xffffffff, 0x0}, - {0x4554, 0xffffffff, 0x0}, - {0x458c, 0x0fffc000, 0x0}, - {0x459c, 0x000001ff, 0x0}, - {0x4558, 0xffffffff, 0x0}, - {0x455c, 0xffffffff, 0x0}, - {0x4530, 0xffffffff, 0x4e790001}, - {0x4588, 0x00003fff, 0x0}, - {0x4598, 0x000001ff, 0x1}, - {0x4534, 0xffffffff, 0x0}, - {0x4538, 0xffffffff, 0x4b}, - {0x45ac, 0x38000000, 0x7}, - {0x4588, 0xf0000000, 0x0}, - {0x459c, 0x7e000000, 0x0}, - {0x45b8, 0x00040000, 0x0}, - {0x45b8, 0x00020000, 0x0}, - {0x4590, 0xffc00000, 0x0}, - {0x45b8, 0x00004000, 0x0}, - {0x4578, 0xff000000, 0x0}, - {0x45b8, 0x00000400, 0x0}, - {0x45b8, 0x00000800, 0x0}, - {0x45b8, 0x00001000, 0x0}, - {0x45b8, 0x00002000, 0x0}, - {0x45b4, 0x00018000, 0x0}, - {0x45ac, 0x07800000, 0x0}, - {0x45b4, 0x00000600, 0x2}, - {0x459c, 0x0001fe00, 0x80}, - {0x45ac, 0x00078000, 0x3}, - {0x459c, 0x01fe0000, 0x1}, -}; - -static const struct rtw89_reg3_def rtw8852b_btc_preagc_en_defs[] = { - {0x46D0, GENMASK(1, 0), 0x3}, - {0x4790, GENMASK(1, 0), 0x3}, - {0x4AD4, GENMASK(31, 0), 0xf}, - {0x4AE0, GENMASK(31, 0), 0xf}, - {0x4688, GENMASK(31, 24), 0x80}, - {0x476C, GENMASK(31, 24), 0x80}, - {0x4694, GENMASK(7, 0), 0x80}, - {0x4694, GENMASK(15, 8), 0x80}, - {0x4778, GENMASK(7, 0), 0x80}, - {0x4778, GENMASK(15, 8), 0x80}, - {0x4AE4, GENMASK(23, 0), 0x780D1E}, - {0x4AEC, GENMASK(23, 0), 0x780D1E}, - {0x469C, GENMASK(31, 26), 0x34}, - {0x49F0, GENMASK(31, 26), 0x34}, -}; - -static DECLARE_PHY_REG3_TBL(rtw8852b_btc_preagc_en_defs); - -static const struct rtw89_reg3_def rtw8852b_btc_preagc_dis_defs[] = { - {0x46D0, GENMASK(1, 0), 0x0}, - {0x4790, GENMASK(1, 0), 0x0}, - {0x4AD4, GENMASK(31, 0), 0x60}, - {0x4AE0, GENMASK(31, 0), 0x60}, - {0x4688, GENMASK(31, 24), 0x1a}, - {0x476C, GENMASK(31, 24), 0x1a}, - {0x4694, GENMASK(7, 0), 0x2a}, - {0x4694, GENMASK(15, 8), 0x2a}, - {0x4778, GENMASK(7, 0), 0x2a}, - {0x4778, GENMASK(15, 8), 0x2a}, - {0x4AE4, GENMASK(23, 0), 0x79E99E}, - {0x4AEC, GENMASK(23, 0), 0x79E99E}, - {0x469C, GENMASK(31, 26), 0x26}, - {0x49F0, GENMASK(31, 26), 0x26}, -}; - -static DECLARE_PHY_REG3_TBL(rtw8852b_btc_preagc_dis_defs); - static const u32 rtw8852b_h2c_regs[RTW89_H2CREG_MAX] = { R_AX_H2CREG_DATA0, R_AX_H2CREG_DATA1, R_AX_H2CREG_DATA2, R_AX_H2CREG_DATA3 @@ -236,6 +76,10 @@ static const u32 rtw8852b_c2h_regs[RTW89_C2HREG_MAX] = { R_AX_C2HREG_DATA3 }; +static const u32 rtw8852b_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = { + R_AX_C2HREG_DATA3 + 3, R_AX_C2HREG_DATA3 + 3, +}; + static const struct rtw89_page_regs rtw8852b_page_regs = { .hci_fc_ctrl = R_AX_HCI_FC_CTRL, .ch_page_ctrl = 
R_AX_CH_PAGE_CTRL, @@ -591,806 +435,6 @@ static int rtw8852b_pwr_off_func(struct rtw89_dev *rtwdev) return 0; } -static void rtw8852be_efuse_parsing(struct rtw89_efuse *efuse, - struct rtw8852b_efuse *map) -{ - ether_addr_copy(efuse->addr, map->e.mac_addr); - efuse->rfe_type = map->rfe_type; - efuse->xtal_cap = map->xtal_k; -} - -static void rtw8852b_efuse_parsing_tssi(struct rtw89_dev *rtwdev, - struct rtw8852b_efuse *map) -{ - struct rtw89_tssi_info *tssi = &rtwdev->tssi; - struct rtw8852b_tssi_offset *ofst[] = {&map->path_a_tssi, &map->path_b_tssi}; - u8 i, j; - - tssi->thermal[RF_PATH_A] = map->path_a_therm; - tssi->thermal[RF_PATH_B] = map->path_b_therm; - - for (i = 0; i < RF_PATH_NUM_8852B; i++) { - memcpy(tssi->tssi_cck[i], ofst[i]->cck_tssi, - sizeof(ofst[i]->cck_tssi)); - - for (j = 0; j < TSSI_CCK_CH_GROUP_NUM; j++) - rtw89_debug(rtwdev, RTW89_DBG_TSSI, - "[TSSI][EFUSE] path=%d cck[%d]=0x%x\n", - i, j, tssi->tssi_cck[i][j]); - - memcpy(tssi->tssi_mcs[i], ofst[i]->bw40_tssi, - sizeof(ofst[i]->bw40_tssi)); - memcpy(tssi->tssi_mcs[i] + TSSI_MCS_2G_CH_GROUP_NUM, - ofst[i]->bw40_1s_tssi_5g, sizeof(ofst[i]->bw40_1s_tssi_5g)); - - for (j = 0; j < TSSI_MCS_CH_GROUP_NUM; j++) - rtw89_debug(rtwdev, RTW89_DBG_TSSI, - "[TSSI][EFUSE] path=%d mcs[%d]=0x%x\n", - i, j, tssi->tssi_mcs[i][j]); - } -} - -static bool _decode_efuse_gain(u8 data, s8 *high, s8 *low) -{ - if (high) - *high = sign_extend32(FIELD_GET(GENMASK(7, 4), data), 3); - if (low) - *low = sign_extend32(FIELD_GET(GENMASK(3, 0), data), 3); - - return data != 0xff; -} - -static void rtw8852b_efuse_parsing_gain_offset(struct rtw89_dev *rtwdev, - struct rtw8852b_efuse *map) -{ - struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain; - bool valid = false; - - valid |= _decode_efuse_gain(map->rx_gain_2g_cck, - &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_CCK], - &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_CCK]); - valid |= _decode_efuse_gain(map->rx_gain_2g_ofdm, - &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_OFDM], - &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_OFDM]); - valid |= _decode_efuse_gain(map->rx_gain_5g_low, - &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_LOW], - &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_LOW]); - valid |= _decode_efuse_gain(map->rx_gain_5g_mid, - &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_MID], - &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_MID]); - valid |= _decode_efuse_gain(map->rx_gain_5g_high, - &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_HIGH], - &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_HIGH]); - - gain->offset_valid = valid; -} - -static int rtw8852b_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map, - enum rtw89_efuse_block block) -{ - struct rtw89_efuse *efuse = &rtwdev->efuse; - struct rtw8852b_efuse *map; - - map = (struct rtw8852b_efuse *)log_map; - - efuse->country_code[0] = map->country_code[0]; - efuse->country_code[1] = map->country_code[1]; - rtw8852b_efuse_parsing_tssi(rtwdev, map); - rtw8852b_efuse_parsing_gain_offset(rtwdev, map); - - switch (rtwdev->hci.type) { - case RTW89_HCI_TYPE_PCIE: - rtw8852be_efuse_parsing(efuse, map); - break; - default: - return -EOPNOTSUPP; - } - - rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type); - - return 0; -} - -static void rtw8852b_phycap_parsing_power_cal(struct rtw89_dev *rtwdev, u8 *phycap_map) -{ -#define PWR_K_CHK_OFFSET 0x5E9 -#define PWR_K_CHK_VALUE 0xAA - u32 offset = PWR_K_CHK_OFFSET - rtwdev->chip->phycap_addr; - - if (phycap_map[offset] == PWR_K_CHK_VALUE) - rtwdev->efuse.power_k_valid = true; -} - -static void 
rtw8852b_phycap_parsing_tssi(struct rtw89_dev *rtwdev, u8 *phycap_map) -{ - struct rtw89_tssi_info *tssi = &rtwdev->tssi; - static const u32 tssi_trim_addr[RF_PATH_NUM_8852B] = {0x5D6, 0x5AB}; - u32 addr = rtwdev->chip->phycap_addr; - bool pg = false; - u32 ofst; - u8 i, j; - - for (i = 0; i < RF_PATH_NUM_8852B; i++) { - for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++) { - /* addrs are in decreasing order */ - ofst = tssi_trim_addr[i] - addr - j; - tssi->tssi_trim[i][j] = phycap_map[ofst]; - - if (phycap_map[ofst] != 0xff) - pg = true; - } - } - - if (!pg) { - memset(tssi->tssi_trim, 0, sizeof(tssi->tssi_trim)); - rtw89_debug(rtwdev, RTW89_DBG_TSSI, - "[TSSI][TRIM] no PG, set all trim info to 0\n"); - } - - for (i = 0; i < RF_PATH_NUM_8852B; i++) - for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++) - rtw89_debug(rtwdev, RTW89_DBG_TSSI, - "[TSSI] path=%d idx=%d trim=0x%x addr=0x%x\n", - i, j, tssi->tssi_trim[i][j], - tssi_trim_addr[i] - j); -} - -static void rtw8852b_phycap_parsing_thermal_trim(struct rtw89_dev *rtwdev, - u8 *phycap_map) -{ - struct rtw89_power_trim_info *info = &rtwdev->pwr_trim; - static const u32 thm_trim_addr[RF_PATH_NUM_8852B] = {0x5DF, 0x5DC}; - u32 addr = rtwdev->chip->phycap_addr; - u8 i; - - for (i = 0; i < RF_PATH_NUM_8852B; i++) { - info->thermal_trim[i] = phycap_map[thm_trim_addr[i] - addr]; - - rtw89_debug(rtwdev, RTW89_DBG_RFK, - "[THERMAL][TRIM] path=%d thermal_trim=0x%x\n", - i, info->thermal_trim[i]); - - if (info->thermal_trim[i] != 0xff) - info->pg_thermal_trim = true; - } -} - -static void rtw8852b_thermal_trim(struct rtw89_dev *rtwdev) -{ -#define __thm_setting(raw) \ -({ \ - u8 __v = (raw); \ - ((__v & 0x1) << 3) | ((__v & 0x1f) >> 1); \ -}) - struct rtw89_power_trim_info *info = &rtwdev->pwr_trim; - u8 i, val; - - if (!info->pg_thermal_trim) { - rtw89_debug(rtwdev, RTW89_DBG_RFK, - "[THERMAL][TRIM] no PG, do nothing\n"); - - return; - } - - for (i = 0; i < RF_PATH_NUM_8852B; i++) { - val = __thm_setting(info->thermal_trim[i]); - rtw89_write_rf(rtwdev, i, RR_TM2, RR_TM2_OFF, val); - - rtw89_debug(rtwdev, RTW89_DBG_RFK, - "[THERMAL][TRIM] path=%d thermal_setting=0x%x\n", - i, val); - } -#undef __thm_setting -} - -static void rtw8852b_phycap_parsing_pa_bias_trim(struct rtw89_dev *rtwdev, - u8 *phycap_map) -{ - struct rtw89_power_trim_info *info = &rtwdev->pwr_trim; - static const u32 pabias_trim_addr[RF_PATH_NUM_8852B] = {0x5DE, 0x5DB}; - u32 addr = rtwdev->chip->phycap_addr; - u8 i; - - for (i = 0; i < RF_PATH_NUM_8852B; i++) { - info->pa_bias_trim[i] = phycap_map[pabias_trim_addr[i] - addr]; - - rtw89_debug(rtwdev, RTW89_DBG_RFK, - "[PA_BIAS][TRIM] path=%d pa_bias_trim=0x%x\n", - i, info->pa_bias_trim[i]); - - if (info->pa_bias_trim[i] != 0xff) - info->pg_pa_bias_trim = true; - } -} - -static void rtw8852b_pa_bias_trim(struct rtw89_dev *rtwdev) -{ - struct rtw89_power_trim_info *info = &rtwdev->pwr_trim; - u8 pabias_2g, pabias_5g; - u8 i; - - if (!info->pg_pa_bias_trim) { - rtw89_debug(rtwdev, RTW89_DBG_RFK, - "[PA_BIAS][TRIM] no PG, do nothing\n"); - - return; - } - - for (i = 0; i < RF_PATH_NUM_8852B; i++) { - pabias_2g = FIELD_GET(GENMASK(3, 0), info->pa_bias_trim[i]); - pabias_5g = FIELD_GET(GENMASK(7, 4), info->pa_bias_trim[i]); - - rtw89_debug(rtwdev, RTW89_DBG_RFK, - "[PA_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n", - i, pabias_2g, pabias_5g); - - rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXG, pabias_2g); - rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXA, pabias_5g); - } -} - -static void rtw8852b_phycap_parsing_gain_comp(struct rtw89_dev *rtwdev, u8 
*phycap_map) -{ - static const u32 comp_addrs[][RTW89_SUBBAND_2GHZ_5GHZ_NR] = { - {0x5BB, 0x5BA, 0, 0x5B9, 0x5B8}, - {0x590, 0x58F, 0, 0x58E, 0x58D}, - }; - struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain; - u32 phycap_addr = rtwdev->chip->phycap_addr; - bool valid = false; - int path, i; - u8 data; - - for (path = 0; path < 2; path++) - for (i = 0; i < RTW89_SUBBAND_2GHZ_5GHZ_NR; i++) { - if (comp_addrs[path][i] == 0) - continue; - - data = phycap_map[comp_addrs[path][i] - phycap_addr]; - valid |= _decode_efuse_gain(data, NULL, - &gain->comp[path][i]); - } - - gain->comp_valid = valid; -} - -static int rtw8852b_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map) -{ - rtw8852b_phycap_parsing_power_cal(rtwdev, phycap_map); - rtw8852b_phycap_parsing_tssi(rtwdev, phycap_map); - rtw8852b_phycap_parsing_thermal_trim(rtwdev, phycap_map); - rtw8852b_phycap_parsing_pa_bias_trim(rtwdev, phycap_map); - rtw8852b_phycap_parsing_gain_comp(rtwdev, phycap_map); - - return 0; -} - -static void rtw8852b_power_trim(struct rtw89_dev *rtwdev) -{ - rtw8852b_thermal_trim(rtwdev); - rtw8852b_pa_bias_trim(rtwdev); -} - -static void rtw8852b_set_channel_mac(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan, - u8 mac_idx) -{ - u32 rf_mod = rtw89_mac_reg_by_idx(rtwdev, R_AX_WMAC_RFMOD, mac_idx); - u32 sub_carr = rtw89_mac_reg_by_idx(rtwdev, R_AX_TX_SUB_CARRIER_VALUE, mac_idx); - u32 chk_rate = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXRATE_CHK, mac_idx); - u8 txsc20 = 0, txsc40 = 0; - - switch (chan->band_width) { - case RTW89_CHANNEL_WIDTH_80: - txsc40 = rtw89_phy_get_txsc(rtwdev, chan, RTW89_CHANNEL_WIDTH_40); - fallthrough; - case RTW89_CHANNEL_WIDTH_40: - txsc20 = rtw89_phy_get_txsc(rtwdev, chan, RTW89_CHANNEL_WIDTH_20); - break; - default: - break; - } - - switch (chan->band_width) { - case RTW89_CHANNEL_WIDTH_80: - rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(1)); - rtw89_write32(rtwdev, sub_carr, txsc20 | (txsc40 << 4)); - break; - case RTW89_CHANNEL_WIDTH_40: - rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(0)); - rtw89_write32(rtwdev, sub_carr, txsc20); - break; - case RTW89_CHANNEL_WIDTH_20: - rtw89_write8_clr(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK); - rtw89_write32(rtwdev, sub_carr, 0); - break; - default: - break; - } - - if (chan->channel > 14) { - rtw89_write8_clr(rtwdev, chk_rate, B_AX_BAND_MODE); - rtw89_write8_set(rtwdev, chk_rate, - B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6); - } else { - rtw89_write8_set(rtwdev, chk_rate, B_AX_BAND_MODE); - rtw89_write8_clr(rtwdev, chk_rate, - B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6); - } -} - -static const u32 rtw8852b_sco_barker_threshold[14] = { - 0x1cfea, 0x1d0e1, 0x1d1d7, 0x1d2cd, 0x1d3c3, 0x1d4b9, 0x1d5b0, 0x1d6a6, - 0x1d79c, 0x1d892, 0x1d988, 0x1da7f, 0x1db75, 0x1ddc4 -}; - -static const u32 rtw8852b_sco_cck_threshold[14] = { - 0x27de3, 0x27f35, 0x28088, 0x281da, 0x2832d, 0x2847f, 0x285d2, 0x28724, - 0x28877, 0x289c9, 0x28b1c, 0x28c6e, 0x28dc1, 0x290ed -}; - -static void rtw8852b_ctrl_sco_cck(struct rtw89_dev *rtwdev, u8 primary_ch) -{ - u8 ch_element = primary_ch - 1; - - rtw89_phy_write32_mask(rtwdev, R_RXSCOBC, B_RXSCOBC_TH, - rtw8852b_sco_barker_threshold[ch_element]); - rtw89_phy_write32_mask(rtwdev, R_RXSCOCCK, B_RXSCOCCK_TH, - rtw8852b_sco_cck_threshold[ch_element]); -} - -static u8 rtw8852b_sco_mapping(u8 central_ch) -{ - if (central_ch == 1) - return 109; - else if (central_ch >= 2 && central_ch <= 6) - return 108; - else if (central_ch >= 7 && central_ch <= 10) - return 107; - else if (central_ch >= 11 
-		return 106;
-	else if (central_ch == 36 || central_ch == 38)
-		return 51;
-	else if (central_ch >= 40 && central_ch <= 58)
-		return 50;
-	else if (central_ch >= 60 && central_ch <= 64)
-		return 49;
-	else if (central_ch == 100 || central_ch == 102)
-		return 48;
-	else if (central_ch >= 104 && central_ch <= 126)
-		return 47;
-	else if (central_ch >= 128 && central_ch <= 151)
-		return 46;
-	else if (central_ch >= 153 && central_ch <= 177)
-		return 45;
-	else
-		return 0;
-}
-
-struct rtw8852b_bb_gain {
-	u32 gain_g[BB_PATH_NUM_8852B];
-	u32 gain_a[BB_PATH_NUM_8852B];
-	u32 gain_mask;
-};
-
-static const struct rtw8852b_bb_gain bb_gain_lna[LNA_GAIN_NUM] = {
-	{ .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740},
-	  .gain_mask = 0x00ff0000 },
-	{ .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740},
-	  .gain_mask = 0xff000000 },
-	{ .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
-	  .gain_mask = 0x000000ff },
-	{ .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
-	  .gain_mask = 0x0000ff00 },
-	{ .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
-	  .gain_mask = 0x00ff0000 },
-	{ .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
-	  .gain_mask = 0xff000000 },
-	{ .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
-	  .gain_mask = 0x000000ff },
-};
-
-static const struct rtw8852b_bb_gain bb_gain_tia[TIA_GAIN_NUM] = {
-	{ .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
-	  .gain_mask = 0x00ff0000 },
-	{ .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
-	  .gain_mask = 0xff000000 },
-};
-
-static void rtw8852b_set_gain_error(struct rtw89_dev *rtwdev,
-				    enum rtw89_subband subband,
-				    enum rtw89_rf_path path)
-{
-	const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
-	u8 gain_band = rtw89_subband_to_bb_gain_band(subband);
-	s32 val;
-	u32 reg;
-	u32 mask;
-	int i;
-
-	for (i = 0; i < LNA_GAIN_NUM; i++) {
-		if (subband == RTW89_CH_2G)
-			reg = bb_gain_lna[i].gain_g[path];
-		else
-			reg = bb_gain_lna[i].gain_a[path];
-
-		mask = bb_gain_lna[i].gain_mask;
-		val = gain->lna_gain[gain_band][path][i];
-		rtw89_phy_write32_mask(rtwdev, reg, mask, val);
-	}
-
-	for (i = 0; i < TIA_GAIN_NUM; i++) {
-		if (subband == RTW89_CH_2G)
-			reg = bb_gain_tia[i].gain_g[path];
-		else
-			reg = bb_gain_tia[i].gain_a[path];
-
-		mask = bb_gain_tia[i].gain_mask;
-		val = gain->tia_gain[gain_band][path][i];
-		rtw89_phy_write32_mask(rtwdev, reg, mask, val);
-	}
-}
-
-static void rtw8852b_set_gain_offset(struct rtw89_dev *rtwdev,
-				     enum rtw89_subband subband,
-				     enum rtw89_phy_idx phy_idx)
-{
-	static const u32 gain_err_addr[2] = {R_P0_AGC_RSVD, R_P1_AGC_RSVD};
-	static const u32 rssi_ofst_addr[2] = {R_PATH0_G_TIA1_LNA6_OP1DB_V1,
-					      R_PATH1_G_TIA1_LNA6_OP1DB_V1};
-	struct rtw89_hal *hal = &rtwdev->hal;
-	struct rtw89_phy_efuse_gain *efuse_gain = &rtwdev->efuse_gain;
-	enum rtw89_gain_offset gain_ofdm_band;
-	s32 offset_a, offset_b;
-	s32 offset_ofdm, offset_cck;
-	s32 tmp;
-	u8 path;
-
-	if (!efuse_gain->comp_valid)
-		goto next;
-
-	for (path = RF_PATH_A; path < BB_PATH_NUM_8852B; path++) {
-		tmp = efuse_gain->comp[path][subband];
-		tmp = clamp_t(s32, tmp << 2, S8_MIN, S8_MAX);
-		rtw89_phy_write32_mask(rtwdev, gain_err_addr[path], MASKBYTE0, tmp);
-	}
-
-next:
-	if (!efuse_gain->offset_valid)
-		return;
-
-	gain_ofdm_band = rtw89_subband_to_gain_offset_band_of_ofdm(subband);
-
-	offset_a = -efuse_gain->offset[RF_PATH_A][gain_ofdm_band];
-	offset_b = -efuse_gain->offset[RF_PATH_B][gain_ofdm_band];
-
-	tmp = -((offset_a << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >>
2)); - tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX); - rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[RF_PATH_A], B_PATH0_R_G_OFST_MASK, tmp); - - tmp = -((offset_b << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2)); - tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX); - rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[RF_PATH_B], B_PATH0_R_G_OFST_MASK, tmp); - - if (hal->antenna_rx == RF_B) { - offset_ofdm = -efuse_gain->offset[RF_PATH_B][gain_ofdm_band]; - offset_cck = -efuse_gain->offset[RF_PATH_B][0]; - } else { - offset_ofdm = -efuse_gain->offset[RF_PATH_A][gain_ofdm_band]; - offset_cck = -efuse_gain->offset[RF_PATH_A][0]; - } - - tmp = (offset_ofdm << 4) + efuse_gain->offset_base[RTW89_PHY_0]; - tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX); - rtw89_phy_write32_idx(rtwdev, R_P0_RPL1, B_P0_RPL1_BIAS_MASK, tmp, phy_idx); - - tmp = (offset_ofdm << 4) + efuse_gain->rssi_base[RTW89_PHY_0]; - tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX); - rtw89_phy_write32_idx(rtwdev, R_P1_RPL1, B_P0_RPL1_BIAS_MASK, tmp, phy_idx); - - if (subband == RTW89_CH_2G) { - tmp = (offset_cck << 3) + (efuse_gain->offset_base[RTW89_PHY_0] >> 1); - tmp = clamp_t(s32, tmp, S8_MIN >> 1, S8_MAX >> 1); - rtw89_phy_write32_mask(rtwdev, R_RX_RPL_OFST, - B_RX_RPL_OFST_CCK_MASK, tmp); - } -} - -static -void rtw8852b_set_rxsc_rpl_comp(struct rtw89_dev *rtwdev, enum rtw89_subband subband) -{ - const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax; - u8 band = rtw89_subband_to_bb_gain_band(subband); - u32 val; - - val = FIELD_PREP(B_P0_RPL1_20_MASK, (gain->rpl_ofst_20[band][RF_PATH_A] + - gain->rpl_ofst_20[band][RF_PATH_B]) / 2) | - FIELD_PREP(B_P0_RPL1_40_MASK, (gain->rpl_ofst_40[band][RF_PATH_A][0] + - gain->rpl_ofst_40[band][RF_PATH_B][0]) / 2) | - FIELD_PREP(B_P0_RPL1_41_MASK, (gain->rpl_ofst_40[band][RF_PATH_A][1] + - gain->rpl_ofst_40[band][RF_PATH_B][1]) / 2); - val >>= B_P0_RPL1_SHIFT; - rtw89_phy_write32_mask(rtwdev, R_P0_RPL1, B_P0_RPL1_MASK, val); - rtw89_phy_write32_mask(rtwdev, R_P1_RPL1, B_P0_RPL1_MASK, val); - - val = FIELD_PREP(B_P0_RTL2_42_MASK, (gain->rpl_ofst_40[band][RF_PATH_A][2] + - gain->rpl_ofst_40[band][RF_PATH_B][2]) / 2) | - FIELD_PREP(B_P0_RTL2_80_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][0] + - gain->rpl_ofst_80[band][RF_PATH_B][0]) / 2) | - FIELD_PREP(B_P0_RTL2_81_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][1] + - gain->rpl_ofst_80[band][RF_PATH_B][1]) / 2) | - FIELD_PREP(B_P0_RTL2_8A_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][10] + - gain->rpl_ofst_80[band][RF_PATH_B][10]) / 2); - rtw89_phy_write32(rtwdev, R_P0_RPL2, val); - rtw89_phy_write32(rtwdev, R_P1_RPL2, val); - - val = FIELD_PREP(B_P0_RTL3_82_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][2] + - gain->rpl_ofst_80[band][RF_PATH_B][2]) / 2) | - FIELD_PREP(B_P0_RTL3_83_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][3] + - gain->rpl_ofst_80[band][RF_PATH_B][3]) / 2) | - FIELD_PREP(B_P0_RTL3_84_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][4] + - gain->rpl_ofst_80[band][RF_PATH_B][4]) / 2) | - FIELD_PREP(B_P0_RTL3_89_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][9] + - gain->rpl_ofst_80[band][RF_PATH_B][9]) / 2); - rtw89_phy_write32(rtwdev, R_P0_RPL3, val); - rtw89_phy_write32(rtwdev, R_P1_RPL3, val); -} - -static void rtw8852b_ctrl_ch(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx) -{ - u8 central_ch = chan->channel; - u8 subband = chan->subband_type; - u8 sco_comp; - bool is_2g = central_ch <= 14; - - /* Path A */ - if (is_2g) - rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1, - B_PATH0_BAND_SEL_MSK_V1, 1, phy_idx); - else - 
rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1, - B_PATH0_BAND_SEL_MSK_V1, 0, phy_idx); - - /* Path B */ - if (is_2g) - rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1, - B_PATH1_BAND_SEL_MSK_V1, 1, phy_idx); - else - rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1, - B_PATH1_BAND_SEL_MSK_V1, 0, phy_idx); - - /* SCO compensate FC setting */ - sco_comp = rtw8852b_sco_mapping(central_ch); - rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_INV, sco_comp, phy_idx); - - if (chan->band_type == RTW89_BAND_6G) - return; - - /* CCK parameters */ - if (central_ch == 14) { - rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01, 0x3b13ff); - rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23, 0x1c42de); - rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45, 0xfdb0ad); - rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67, 0xf60f6e); - rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89, 0xfd8f92); - rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB, 0x2d011); - rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD, 0x1c02c); - rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF, 0xfff00a); - } else { - rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01, 0x3d23ff); - rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23, 0x29b354); - rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45, 0xfc1c8); - rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67, 0xfdb053); - rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89, 0xf86f9a); - rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB, 0xfaef92); - rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD, 0xfe5fcc); - rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF, 0xffdff5); - } - - rtw8852b_set_gain_error(rtwdev, subband, RF_PATH_A); - rtw8852b_set_gain_error(rtwdev, subband, RF_PATH_B); - rtw8852b_set_gain_offset(rtwdev, subband, phy_idx); - rtw8852b_set_rxsc_rpl_comp(rtwdev, subband); -} - -static void rtw8852b_bw_setting(struct rtw89_dev *rtwdev, u8 bw, u8 path) -{ - static const u32 adc_sel[2] = {0xC0EC, 0xC1EC}; - static const u32 wbadc_sel[2] = {0xC0E4, 0xC1E4}; - - switch (bw) { - case RTW89_CHANNEL_WIDTH_5: - rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x1); - rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x0); - break; - case RTW89_CHANNEL_WIDTH_10: - rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x2); - rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x1); - break; - case RTW89_CHANNEL_WIDTH_20: - rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0); - rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2); - break; - case RTW89_CHANNEL_WIDTH_40: - rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0); - rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2); - break; - case RTW89_CHANNEL_WIDTH_80: - rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0); - rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2); - break; - default: - rtw89_warn(rtwdev, "Fail to set ADC\n"); - } -} - -static void rtw8852b_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_ch, u8 bw, - enum rtw89_phy_idx phy_idx) -{ - u32 rx_path_0; - - switch (bw) { - case RTW89_CHANNEL_WIDTH_5: - rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x1, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx); - - /*Set RF mode at 3 */ - rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, - B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, - 
B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx); - break; - case RTW89_CHANNEL_WIDTH_10: - rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x2, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx); - - /*Set RF mode at 3 */ - rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, - B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, - B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx); - break; - case RTW89_CHANNEL_WIDTH_20: - rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx); - - /*Set RF mode at 3 */ - rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, - B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, - B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx); - break; - case RTW89_CHANNEL_WIDTH_40: - rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x1, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, - pri_ch, phy_idx); - - /*Set RF mode at 3 */ - rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, - B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, - B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx); - /*CCK primary channel */ - if (pri_ch == RTW89_SC_20_UPPER) - rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 1); - else - rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 0); - - break; - case RTW89_CHANNEL_WIDTH_80: - rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x2, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, - pri_ch, phy_idx); - - /*Set RF mode at A */ - rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, - B_P0_RFMODE_ORI_RX_ALL, 0xaaa, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, - B_P1_RFMODE_ORI_RX_ALL, 0xaaa, phy_idx); - break; - default: - rtw89_warn(rtwdev, "Fail to switch bw (bw:%d, pri ch:%d)\n", bw, - pri_ch); - } - - rtw8852b_bw_setting(rtwdev, bw, RF_PATH_A); - rtw8852b_bw_setting(rtwdev, bw, RF_PATH_B); - - rx_path_0 = rtw89_phy_read32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, - phy_idx); - if (rx_path_0 == 0x1) - rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, - B_P1_RFMODE_ORI_RX_ALL, 0x111, phy_idx); - else if (rx_path_0 == 0x2) - rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, - B_P0_RFMODE_ORI_RX_ALL, 0x111, phy_idx); -} - -static void rtw8852b_ctrl_cck_en(struct rtw89_dev *rtwdev, bool cck_en) -{ - if (cck_en) { - rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 1); - rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0); - } else { - rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 0); - rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 1); - } -} - -static void rtw8852b_5m_mask(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx) -{ - u8 pri_ch = chan->pri_ch_idx; - bool mask_5m_low; - bool mask_5m_en; - - switch (chan->band_width) { - case RTW89_CHANNEL_WIDTH_40: - /* Prich=1: Mask 5M High, Prich=2: Mask 5M Low */ - mask_5m_en = true; - mask_5m_low = pri_ch == RTW89_SC_20_LOWER; - break; - case RTW89_CHANNEL_WIDTH_80: - /* Prich=3: Mask 5M High, Prich=4: Mask 5M Low, Else: Disable 
*/ - mask_5m_en = pri_ch == RTW89_SC_20_UPMOST || - pri_ch == RTW89_SC_20_LOWEST; - mask_5m_low = pri_ch == RTW89_SC_20_LOWEST; - break; - default: - mask_5m_en = false; - break; - } - - if (!mask_5m_en) { - rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x0); - rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT_V1, - B_ASSIGN_SBD_OPT_EN_V1, 0x0, phy_idx); - return; - } - - if (mask_5m_low) { - rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_TH, 0x4); - rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x1); - rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB2, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB0, 0x1); - rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_TH, 0x4); - rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x1); - rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB2, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB0, 0x1); - } else { - rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_TH, 0x4); - rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x1); - rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB2, 0x1); - rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB0, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_TH, 0x4); - rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x1); - rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB2, 0x1); - rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB0, 0x0); - } - rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT_V1, - B_ASSIGN_SBD_OPT_EN_V1, 0x1, phy_idx); -} - -static void rtw8852b_bb_reset_all(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) -{ - rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx); - fsleep(1); - rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx); -} - static void rtw8852b_bb_reset_en(struct rtw89_dev *rtwdev, enum rtw89_band band, enum rtw89_phy_idx phy_idx, bool en) { @@ -1422,87 +466,20 @@ static void rtw8852b_bb_reset(struct rtw89_dev *rtwdev, rtw89_phy_write32_set(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN); rtw89_phy_write32_set(rtwdev, R_P1_TXPW_RSTB, B_P1_TXPW_RSTB_MANON); rtw89_phy_write32_set(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN); - rtw8852b_bb_reset_all(rtwdev, phy_idx); + rtw8852bx_bb_reset_all(rtwdev, phy_idx); rtw89_phy_write32_clr(rtwdev, R_P0_TXPW_RSTB, B_P0_TXPW_RSTB_MANON); rtw89_phy_write32_clr(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN); rtw89_phy_write32_clr(rtwdev, R_P1_TXPW_RSTB, B_P1_TXPW_RSTB_MANON); rtw89_phy_write32_clr(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN); } -static void rtw8852b_bb_macid_ctrl_init(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx) -{ - u32 addr; - - for (addr = R_AX_PWR_MACID_LMT_TABLE0; - addr <= R_AX_PWR_MACID_LMT_TABLE127; addr += 4) - rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, 0); -} - -static void rtw8852b_bb_sethw(struct 
rtw89_dev *rtwdev) -{ - struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain; - - rtw89_phy_write32_clr(rtwdev, R_P0_EN_SOUND_WO_NDP, B_P0_EN_SOUND_WO_NDP); - rtw89_phy_write32_clr(rtwdev, R_P1_EN_SOUND_WO_NDP, B_P1_EN_SOUND_WO_NDP); - - rtw8852b_bb_macid_ctrl_init(rtwdev, RTW89_PHY_0); - - /* read these registers after loading BB parameters */ - gain->offset_base[RTW89_PHY_0] = - rtw89_phy_read32_mask(rtwdev, R_P0_RPL1, B_P0_RPL1_BIAS_MASK); - gain->rssi_base[RTW89_PHY_0] = - rtw89_phy_read32_mask(rtwdev, R_P1_RPL1, B_P0_RPL1_BIAS_MASK); -} - -static void rtw8852b_bb_set_pop(struct rtw89_dev *rtwdev) -{ - if (rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR) - rtw89_phy_write32_clr(rtwdev, R_PKT_CTRL, B_PKT_POP_EN); -} - -static void rtw8852b_set_channel_bb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx) -{ - bool cck_en = chan->channel <= 14; - u8 pri_ch_idx = chan->pri_ch_idx; - u8 band = chan->band_type, chan_idx; - - if (cck_en) - rtw8852b_ctrl_sco_cck(rtwdev, chan->primary_channel); - - rtw8852b_ctrl_ch(rtwdev, chan, phy_idx); - rtw8852b_ctrl_bw(rtwdev, pri_ch_idx, chan->band_width, phy_idx); - rtw8852b_ctrl_cck_en(rtwdev, cck_en); - if (chan->band_type == RTW89_BAND_5G) { - rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1, - B_PATH0_BT_SHARE_V1, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1, - B_PATH0_BTG_PATH_V1, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1, - B_PATH1_BT_SHARE_V1, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1, - B_PATH1_BTG_PATH_V1, 0x0); - rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x0); - rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x0); - rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1, - B_BT_DYN_DC_EST_EN_MSK, 0x0); - rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x0); - } - chan_idx = rtw89_encode_chan_idx(rtwdev, chan->primary_channel, band); - rtw89_phy_write32_mask(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0, chan_idx); - rtw8852b_5m_mask(rtwdev, chan, phy_idx); - rtw8852b_bb_set_pop(rtwdev); - rtw8852b_bb_reset_all(rtwdev, phy_idx); -} - static void rtw8852b_set_channel(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, enum rtw89_mac_idx mac_idx, enum rtw89_phy_idx phy_idx) { - rtw8852b_set_channel_mac(rtwdev, chan, mac_idx); - rtw8852b_set_channel_bb(rtwdev, chan, phy_idx); + rtw8852bx_set_channel_mac(rtwdev, chan, mac_idx); + rtw8852bx_set_channel_bb(rtwdev, chan, phy_idx); rtw8852b_set_channel_rf(rtwdev, chan, phy_idx); } @@ -1602,540 +579,6 @@ static void rtw8852b_rfk_track(struct rtw89_dev *rtwdev) rtw8852b_dpk_track(rtwdev); } -static u32 rtw8852b_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx, s16 ref) -{ - const u16 tssi_16dbm_cw = 0x12c; - const u8 base_cw_0db = 0x27; - const s8 ofst_int = 0; - s16 pwr_s10_3; - s16 rf_pwr_cw; - u16 bb_pwr_cw; - u32 pwr_cw; - u32 tssi_ofst_cw; - - pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3); - bb_pwr_cw = FIELD_GET(GENMASK(2, 0), pwr_s10_3); - rf_pwr_cw = FIELD_GET(GENMASK(8, 3), pwr_s10_3); - rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63); - pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw; - - tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3)); - rtw89_debug(rtwdev, RTW89_DBG_TXPWR, - "[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n", - tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw); - - return FIELD_PREP(B_DPD_TSSI_CW, tssi_ofst_cw) | - FIELD_PREP(B_DPD_PWR_CW, pwr_cw) | - FIELD_PREP(B_DPD_REF, ref); -} - -static void 
rtw8852b_set_txpwr_ref(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx) -{ - static const u32 addr[RF_PATH_NUM_8852B] = {0x5800, 0x7800}; - const u32 mask = B_DPD_TSSI_CW | B_DPD_PWR_CW | B_DPD_REF; - const u8 ofst_ofdm = 0x4; - const u8 ofst_cck = 0x8; - const s16 ref_ofdm = 0; - const s16 ref_cck = 0; - u32 val; - u8 i; - - rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr reference\n"); - - rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL, - B_AX_PWR_REF, 0x0); - - rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n"); - val = rtw8852b_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm); - - for (i = 0; i < RF_PATH_NUM_8852B; i++) - rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val, - phy_idx); - - rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n"); - val = rtw8852b_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck); - - for (i = 0; i < RF_PATH_NUM_8852B; i++) - rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val, - phy_idx); -} - -static void rtw8852b_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan, - u8 tx_shape_idx, - enum rtw89_phy_idx phy_idx) -{ -#define __DFIR_CFG_ADDR(i) (R_TXFIR0 + ((i) << 2)) -#define __DFIR_CFG_MASK 0xffffffff -#define __DFIR_CFG_NR 8 -#define __DECL_DFIR_PARAM(_name, _val...) \ - static const u32 param_ ## _name[] = {_val}; \ - static_assert(ARRAY_SIZE(param_ ## _name) == __DFIR_CFG_NR) - - __DECL_DFIR_PARAM(flat, - 0x023D23FF, 0x0029B354, 0x000FC1C8, 0x00FDB053, - 0x00F86F9A, 0x06FAEF92, 0x00FE5FCC, 0x00FFDFF5); - __DECL_DFIR_PARAM(sharp, - 0x023D83FF, 0x002C636A, 0x0013F204, 0x00008090, - 0x00F87FB0, 0x06F99F83, 0x00FDBFBA, 0x00003FF5); - __DECL_DFIR_PARAM(sharp_14, - 0x023B13FF, 0x001C42DE, 0x00FDB0AD, 0x00F60F6E, - 0x00FD8F92, 0x0602D011, 0x0001C02C, 0x00FFF00A); - u8 ch = chan->channel; - const u32 *param; - u32 addr; - int i; - - if (ch > 14) { - rtw89_warn(rtwdev, - "set tx shape dfir by unknown ch: %d on 2G\n", ch); - return; - } - - if (ch == 14) - param = param_sharp_14; - else - param = tx_shape_idx == 0 ? 
param_flat : param_sharp; - - for (i = 0; i < __DFIR_CFG_NR; i++) { - addr = __DFIR_CFG_ADDR(i); - rtw89_debug(rtwdev, RTW89_DBG_TXPWR, - "set tx shape dfir: 0x%x: 0x%x\n", addr, param[i]); - rtw89_phy_write32_idx(rtwdev, addr, __DFIR_CFG_MASK, param[i], - phy_idx); - } - -#undef __DECL_DFIR_PARAM -#undef __DFIR_CFG_NR -#undef __DFIR_CFG_MASK -#undef __DECL_CFG_ADDR -} - -static void rtw8852b_set_tx_shape(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx) -{ - const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms; - u8 band = chan->band_type; - u8 regd = rtw89_regd_get(rtwdev, band); - u8 tx_shape_cck = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_CCK][regd]; - u8 tx_shape_ofdm = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_OFDM][regd]; - - if (band == RTW89_BAND_2G) - rtw8852b_bb_set_tx_shape_dfir(rtwdev, chan, tx_shape_cck, phy_idx); - - rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG, - tx_shape_ofdm); -} - -static void rtw8852b_set_txpwr(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx) -{ - rtw89_phy_set_txpwr_byrate(rtwdev, chan, phy_idx); - rtw89_phy_set_txpwr_offset(rtwdev, chan, phy_idx); - rtw8852b_set_tx_shape(rtwdev, chan, phy_idx); - rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx); - rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx); -} - -static void rtw8852b_set_txpwr_ctrl(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx) -{ - rtw8852b_set_txpwr_ref(rtwdev, phy_idx); -} - -static -void rtw8852b_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, - s8 pw_ofst, enum rtw89_mac_idx mac_idx) -{ - u32 reg; - - if (pw_ofst < -16 || pw_ofst > 15) { - rtw89_warn(rtwdev, "[ULTB] Err pwr_offset=%d\n", pw_ofst); - return; - } - - reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_CTRL, mac_idx); - rtw89_write32_set(rtwdev, reg, B_AX_PWR_UL_TB_CTRL_EN); - - reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, mac_idx); - rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, pw_ofst); - - pw_ofst = max_t(s8, pw_ofst - 3, -16); - reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, mac_idx); - rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_MASK, pw_ofst); -} - -static int -rtw8852b_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) -{ - int ret; - - ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL2, 0x07763333); - if (ret) - return ret; - - ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_COEXT_CTRL, 0x01ebf000); - if (ret) - return ret; - - ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL0, 0x0002f8ff); - if (ret) - return ret; - - rtw8852b_set_txpwr_ul_tb_offset(rtwdev, 0, phy_idx == RTW89_PHY_1 ? 
- RTW89_MAC_1 : RTW89_MAC_0); - - return 0; -} - -void rtw8852b_bb_set_plcp_tx(struct rtw89_dev *rtwdev) -{ - const struct rtw89_reg3_def *def = rtw8852b_pmac_ht20_mcs7_tbl; - u8 i; - - for (i = 0; i < ARRAY_SIZE(rtw8852b_pmac_ht20_mcs7_tbl); i++, def++) - rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data); -} - -static void rtw8852b_stop_pmac_tx(struct rtw89_dev *rtwdev, - struct rtw8852b_bb_pmac_info *tx_info, - enum rtw89_phy_idx idx) -{ - rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Stop Tx"); - if (tx_info->mode == CONT_TX) - rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 0, idx); - else if (tx_info->mode == PKTS_TX) - rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 0, idx); -} - -static void rtw8852b_start_pmac_tx(struct rtw89_dev *rtwdev, - struct rtw8852b_bb_pmac_info *tx_info, - enum rtw89_phy_idx idx) -{ - enum rtw8852b_pmac_mode mode = tx_info->mode; - u32 pkt_cnt = tx_info->tx_cnt; - u16 period = tx_info->period; - - if (mode == CONT_TX && !tx_info->is_cck) { - rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 1, idx); - rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CTx Start"); - } else if (mode == PKTS_TX) { - rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 1, idx); - rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, - B_PMAC_TX_PRD_MSK, period, idx); - rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CNT, B_PMAC_TX_CNT_MSK, - pkt_cnt, idx); - rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC PTx Start"); - } - - rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 1, idx); - rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 0, idx); -} - -void rtw8852b_bb_set_pmac_tx(struct rtw89_dev *rtwdev, - struct rtw8852b_bb_pmac_info *tx_info, - enum rtw89_phy_idx idx) -{ - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); - - if (!tx_info->en_pmac_tx) { - rtw8852b_stop_pmac_tx(rtwdev, tx_info, idx); - rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx); - if (chan->band_type == RTW89_BAND_2G) - rtw89_phy_write32_clr(rtwdev, R_RXCCA, B_RXCCA_DIS); - return; - } - - rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Tx Enable"); - - rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 1, idx); - rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 1, idx); - rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0x3f, idx); - rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, idx); - rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 1, idx); - rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS); - rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, idx); - - rtw8852b_start_pmac_tx(rtwdev, tx_info, idx); -} - -void rtw8852b_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable, - u16 tx_cnt, u16 period, u16 tx_time, - enum rtw89_phy_idx idx) -{ - struct rtw8852b_bb_pmac_info tx_info = {0}; - - tx_info.en_pmac_tx = enable; - tx_info.is_cck = 0; - tx_info.mode = PKTS_TX; - tx_info.tx_cnt = tx_cnt; - tx_info.period = period; - tx_info.tx_time = tx_time; - - rtw8852b_bb_set_pmac_tx(rtwdev, &tx_info, idx); -} - -void rtw8852b_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm, - enum rtw89_phy_idx idx) -{ - rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx PWR = %d", pwr_dbm); - - rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 1, idx); - rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, pwr_dbm, idx); -} - -void rtw8852b_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path) -{ - rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 7, 
RTW89_PHY_0); - - rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx Path = %d", tx_path); - - if (tx_path == RF_PATH_A) { - rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 1); - rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0); - } else if (tx_path == RF_PATH_B) { - rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 2); - rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0); - } else if (tx_path == RF_PATH_AB) { - rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 3); - rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 4); - } else { - rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Error Tx Path"); - } -} - -void rtw8852b_bb_tx_mode_switch(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx idx, u8 mode) -{ - if (mode != 0) - return; - - rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Tx mode switch"); - - rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 0, idx); - rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 0, idx); - rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0, idx); - rtw89_phy_write32_idx(rtwdev, R_PMAC_RXMOD, B_PMAC_RXMOD_MSK, 0, idx); - rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_DPD_EN, 0, idx); - rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0, idx); - rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 0, idx); -} - -void rtw8852b_bb_backup_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, - struct rtw8852b_bb_tssi_bak *bak) -{ - s32 tmp; - - bak->tx_path = rtw89_phy_read32_idx(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, idx); - bak->rx_path = rtw89_phy_read32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, idx); - bak->p0_rfmode = rtw89_phy_read32_idx(rtwdev, R_P0_RFMODE, MASKDWORD, idx); - bak->p0_rfmode_ftm = rtw89_phy_read32_idx(rtwdev, R_P0_RFMODE_FTM_RX, MASKDWORD, idx); - bak->p1_rfmode = rtw89_phy_read32_idx(rtwdev, R_P1_RFMODE, MASKDWORD, idx); - bak->p1_rfmode_ftm = rtw89_phy_read32_idx(rtwdev, R_P1_RFMODE_FTM_RX, MASKDWORD, idx); - tmp = rtw89_phy_read32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, idx); - bak->tx_pwr = sign_extend32(tmp, 8); -} - -void rtw8852b_bb_restore_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, - const struct rtw8852b_bb_tssi_bak *bak) -{ - rtw89_phy_write32_idx(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, bak->tx_path, idx); - if (bak->tx_path == RF_AB) - rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0x4); - else - rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0x0); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, bak->rx_path, idx); - rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 1, idx); - rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE, MASKDWORD, bak->p0_rfmode, idx); - rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_FTM_RX, MASKDWORD, bak->p0_rfmode_ftm, idx); - rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE, MASKDWORD, bak->p1_rfmode, idx); - rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_FTM_RX, MASKDWORD, bak->p1_rfmode_ftm, idx); - rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, bak->tx_pwr, idx); -} - -static void rtw8852b_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en, - enum rtw89_phy_idx phy_idx) -{ - rtw89_phy_write_reg3_tbl(rtwdev, en ? 
&rtw8852b_btc_preagc_en_defs_tbl : - &rtw8852b_btc_preagc_dis_defs_tbl); -} - -static void rtw8852b_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, - enum rtw89_phy_idx phy_idx) -{ - if (en) { - rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1, - B_PATH0_BT_SHARE_V1, 0x1); - rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1, - B_PATH0_BTG_PATH_V1, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1, - B_PATH1_G_LNA6_OP1DB_V1, 0x20); - rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1, - B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x30); - rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1, - B_PATH1_BT_SHARE_V1, 0x1); - rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1, - B_PATH1_BTG_PATH_V1, 0x1); - rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0x0); - rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x1); - rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x2); - rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1, - B_BT_DYN_DC_EST_EN_MSK, 0x1); - rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x1); - } else { - rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1, - B_PATH0_BT_SHARE_V1, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1, - B_PATH0_BTG_PATH_V1, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1, - B_PATH1_G_LNA6_OP1DB_V1, 0x1a); - rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1, - B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x2a); - rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1, - B_PATH1_BT_SHARE_V1, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1, - B_PATH1_BTG_PATH_V1, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0xc); - rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x0); - rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x0); - rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1, - B_BT_DYN_DC_EST_EN_MSK, 0x1); - rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x0); - } -} - -void rtw8852b_bb_ctrl_rx_path(struct rtw89_dev *rtwdev, - enum rtw89_rf_path_bit rx_path) -{ - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); - u32 rst_mask0; - u32 rst_mask1; - - if (rx_path == RF_A) { - rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 1); - rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 1); - rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 1); - rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0); - rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0); - } else if (rx_path == RF_B) { - rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 2); - rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 2); - rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 2); - rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0); - rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0); - } else if (rx_path == RF_AB) { - rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 3); - rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 3); - rtw89_phy_write32_mask(rtwdev, 
R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 3); - rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 1); - rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 1); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1); - } - - rtw8852b_set_gain_offset(rtwdev, chan->subband_type, RTW89_PHY_0); - - if (chan->band_type == RTW89_BAND_2G && - (rx_path == RF_B || rx_path == RF_AB)) - rtw8852b_ctrl_btg_bt_rx(rtwdev, true, RTW89_PHY_0); - else - rtw8852b_ctrl_btg_bt_rx(rtwdev, false, RTW89_PHY_0); - - rst_mask0 = B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI; - rst_mask1 = B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI; - if (rx_path == RF_A) { - rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 1); - rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 3); - } else { - rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 1); - rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 3); - } -} - -static void rtw8852b_bb_ctrl_rf_mode_rx_path(struct rtw89_dev *rtwdev, - enum rtw89_rf_path_bit rx_path) -{ - if (rx_path == RF_A) { - rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, - B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1233312); - rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX, - B_P0_RFMODE_FTM_RX, 0x333); - rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, - B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1111111); - rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX, - B_P1_RFMODE_FTM_RX, 0x111); - } else if (rx_path == RF_B) { - rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, - B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1111111); - rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX, - B_P0_RFMODE_FTM_RX, 0x111); - rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, - B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1233312); - rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX, - B_P1_RFMODE_FTM_RX, 0x333); - } else if (rx_path == RF_AB) { - rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, - B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1233312); - rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX, - B_P0_RFMODE_FTM_RX, 0x333); - rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, - B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1233312); - rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX, - B_P1_RFMODE_FTM_RX, 0x333); - } -} - -static void rtw8852b_bb_cfg_txrx_path(struct rtw89_dev *rtwdev) -{ - struct rtw89_hal *hal = &rtwdev->hal; - enum rtw89_rf_path_bit rx_path = hal->antenna_rx ? 
hal->antenna_rx : RF_AB; - - rtw8852b_bb_ctrl_rx_path(rtwdev, rx_path); - rtw8852b_bb_ctrl_rf_mode_rx_path(rtwdev, rx_path); - - if (rtwdev->hal.rx_nss == 1) { - rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0); - rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0); - } else { - rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 1); - rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 1); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1); - } - - rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0x0, RTW89_PHY_0); -} - -static u8 rtw8852b_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path) -{ - if (rtwdev->is_tssi_mode[rf_path]) { - u32 addr = 0x1c10 + (rf_path << 13); - - return rtw89_phy_read32_mask(rtwdev, addr, 0x3F000000); - } - - rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1); - rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x0); - rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1); - - fsleep(200); - - return rtw89_read_rf(rtwdev, rf_path, RR_TM, RR_TM_VAL); -} - static void rtw8852b_btc_set_rfe(struct rtw89_dev *rtwdev) { const struct rtw89_btc_ver *ver = rtwdev->btc.ver; @@ -2190,86 +633,6 @@ static void rtw8852b_btc_set_rfe(struct rtw89_dev *rtwdev) } } -static -void rtw8852b_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val) -{ - rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x20000); - rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, group); - rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, val); - rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0); -} - -static void rtw8852b_btc_init_cfg(struct rtw89_dev *rtwdev) -{ - struct rtw89_btc *btc = &rtwdev->btc; - const struct rtw89_chip_info *chip = rtwdev->chip; - const struct rtw89_mac_ax_coex coex_params = { - .pta_mode = RTW89_MAC_AX_COEX_RTK_MODE, - .direction = RTW89_MAC_AX_COEX_INNER, - }; - - /* PTA init */ - rtw89_mac_coex_init(rtwdev, &coex_params); - - /* set WL Tx response = Hi-Pri */ - chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_TX_RESP, true); - chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_BEACON, true); - - /* set rf gnt debug off */ - rtw89_write_rf(rtwdev, RF_PATH_A, RR_WLSEL, RFREG_MASK, 0x0); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, RFREG_MASK, 0x0); - - /* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */ - if (btc->ant_type == BTC_ANT_SHARED) { - rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff); - rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_SS_GROUP, 0x5ff); - /* set path-A(S0) Tx/Rx no-mask if GNT_WL=0 && BT_S1=tx group */ - rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff); - rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_TX_GROUP, 0x55f); - } else { /* set WL Tx stb if GNT_WL = 0 && BT_S1 = ss group for 3-ant */ - rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5df); - rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_SS_GROUP, 0x5df); - rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff); - rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_TX_GROUP, 0x5ff); - } - - /* set PTA break table */ - rtw89_write32(rtwdev, R_BTC_BREAK_TABLE, BTC_BREAK_PARAM); - - /* enable BT counter 0xda40[16,2] = 2b'11 */ - rtw89_write32_set(rtwdev, R_AX_CSR_MODE, B_AX_BT_CNT_RST | B_AX_STATIS_BT_EN); - 
btc->cx.wl.status.map.init_ok = true; -} - -static -void rtw8852b_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state) -{ - u32 bitmap; - u32 reg; - - switch (map) { - case BTC_PRI_MASK_TX_RESP: - reg = R_BTC_BT_COEX_MSK_TABLE; - bitmap = B_BTC_PRI_MASK_TX_RESP_V1; - break; - case BTC_PRI_MASK_BEACON: - reg = R_AX_WL_PRI_MSK; - bitmap = B_AX_PTA_WL_PRI_MASK_BCNQ; - break; - case BTC_PRI_MASK_RX_CCK: - reg = R_BTC_BT_COEX_MSK_TABLE; - bitmap = B_BTC_PRI_MASK_RXCCK_V1; - break; - default: - return; - } - - if (state) - rtw89_write32_set(rtwdev, reg, bitmap); - else - rtw89_write32_clr(rtwdev, reg, bitmap); -} - union rtw8852b_btc_wl_txpwr_ctrl { u32 txpwr_val; struct { @@ -2337,186 +700,19 @@ do { \ #undef __write_ctrl } -static -s8 rtw8852b_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val) -{ - /* +6 for compensate offset */ - return clamp_t(s8, val + 6, -100, 0) + 100; -} - -static -void rtw8852b_btc_update_bt_cnt(struct rtw89_dev *rtwdev) -{ - /* Feature move to firmware */ -} - -static void rtw8852b_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state) -{ - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x80000); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD1, RFREG_MASK, 0x31); - - /* set WL standby = Rx for GNT_BT_Tx = 1->0 settle issue */ - if (state) - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x179); - else - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x20); - - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0); -} - -static void rtw8852b_btc_set_wl_lna2(struct rtw89_dev *rtwdev, u8 level) -{ - switch (level) { - case 0: /* default */ - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0); - break; - case 1: /* Fix LNA2=5 */ - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0); - break; - } -} - -static void rtw8852b_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level) -{ - struct rtw89_btc *btc = &rtwdev->btc; - - switch (level) { - case 0: /* original */ - default: - rtw8852b_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0); - btc->dm.wl_lna2 = 0; - break; - case 1: /* for FDD free-run */ - rtw8852b_ctrl_nbtg_bt_tx(rtwdev, true, RTW89_PHY_0); - btc->dm.wl_lna2 = 0; - break; - case 2: /* for BTG Co-Rx*/ - rtw8852b_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0); - btc->dm.wl_lna2 = 1; - break; - } - - 
rtw8852b_btc_set_wl_lna2(rtwdev, btc->dm.wl_lna2); -} - -static void rtw8852b_fill_freq_with_ppdu(struct rtw89_dev *rtwdev, - struct rtw89_rx_phy_ppdu *phy_ppdu, - struct ieee80211_rx_status *status) -{ - u16 chan = phy_ppdu->chan_idx; - enum nl80211_band band; - u8 ch; - - if (chan == 0) - return; - - rtw89_decode_chan_idx(rtwdev, chan, &ch, &band); - status->freq = ieee80211_channel_to_frequency(ch, band); - status->band = band; -} - -static void rtw8852b_query_ppdu(struct rtw89_dev *rtwdev, - struct rtw89_rx_phy_ppdu *phy_ppdu, - struct ieee80211_rx_status *status) -{ - u8 path; - u8 *rx_power = phy_ppdu->rssi; - - status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B])); - for (path = 0; path < rtwdev->chip->rf_path_num; path++) { - status->chains |= BIT(path); - status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]); - } - if (phy_ppdu->valid) - rtw8852b_fill_freq_with_ppdu(rtwdev, phy_ppdu, status); -} - -static int rtw8852b_mac_enable_bb_rf(struct rtw89_dev *rtwdev) -{ - int ret; - - rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN, - B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); - rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_REG_ZCDC_H_MASK, 0x1); - rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); - rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); - rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); - - ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0xC7, - FULL_BIT_MASK); - if (ret) - return ret; - - ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0xC7, - FULL_BIT_MASK); - if (ret) - return ret; - - rtw89_write8(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_XYN_CYCLE); - - return 0; -} - -static int rtw8852b_mac_disable_bb_rf(struct rtw89_dev *rtwdev) -{ - u8 wl_rfc_s0; - u8 wl_rfc_s1; - int ret; - - rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); - rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, - B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); - - ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, &wl_rfc_s0); - if (ret) - return ret; - wl_rfc_s0 &= ~XTAL_SI_RF00S_EN; - ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, wl_rfc_s0, - FULL_BIT_MASK); - if (ret) - return ret; - - ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, &wl_rfc_s1); - if (ret) - return ret; - wl_rfc_s1 &= ~XTAL_SI_RF10S_EN; - ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, wl_rfc_s1, - FULL_BIT_MASK); - return ret; -} - static const struct rtw89_chip_ops rtw8852b_chip_ops = { - .enable_bb_rf = rtw8852b_mac_enable_bb_rf, - .disable_bb_rf = rtw8852b_mac_disable_bb_rf, + .enable_bb_rf = rtw8852bx_mac_enable_bb_rf, + .disable_bb_rf = rtw8852bx_mac_disable_bb_rf, .bb_preinit = NULL, .bb_postinit = NULL, .bb_reset = rtw8852b_bb_reset, - .bb_sethw = rtw8852b_bb_sethw, + .bb_sethw = rtw8852bx_bb_sethw, .read_rf = rtw89_phy_read_rf_v1, .write_rf = rtw89_phy_write_rf_v1, .set_channel = rtw8852b_set_channel, .set_channel_help = rtw8852b_set_channel_help, - .read_efuse = rtw8852b_read_efuse, - .read_phycap = rtw8852b_read_phycap, + .read_efuse = rtw8852bx_read_efuse, + .read_phycap = rtw8852bx_read_phycap, .fem_setup = NULL, .rfe_gpio = NULL, .rfk_hw_init = NULL, @@ -2526,16 +722,16 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = { .rfk_band_changed = rtw8852b_rfk_band_changed, .rfk_scan = rtw8852b_rfk_scan, .rfk_track = rtw8852b_rfk_track, - .power_trim = rtw8852b_power_trim, - .set_txpwr = rtw8852b_set_txpwr, - .set_txpwr_ctrl = rtw8852b_set_txpwr_ctrl, - .init_txpwr_unit = rtw8852b_init_txpwr_unit, - 
-	.ctrl_btg_bt_rx = rtw8852b_ctrl_btg_bt_rx,
-	.query_ppdu = rtw8852b_query_ppdu,
-	.ctrl_nbtg_bt_tx = rtw8852b_ctrl_nbtg_bt_tx,
-	.cfg_txrx_path = rtw8852b_bb_cfg_txrx_path,
-	.set_txpwr_ul_tb_offset = rtw8852b_set_txpwr_ul_tb_offset,
+	.power_trim = rtw8852bx_power_trim,
+	.set_txpwr = rtw8852bx_set_txpwr,
+	.set_txpwr_ctrl = rtw8852bx_set_txpwr_ctrl,
+	.init_txpwr_unit = rtw8852bx_init_txpwr_unit,
+	.get_thermal = rtw8852bx_get_thermal,
+	.ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx,
+	.query_ppdu = rtw8852bx_query_ppdu,
+	.ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx,
+	.cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path,
+	.set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset,
 	.pwr_on_func = rtw8852b_pwr_on_func,
 	.pwr_off_func = rtw8852b_pwr_off_func,
 	.query_rxdesc = rtw89_core_query_rxdesc,
@@ -2554,13 +750,13 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
 	.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
 
 	.btc_set_rfe = rtw8852b_btc_set_rfe,
-	.btc_init_cfg = rtw8852b_btc_init_cfg,
-	.btc_set_wl_pri = rtw8852b_btc_set_wl_pri,
+	.btc_init_cfg = rtw8852bx_btc_init_cfg,
+	.btc_set_wl_pri = rtw8852bx_btc_set_wl_pri,
 	.btc_set_wl_txpwr_ctrl = rtw8852b_btc_set_wl_txpwr_ctrl,
-	.btc_get_bt_rssi = rtw8852b_btc_get_bt_rssi,
-	.btc_update_bt_cnt = rtw8852b_btc_update_bt_cnt,
-	.btc_wl_s1_standby = rtw8852b_btc_wl_s1_standby,
-	.btc_set_wl_rx_gain = rtw8852b_btc_set_wl_rx_gain,
+	.btc_get_bt_rssi = rtw8852bx_btc_get_bt_rssi,
+	.btc_update_bt_cnt = rtw8852bx_btc_update_bt_cnt,
+	.btc_wl_s1_standby = rtw8852bx_btc_wl_s1_standby,
+	.btc_set_wl_rx_gain = rtw8852bx_btc_set_wl_rx_gain,
 	.btc_set_policy = rtw89_btc_set_policy_v1,
 };
@@ -2587,7 +783,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
 	.fifo_size = 196608,
 	.small_fifo_size = true,
 	.dle_scc_rsvd_size = 98304,
-	.max_amsdu_limit = 3500,
+	.max_amsdu_limit = 5000,
 	.dis_2g_40m_ul_ofdma = true,
 	.rsvd_ple_ofst = 0x2f800,
 	.hfc_param_ini = rtw8852b_hfc_param_ini_pcie,
@@ -2673,7 +869,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
 	.c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
 	.c2h_regs = rtw8852b_c2h_regs,
 	.page_regs = &rtw8852b_page_regs,
-	.wow_reason_reg = R_AX_C2HREG_DATA3 + 3,
+	.wow_reason_reg = rtw8852b_wow_wakeup_regs,
 	.cfo_src_fd = true,
 	.cfo_hw_comp = true,
 	.dcfo_comp = &rtw8852b_dcfo_comp,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.h b/drivers/net/wireless/realtek/rtw89/rtw8852b.h
index 4f9b3d476879..5ec7180fd355 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.h
@@ -10,128 +10,6 @@
 #define RF_PATH_NUM_8852B 2
 #define BB_PATH_NUM_8852B 2
 
-enum rtw8852b_pmac_mode {
-	NONE_TEST,
-	PKTS_TX,
-	PKTS_RX,
-	CONT_TX
-};
-
-struct rtw8852b_u_efuse {
-	u8 rsvd[0x88];
-	u8 mac_addr[ETH_ALEN];
-};
-
-struct rtw8852b_e_efuse {
-	u8 mac_addr[ETH_ALEN];
-};
-
-struct rtw8852b_tssi_offset {
-	u8 cck_tssi[TSSI_CCK_CH_GROUP_NUM];
-	u8 bw40_tssi[TSSI_MCS_2G_CH_GROUP_NUM];
-	u8 rsvd[7];
-	u8 bw40_1s_tssi_5g[TSSI_MCS_5G_CH_GROUP_NUM];
-} __packed;
-
-struct rtw8852b_efuse {
-	u8 rsvd[0x210];
-	struct rtw8852b_tssi_offset path_a_tssi;
-	u8 rsvd1[10];
-	struct rtw8852b_tssi_offset path_b_tssi;
-	u8 rsvd2[94];
-	u8 channel_plan;
-	u8 xtal_k;
-	u8 rsvd3;
-	u8 iqk_lck;
-	u8 rsvd4[5];
-	u8 reg_setting:2;
-	u8 tx_diversity:1;
-	u8 rx_diversity:2;
-	u8 ac_mode:1;
-	u8 module_type:2;
-	u8 rsvd5;
-	u8 shared_ant:1;
-	u8 coex_type:3;
-	u8 ant_iso:1;
-	u8 radio_on_off:1;
-	u8 rsvd6:2;
-	u8 eeprom_version;
-	u8 customer_id;
-
u8 tx_bb_swing_2g; - u8 tx_bb_swing_5g; - u8 tx_cali_pwr_trk_mode; - u8 trx_path_selection; - u8 rfe_type; - u8 country_code[2]; - u8 rsvd7[3]; - u8 path_a_therm; - u8 path_b_therm; - u8 rsvd8[2]; - u8 rx_gain_2g_ofdm; - u8 rsvd9; - u8 rx_gain_2g_cck; - u8 rsvd10; - u8 rx_gain_5g_low; - u8 rsvd11; - u8 rx_gain_5g_mid; - u8 rsvd12; - u8 rx_gain_5g_high; - u8 rsvd13[35]; - u8 path_a_cck_pwr_idx[6]; - u8 path_a_bw40_1tx_pwr_idx[5]; - u8 path_a_ofdm_1tx_pwr_idx_diff:4; - u8 path_a_bw20_1tx_pwr_idx_diff:4; - u8 path_a_bw20_2tx_pwr_idx_diff:4; - u8 path_a_bw40_2tx_pwr_idx_diff:4; - u8 path_a_cck_2tx_pwr_idx_diff:4; - u8 path_a_ofdm_2tx_pwr_idx_diff:4; - u8 rsvd14[0xf2]; - union { - struct rtw8852b_u_efuse u; - struct rtw8852b_e_efuse e; - }; -} __packed; - -struct rtw8852b_bb_pmac_info { - u8 en_pmac_tx:1; - u8 is_cck:1; - u8 mode:3; - u8 rsvd:3; - u16 tx_cnt; - u16 period; - u16 tx_time; - u8 duty_cycle; -}; - -struct rtw8852b_bb_tssi_bak { - u8 tx_path; - u8 rx_path; - u32 p0_rfmode; - u32 p0_rfmode_ftm; - u32 p1_rfmode; - u32 p1_rfmode_ftm; - s16 tx_pwr; /* S9 */ -}; - extern const struct rtw89_chip_info rtw8852b_chip_info; -void rtw8852b_bb_set_plcp_tx(struct rtw89_dev *rtwdev); -void rtw8852b_bb_set_pmac_tx(struct rtw89_dev *rtwdev, - struct rtw8852b_bb_pmac_info *tx_info, - enum rtw89_phy_idx idx); -void rtw8852b_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable, - u16 tx_cnt, u16 period, u16 tx_time, - enum rtw89_phy_idx idx); -void rtw8852b_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm, - enum rtw89_phy_idx idx); -void rtw8852b_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path); -void rtw8852b_bb_ctrl_rx_path(struct rtw89_dev *rtwdev, - enum rtw89_rf_path_bit rx_path); -void rtw8852b_bb_tx_mode_switch(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx idx, u8 mode); -void rtw8852b_bb_backup_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, - struct rtw8852b_bb_tssi_bak *bak); -void rtw8852b_bb_restore_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, - const struct rtw8852b_bb_tssi_bak *bak); - #endif diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c new file mode 100644 index 000000000000..1745c2882acf --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c @@ -0,0 +1,2053 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2024 Realtek Corporation + */ + +#include "coex.h" +#include "debug.h" +#include "mac.h" +#include "phy.h" +#include "reg.h" +#include "rtw8852b_common.h" +#include "util.h" + +static const struct rtw89_reg3_def rtw8852bx_pmac_ht20_mcs7_tbl[] = { + {0x4580, 0x0000ffff, 0x0}, + {0x4580, 0xffff0000, 0x0}, + {0x4584, 0x0000ffff, 0x0}, + {0x4584, 0xffff0000, 0x0}, + {0x4580, 0x0000ffff, 0x1}, + {0x4578, 0x00ffffff, 0x2018b}, + {0x4570, 0x03ffffff, 0x7}, + {0x4574, 0x03ffffff, 0x32407}, + {0x45b8, 0x00000010, 0x0}, + {0x45b8, 0x00000100, 0x0}, + {0x45b8, 0x00000080, 0x0}, + {0x45b8, 0x00000008, 0x0}, + {0x45a0, 0x0000ff00, 0x0}, + {0x45a0, 0xff000000, 0x1}, + {0x45a4, 0x0000ff00, 0x2}, + {0x45a4, 0xff000000, 0x3}, + {0x45b8, 0x00000020, 0x0}, + {0x4568, 0xe0000000, 0x0}, + {0x45b8, 0x00000002, 0x1}, + {0x456c, 0xe0000000, 0x0}, + {0x45b4, 0x00006000, 0x0}, + {0x45b4, 0x00001800, 0x1}, + {0x45b8, 0x00000040, 0x0}, + {0x45b8, 0x00000004, 0x0}, + {0x45b8, 0x00000200, 0x0}, + {0x4598, 0xf8000000, 0x0}, + {0x45b8, 0x00100000, 0x0}, + {0x45a8, 0x00000fc0, 0x0}, + {0x45b8, 0x00200000, 0x0}, + {0x45b0, 0x00000038, 0x0}, + {0x45b0, 0x000001c0, 0x0}, + 
{0x45a0, 0x000000ff, 0x0}, + {0x45b8, 0x00400000, 0x0}, + {0x4590, 0x000007ff, 0x0}, + {0x45b0, 0x00000e00, 0x0}, + {0x45ac, 0x0000001f, 0x0}, + {0x45b8, 0x00800000, 0x0}, + {0x45a8, 0x0003f000, 0x0}, + {0x45b8, 0x01000000, 0x0}, + {0x45b0, 0x00007000, 0x0}, + {0x45b0, 0x00038000, 0x0}, + {0x45a0, 0x00ff0000, 0x0}, + {0x45b8, 0x02000000, 0x0}, + {0x4590, 0x003ff800, 0x0}, + {0x45b0, 0x001c0000, 0x0}, + {0x45ac, 0x000003e0, 0x0}, + {0x45b8, 0x04000000, 0x0}, + {0x45a8, 0x00fc0000, 0x0}, + {0x45b8, 0x08000000, 0x0}, + {0x45b0, 0x00e00000, 0x0}, + {0x45b0, 0x07000000, 0x0}, + {0x45a4, 0x000000ff, 0x0}, + {0x45b8, 0x10000000, 0x0}, + {0x4594, 0x000007ff, 0x0}, + {0x45b0, 0x38000000, 0x0}, + {0x45ac, 0x00007c00, 0x0}, + {0x45b8, 0x20000000, 0x0}, + {0x45a8, 0x3f000000, 0x0}, + {0x45b8, 0x40000000, 0x0}, + {0x45b4, 0x00000007, 0x0}, + {0x45b4, 0x00000038, 0x0}, + {0x45a4, 0x00ff0000, 0x0}, + {0x45b8, 0x80000000, 0x0}, + {0x4594, 0x003ff800, 0x0}, + {0x45b4, 0x000001c0, 0x0}, + {0x4598, 0xf8000000, 0x0}, + {0x45b8, 0x00100000, 0x0}, + {0x45a8, 0x00000fc0, 0x7}, + {0x45b8, 0x00200000, 0x0}, + {0x45b0, 0x00000038, 0x0}, + {0x45b0, 0x000001c0, 0x0}, + {0x45a0, 0x000000ff, 0x0}, + {0x45b4, 0x06000000, 0x0}, + {0x45b0, 0x00000007, 0x0}, + {0x45b8, 0x00080000, 0x0}, + {0x45a8, 0x0000003f, 0x0}, + {0x457c, 0xffe00000, 0x1}, + {0x4530, 0xffffffff, 0x0}, + {0x4588, 0x00003fff, 0x0}, + {0x4598, 0x000001ff, 0x0}, + {0x4534, 0xffffffff, 0x0}, + {0x4538, 0xffffffff, 0x0}, + {0x453c, 0xffffffff, 0x0}, + {0x4588, 0x0fffc000, 0x0}, + {0x4598, 0x0003fe00, 0x0}, + {0x4540, 0xffffffff, 0x0}, + {0x4544, 0xffffffff, 0x0}, + {0x4548, 0xffffffff, 0x0}, + {0x458c, 0x00003fff, 0x0}, + {0x4598, 0x07fc0000, 0x0}, + {0x454c, 0xffffffff, 0x0}, + {0x4550, 0xffffffff, 0x0}, + {0x4554, 0xffffffff, 0x0}, + {0x458c, 0x0fffc000, 0x0}, + {0x459c, 0x000001ff, 0x0}, + {0x4558, 0xffffffff, 0x0}, + {0x455c, 0xffffffff, 0x0}, + {0x4530, 0xffffffff, 0x4e790001}, + {0x4588, 0x00003fff, 0x0}, + {0x4598, 0x000001ff, 0x1}, + {0x4534, 0xffffffff, 0x0}, + {0x4538, 0xffffffff, 0x4b}, + {0x45ac, 0x38000000, 0x7}, + {0x4588, 0xf0000000, 0x0}, + {0x459c, 0x7e000000, 0x0}, + {0x45b8, 0x00040000, 0x0}, + {0x45b8, 0x00020000, 0x0}, + {0x4590, 0xffc00000, 0x0}, + {0x45b8, 0x00004000, 0x0}, + {0x4578, 0xff000000, 0x0}, + {0x45b8, 0x00000400, 0x0}, + {0x45b8, 0x00000800, 0x0}, + {0x45b8, 0x00001000, 0x0}, + {0x45b8, 0x00002000, 0x0}, + {0x45b4, 0x00018000, 0x0}, + {0x45ac, 0x07800000, 0x0}, + {0x45b4, 0x00000600, 0x2}, + {0x459c, 0x0001fe00, 0x80}, + {0x45ac, 0x00078000, 0x3}, + {0x459c, 0x01fe0000, 0x1}, +}; + +static const struct rtw89_reg3_def rtw8852bx_btc_preagc_en_defs[] = { + {0x46D0, GENMASK(1, 0), 0x3}, + {0x4790, GENMASK(1, 0), 0x3}, + {0x4AD4, GENMASK(31, 0), 0xf}, + {0x4AE0, GENMASK(31, 0), 0xf}, + {0x4688, GENMASK(31, 24), 0x80}, + {0x476C, GENMASK(31, 24), 0x80}, + {0x4694, GENMASK(7, 0), 0x80}, + {0x4694, GENMASK(15, 8), 0x80}, + {0x4778, GENMASK(7, 0), 0x80}, + {0x4778, GENMASK(15, 8), 0x80}, + {0x4AE4, GENMASK(23, 0), 0x780D1E}, + {0x4AEC, GENMASK(23, 0), 0x780D1E}, + {0x469C, GENMASK(31, 26), 0x34}, + {0x49F0, GENMASK(31, 26), 0x34}, +}; + +static DECLARE_PHY_REG3_TBL(rtw8852bx_btc_preagc_en_defs); + +static const struct rtw89_reg3_def rtw8852bx_btc_preagc_dis_defs[] = { + {0x46D0, GENMASK(1, 0), 0x0}, + {0x4790, GENMASK(1, 0), 0x0}, + {0x4AD4, GENMASK(31, 0), 0x60}, + {0x4AE0, GENMASK(31, 0), 0x60}, + {0x4688, GENMASK(31, 24), 0x1a}, + {0x476C, GENMASK(31, 24), 0x1a}, + {0x4694, GENMASK(7, 0), 0x2a}, + {0x4694, GENMASK(15, 8), 0x2a}, 
+ {0x4778, GENMASK(7, 0), 0x2a}, + {0x4778, GENMASK(15, 8), 0x2a}, + {0x4AE4, GENMASK(23, 0), 0x79E99E}, + {0x4AEC, GENMASK(23, 0), 0x79E99E}, + {0x469C, GENMASK(31, 26), 0x26}, + {0x49F0, GENMASK(31, 26), 0x26}, +}; + +static DECLARE_PHY_REG3_TBL(rtw8852bx_btc_preagc_dis_defs); + +static void rtw8852be_efuse_parsing(struct rtw89_efuse *efuse, + struct rtw8852bx_efuse *map) +{ + ether_addr_copy(efuse->addr, map->e.mac_addr); + efuse->rfe_type = map->rfe_type; + efuse->xtal_cap = map->xtal_k; +} + +static void rtw8852bx_efuse_parsing_tssi(struct rtw89_dev *rtwdev, + struct rtw8852bx_efuse *map) +{ + struct rtw89_tssi_info *tssi = &rtwdev->tssi; + struct rtw8852bx_tssi_offset *ofst[] = {&map->path_a_tssi, &map->path_b_tssi}; + u8 i, j; + + tssi->thermal[RF_PATH_A] = map->path_a_therm; + tssi->thermal[RF_PATH_B] = map->path_b_therm; + + for (i = 0; i < RF_PATH_NUM_8852BX; i++) { + memcpy(tssi->tssi_cck[i], ofst[i]->cck_tssi, + sizeof(ofst[i]->cck_tssi)); + + for (j = 0; j < TSSI_CCK_CH_GROUP_NUM; j++) + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][EFUSE] path=%d cck[%d]=0x%x\n", + i, j, tssi->tssi_cck[i][j]); + + memcpy(tssi->tssi_mcs[i], ofst[i]->bw40_tssi, + sizeof(ofst[i]->bw40_tssi)); + memcpy(tssi->tssi_mcs[i] + TSSI_MCS_2G_CH_GROUP_NUM, + ofst[i]->bw40_1s_tssi_5g, sizeof(ofst[i]->bw40_1s_tssi_5g)); + + for (j = 0; j < TSSI_MCS_CH_GROUP_NUM; j++) + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][EFUSE] path=%d mcs[%d]=0x%x\n", + i, j, tssi->tssi_mcs[i][j]); + } +} + +static bool _decode_efuse_gain(u8 data, s8 *high, s8 *low) +{ + if (high) + *high = sign_extend32(FIELD_GET(GENMASK(7, 4), data), 3); + if (low) + *low = sign_extend32(FIELD_GET(GENMASK(3, 0), data), 3); + + return data != 0xff; +} + +static void rtw8852bx_efuse_parsing_gain_offset(struct rtw89_dev *rtwdev, + struct rtw8852bx_efuse *map) +{ + struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain; + bool valid = false; + + valid |= _decode_efuse_gain(map->rx_gain_2g_cck, + &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_CCK], + &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_CCK]); + valid |= _decode_efuse_gain(map->rx_gain_2g_ofdm, + &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_OFDM], + &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_OFDM]); + valid |= _decode_efuse_gain(map->rx_gain_5g_low, + &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_LOW], + &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_LOW]); + valid |= _decode_efuse_gain(map->rx_gain_5g_mid, + &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_MID], + &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_MID]); + valid |= _decode_efuse_gain(map->rx_gain_5g_high, + &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_HIGH], + &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_HIGH]); + + gain->offset_valid = valid; +} + +static int __rtw8852bx_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map, + enum rtw89_efuse_block block) +{ + struct rtw89_efuse *efuse = &rtwdev->efuse; + struct rtw8852bx_efuse *map; + + map = (struct rtw8852bx_efuse *)log_map; + + efuse->country_code[0] = map->country_code[0]; + efuse->country_code[1] = map->country_code[1]; + rtw8852bx_efuse_parsing_tssi(rtwdev, map); + rtw8852bx_efuse_parsing_gain_offset(rtwdev, map); + + switch (rtwdev->hci.type) { + case RTW89_HCI_TYPE_PCIE: + rtw8852be_efuse_parsing(efuse, map); + break; + default: + return -EOPNOTSUPP; + } + + rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type); + + return 0; +} + +static void rtw8852bx_phycap_parsing_power_cal(struct rtw89_dev *rtwdev, u8 *phycap_map) +{ +#define PWR_K_CHK_OFFSET 0x5E9 +#define 
PWR_K_CHK_VALUE 0xAA + u32 offset = PWR_K_CHK_OFFSET - rtwdev->chip->phycap_addr; + + if (phycap_map[offset] == PWR_K_CHK_VALUE) + rtwdev->efuse.power_k_valid = true; +} + +static void rtw8852bx_phycap_parsing_tssi(struct rtw89_dev *rtwdev, u8 *phycap_map) +{ + struct rtw89_tssi_info *tssi = &rtwdev->tssi; + static const u32 tssi_trim_addr[RF_PATH_NUM_8852BX] = {0x5D6, 0x5AB}; + u32 addr = rtwdev->chip->phycap_addr; + bool pg = false; + u32 ofst; + u8 i, j; + + for (i = 0; i < RF_PATH_NUM_8852BX; i++) { + for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++) { + /* addrs are in decreasing order */ + ofst = tssi_trim_addr[i] - addr - j; + tssi->tssi_trim[i][j] = phycap_map[ofst]; + + if (phycap_map[ofst] != 0xff) + pg = true; + } + } + + if (!pg) { + memset(tssi->tssi_trim, 0, sizeof(tssi->tssi_trim)); + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM] no PG, set all trim info to 0\n"); + } + + for (i = 0; i < RF_PATH_NUM_8852BX; i++) + for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++) + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] path=%d idx=%d trim=0x%x addr=0x%x\n", + i, j, tssi->tssi_trim[i][j], + tssi_trim_addr[i] - j); +} + +static void rtw8852bx_phycap_parsing_thermal_trim(struct rtw89_dev *rtwdev, + u8 *phycap_map) +{ + struct rtw89_power_trim_info *info = &rtwdev->pwr_trim; + static const u32 thm_trim_addr[RF_PATH_NUM_8852BX] = {0x5DF, 0x5DC}; + u32 addr = rtwdev->chip->phycap_addr; + u8 i; + + for (i = 0; i < RF_PATH_NUM_8852BX; i++) { + info->thermal_trim[i] = phycap_map[thm_trim_addr[i] - addr]; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[THERMAL][TRIM] path=%d thermal_trim=0x%x\n", + i, info->thermal_trim[i]); + + if (info->thermal_trim[i] != 0xff) + info->pg_thermal_trim = true; + } +} + +static void rtw8852bx_thermal_trim(struct rtw89_dev *rtwdev) +{ +#define __thm_setting(raw) \ +({ \ + u8 __v = (raw); \ + ((__v & 0x1) << 3) | ((__v & 0x1f) >> 1); \ +}) + struct rtw89_power_trim_info *info = &rtwdev->pwr_trim; + u8 i, val; + + if (!info->pg_thermal_trim) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[THERMAL][TRIM] no PG, do nothing\n"); + + return; + } + + for (i = 0; i < RF_PATH_NUM_8852BX; i++) { + val = __thm_setting(info->thermal_trim[i]); + rtw89_write_rf(rtwdev, i, RR_TM2, RR_TM2_OFF, val); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[THERMAL][TRIM] path=%d thermal_setting=0x%x\n", + i, val); + } +#undef __thm_setting +} + +static void rtw8852bx_phycap_parsing_pa_bias_trim(struct rtw89_dev *rtwdev, + u8 *phycap_map) +{ + struct rtw89_power_trim_info *info = &rtwdev->pwr_trim; + static const u32 pabias_trim_addr[RF_PATH_NUM_8852BX] = {0x5DE, 0x5DB}; + u32 addr = rtwdev->chip->phycap_addr; + u8 i; + + for (i = 0; i < RF_PATH_NUM_8852BX; i++) { + info->pa_bias_trim[i] = phycap_map[pabias_trim_addr[i] - addr]; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[PA_BIAS][TRIM] path=%d pa_bias_trim=0x%x\n", + i, info->pa_bias_trim[i]); + + if (info->pa_bias_trim[i] != 0xff) + info->pg_pa_bias_trim = true; + } +} + +static void rtw8852bx_pa_bias_trim(struct rtw89_dev *rtwdev) +{ + struct rtw89_power_trim_info *info = &rtwdev->pwr_trim; + u8 pabias_2g, pabias_5g; + u8 i; + + if (!info->pg_pa_bias_trim) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[PA_BIAS][TRIM] no PG, do nothing\n"); + + return; + } + + for (i = 0; i < RF_PATH_NUM_8852BX; i++) { + pabias_2g = FIELD_GET(GENMASK(3, 0), info->pa_bias_trim[i]); + pabias_5g = FIELD_GET(GENMASK(7, 4), info->pa_bias_trim[i]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[PA_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n", + i, pabias_2g, pabias_5g); + + 
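/*
 * Each PG byte packs two PA bias codes: bits [3:0] carry the 2 GHz
 * code and bits [7:4] the 5 GHz code, split out above with
 * FIELD_GET(). A fused value of 0x9a, for example, decodes to
 * pabias_5g = 0x9 and pabias_2g = 0xa before the RR_BIASA writes
 * below.
 */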
rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXG, pabias_2g); + rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXA, pabias_5g); + } +} + +static void rtw8852bx_phycap_parsing_gain_comp(struct rtw89_dev *rtwdev, u8 *phycap_map) +{ + static const u32 comp_addrs[][RTW89_SUBBAND_2GHZ_5GHZ_NR] = { + {0x5BB, 0x5BA, 0, 0x5B9, 0x5B8}, + {0x590, 0x58F, 0, 0x58E, 0x58D}, + }; + struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain; + u32 phycap_addr = rtwdev->chip->phycap_addr; + bool valid = false; + int path, i; + u8 data; + + for (path = 0; path < 2; path++) + for (i = 0; i < RTW89_SUBBAND_2GHZ_5GHZ_NR; i++) { + if (comp_addrs[path][i] == 0) + continue; + + data = phycap_map[comp_addrs[path][i] - phycap_addr]; + valid |= _decode_efuse_gain(data, NULL, + &gain->comp[path][i]); + } + + gain->comp_valid = valid; +} + +static int __rtw8852bx_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map) +{ + rtw8852bx_phycap_parsing_power_cal(rtwdev, phycap_map); + rtw8852bx_phycap_parsing_tssi(rtwdev, phycap_map); + rtw8852bx_phycap_parsing_thermal_trim(rtwdev, phycap_map); + rtw8852bx_phycap_parsing_pa_bias_trim(rtwdev, phycap_map); + rtw8852bx_phycap_parsing_gain_comp(rtwdev, phycap_map); + + return 0; +} + +static void __rtw8852bx_power_trim(struct rtw89_dev *rtwdev) +{ + rtw8852bx_thermal_trim(rtwdev); + rtw8852bx_pa_bias_trim(rtwdev); +} + +static void __rtw8852bx_set_channel_mac(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + u8 mac_idx) +{ + u32 rf_mod = rtw89_mac_reg_by_idx(rtwdev, R_AX_WMAC_RFMOD, mac_idx); + u32 sub_carr = rtw89_mac_reg_by_idx(rtwdev, R_AX_TX_SUB_CARRIER_VALUE, mac_idx); + u32 chk_rate = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXRATE_CHK, mac_idx); + u8 txsc20 = 0, txsc40 = 0; + + switch (chan->band_width) { + case RTW89_CHANNEL_WIDTH_80: + txsc40 = rtw89_phy_get_txsc(rtwdev, chan, RTW89_CHANNEL_WIDTH_40); + fallthrough; + case RTW89_CHANNEL_WIDTH_40: + txsc20 = rtw89_phy_get_txsc(rtwdev, chan, RTW89_CHANNEL_WIDTH_20); + break; + default: + break; + } + + switch (chan->band_width) { + case RTW89_CHANNEL_WIDTH_80: + rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(1)); + rtw89_write32(rtwdev, sub_carr, txsc20 | (txsc40 << 4)); + break; + case RTW89_CHANNEL_WIDTH_40: + rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(0)); + rtw89_write32(rtwdev, sub_carr, txsc20); + break; + case RTW89_CHANNEL_WIDTH_20: + rtw89_write8_clr(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK); + rtw89_write32(rtwdev, sub_carr, 0); + break; + default: + break; + } + + if (chan->channel > 14) { + rtw89_write8_clr(rtwdev, chk_rate, B_AX_BAND_MODE); + rtw89_write8_set(rtwdev, chk_rate, + B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6); + } else { + rtw89_write8_set(rtwdev, chk_rate, B_AX_BAND_MODE); + rtw89_write8_clr(rtwdev, chk_rate, + B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6); + } +} + +static const u32 rtw8852bx_sco_barker_threshold[14] = { + 0x1cfea, 0x1d0e1, 0x1d1d7, 0x1d2cd, 0x1d3c3, 0x1d4b9, 0x1d5b0, 0x1d6a6, + 0x1d79c, 0x1d892, 0x1d988, 0x1da7f, 0x1db75, 0x1ddc4 +}; + +static const u32 rtw8852bx_sco_cck_threshold[14] = { + 0x27de3, 0x27f35, 0x28088, 0x281da, 0x2832d, 0x2847f, 0x285d2, 0x28724, + 0x28877, 0x289c9, 0x28b1c, 0x28c6e, 0x28dc1, 0x290ed +}; + +static void rtw8852bx_ctrl_sco_cck(struct rtw89_dev *rtwdev, u8 primary_ch) +{ + u8 ch_element = primary_ch - 1; + + rtw89_phy_write32_mask(rtwdev, R_RXSCOBC, B_RXSCOBC_TH, + rtw8852bx_sco_barker_threshold[ch_element]); + rtw89_phy_write32_mask(rtwdev, R_RXSCOCCK, B_RXSCOCCK_TH, + rtw8852bx_sco_cck_threshold[ch_element]); +} + 
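/*
 * rtw8852bx_ctrl_sco_cck() above indexes primary_ch - 1 directly into
 * the two 14-entry threshold tables, so it is only valid for 2 GHz
 * channels 1..14; callers guarantee this by gating it on cck_en
 * (chan->channel <= 14) in __rtw8852bx_set_channel_bb() further below.
 * A minimal sketch of the same lookup with an explicit bounds check
 * (sco_cck_thresholds is a hypothetical helper, not part of the
 * driver):
 *
 *	static int sco_cck_thresholds(u8 primary_ch, u32 *barker, u32 *cck)
 *	{
 *		if (primary_ch < 1 || primary_ch > 14)
 *			return -EINVAL;	// not a 2 GHz CCK channel
 *		*barker = rtw8852bx_sco_barker_threshold[primary_ch - 1];
 *		*cck = rtw8852bx_sco_cck_threshold[primary_ch - 1];
 *		return 0;
 *	}
 */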
+static u8 rtw8852bx_sco_mapping(u8 central_ch) +{ + if (central_ch == 1) + return 109; + else if (central_ch >= 2 && central_ch <= 6) + return 108; + else if (central_ch >= 7 && central_ch <= 10) + return 107; + else if (central_ch >= 11 && central_ch <= 14) + return 106; + else if (central_ch == 36 || central_ch == 38) + return 51; + else if (central_ch >= 40 && central_ch <= 58) + return 50; + else if (central_ch >= 60 && central_ch <= 64) + return 49; + else if (central_ch == 100 || central_ch == 102) + return 48; + else if (central_ch >= 104 && central_ch <= 126) + return 47; + else if (central_ch >= 128 && central_ch <= 151) + return 46; + else if (central_ch >= 153 && central_ch <= 177) + return 45; + else + return 0; +} + +struct rtw8852bx_bb_gain { + u32 gain_g[BB_PATH_NUM_8852BX]; + u32 gain_a[BB_PATH_NUM_8852BX]; + u32 gain_mask; +}; + +static const struct rtw8852bx_bb_gain bb_gain_lna[LNA_GAIN_NUM] = { + { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740}, + .gain_mask = 0x00ff0000 }, + { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740}, + .gain_mask = 0xff000000 }, + { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744}, + .gain_mask = 0x000000ff }, + { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744}, + .gain_mask = 0x0000ff00 }, + { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744}, + .gain_mask = 0x00ff0000 }, + { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744}, + .gain_mask = 0xff000000 }, + { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748}, + .gain_mask = 0x000000ff }, +}; + +static const struct rtw8852bx_bb_gain bb_gain_tia[TIA_GAIN_NUM] = { + { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748}, + .gain_mask = 0x00ff0000 }, + { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748}, + .gain_mask = 0xff000000 }, +}; + +static void rtw8852bx_set_gain_error(struct rtw89_dev *rtwdev, + enum rtw89_subband subband, + enum rtw89_rf_path path) +{ + const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax; + u8 gain_band = rtw89_subband_to_bb_gain_band(subband); + s32 val; + u32 reg; + u32 mask; + int i; + + for (i = 0; i < LNA_GAIN_NUM; i++) { + if (subband == RTW89_CH_2G) + reg = bb_gain_lna[i].gain_g[path]; + else + reg = bb_gain_lna[i].gain_a[path]; + + mask = bb_gain_lna[i].gain_mask; + val = gain->lna_gain[gain_band][path][i]; + rtw89_phy_write32_mask(rtwdev, reg, mask, val); + } + + for (i = 0; i < TIA_GAIN_NUM; i++) { + if (subband == RTW89_CH_2G) + reg = bb_gain_tia[i].gain_g[path]; + else + reg = bb_gain_tia[i].gain_a[path]; + + mask = bb_gain_tia[i].gain_mask; + val = gain->tia_gain[gain_band][path][i]; + rtw89_phy_write32_mask(rtwdev, reg, mask, val); + } +} + +static void rtw8852bt_ext_loss_avg_update(struct rtw89_dev *rtwdev, + s8 ext_loss_a, s8 ext_loss_b) +{ + s8 ext_loss_avg; + u64 linear; + u8 pwrofst; + + if (ext_loss_a == ext_loss_b) { + ext_loss_avg = ext_loss_a; + } else { + linear = rtw89_db_2_linear(abs(ext_loss_a - ext_loss_b)) + 1; + linear = DIV_ROUND_CLOSEST_ULL(linear / 2, 1 << RTW89_LINEAR_FRAC_BITS); + ext_loss_avg = rtw89_linear_2_db(linear); + ext_loss_avg += min(ext_loss_a, ext_loss_b); + } + + pwrofst = max(DIV_ROUND_CLOSEST(ext_loss_avg, 4) + 16, EDCCA_PWROFST_DEFAULT); + + rtw89_phy_write32_mask(rtwdev, R_PWOFST, B_PWOFST, pwrofst); +} + +static void rtw8852bx_set_gain_offset(struct rtw89_dev *rtwdev, + enum rtw89_subband subband, + enum rtw89_phy_idx phy_idx) +{ + static const u32 gain_err_addr[2] = {R_P0_AGC_RSVD, R_P1_AGC_RSVD}; + static const u32 rssi_ofst_addr[2] = 
{R_PATH0_G_TIA1_LNA6_OP1DB_V1, + R_PATH1_G_TIA1_LNA6_OP1DB_V1}; + struct rtw89_hal *hal = &rtwdev->hal; + struct rtw89_phy_efuse_gain *efuse_gain = &rtwdev->efuse_gain; + enum rtw89_gain_offset gain_ofdm_band; + s8 ext_loss_a = 0, ext_loss_b = 0; + s32 offset_a, offset_b; + s32 offset_ofdm, offset_cck; + s32 tmp; + u8 path; + + if (!efuse_gain->comp_valid) + goto next; + + for (path = RF_PATH_A; path < BB_PATH_NUM_8852BX; path++) { + tmp = efuse_gain->comp[path][subband]; + tmp = clamp_t(s32, tmp << 2, S8_MIN, S8_MAX); + rtw89_phy_write32_mask(rtwdev, gain_err_addr[path], MASKBYTE0, tmp); + } + +next: + if (!efuse_gain->offset_valid) + goto ext_loss; + + gain_ofdm_band = rtw89_subband_to_gain_offset_band_of_ofdm(subband); + + offset_a = -efuse_gain->offset[RF_PATH_A][gain_ofdm_band]; + offset_b = -efuse_gain->offset[RF_PATH_B][gain_ofdm_band]; + + tmp = -((offset_a << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2)); + tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX); + rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[RF_PATH_A], B_PATH0_R_G_OFST_MASK, tmp); + + tmp = -((offset_b << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2)); + tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX); + rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[RF_PATH_B], B_PATH0_R_G_OFST_MASK, tmp); + + if (hal->antenna_rx == RF_B) { + offset_ofdm = -efuse_gain->offset[RF_PATH_B][gain_ofdm_band]; + offset_cck = -efuse_gain->offset[RF_PATH_B][0]; + } else { + offset_ofdm = -efuse_gain->offset[RF_PATH_A][gain_ofdm_band]; + offset_cck = -efuse_gain->offset[RF_PATH_A][0]; + } + + tmp = (offset_ofdm << 4) + efuse_gain->offset_base[RTW89_PHY_0]; + tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX); + rtw89_phy_write32_idx(rtwdev, R_P0_RPL1, B_P0_RPL1_BIAS_MASK, tmp, phy_idx); + + tmp = (offset_ofdm << 4) + efuse_gain->rssi_base[RTW89_PHY_0]; + tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX); + rtw89_phy_write32_idx(rtwdev, R_P1_RPL1, B_P0_RPL1_BIAS_MASK, tmp, phy_idx); + + if (subband == RTW89_CH_2G) { + tmp = (offset_cck << 3) + (efuse_gain->offset_base[RTW89_PHY_0] >> 1); + tmp = clamp_t(s32, tmp, S8_MIN >> 1, S8_MAX >> 1); + rtw89_phy_write32_mask(rtwdev, R_RX_RPL_OFST, + B_RX_RPL_OFST_CCK_MASK, tmp); + } + + ext_loss_a = (offset_a << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2); + ext_loss_b = (offset_b << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2); + +ext_loss: + if (rtwdev->chip->chip_id == RTL8852BT) + rtw8852bt_ext_loss_avg_update(rtwdev, ext_loss_a, ext_loss_b); +} + +static +void rtw8852bx_set_rxsc_rpl_comp(struct rtw89_dev *rtwdev, enum rtw89_subband subband) +{ + const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax; + u8 band = rtw89_subband_to_bb_gain_band(subband); + u32 val; + + val = u32_encode_bits((gain->rpl_ofst_20[band][RF_PATH_A] + + gain->rpl_ofst_20[band][RF_PATH_B]) >> 1, B_P0_RPL1_20_MASK) | + u32_encode_bits((gain->rpl_ofst_40[band][RF_PATH_A][0] + + gain->rpl_ofst_40[band][RF_PATH_B][0]) >> 1, B_P0_RPL1_40_MASK) | + u32_encode_bits((gain->rpl_ofst_40[band][RF_PATH_A][1] + + gain->rpl_ofst_40[band][RF_PATH_B][1]) >> 1, B_P0_RPL1_41_MASK); + val >>= B_P0_RPL1_SHIFT; + rtw89_phy_write32_mask(rtwdev, R_P0_RPL1, B_P0_RPL1_MASK, val); + rtw89_phy_write32_mask(rtwdev, R_P1_RPL1, B_P0_RPL1_MASK, val); + + val = u32_encode_bits((gain->rpl_ofst_40[band][RF_PATH_A][2] + + gain->rpl_ofst_40[band][RF_PATH_B][2]) >> 1, B_P0_RTL2_42_MASK) | + u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][0] + + gain->rpl_ofst_80[band][RF_PATH_B][0]) >> 1, B_P0_RTL2_80_MASK) | + u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][1] + + 
gain->rpl_ofst_80[band][RF_PATH_B][1]) >> 1, B_P0_RTL2_81_MASK) | + u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][10] + + gain->rpl_ofst_80[band][RF_PATH_B][10]) >> 1, B_P0_RTL2_8A_MASK); + rtw89_phy_write32(rtwdev, R_P0_RPL2, val); + rtw89_phy_write32(rtwdev, R_P1_RPL2, val); + + val = u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][2] + + gain->rpl_ofst_80[band][RF_PATH_B][2]) >> 1, B_P0_RTL3_82_MASK) | + u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][3] + + gain->rpl_ofst_80[band][RF_PATH_B][3]) >> 1, B_P0_RTL3_83_MASK) | + u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][4] + + gain->rpl_ofst_80[band][RF_PATH_B][4]) >> 1, B_P0_RTL3_84_MASK) | + u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][9] + + gain->rpl_ofst_80[band][RF_PATH_B][9]) >> 1, B_P0_RTL3_89_MASK); + rtw89_phy_write32(rtwdev, R_P0_RPL3, val); + rtw89_phy_write32(rtwdev, R_P1_RPL3, val); +} + +static void rtw8852bx_ctrl_ch(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + u8 central_ch = chan->channel; + u8 subband = chan->subband_type; + u8 sco_comp; + bool is_2g = central_ch <= 14; + + /* Path A */ + if (is_2g) + rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1, + B_PATH0_BAND_SEL_MSK_V1, 1, phy_idx); + else + rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1, + B_PATH0_BAND_SEL_MSK_V1, 0, phy_idx); + + /* Path B */ + if (is_2g) + rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1, + B_PATH1_BAND_SEL_MSK_V1, 1, phy_idx); + else + rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1, + B_PATH1_BAND_SEL_MSK_V1, 0, phy_idx); + + /* SCO compensate FC setting */ + sco_comp = rtw8852bx_sco_mapping(central_ch); + rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_INV, sco_comp, phy_idx); + + if (chan->band_type == RTW89_BAND_6G) + return; + + /* CCK parameters */ + if (central_ch == 14) { + rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01, 0x3b13ff); + rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23, 0x1c42de); + rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45, 0xfdb0ad); + rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67, 0xf60f6e); + rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89, 0xfd8f92); + rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB, 0x2d011); + rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD, 0x1c02c); + rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF, 0xfff00a); + } else { + rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01, 0x3d23ff); + rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23, 0x29b354); + rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45, 0xfc1c8); + rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67, 0xfdb053); + rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89, 0xf86f9a); + rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB, 0xfaef92); + rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD, 0xfe5fcc); + rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF, 0xffdff5); + } + + rtw8852bx_set_gain_error(rtwdev, subband, RF_PATH_A); + rtw8852bx_set_gain_error(rtwdev, subband, RF_PATH_B); + rtw8852bx_set_gain_offset(rtwdev, subband, phy_idx); + rtw8852bx_set_rxsc_rpl_comp(rtwdev, subband); +} + +static void rtw8852b_bw_setting(struct rtw89_dev *rtwdev, u8 bw, u8 path) +{ + static const u32 adc_sel[2] = {0xC0EC, 0xC1EC}; + static const u32 wbadc_sel[2] = {0xC0E4, 0xC1E4}; + + switch (bw) { + case RTW89_CHANNEL_WIDTH_5: + rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x1); + rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x0); + break; + case RTW89_CHANNEL_WIDTH_10: + 
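/*
 * ADC/WBADC selection per bandwidth on 8852B, as written to the
 * adc_sel/wbadc_sel fields in the cases of this switch:
 * 5 MHz -> 0x1/0x0 (above), 10 MHz -> 0x2/0x1, and 20/40/80 MHz all
 * share 0x0/0x2.
 */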
rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x2); + rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x1); + break; + case RTW89_CHANNEL_WIDTH_20: + rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0); + rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2); + break; + case RTW89_CHANNEL_WIDTH_40: + rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0); + rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2); + break; + case RTW89_CHANNEL_WIDTH_80: + rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0); + rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2); + break; + default: + rtw89_warn(rtwdev, "Fail to set ADC\n"); + } +} + +static +void rtw8852bt_adc_cfg(struct rtw89_dev *rtwdev, u8 bw, u8 path) +{ + static const u32 rck_reset_count[2] = {0xC0E8, 0xC1E8}; + static const u32 adc_op5_bw_sel[2] = {0xC0D8, 0xC1D8}; + static const u32 adc_sample_td[2] = {0xC0D4, 0xC1D4}; + static const u32 adc_rst_cycle[2] = {0xC0EC, 0xC1EC}; + static const u32 decim_filter[2] = {0xC0EC, 0xC1EC}; + static const u32 rck_offset[2] = {0xC0C4, 0xC1C4}; + static const u32 rx_adc_clk[2] = {0x12A0, 0x32A0}; + static const u32 wbadc_sel[2] = {0xC0E4, 0xC1E4}; + static const u32 idac2_1[2] = {0xC0D4, 0xC1D4}; + static const u32 idac2[2] = {0xC0D4, 0xC1D4}; + static const u32 upd_clk_adc = {0x704}; + + if (rtwdev->chip->chip_id != RTL8852BT) + return; + + rtw89_phy_write32_mask(rtwdev, idac2[path], B_P0_CFCH_CTL, 0x8); + rtw89_phy_write32_mask(rtwdev, rck_reset_count[path], B_ADCMOD_LP, 0x9); + rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], B_WDADC_SEL, 0x2); + rtw89_phy_write32_mask(rtwdev, rx_adc_clk[path], B_P0_RXCK_ADJ, 0x49); + rtw89_phy_write32_mask(rtwdev, decim_filter[path], B_DCIM_FR, 0x0); + + switch (bw) { + case RTW89_CHANNEL_WIDTH_5: + case RTW89_CHANNEL_WIDTH_10: + case RTW89_CHANNEL_WIDTH_20: + case RTW89_CHANNEL_WIDTH_40: + rtw89_phy_write32_mask(rtwdev, idac2_1[path], B_P0_CFCH_EN, 0x2); + rtw89_phy_write32_mask(rtwdev, adc_sample_td[path], B_P0_CFCH_BW0, 0x3); + rtw89_phy_write32_mask(rtwdev, adc_op5_bw_sel[path], B_P0_CFCH_BW1, 0xf); + rtw89_phy_write32_mask(rtwdev, rck_offset[path], B_DRCK_MUL, 0x0); + /* Tx TSSI ADC update */ + rtw89_phy_write32_mask(rtwdev, upd_clk_adc, B_RSTB_ASYNC_BW80, 0); + + if (rtwdev->efuse.rfe_type >= 51) + rtw89_phy_write32_mask(rtwdev, adc_rst_cycle[path], B_DCIM_RC, 0x2); + else + rtw89_phy_write32_mask(rtwdev, adc_rst_cycle[path], B_DCIM_RC, 0x3); + break; + case RTW89_CHANNEL_WIDTH_80: + rtw89_phy_write32_mask(rtwdev, idac2_1[path], B_P0_CFCH_EN, 0x2); + rtw89_phy_write32_mask(rtwdev, adc_sample_td[path], B_P0_CFCH_BW0, 0x2); + rtw89_phy_write32_mask(rtwdev, adc_op5_bw_sel[path], B_P0_CFCH_BW1, 0x8); + rtw89_phy_write32_mask(rtwdev, rck_offset[path], B_DRCK_MUL, 0x0); + rtw89_phy_write32_mask(rtwdev, adc_rst_cycle[path], B_DCIM_RC, 0x3); + /* Tx TSSI ADC update */ + rtw89_phy_write32_mask(rtwdev, upd_clk_adc, B_RSTB_ASYNC_BW80, 1); + break; + case RTW89_CHANNEL_WIDTH_160: + rtw89_phy_write32_mask(rtwdev, idac2_1[path], B_P0_CFCH_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, adc_sample_td[path], B_P0_CFCH_BW0, 0x2); + rtw89_phy_write32_mask(rtwdev, adc_op5_bw_sel[path], B_P0_CFCH_BW1, 0x4); + rtw89_phy_write32_mask(rtwdev, rck_offset[path], B_DRCK_MUL, 0x6); + rtw89_phy_write32_mask(rtwdev, adc_rst_cycle[path], B_DCIM_RC, 0x3); + /* Tx TSSI ADC update */ + rtw89_phy_write32_mask(rtwdev, upd_clk_adc, B_RSTB_ASYNC_BW80, 2); + break; + default: + rtw89_warn(rtwdev, "Fail to set ADC\n"); + break; + } +} + +static void 
rtw8852bx_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_ch, u8 bw, + enum rtw89_phy_idx phy_idx) +{ + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; + u32 rx_path_0; + u32 val; + + rx_path_0 = rtw89_phy_read32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, phy_idx); + + switch (bw) { + case RTW89_CHANNEL_WIDTH_5: + rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x1, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx); + + /*Set RF mode at 3 */ + rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, + B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, + B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx); + if (chip_id == RTL8852BT) { + rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1, + B_PATH0_BAND_NRBW_EN_V1, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1, + B_PATH1_BAND_NRBW_EN_V1, 0x0, phy_idx); + } + break; + case RTW89_CHANNEL_WIDTH_10: + rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x2, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx); + + /*Set RF mode at 3 */ + rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, + B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, + B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx); + if (chip_id == RTL8852BT) { + rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1, + B_PATH0_BAND_NRBW_EN_V1, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1, + B_PATH1_BAND_NRBW_EN_V1, 0x0, phy_idx); + } + break; + case RTW89_CHANNEL_WIDTH_20: + rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx); + + /*Set RF mode at 3 */ + rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, + B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, + B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx); + if (chip_id == RTL8852BT) { + rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1, + B_PATH0_BAND_NRBW_EN_V1, 0x1, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1, + B_PATH1_BAND_NRBW_EN_V1, 0x1, phy_idx); + } + break; + case RTW89_CHANNEL_WIDTH_40: + rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x1, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, + pri_ch, phy_idx); + + /*Set RF mode at 3 */ + rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, + B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, + B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx); + /*CCK primary channel */ + if (pri_ch == RTW89_SC_20_UPPER) + rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 1); + else + rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 0); + + break; + case RTW89_CHANNEL_WIDTH_80: + rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x2, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, + pri_ch, phy_idx); + + /*Set RF mode at A */ + val = chip_id == RTL8852BT ? 
0x333 : 0xaaa; + rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, + B_P0_RFMODE_ORI_RX_ALL, val, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, + B_P1_RFMODE_ORI_RX_ALL, val, phy_idx); + break; + default: + rtw89_warn(rtwdev, "Fail to switch bw (bw:%d, pri ch:%d)\n", bw, + pri_ch); + } + + if (chip_id == RTL8852B) { + rtw8852b_bw_setting(rtwdev, bw, RF_PATH_A); + rtw8852b_bw_setting(rtwdev, bw, RF_PATH_B); + } else if (chip_id == RTL8852BT) { + rtw8852bt_adc_cfg(rtwdev, bw, RF_PATH_A); + rtw8852bt_adc_cfg(rtwdev, bw, RF_PATH_B); + } + + if (rx_path_0 == 0x1) + rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, + B_P1_RFMODE_ORI_RX_ALL, 0x111, phy_idx); + else if (rx_path_0 == 0x2) + rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, + B_P0_RFMODE_ORI_RX_ALL, 0x111, phy_idx); +} + +static void rtw8852bx_ctrl_cck_en(struct rtw89_dev *rtwdev, bool cck_en) +{ + if (cck_en) { + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 1); + rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0); + } else { + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 0); + rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 1); + } +} + +static void rtw8852bx_5m_mask(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + u8 pri_ch = chan->pri_ch_idx; + bool mask_5m_low; + bool mask_5m_en; + + switch (chan->band_width) { + case RTW89_CHANNEL_WIDTH_40: + /* Prich=1: Mask 5M High, Prich=2: Mask 5M Low */ + mask_5m_en = true; + mask_5m_low = pri_ch == RTW89_SC_20_LOWER; + break; + case RTW89_CHANNEL_WIDTH_80: + /* Prich=3: Mask 5M High, Prich=4: Mask 5M Low, Else: Disable */ + mask_5m_en = pri_ch == RTW89_SC_20_UPMOST || + pri_ch == RTW89_SC_20_LOWEST; + mask_5m_low = pri_ch == RTW89_SC_20_LOWEST; + break; + default: + mask_5m_en = false; + break; + } + + if (!mask_5m_en) { + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x0); + rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT_V1, + B_ASSIGN_SBD_OPT_EN_V1, 0x0, phy_idx); + return; + } + + if (mask_5m_low) { + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_TH, 0x4); + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB2, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB0, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_TH, 0x4); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB2, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB0, 0x1); + } else { + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_TH, 0x4); + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB2, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB0, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_TH, 0x4); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB2, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB0, 0x0); + } + rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT_V1, + B_ASSIGN_SBD_OPT_EN_V1, 0x1, phy_idx); +} + +static void __rtw8852bx_bb_reset_all(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + 
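/*
 * Reset sequence: assert the S0/S1 HW SI read/write triggers (0x7),
 * wait briefly (fsleep(1)), pulse B_RSTB_ASYNC_ALL high then low,
 * release the SI triggers, and finally leave reset deasserted by
 * writing B_RSTB_ASYNC_ALL back to 1.
 */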
rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx); + fsleep(1); + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx); +} + +static void rtw8852bx_bb_macid_ctrl_init(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + u32 addr; + + for (addr = R_AX_PWR_MACID_LMT_TABLE0; + addr <= R_AX_PWR_MACID_LMT_TABLE127; addr += 4) + rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, 0); +} + +static void __rtw8852bx_bb_sethw(struct rtw89_dev *rtwdev) +{ + struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain; + + rtw89_phy_write32_clr(rtwdev, R_P0_EN_SOUND_WO_NDP, B_P0_EN_SOUND_WO_NDP); + rtw89_phy_write32_clr(rtwdev, R_P1_EN_SOUND_WO_NDP, B_P1_EN_SOUND_WO_NDP); + + rtw8852bx_bb_macid_ctrl_init(rtwdev, RTW89_PHY_0); + + /* read these registers after loading BB parameters */ + gain->offset_base[RTW89_PHY_0] = + rtw89_phy_read32_mask(rtwdev, R_P0_RPL1, B_P0_RPL1_BIAS_MASK); + gain->rssi_base[RTW89_PHY_0] = + rtw89_phy_read32_mask(rtwdev, R_P1_RPL1, B_P0_RPL1_BIAS_MASK); +} + +static void rtw8852bx_bb_set_pop(struct rtw89_dev *rtwdev) +{ + if (rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR) + rtw89_phy_write32_clr(rtwdev, R_PKT_CTRL, B_PKT_POP_EN); +} + +static u32 rtw8852bt_spur_freq(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan) +{ + u8 center_chan = chan->channel; + + switch (chan->band_type) { + case RTW89_BAND_5G: + if (center_chan == 151 || center_chan == 153 || + center_chan == 155 || center_chan == 163) + return 5760; + break; + default: + break; + } + + return 0; +} + +#define CARRIER_SPACING_312_5 312500 /* 312.5 kHz */ +#define CARRIER_SPACING_78_125 78125 /* 78.125 kHz */ +#define MAX_TONE_NUM 2048 + +static void rtw8852bt_set_csi_tone_idx(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + s32 freq_diff, csi_idx, csi_tone_idx; + u32 spur_freq; + + spur_freq = rtw8852bt_spur_freq(rtwdev, chan); + if (spur_freq == 0) { + rtw89_phy_write32_idx(rtwdev, R_SEG0CSI_EN_V1, B_SEG0CSI_EN, + 0, phy_idx); + return; + } + + freq_diff = (spur_freq - chan->freq) * 1000000; + csi_idx = s32_div_u32_round_closest(freq_diff, CARRIER_SPACING_78_125); + s32_div_u32_round_down(csi_idx, MAX_TONE_NUM, &csi_tone_idx); + + rtw89_phy_write32_idx(rtwdev, R_SEG0CSI_V1, B_SEG0CSI_IDX, + csi_tone_idx, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_SEG0CSI_EN_V1, B_SEG0CSI_EN, 1, phy_idx); +} + +static +void __rtw8852bx_set_channel_bb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; + bool cck_en = chan->channel <= 14; + u8 pri_ch_idx = chan->pri_ch_idx; + u8 band = chan->band_type, chan_idx; + + if (cck_en) + rtw8852bx_ctrl_sco_cck(rtwdev, chan->primary_channel); + + rtw8852bx_ctrl_ch(rtwdev, chan, phy_idx); + rtw8852bx_ctrl_bw(rtwdev, pri_ch_idx, chan->band_width, phy_idx); + rtw8852bx_ctrl_cck_en(rtwdev, cck_en); + if (chip_id == RTL8852BT) + rtw8852bt_set_csi_tone_idx(rtwdev, chan, phy_idx); + if (chip_id == RTL8852B && chan->band_type == RTW89_BAND_5G) { + rtw89_phy_write32_mask(rtwdev, 
R_PATH0_BT_SHARE_V1, + B_PATH0_BT_SHARE_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1, + B_PATH0_BTG_PATH_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1, + B_PATH1_BT_SHARE_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1, + B_PATH1_BTG_PATH_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x0); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x0); + rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1, + B_BT_DYN_DC_EST_EN_MSK, 0x0); + rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x0); + } + chan_idx = rtw89_encode_chan_idx(rtwdev, chan->primary_channel, band); + rtw89_phy_write32_mask(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0, chan_idx); + rtw8852bx_5m_mask(rtwdev, chan, phy_idx); + rtw8852bx_bb_set_pop(rtwdev); + __rtw8852bx_bb_reset_all(rtwdev, phy_idx); +} + +static u32 rtw8852bx_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, s16 ref) +{ + const u16 tssi_16dbm_cw = 0x12c; + const u8 base_cw_0db = 0x27; + const s8 ofst_int = 0; + s16 pwr_s10_3; + s16 rf_pwr_cw; + u16 bb_pwr_cw; + u32 pwr_cw; + u32 tssi_ofst_cw; + + pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3); + bb_pwr_cw = u16_get_bits(pwr_s10_3, GENMASK(2, 0)); + rf_pwr_cw = u16_get_bits(pwr_s10_3, GENMASK(8, 3)); + rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63); + pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw; + + tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3)); + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n", + tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw); + + return u32_encode_bits(tssi_ofst_cw, B_DPD_TSSI_CW) | + u32_encode_bits(pwr_cw, B_DPD_PWR_CW) | + u32_encode_bits(ref, B_DPD_REF); +} + +static void rtw8852bx_set_txpwr_ref(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + static const u32 addr[RF_PATH_NUM_8852BX] = {0x5800, 0x7800}; + const u32 mask = B_DPD_TSSI_CW | B_DPD_PWR_CW | B_DPD_REF; + const u8 ofst_ofdm = 0x4; + const u8 ofst_cck = 0x8; + const s16 ref_ofdm = 0; + const s16 ref_cck = 0; + u32 val; + u8 i; + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr reference\n"); + + rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL, + B_AX_PWR_REF, 0x0); + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n"); + val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm); + + for (i = 0; i < RF_PATH_NUM_8852BX; i++) + rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val, + phy_idx); + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n"); + val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck); + + for (i = 0; i < RF_PATH_NUM_8852BX; i++) + rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val, + phy_idx); +} + +static void rtw8852bx_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + u8 tx_shape_idx, + enum rtw89_phy_idx phy_idx) +{ +#define __DFIR_CFG_ADDR(i) (R_TXFIR0 + ((i) << 2)) +#define __DFIR_CFG_MASK 0xffffffff +#define __DFIR_CFG_NR 8 +#define __DECL_DFIR_PARAM(_name, _val...) 
\ + static const u32 param_ ## _name[] = {_val}; \ + static_assert(ARRAY_SIZE(param_ ## _name) == __DFIR_CFG_NR) + + __DECL_DFIR_PARAM(flat, + 0x023D23FF, 0x0029B354, 0x000FC1C8, 0x00FDB053, + 0x00F86F9A, 0x06FAEF92, 0x00FE5FCC, 0x00FFDFF5); + __DECL_DFIR_PARAM(sharp, + 0x023D83FF, 0x002C636A, 0x0013F204, 0x00008090, + 0x00F87FB0, 0x06F99F83, 0x00FDBFBA, 0x00003FF5); + __DECL_DFIR_PARAM(sharp_14, + 0x023B13FF, 0x001C42DE, 0x00FDB0AD, 0x00F60F6E, + 0x00FD8F92, 0x0602D011, 0x0001C02C, 0x00FFF00A); + u8 ch = chan->channel; + const u32 *param; + u32 addr; + int i; + + if (ch > 14) { + rtw89_warn(rtwdev, + "set tx shape dfir by unknown ch: %d on 2G\n", ch); + return; + } + + if (ch == 14) + param = param_sharp_14; + else + param = tx_shape_idx == 0 ? param_flat : param_sharp; + + for (i = 0; i < __DFIR_CFG_NR; i++) { + addr = __DFIR_CFG_ADDR(i); + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "set tx shape dfir: 0x%x: 0x%x\n", addr, param[i]); + rtw89_phy_write32_idx(rtwdev, addr, __DFIR_CFG_MASK, param[i], + phy_idx); + } + +#undef __DECL_DFIR_PARAM +#undef __DFIR_CFG_NR +#undef __DFIR_CFG_MASK +#undef __DECL_CFG_ADDR +} + +static void rtw8852bx_set_tx_shape(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms; + u8 band = chan->band_type; + u8 regd = rtw89_regd_get(rtwdev, band); + u8 tx_shape_cck = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_CCK][regd]; + u8 tx_shape_ofdm = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_OFDM][regd]; + + if (band == RTW89_BAND_2G) + rtw8852bx_bb_set_tx_shape_dfir(rtwdev, chan, tx_shape_cck, phy_idx); + + rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG, + tx_shape_ofdm); +} + +static void __rtw8852bx_set_txpwr(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + rtw89_phy_set_txpwr_byrate(rtwdev, chan, phy_idx); + rtw89_phy_set_txpwr_offset(rtwdev, chan, phy_idx); + rtw8852bx_set_tx_shape(rtwdev, chan, phy_idx); + rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx); + rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx); +} + +static void __rtw8852bx_set_txpwr_ctrl(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + rtw8852bx_set_txpwr_ref(rtwdev, phy_idx); +} + +static +void __rtw8852bx_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, + s8 pw_ofst, enum rtw89_mac_idx mac_idx) +{ + u32 reg; + + if (pw_ofst < -16 || pw_ofst > 15) { + rtw89_warn(rtwdev, "[ULTB] Err pwr_offset=%d\n", pw_ofst); + return; + } + + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_CTRL, mac_idx); + rtw89_write32_set(rtwdev, reg, B_AX_PWR_UL_TB_CTRL_EN); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, mac_idx); + rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, pw_ofst); + + pw_ofst = max_t(s8, pw_ofst - 3, -16); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, mac_idx); + rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_MASK, pw_ofst); +} + +static int +__rtw8852bx_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + int ret; + + ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL2, 0x07763333); + if (ret) + return ret; + + ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_COEXT_CTRL, 0x01ebf000); + if (ret) + return ret; + + ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL0, 0x0002f8ff); + if (ret) + return ret; + + rtw8852bx_set_txpwr_ul_tb_offset(rtwdev, 0, phy_idx == RTW89_PHY_1 ? 
+ RTW89_MAC_1 : RTW89_MAC_0); + + return 0; +} + +static +void __rtw8852bx_bb_set_plcp_tx(struct rtw89_dev *rtwdev) +{ + const struct rtw89_reg3_def *def = rtw8852bx_pmac_ht20_mcs7_tbl; + u8 i; + + for (i = 0; i < ARRAY_SIZE(rtw8852bx_pmac_ht20_mcs7_tbl); i++, def++) + rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data); +} + +static void rtw8852bx_stop_pmac_tx(struct rtw89_dev *rtwdev, + struct rtw8852bx_bb_pmac_info *tx_info, + enum rtw89_phy_idx idx) +{ + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Stop Tx"); + if (tx_info->mode == CONT_TX) + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 0, idx); + else if (tx_info->mode == PKTS_TX) + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 0, idx); +} + +static void rtw8852bx_start_pmac_tx(struct rtw89_dev *rtwdev, + struct rtw8852bx_bb_pmac_info *tx_info, + enum rtw89_phy_idx idx) +{ + enum rtw8852bx_pmac_mode mode = tx_info->mode; + u32 pkt_cnt = tx_info->tx_cnt; + u16 period = tx_info->period; + + if (mode == CONT_TX && !tx_info->is_cck) { + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 1, idx); + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CTx Start"); + } else if (mode == PKTS_TX) { + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 1, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, + B_PMAC_TX_PRD_MSK, period, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CNT, B_PMAC_TX_CNT_MSK, + pkt_cnt, idx); + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC PTx Start"); + } + + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 1, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 0, idx); +} + +static +void rtw8852bx_bb_set_pmac_tx(struct rtw89_dev *rtwdev, + struct rtw8852bx_bb_pmac_info *tx_info, + enum rtw89_phy_idx idx) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + + if (!tx_info->en_pmac_tx) { + rtw8852bx_stop_pmac_tx(rtwdev, tx_info, idx); + rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx); + if (chan->band_type == RTW89_BAND_2G) + rtw89_phy_write32_clr(rtwdev, R_RXCCA, B_RXCCA_DIS); + return; + } + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Tx Enable"); + + rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 1, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 1, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0x3f, idx); + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, idx); + rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 1, idx); + rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS); + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, idx); + + rtw8852bx_start_pmac_tx(rtwdev, tx_info, idx); +} + +static +void __rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable, + u16 tx_cnt, u16 period, u16 tx_time, + enum rtw89_phy_idx idx) +{ + struct rtw8852bx_bb_pmac_info tx_info = {0}; + + tx_info.en_pmac_tx = enable; + tx_info.is_cck = 0; + tx_info.mode = PKTS_TX; + tx_info.tx_cnt = tx_cnt; + tx_info.period = period; + tx_info.tx_time = tx_time; + + rtw8852bx_bb_set_pmac_tx(rtwdev, &tx_info, idx); +} + +static +void __rtw8852bx_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm, + enum rtw89_phy_idx idx) +{ + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx PWR = %d", pwr_dbm); + + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 1, idx); + rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, pwr_dbm, idx); +} + +static +void __rtw8852bx_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path) +{ + 
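/*
 * Tx path selection encoding used below: B_TXPATH_SEL_MSK takes a path
 * bitmap (1 = path A, 2 = path B, 3 = A + B), while B_TXNSS_MAP_MSK
 * stays 0 for single-path Tx and becomes 4 when both paths transmit.
 */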
rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 7, RTW89_PHY_0); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx Path = %d", tx_path); + + if (tx_path == RF_PATH_A) { + rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 1); + rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0); + } else if (tx_path == RF_PATH_B) { + rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 2); + rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0); + } else if (tx_path == RF_PATH_AB) { + rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 3); + rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 4); + } else { + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Error Tx Path"); + } +} + +static +void __rtw8852bx_bb_tx_mode_switch(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx idx, u8 mode) +{ + if (mode != 0) + return; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Tx mode switch"); + + rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 0, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 0, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_RXMOD, B_PMAC_RXMOD_MSK, 0, idx); + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_DPD_EN, 0, idx); + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0, idx); + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 0, idx); +} + +static +void __rtw8852bx_bb_backup_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, + struct rtw8852bx_bb_tssi_bak *bak) +{ + s32 tmp; + + bak->tx_path = rtw89_phy_read32_idx(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, idx); + bak->rx_path = rtw89_phy_read32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, idx); + bak->p0_rfmode = rtw89_phy_read32_idx(rtwdev, R_P0_RFMODE, MASKDWORD, idx); + bak->p0_rfmode_ftm = rtw89_phy_read32_idx(rtwdev, R_P0_RFMODE_FTM_RX, MASKDWORD, idx); + bak->p1_rfmode = rtw89_phy_read32_idx(rtwdev, R_P1_RFMODE, MASKDWORD, idx); + bak->p1_rfmode_ftm = rtw89_phy_read32_idx(rtwdev, R_P1_RFMODE_FTM_RX, MASKDWORD, idx); + tmp = rtw89_phy_read32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, idx); + bak->tx_pwr = sign_extend32(tmp, 8); +} + +static +void __rtw8852bx_bb_restore_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, + const struct rtw8852bx_bb_tssi_bak *bak) +{ + rtw89_phy_write32_idx(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, bak->tx_path, idx); + if (bak->tx_path == RF_AB) + rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0x4); + else + rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0x0); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, bak->rx_path, idx); + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 1, idx); + rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE, MASKDWORD, bak->p0_rfmode, idx); + rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_FTM_RX, MASKDWORD, bak->p0_rfmode_ftm, idx); + rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE, MASKDWORD, bak->p1_rfmode, idx); + rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_FTM_RX, MASKDWORD, bak->p1_rfmode_ftm, idx); + rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, bak->tx_pwr, idx); +} + +static void __rtw8852bx_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) +{ + rtw89_phy_write_reg3_tbl(rtwdev, en ? 
&rtw8852bx_btc_preagc_en_defs_tbl : + &rtw8852bx_btc_preagc_dis_defs_tbl); +} + +static void __rtw8852bx_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) +{ + if (en) { + rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1, + B_PATH0_BT_SHARE_V1, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1, + B_PATH0_BTG_PATH_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1, + B_PATH1_G_LNA6_OP1DB_V1, 0x20); + rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1, + B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x30); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1, + B_PATH1_BT_SHARE_V1, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1, + B_PATH1_BTG_PATH_V1, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x1); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x2); + rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1, + B_BT_DYN_DC_EST_EN_MSK, 0x1); + rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x1); + } else { + rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1, + B_PATH0_BT_SHARE_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1, + B_PATH0_BTG_PATH_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1, + B_PATH1_G_LNA6_OP1DB_V1, 0x1a); + rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1, + B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x2a); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1, + B_PATH1_BT_SHARE_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1, + B_PATH1_BTG_PATH_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0xc); + rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x0); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x0); + rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1, + B_BT_DYN_DC_EST_EN_MSK, 0x1); + rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x0); + } +} + +static +void __rtw8852bx_bb_ctrl_rx_path(struct rtw89_dev *rtwdev, + enum rtw89_rf_path_bit rx_path) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + u32 rst_mask0; + u32 rst_mask1; + + if (rx_path == RF_A) { + rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 1); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 1); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 1); + rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0); + rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0); + } else if (rx_path == RF_B) { + rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 2); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 2); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 2); + rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0); + rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0); + } else if (rx_path == RF_AB) { + rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 3); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 3); + 
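/*
 * As in the RF_A/RF_B branches above, the antenna-select fields take a
 * path bitmap (1 = A, 2 = B, 3 = A + B); with both paths active the
 * HT/VHT MCS limits and HE max-NSS fields written below move from 0
 * (one spatial stream) to 1 (two spatial streams).
 */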
rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 3); + rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 1); + rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 1); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1); + } + + rtw8852bx_set_gain_offset(rtwdev, chan->subband_type, RTW89_PHY_0); + + if (chan->band_type == RTW89_BAND_2G && + (rx_path == RF_B || rx_path == RF_AB)) + rtw8852bx_ctrl_btg_bt_rx(rtwdev, true, RTW89_PHY_0); + else + rtw8852bx_ctrl_btg_bt_rx(rtwdev, false, RTW89_PHY_0); + + rst_mask0 = B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI; + rst_mask1 = B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI; + if (rx_path == RF_A) { + rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 1); + rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 3); + } else { + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 1); + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 3); + } +} + +static void rtw8852bx_bb_ctrl_rf_mode_rx_path(struct rtw89_dev *rtwdev, + enum rtw89_rf_path_bit rx_path) +{ + if (rx_path == RF_A) { + rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, + B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1233312); + rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX, + B_P0_RFMODE_FTM_RX, 0x333); + rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, + B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1111111); + rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX, + B_P1_RFMODE_FTM_RX, 0x111); + } else if (rx_path == RF_B) { + rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, + B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1111111); + rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX, + B_P0_RFMODE_FTM_RX, 0x111); + rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, + B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1233312); + rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX, + B_P1_RFMODE_FTM_RX, 0x333); + } else if (rx_path == RF_AB) { + rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, + B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1233312); + rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX, + B_P0_RFMODE_FTM_RX, 0x333); + rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, + B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1233312); + rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX, + B_P1_RFMODE_FTM_RX, 0x333); + } +} + +static void __rtw8852bx_bb_cfg_txrx_path(struct rtw89_dev *rtwdev) +{ + struct rtw89_hal *hal = &rtwdev->hal; + enum rtw89_rf_path_bit rx_path = hal->antenna_rx ? 
hal->antenna_rx : RF_AB; + + rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path); + rtw8852bx_bb_ctrl_rf_mode_rx_path(rtwdev, rx_path); + + if (rtwdev->hal.rx_nss == 1) { + rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0); + rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0); + } else { + rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 1); + rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 1); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1); + } + + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0x0, RTW89_PHY_0); +} + +static u8 __rtw8852bx_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path) +{ + if (rtwdev->is_tssi_mode[rf_path]) { + u32 addr = 0x1c10 + (rf_path << 13); + + return rtw89_phy_read32_mask(rtwdev, addr, 0x3F000000); + } + + rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1); + rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x0); + rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1); + + fsleep(200); + + return rtw89_read_rf(rtwdev, rf_path, RR_TM, RR_TM_VAL); +} + +static +void rtw8852bx_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val) +{ + rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x20000); + rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, group); + rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, val); + rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0); +} + +static void __rtw8852bx_btc_init_cfg(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + const struct rtw89_chip_info *chip = rtwdev->chip; + const struct rtw89_mac_ax_coex coex_params = { + .pta_mode = RTW89_MAC_AX_COEX_RTK_MODE, + .direction = RTW89_MAC_AX_COEX_INNER, + }; + + /* PTA init */ + rtw89_mac_coex_init(rtwdev, &coex_params); + + /* set WL Tx response = Hi-Pri */ + chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_TX_RESP, true); + chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_BEACON, true); + + /* set rf gnt debug off */ + rtw89_write_rf(rtwdev, RF_PATH_A, RR_WLSEL, RFREG_MASK, 0x0); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, RFREG_MASK, 0x0); + + /* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */ + if (btc->ant_type == BTC_ANT_SHARED) { + rtw8852bx_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff); + rtw8852bx_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_SS_GROUP, 0x5ff); + /* set path-A(S0) Tx/Rx no-mask if GNT_WL=0 && BT_S1=tx group */ + rtw8852bx_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff); + rtw8852bx_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_TX_GROUP, 0x55f); + } else { /* set WL Tx stb if GNT_WL = 0 && BT_S1 = ss group for 3-ant */ + rtw8852bx_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5df); + rtw8852bx_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_SS_GROUP, 0x5df); + rtw8852bx_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff); + rtw8852bx_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_TX_GROUP, 0x5ff); + } + + if (rtwdev->chip->chip_id == RTL8852BT) { + rtw8852bx_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_RX_GROUP, 0x5df); + rtw8852bx_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_RX_GROUP, 0x5df); + } + + /* set PTA break table */ + rtw89_write32(rtwdev, R_BTC_BREAK_TABLE, BTC_BREAK_PARAM); + + /* enable BT counter 0xda40[16,2] = 2b'11 */ + rtw89_write32_set(rtwdev, R_AX_CSR_MODE, B_AX_BT_CNT_RST | B_AX_STATIS_BT_EN); + 
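/*
 * The rtw8852bx_set_trx_mask() calls above follow the indirect RF LUT
 * write pattern used throughout this file: open the table via RR_LUTWE
 * (0x20000 here), select the entry with RR_LUTWA, write the data
 * through RR_LUTWD0, then write RR_LUTWE back to 0 to close it.
 */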
+	btc->cx.wl.status.map.init_ok = true;
+}
+
+static
+void __rtw8852bx_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state)
+{
+	u32 bitmap;
+	u32 reg;
+
+	switch (map) {
+	case BTC_PRI_MASK_TX_RESP:
+		reg = R_BTC_BT_COEX_MSK_TABLE;
+		bitmap = B_BTC_PRI_MASK_TX_RESP_V1;
+		break;
+	case BTC_PRI_MASK_BEACON:
+		reg = R_AX_WL_PRI_MSK;
+		bitmap = B_AX_PTA_WL_PRI_MASK_BCNQ;
+		break;
+	case BTC_PRI_MASK_RX_CCK:
+		reg = R_BTC_BT_COEX_MSK_TABLE;
+		bitmap = B_BTC_PRI_MASK_RXCCK_V1;
+		break;
+	default:
+		return;
+	}
+
+	if (state)
+		rtw89_write32_set(rtwdev, reg, bitmap);
+	else
+		rtw89_write32_clr(rtwdev, reg, bitmap);
+}
+
+static
+s8 __rtw8852bx_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val)
+{
+	/* +6 to compensate for the offset */
+	return clamp_t(s8, val + 6, -100, 0) + 100;
+}
+
+static
+void __rtw8852bx_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
+{
+	/* Feature moved to firmware */
+}
+
+static void __rtw8852bx_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
+{
+	rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x80000);
+	rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+	rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD1, RFREG_MASK, 0x31);
+
+	/* set WL standby = Rx for GNT_BT_Tx = 1->0 settle issue */
+	if (state)
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x179);
+	else
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x20);
+
+	rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+}
+
+static void rtw8852bx_btc_set_wl_lna2(struct rtw89_dev *rtwdev, u8 level)
+{
+	switch (level) {
+	case 0: /* default */
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0);
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17);
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17);
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+		break;
+	case 1: /* Fix LNA2=5 */
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0);
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5);
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5);
+		rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+		break;
+	}
+}
+
+static void __rtw8852bx_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
+{
+	struct rtw89_btc *btc = &rtwdev->btc;
+
+	switch (level) {
+	case 0: /* original */
+	default:
+		rtw8852bx_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0);
+		btc->dm.wl_lna2 = 0;
+		break;
+	case 1: /* for FDD free-run */
+		rtw8852bx_ctrl_nbtg_bt_tx(rtwdev, true, RTW89_PHY_0);
+		btc->dm.wl_lna2 = 0;
+		break;
+	case 2: /* for BTG Co-Rx */
+		rtw8852bx_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0);
+		btc->dm.wl_lna2 = 1;
+		break;
+	}
+
+	rtw8852bx_btc_set_wl_lna2(rtwdev, btc->dm.wl_lna2);
+}
+
+static void
rtw8852bx_fill_freq_with_ppdu(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu, + struct ieee80211_rx_status *status) +{ + u16 chan = phy_ppdu->chan_idx; + enum nl80211_band band; + u8 ch; + + if (chan == 0) + return; + + rtw89_decode_chan_idx(rtwdev, chan, &ch, &band); + status->freq = ieee80211_channel_to_frequency(ch, band); + status->band = band; +} + +static void __rtw8852bx_query_ppdu(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu, + struct ieee80211_rx_status *status) +{ + u8 path; + u8 *rx_power = phy_ppdu->rssi; + + status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B])); + for (path = 0; path < rtwdev->chip->rf_path_num; path++) { + status->chains |= BIT(path); + status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]); + } + if (phy_ppdu->valid) + rtw8852bx_fill_freq_with_ppdu(rtwdev, phy_ppdu, status); +} + +static int __rtw8852bx_mac_enable_bb_rf(struct rtw89_dev *rtwdev) +{ + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; + u32 val32; + int ret; + + rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN, + B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); + rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_REG_ZCDC_H_MASK, 0x1); + rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); + rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); + rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); + + if (chip_id == RTL8852BT) { + val32 = rtw89_read32(rtwdev, R_AX_AFE_OFF_CTRL1); + val32 = u32_replace_bits(val32, 0x1, B_AX_S0_LDO_VSEL_F_MASK); + val32 = u32_replace_bits(val32, 0x1, B_AX_S1_LDO_VSEL_F_MASK); + rtw89_write32(rtwdev, R_AX_AFE_OFF_CTRL1, val32); + } + + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0xC7, + FULL_BIT_MASK); + if (ret) + return ret; + + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0xC7, + FULL_BIT_MASK); + if (ret) + return ret; + + rtw89_write8(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_XYN_CYCLE); + + return 0; +} + +static int __rtw8852bx_mac_disable_bb_rf(struct rtw89_dev *rtwdev) +{ + u8 wl_rfc_s0; + u8 wl_rfc_s1; + int ret; + + rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); + rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, + B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); + + ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, &wl_rfc_s0); + if (ret) + return ret; + wl_rfc_s0 &= ~XTAL_SI_RF00S_EN; + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, wl_rfc_s0, + FULL_BIT_MASK); + if (ret) + return ret; + + ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, &wl_rfc_s1); + if (ret) + return ret; + wl_rfc_s1 &= ~XTAL_SI_RF10S_EN; + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, wl_rfc_s1, + FULL_BIT_MASK); + return ret; +} + +const struct rtw8852bx_info rtw8852bx_info = { + .mac_enable_bb_rf = __rtw8852bx_mac_enable_bb_rf, + .mac_disable_bb_rf = __rtw8852bx_mac_disable_bb_rf, + .bb_sethw = __rtw8852bx_bb_sethw, + .bb_reset_all = __rtw8852bx_bb_reset_all, + .bb_cfg_txrx_path = __rtw8852bx_bb_cfg_txrx_path, + .bb_cfg_tx_path = __rtw8852bx_bb_cfg_tx_path, + .bb_ctrl_rx_path = __rtw8852bx_bb_ctrl_rx_path, + .bb_set_plcp_tx = __rtw8852bx_bb_set_plcp_tx, + .bb_set_power = __rtw8852bx_bb_set_power, + .bb_set_pmac_pkt_tx = __rtw8852bx_bb_set_pmac_pkt_tx, + .bb_backup_tssi = __rtw8852bx_bb_backup_tssi, + .bb_restore_tssi = __rtw8852bx_bb_restore_tssi, + .bb_tx_mode_switch = __rtw8852bx_bb_tx_mode_switch, + .set_channel_mac = __rtw8852bx_set_channel_mac, + .set_channel_bb = __rtw8852bx_set_channel_bb, + .ctrl_nbtg_bt_tx = 
__rtw8852bx_ctrl_nbtg_bt_tx, + .ctrl_btg_bt_rx = __rtw8852bx_ctrl_btg_bt_rx, + .query_ppdu = __rtw8852bx_query_ppdu, + .read_efuse = __rtw8852bx_read_efuse, + .read_phycap = __rtw8852bx_read_phycap, + .power_trim = __rtw8852bx_power_trim, + .set_txpwr = __rtw8852bx_set_txpwr, + .set_txpwr_ctrl = __rtw8852bx_set_txpwr_ctrl, + .init_txpwr_unit = __rtw8852bx_init_txpwr_unit, + .set_txpwr_ul_tb_offset = __rtw8852bx_set_txpwr_ul_tb_offset, + .get_thermal = __rtw8852bx_get_thermal, + .adc_cfg = rtw8852bt_adc_cfg, + .btc_init_cfg = __rtw8852bx_btc_init_cfg, + .btc_set_wl_pri = __rtw8852bx_btc_set_wl_pri, + .btc_get_bt_rssi = __rtw8852bx_btc_get_bt_rssi, + .btc_update_bt_cnt = __rtw8852bx_btc_update_bt_cnt, + .btc_wl_s1_standby = __rtw8852bx_btc_wl_s1_standby, + .btc_set_wl_rx_gain = __rtw8852bx_btc_set_wl_rx_gain, +}; +EXPORT_SYMBOL(rtw8852bx_info); + +MODULE_AUTHOR("Realtek Corporation"); +MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852B common routines"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h new file mode 100644 index 000000000000..801e7ab9f4fa --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h @@ -0,0 +1,388 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2024 Realtek Corporation + */ + +#ifndef __RTW89_8852BX_H__ +#define __RTW89_8852BX_H__ + +#include "core.h" + +#define RF_PATH_NUM_8852BX 2 +#define BB_PATH_NUM_8852BX 2 + +enum rtw8852bx_pmac_mode { + NONE_TEST, + PKTS_TX, + PKTS_RX, + CONT_TX +}; + +struct rtw8852bx_u_efuse { + u8 rsvd[0x88]; + u8 mac_addr[ETH_ALEN]; +}; + +struct rtw8852bx_e_efuse { + u8 mac_addr[ETH_ALEN]; +}; + +struct rtw8852bx_tssi_offset { + u8 cck_tssi[TSSI_CCK_CH_GROUP_NUM]; + u8 bw40_tssi[TSSI_MCS_2G_CH_GROUP_NUM]; + u8 rsvd[7]; + u8 bw40_1s_tssi_5g[TSSI_MCS_5G_CH_GROUP_NUM]; +} __packed; + +struct rtw8852bx_efuse { + u8 rsvd[0x210]; + struct rtw8852bx_tssi_offset path_a_tssi; + u8 rsvd1[10]; + struct rtw8852bx_tssi_offset path_b_tssi; + u8 rsvd2[94]; + u8 channel_plan; + u8 xtal_k; + u8 rsvd3; + u8 iqk_lck; + u8 rsvd4[5]; + u8 reg_setting:2; + u8 tx_diversity:1; + u8 rx_diversity:2; + u8 ac_mode:1; + u8 module_type:2; + u8 rsvd5; + u8 shared_ant:1; + u8 coex_type:3; + u8 ant_iso:1; + u8 radio_on_off:1; + u8 rsvd6:2; + u8 eeprom_version; + u8 customer_id; + u8 tx_bb_swing_2g; + u8 tx_bb_swing_5g; + u8 tx_cali_pwr_trk_mode; + u8 trx_path_selection; + u8 rfe_type; + u8 country_code[2]; + u8 rsvd7[3]; + u8 path_a_therm; + u8 path_b_therm; + u8 rsvd8[2]; + u8 rx_gain_2g_ofdm; + u8 rsvd9; + u8 rx_gain_2g_cck; + u8 rsvd10; + u8 rx_gain_5g_low; + u8 rsvd11; + u8 rx_gain_5g_mid; + u8 rsvd12; + u8 rx_gain_5g_high; + u8 rsvd13[35]; + u8 path_a_cck_pwr_idx[6]; + u8 path_a_bw40_1tx_pwr_idx[5]; + u8 path_a_ofdm_1tx_pwr_idx_diff:4; + u8 path_a_bw20_1tx_pwr_idx_diff:4; + u8 path_a_bw20_2tx_pwr_idx_diff:4; + u8 path_a_bw40_2tx_pwr_idx_diff:4; + u8 path_a_cck_2tx_pwr_idx_diff:4; + u8 path_a_ofdm_2tx_pwr_idx_diff:4; + u8 rsvd14[0xf2]; + union { + struct rtw8852bx_u_efuse u; + struct rtw8852bx_e_efuse e; + }; +} __packed; + +struct rtw8852bx_bb_pmac_info { + u8 en_pmac_tx:1; + u8 is_cck:1; + u8 mode:3; + u8 rsvd:3; + u16 tx_cnt; + u16 period; + u16 tx_time; + u8 duty_cycle; +}; + +struct rtw8852bx_bb_tssi_bak { + u8 tx_path; + u8 rx_path; + u32 p0_rfmode; + u32 p0_rfmode_ftm; + u32 p1_rfmode; + u32 p1_rfmode_ftm; + s16 tx_pwr; /* S9 */ +}; + +struct rtw8852bx_info { + int (*mac_enable_bb_rf)(struct rtw89_dev 
*rtwdev); + int (*mac_disable_bb_rf)(struct rtw89_dev *rtwdev); + void (*bb_sethw)(struct rtw89_dev *rtwdev); + void (*bb_reset_all)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); + void (*bb_cfg_txrx_path)(struct rtw89_dev *rtwdev); + void (*bb_cfg_tx_path)(struct rtw89_dev *rtwdev, u8 tx_path); + void (*bb_ctrl_rx_path)(struct rtw89_dev *rtwdev, + enum rtw89_rf_path_bit rx_path); + void (*bb_set_plcp_tx)(struct rtw89_dev *rtwdev); + void (*bb_set_power)(struct rtw89_dev *rtwdev, s16 pwr_dbm, + enum rtw89_phy_idx idx); + void (*bb_set_pmac_pkt_tx)(struct rtw89_dev *rtwdev, u8 enable, + u16 tx_cnt, u16 period, u16 tx_time, + enum rtw89_phy_idx idx); + void (*bb_backup_tssi)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, + struct rtw8852bx_bb_tssi_bak *bak); + void (*bb_restore_tssi)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, + const struct rtw8852bx_bb_tssi_bak *bak); + void (*bb_tx_mode_switch)(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx idx, u8 mode); + void (*set_channel_mac)(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, u8 mac_idx); + void (*set_channel_bb)(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx); + void (*ctrl_nbtg_bt_tx)(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx); + void (*ctrl_btg_bt_rx)(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx); + void (*query_ppdu)(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu, + struct ieee80211_rx_status *status); + int (*read_efuse)(struct rtw89_dev *rtwdev, u8 *log_map, + enum rtw89_efuse_block block); + int (*read_phycap)(struct rtw89_dev *rtwdev, u8 *phycap_map); + void (*power_trim)(struct rtw89_dev *rtwdev); + void (*set_txpwr)(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx); + void (*set_txpwr_ctrl)(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx); + int (*init_txpwr_unit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); + void (*set_txpwr_ul_tb_offset)(struct rtw89_dev *rtwdev, + s8 pw_ofst, enum rtw89_mac_idx mac_idx); + u8 (*get_thermal)(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path); + void (*adc_cfg)(struct rtw89_dev *rtwdev, u8 bw, u8 path); + void (*btc_init_cfg)(struct rtw89_dev *rtwdev); + void (*btc_set_wl_pri)(struct rtw89_dev *rtwdev, u8 map, bool state); + s8 (*btc_get_bt_rssi)(struct rtw89_dev *rtwdev, s8 val); + void (*btc_update_bt_cnt)(struct rtw89_dev *rtwdev); + void (*btc_wl_s1_standby)(struct rtw89_dev *rtwdev, bool state); + void (*btc_set_wl_rx_gain)(struct rtw89_dev *rtwdev, u32 level); +}; + +extern const struct rtw8852bx_info rtw8852bx_info; + +static inline +int rtw8852bx_mac_enable_bb_rf(struct rtw89_dev *rtwdev) +{ + return rtw8852bx_info.mac_enable_bb_rf(rtwdev); +} + +static inline +int rtw8852bx_mac_disable_bb_rf(struct rtw89_dev *rtwdev) +{ + return rtw8852bx_info.mac_disable_bb_rf(rtwdev); +} + +static inline +void rtw8852bx_bb_sethw(struct rtw89_dev *rtwdev) +{ + rtw8852bx_info.bb_sethw(rtwdev); +} + +static inline +void rtw8852bx_bb_reset_all(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + rtw8852bx_info.bb_reset_all(rtwdev, phy_idx); +} + +static inline +void rtw8852bx_bb_cfg_txrx_path(struct rtw89_dev *rtwdev) +{ + rtw8852bx_info.bb_cfg_txrx_path(rtwdev); +} + +static inline +void rtw8852bx_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path) +{ + rtw8852bx_info.bb_cfg_tx_path(rtwdev, tx_path); +} + +static inline +void rtw8852bx_bb_ctrl_rx_path(struct rtw89_dev *rtwdev, + enum rtw89_rf_path_bit rx_path) +{ 
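+	/* each inline wrapper below simply dispatches through the shared
+	 * rtw8852bx_info ops table so 8852B and 8852BT reuse one implementation
+	 */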
+ rtw8852bx_info.bb_ctrl_rx_path(rtwdev, rx_path); +} + +static inline +void rtw8852bx_bb_set_plcp_tx(struct rtw89_dev *rtwdev) +{ + rtw8852bx_info.bb_set_plcp_tx(rtwdev); +} + +static inline +void rtw8852bx_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm, + enum rtw89_phy_idx idx) +{ + rtw8852bx_info.bb_set_power(rtwdev, pwr_dbm, idx); +} + +static inline +void rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable, + u16 tx_cnt, u16 period, u16 tx_time, + enum rtw89_phy_idx idx) +{ + rtw8852bx_info.bb_set_pmac_pkt_tx(rtwdev, enable, tx_cnt, period, tx_time, idx); +} + +static inline +void rtw8852bx_bb_backup_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, + struct rtw8852bx_bb_tssi_bak *bak) +{ + rtw8852bx_info.bb_backup_tssi(rtwdev, idx, bak); +} + +static inline +void rtw8852bx_bb_restore_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, + const struct rtw8852bx_bb_tssi_bak *bak) +{ + rtw8852bx_info.bb_restore_tssi(rtwdev, idx, bak); +} + +static inline +void rtw8852bx_bb_tx_mode_switch(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx idx, u8 mode) +{ + rtw8852bx_info.bb_tx_mode_switch(rtwdev, idx, mode); +} + +static inline +void rtw8852bx_set_channel_mac(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, u8 mac_idx) +{ + rtw8852bx_info.set_channel_mac(rtwdev, chan, mac_idx); +} + +static inline +void rtw8852bx_set_channel_bb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + rtw8852bx_info.set_channel_bb(rtwdev, chan, phy_idx); +} + +static inline +void rtw8852bx_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) +{ + rtw8852bx_info.ctrl_nbtg_bt_tx(rtwdev, en, phy_idx); +} + +static inline +void rtw8852bx_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) +{ + rtw8852bx_info.ctrl_btg_bt_rx(rtwdev, en, phy_idx); +} + +static inline +void rtw8852bx_query_ppdu(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu, + struct ieee80211_rx_status *status) +{ + rtw8852bx_info.query_ppdu(rtwdev, phy_ppdu, status); +} + +static inline +int rtw8852bx_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map, + enum rtw89_efuse_block block) +{ + return rtw8852bx_info.read_efuse(rtwdev, log_map, block); +} + +static inline +int rtw8852bx_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map) +{ + return rtw8852bx_info.read_phycap(rtwdev, phycap_map); +} + +static inline +void rtw8852bx_power_trim(struct rtw89_dev *rtwdev) +{ + rtw8852bx_info.power_trim(rtwdev); +} + +static inline +void rtw8852bx_set_txpwr(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + rtw8852bx_info.set_txpwr(rtwdev, chan, phy_idx); +} + +static inline +void rtw8852bx_set_txpwr_ctrl(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + rtw8852bx_info.set_txpwr_ctrl(rtwdev, phy_idx); +} + +static inline +int rtw8852bx_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + return rtw8852bx_info.init_txpwr_unit(rtwdev, phy_idx); +} + +static inline +void rtw8852bx_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, + s8 pw_ofst, enum rtw89_mac_idx mac_idx) +{ + rtw8852bx_info.set_txpwr_ul_tb_offset(rtwdev, pw_ofst, mac_idx); +} + +static inline +u8 rtw8852bx_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path) +{ + return rtw8852bx_info.get_thermal(rtwdev, rf_path); +} + +static inline +void rtw8852bx_adc_cfg(struct rtw89_dev *rtwdev, u8 bw, u8 path) +{ + rtw8852bx_info.adc_cfg(rtwdev, bw, path); +} + +static inline +void 
rtw8852bx_btc_init_cfg(struct rtw89_dev *rtwdev) +{ + rtw8852bx_info.btc_init_cfg(rtwdev); +} + +static inline +void rtw8852bx_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state) +{ + rtw8852bx_info.btc_set_wl_pri(rtwdev, map, state); +} + +static inline +s8 rtw8852bx_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val) +{ + return rtw8852bx_info.btc_get_bt_rssi(rtwdev, val); +} + +static inline +void rtw8852bx_btc_update_bt_cnt(struct rtw89_dev *rtwdev) +{ + rtw8852bx_info.btc_update_bt_cnt(rtwdev); +} + +static inline +void rtw8852bx_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state) +{ + rtw8852bx_info.btc_wl_s1_standby(rtwdev, state); +} + +static inline +void rtw8852bx_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level) +{ + rtw8852bx_info.btc_set_wl_rx_gain(rtwdev, level); +} + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c index 259df67836a0..12354612441c 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c @@ -8,6 +8,7 @@ #include "phy.h" #include "reg.h" #include "rtw8852b.h" +#include "rtw8852b_common.h" #include "rtw8852b_rfk.h" #include "rtw8852b_rfk_table.h" #include "rtw8852b_table.h" @@ -20,7 +21,7 @@ #define RTW8852B_RF_REL_VERSION 34 #define RTW8852B_DPK_VER 0x0d #define RTW8852B_DPK_RF_PATH 2 -#define RTW8852B_DPK_KIP_REG_NUM 2 +#define RTW8852B_DPK_KIP_REG_NUM 3 #define _TSSI_DE_MASK GENMASK(21, 12) #define ADDC_T_AVG 100 @@ -3433,13 +3434,13 @@ static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, rx_path = RF_ABCD; /* don't change path, but still set others */ if (enable) { - rtw8852b_bb_set_plcp_tx(rtwdev); - rtw8852b_bb_cfg_tx_path(rtwdev, path); - rtw8852b_bb_ctrl_rx_path(rtwdev, rx_path); - rtw8852b_bb_set_power(rtwdev, pwr_dbm, phy); + rtw8852bx_bb_set_plcp_tx(rtwdev); + rtw8852bx_bb_cfg_tx_path(rtwdev, path); + rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path); + rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy); } - rtw8852b_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy); + rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy); } static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev, @@ -3578,7 +3579,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u32 tssi_cw_rpt[RTW8852B_TSSI_PATH_NR] = {0}; u8 channel = chan->channel; u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel); - struct rtw8852b_bb_tssi_bak tssi_bak; + struct rtw8852bx_bb_tssi_bak tssi_bak; s32 aliment_diff, tssi_cw_default; u32 start_time, finish_time; u32 bb_reg_backup[8] = {0}; @@ -3626,7 +3627,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, else band = TSSI_ALIMK_2G; - rtw8852b_bb_backup_tssi(rtwdev, phy, &tssi_bak); + rtw8852bx_bb_backup_tssi(rtwdev, phy, &tssi_bak); _tssi_backup_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup)); rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x8); @@ -3730,8 +3731,8 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, out: _tssi_reload_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup)); - rtw8852b_bb_restore_tssi(rtwdev, phy, &tssi_bak); - rtw8852b_bb_tx_mode_switch(rtwdev, phy, 0); + rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak); + rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0); finish_time = ktime_get_ns(); tssi_info->tssi_alimk_time += finish_time - start_time; diff --git 
a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c index 5f941122655c..d8f9d92ca0fb 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c @@ -46,6 +46,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = { .rpwm_addr = R_AX_PCIE_HRPWM, .cpwm_addr = R_AX_CPWM, .mit_addr = R_AX_INT_MIT_RX, + .wp_sel_addr = 0, .tx_dma_ch_mask = BIT(RTW89_TXCH_ACH4) | BIT(RTW89_TXCH_ACH5) | BIT(RTW89_TXCH_ACH6) | BIT(RTW89_TXCH_ACH7) | BIT(RTW89_TXCH_CH10) | BIT(RTW89_TXCH_CH11), diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.h b/drivers/net/wireless/realtek/rtw89/rtw8852bt.h new file mode 100644 index 000000000000..6177f36ad667 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2024 Realtek Corporation + */ + +#ifndef __RTW89_8852BT_H__ +#define __RTW89_8852BT_H__ + +#include "core.h" + +#define RF_PATH_NUM_8852BT 2 +#define BB_PATH_NUM_8852BT 2 + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c new file mode 100644 index 000000000000..fa0e49d58112 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c @@ -0,0 +1,4019 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2024 Realtek Corporation + */ + +#include "coex.h" +#include "debug.h" +#include "fw.h" +#include "mac.h" +#include "phy.h" +#include "reg.h" +#include "rtw8852bt.h" +#include "rtw8852bt_rfk.h" +#include "rtw8852bt_rfk_table.h" +#include "rtw8852b_common.h" + +#define RTW8852BT_RXDCK_VER 0x1 +#define RTW8852BT_IQK_VER 0x2a +#define RTW8852BT_SS 2 +#define RTW8852BT_TSSI_PATH_NR 2 +#define RTW8852BT_DPK_VER 0x06 +#define DPK_RF_PATH_MAX_8852BT 2 + +#define _TSSI_DE_MASK GENMASK(21, 12) +#define DPK_TXAGC_LOWER 0x2e +#define DPK_TXAGC_UPPER 0x3f +#define DPK_TXAGC_INVAL 0xff +#define RFREG_MASKRXBB 0x003e0 +#define RFREG_MASKMODE 0xf0000 + +enum rf_mode { + RF_SHUT_DOWN = 0x0, + RF_STANDBY = 0x1, + RF_TX = 0x2, + RF_RX = 0x3, + RF_TXIQK = 0x4, + RF_DPK = 0x5, + RF_RXK1 = 0x6, + RF_RXK2 = 0x7, +}; + +enum rtw8852bt_dpk_id { + LBK_RXIQK = 0x06, + SYNC = 0x10, + MDPK_IDL = 0x11, + MDPK_MPA = 0x12, + GAIN_LOSS = 0x13, + GAIN_CAL = 0x14, + DPK_RXAGC = 0x15, + KIP_PRESET = 0x16, + KIP_RESTORE = 0x17, + DPK_TXAGC = 0x19, + D_KIP_PRESET = 0x28, + D_TXAGC = 0x29, + D_RXAGC = 0x2a, + D_SYNC = 0x2b, + D_GAIN_LOSS = 0x2c, + D_MDPK_IDL = 0x2d, + D_GAIN_NORM = 0x2f, + D_KIP_THERMAL = 0x30, + D_KIP_RESTORE = 0x31 +}; + +enum dpk_agc_step { + DPK_AGC_STEP_SYNC_DGAIN, + DPK_AGC_STEP_GAIN_ADJ, + DPK_AGC_STEP_GAIN_LOSS_IDX, + DPK_AGC_STEP_GL_GT_CRITERION, + DPK_AGC_STEP_GL_LT_CRITERION, + DPK_AGC_STEP_SET_TX_GAIN, +}; + +enum rtw8852bt_iqk_type { + ID_TXAGC = 0x0, + ID_FLOK_COARSE = 0x1, + ID_FLOK_FINE = 0x2, + ID_TXK = 0x3, + ID_RXAGC = 0x4, + ID_RXK = 0x5, + ID_NBTXK = 0x6, + ID_NBRXK = 0x7, + ID_FLOK_VBUFFER = 0x8, + ID_A_FLOK_COARSE = 0x9, + ID_G_FLOK_COARSE = 0xa, + ID_A_FLOK_FINE = 0xb, + ID_G_FLOK_FINE = 0xc, + ID_IQK_RESTORE = 0x10, +}; + +enum adc_ck { + ADC_NA = 0, + ADC_480M = 1, + ADC_960M = 2, + ADC_1920M = 3, +}; + +enum dac_ck { + DAC_40M = 0, + DAC_80M = 1, + DAC_120M = 2, + DAC_160M = 3, + DAC_240M = 4, + DAC_320M = 5, + DAC_480M = 6, + DAC_960M = 7, +}; + +static const u32 _tssi_trigger[RTW8852BT_TSSI_PATH_NR] = {0x5820, 0x7820}; +static const u32 _tssi_cw_rpt_addr[RTW8852BT_TSSI_PATH_NR] = {0x1c18, 0x3c18}; 
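+/* per-path register tables: each path-B address mirrors path A at a
+ * +0x2000 stride, matching the (path << 13) offsetting used in this file
+ */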
+static const u32 _tssi_cw_default_addr[RTW8852BT_TSSI_PATH_NR][4] = { + {0x5634, 0x5630, 0x5630, 0x5630}, + {0x7634, 0x7630, 0x7630, 0x7630} }; +static const u32 _tssi_cw_default_mask[4] = { + 0x000003ff, 0x3ff00000, 0x000ffc00, 0x000003ff}; +static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852BT] = {0x5858, 0x7858}; +static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852BT] = {0x5860, 0x7860}; +static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852BT] = {0x5838, 0x7838}; +static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852BT] = {0x5840, 0x7840}; +static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852BT] = {0x5848, 0x7848}; +static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852BT] = {0x5850, 0x7850}; +static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852BT] = {0x5828, 0x7828}; +static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852BT] = {0x5830, 0x7830}; + +static const u32 rtw8852bt_backup_bb_regs[] = {0x2344, 0x5800, 0x7800, 0x0704}; +static const u32 rtw8852bt_backup_rf_regs[] = { + 0xde, 0xdf, 0x8b, 0x90, 0x97, 0x85, 0x5, 0x10005}; +static const u32 rtw8852bt_backup_kip_regs[] = { + 0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8, 0xc0c4, 0xc0ec, + 0x823c, 0x8224, 0x8220, 0xc1d4, 0xc1d8, 0xc1c4, 0xc1ec}; + +#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852bt_backup_bb_regs) +#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852bt_backup_rf_regs) +#define BACKUP_KIP_REGS_NR ARRAY_SIZE(rtw8852bt_backup_kip_regs) + +static void _rfk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx, enum rtw89_rf_path path) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1); + rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x0); + rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1); + + udelay(200); + + dpk->bp[path][kidx].ther_dpk = rtw89_read_rf(rtwdev, path, RR_TM, RR_TM_VAL); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n", + dpk->bp[path][kidx].ther_dpk); +} + +static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[]) +{ + u32 i; + + for (i = 0; i < BACKUP_BB_REGS_NR; i++) { + backup_bb_reg_val[i] = + rtw89_phy_read32_mask(rtwdev, rtw8852bt_backup_bb_regs[i], MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RFK]backup bb reg : %x, value =%x\n", + rtw8852bt_backup_bb_regs[i], backup_bb_reg_val[i]); + } +} + +static void _rfk_backup_kip_reg(struct rtw89_dev *rtwdev, u32 backup_kip_reg_val[]) +{ + u32 i; + + for (i = 0; i < BACKUP_KIP_REGS_NR; i++) { + backup_kip_reg_val[i] = + rtw89_phy_read32_mask(rtwdev, rtw8852bt_backup_kip_regs[i], + MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n", + rtw8852bt_backup_kip_regs[i], backup_kip_reg_val[i]); + } +} + +static +void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[], u8 rf_path) +{ + u32 i; + + for (i = 0; i < BACKUP_RF_REGS_NR; i++) { + backup_rf_reg_val[i] = + rtw89_read_rf(rtwdev, rf_path, rtw8852bt_backup_rf_regs[i], + RFREG_MASK); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup RF S%d 0x%x = %x\n", + rf_path, rtw8852bt_backup_rf_regs[i], backup_rf_reg_val[i]); + } +} + +static void _rfk_reload_bb_reg(struct rtw89_dev *rtwdev, const u32 backup_bb_reg_val[]) +{ + u32 i; + + for (i = 0; i < BACKUP_BB_REGS_NR; i++) { + rtw89_phy_write32_mask(rtwdev, rtw8852bt_backup_bb_regs[i], + MASKDWORD, backup_bb_reg_val[i]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RFK]restore bb reg : %x, value =%x\n", + rtw8852bt_backup_bb_regs[i], backup_bb_reg_val[i]); + } +} + +static void _rfk_reload_kip_reg(struct rtw89_dev *rtwdev, u32 backup_kip_reg_val[]) +{ + u32 i; 
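+	/* write back the KIP register snapshot taken by _rfk_backup_kip_reg() */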
+ + for (i = 0; i < BACKUP_KIP_REGS_NR; i++) { + rtw89_phy_write32_mask(rtwdev, rtw8852bt_backup_kip_regs[i], + MASKDWORD, backup_kip_reg_val[i]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RFK]restore kip reg : %x, value =%x\n", + rtw8852bt_backup_kip_regs[i], backup_kip_reg_val[i]); + } +} + +static void _rfk_reload_rf_reg(struct rtw89_dev *rtwdev, + const u32 backup_rf_reg_val[], u8 rf_path) +{ + u32 i; + + for (i = 0; i < BACKUP_RF_REGS_NR; i++) { + rtw89_write_rf(rtwdev, rf_path, rtw8852bt_backup_rf_regs[i], + RFREG_MASK, backup_rf_reg_val[i]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RFK]restore rf S%d reg: %x, value =%x\n", rf_path, + rtw8852bt_backup_rf_regs[i], backup_rf_reg_val[i]); + } +} + +static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + u8 val; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x,PHY%d\n", + rtwdev->dbcc_en, phy_idx); + + if (!rtwdev->dbcc_en) { + val = RF_AB; + } else { + if (phy_idx == RTW89_PHY_0) + val = RF_A; + else + val = RF_B; + } + return val; +} + +static +void _txck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool force, + enum dac_ck ck) +{ + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x0); + + if (!force) + return; + + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_VAL, ck); + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x1); +} + +static +void _rxck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool force, + enum adc_ck ck) +{ + u32 bw = 0; + + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x0); + + if (!force) + return; + + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, ck); + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x1); + + switch (ck) { + case ADC_480M: + bw = RTW89_CHANNEL_WIDTH_40; + break; + case ADC_960M: + bw = RTW89_CHANNEL_WIDTH_80; + break; + case ADC_1920M: + bw = RTW89_CHANNEL_WIDTH_160; + break; + default: + rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s==>Invalid ck", __func__); + break; + } + + rtw8852bx_adc_cfg(rtwdev, bw, path); +} + +static void _rfk_bb_afe_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kpath) +{ + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x0303); + rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x3); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x3); + rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x1ffffff); + rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x1); + rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, 0x3ff); + rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN, 0x3); + rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_PATH_RST, B_P0_PATH_RST, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_PATH_RST, B_P1_PATH_RST, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1); + rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x1); + rtw89_phy_write32_mask(rtwdev, R_DCFO_WEIGHT, B_DAC_CLK_IDX, 0x1); + + _txck_force(rtwdev, RF_PATH_A, true, DAC_960M); + _txck_force(rtwdev, RF_PATH_B, true, DAC_960M); + _rxck_force(rtwdev, RF_PATH_A, true, ADC_1920M); + _rxck_force(rtwdev, RF_PATH_B, 
true, ADC_1920M); + + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, + B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x5); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x1f); + udelay(1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x13); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0001); + udelay(1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0041); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_RSTB, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x3333); + + rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H, B_TXPWRB_RDY, 0x1); + rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_EN, MASKLWORD, 0x0000); + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_FORCE, B_P1_TXPW_RDY, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_TXAGC_TH, MASKLWORD, 0x0000); +} + +static void _rfk_bb_afe_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kpath) +{ + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x0303); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x0); + rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x0); + rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x0); + rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, 0x63); + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_TXCK_ALL, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_TXCK_ALL, 0x00); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, + B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x0000); + rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0); + + rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H, B_TXPWRB_RDY, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TXPW_RSTB, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TXPW_RSTB, 0x2); + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_FORCE, B_P1_TXPW_RDY, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TXPW_RSTB, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TXPW_RSTB, 0x2); +} + +static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0); + rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0); + rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1); + mdelay(1); +} + +static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + u8 path, dck_tune; + u32 rf_reg5; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, CV : 0x%x) ******\n", + RTW8852BT_RXDCK_VER, rtwdev->hal.cv); + + for (path = 0; path < RF_PATH_NUM_8852BT; path++) { + rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK); + dck_tune = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE); + + if (rtwdev->is_tssi_mode[path]) + rtw89_phy_write32_mask(rtwdev, + R_P0_TSSI_TRK + (path << 13), + B_P0_TSSI_TRK_EN, 0x1); + + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); + rtw89_write_rf(rtwdev, 
path, RR_DCK, RR_DCK_FINE, 0x0); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); + _set_rx_dck(rtwdev, phy, path); + rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune); + rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5); + + if (rtwdev->is_tssi_mode[path]) + rtw89_phy_write32_mask(rtwdev, + R_P0_TSSI_TRK + (path << 13), + B_P0_TSSI_TRK_EN, 0x0); + } +} + +static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) +{ + u32 rf_reg5; + u32 rck_val; + u32 val; + int ret; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path); + + rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK); + + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%05x\n", + rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK)); + + /* RCK trigger */ + rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240); + + ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 30, + false, rtwdev, path, RR_RCKS, BIT(3)); + + rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] rck_val = 0x%x, ret = %d\n", + rck_val, ret); + + rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val); + rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF 0x1b = 0x%x\n", + rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK)); +} + +static void _drck(struct rtw89_dev *rtwdev) +{ + u32 rck_d; + u32 val; + int ret; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]Ddie RCK start!!!\n"); + rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1); + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, + 1, 10000, false, + rtwdev, R_DRCK_RES, B_DRCK_POL); + if (ret) + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n"); + + rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x1); + udelay(1); + rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x0); + + rck_d = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, 0x7c00); + rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, rck_d); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n", + rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD)); +} + +static void _dack_backup_s0(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u8 i; + + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); + + for (i = 0; i < 0x10; i++) { + rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i); + dack->msbk_d[0][0][i] = + rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0M0); + + rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i); + dack->msbk_d[0][1][i] = + rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0M1); + } + + dack->biask_d[0][0] = + rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, B_DACK_BIAS00); + dack->biask_d[0][1] = + rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, B_DACK_BIAS01); + + dack->dadck_d[0][0] = + rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, B_DACK_DADCK00); + dack->dadck_d[0][1] = + rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, B_DACK_DADCK01); +} + +static void _dack_backup_s1(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u8 i; + + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1); + + for (i = 0; i < 0x10; i++) { + rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i); + 
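		/* select MSBK entry i, then read back the latched S1 I-channel word */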
dack->msbk_d[1][0][i] = + rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK10S); + + rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i); + dack->msbk_d[1][1][i] = + rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK11S); + } + + dack->biask_d[1][0] = + rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10, B_DACK_BIAS10); + dack->biask_d[1][1] = + rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11, B_DACK_BIAS11); + + dack->dadck_d[1][0] = + rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10, B_DACK_DADCK10); + dack->dadck_d[1][1] = + rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11, B_DACK_DADCK11); +} + +static +void _dack_reset(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) +{ + if (path == RF_PATH_A) { + rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x1); + } else { + rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10_RST, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10_RST, 0x1); + } +} + +static +void _dack_reload_by_path(struct rtw89_dev *rtwdev, u8 path, u8 index) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u32 tmp, tmp_offset, tmp_reg; + u32 idx_offset, path_offset; + u8 i; + + if (index == 0) + idx_offset = 0; + else + idx_offset = 0x14; + + if (path == RF_PATH_A) + path_offset = 0; + else + path_offset = 0x28; + + tmp_offset = idx_offset + path_offset; + + rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_RST, 0x1); + rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_RST, 0x1); + rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_RST, 0x1); + rtw89_phy_write32_mask(rtwdev, R_DACK2_K, B_DACK2_RST, 0x1); + + /* msbk_d: 15/14/13/12 */ + tmp = 0x0; + for (i = 0; i < 4; i++) + tmp |= dack->msbk_d[path][index][i + 12] << (i * 8); + tmp_reg = 0xc200 + tmp_offset; + rtw89_phy_write32(rtwdev, tmp_reg, tmp); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg, + rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD)); + + /* msbk_d: 11/10/9/8 */ + tmp = 0x0; + for (i = 0; i < 4; i++) + tmp |= dack->msbk_d[path][index][i + 8] << (i * 8); + tmp_reg = 0xc204 + tmp_offset; + rtw89_phy_write32(rtwdev, tmp_reg, tmp); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg, + rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD)); + + /* msbk_d: 7/6/5/4 */ + tmp = 0x0; + for (i = 0; i < 4; i++) + tmp |= dack->msbk_d[path][index][i + 4] << (i * 8); + tmp_reg = 0xc208 + tmp_offset; + rtw89_phy_write32(rtwdev, tmp_reg, tmp); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg, + rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD)); + + /* msbk_d: 3/2/1/0 */ + tmp = 0x0; + for (i = 0; i < 4; i++) + tmp |= dack->msbk_d[path][index][i] << (i * 8); + tmp_reg = 0xc20c + tmp_offset; + rtw89_phy_write32(rtwdev, tmp_reg, tmp); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg, + rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD)); + + /* dadak_d/biask_d */ + tmp = (dack->biask_d[path][index] << 22) | + (dack->dadck_d[path][index] << 14); + tmp_reg = 0xc210 + tmp_offset; + rtw89_phy_write32(rtwdev, tmp_reg, tmp); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg, + rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD)); + + /* enable DACK result from reg */ + rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL + tmp_offset, B_DACKN0_EN, 0x1); +} + +static +void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) +{ + u8 i; + + for (i = 0; i < 2; i++) + _dack_reload_by_path(rtwdev, path, i); +} + +static bool _dack_s0_poll(struct rtw89_dev *rtwdev) +{ + if (rtw89_phy_read32_mask(rtwdev, 
R_DACK_S0P0, B_DACK_S0P0_OK) == 0 || + rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 || + rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 || + rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0) + return false; + + return true; +} + +static void _dack_s0(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + bool done; + int ret; + + _txck_force(rtwdev, RF_PATH_A, true, DAC_160M); + + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, BIT(28), 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN1, 0x0); + udelay(100); + rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_VAL, 0x30); + rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_VAL, 0x30); + + _dack_reset(rtwdev, RF_PATH_A); + + rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1); + udelay(1); + + dack->msbk_timeout[0] = false; + + ret = read_poll_timeout_atomic(_dack_s0_poll, done, done, + 1, 20000, false, rtwdev); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n"); + dack->msbk_timeout[0] = true; + } + + rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x0); + + _txck_force(rtwdev, RF_PATH_A, false, DAC_960M); + _dack_backup_s0(rtwdev); + _dack_reload(rtwdev, RF_PATH_A); + + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0); +} + +static bool _dack_s1_poll(struct rtw89_dev *rtwdev) +{ + if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 || + rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0 || + rtw89_phy_read32_mask(rtwdev, R_DACK_S1P2, B_DACK_S1P2_OK) == 0 || + rtw89_phy_read32_mask(rtwdev, R_DACK_S1P3, B_DACK_S1P3_OK) == 0) + return false; + + return true; +} + +static void _dack_s1(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + bool done; + int ret; + + _txck_force(rtwdev, RF_PATH_B, true, DAC_160M); + + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, BIT(28), 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN1, 0x0); + udelay(100); + rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_VAL, 0x30); + rtw89_phy_write32_mask(rtwdev, R_DACK2_K, B_DACK2_VAL, 0x30); + + _dack_reset(rtwdev, RF_PATH_B); + + rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x1); + udelay(1); + + dack->msbk_timeout[1] = false; + + ret = read_poll_timeout_atomic(_dack_s1_poll, done, done, + 1, 10000, false, rtwdev); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DACK timeout\n"); + dack->msbk_timeout[1] = true; + } + + rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x0); + + _txck_force(rtwdev, RF_PATH_B, false, DAC_960M); + _dack_backup_s1(rtwdev); + _dack_reload(rtwdev, RF_PATH_B); + + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0); +} + +static void _dack(struct rtw89_dev *rtwdev) +{ + _dack_s0(rtwdev); + _dack_s1(rtwdev); +} + +static void _dack_dump(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u8 i; + u8 t; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n", + dack->addck_d[0][0], dack->addck_d[0][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n", + dack->addck_d[1][0], dack->addck_d[1][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n", + dack->dadck_d[0][0], dack->dadck_d[0][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n", + 
dack->dadck_d[1][0], dack->dadck_d[1][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n", + dack->biask_d[0][0], dack->biask_d[0][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n", + dack->biask_d[1][0], dack->biask_d[1][1]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n"); + for (i = 0; i < 0x10; i++) { + t = dack->msbk_d[0][0][i]; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); + } + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n"); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + t = dack->msbk_d[0][1][i]; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); + } + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n"); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + t = dack->msbk_d[1][0][i]; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); + } + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n"); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + t = dack->msbk_d[1][1][i]; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); + } +} + +static void _addck_ori(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u32 val; + int ret; + + rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_MAN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1); + + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf); + udelay(100); + + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(4), 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3); + rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x0); + udelay(1); + + rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1); + dack->addck_timeout[0] = false; + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, + 1, 10000, false, + rtwdev, R_ADDCKR0, BIT(0)); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n"); + dack->addck_timeout[0] = true; + } + + rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(4), 0x0); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1); + + rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0); + dack->addck_d[0][0] = + rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A0); + dack->addck_d[0][1] = + rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A1); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0); + + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1); + + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf); + udelay(100); + + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(4), 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3); + rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x1); + 
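	/* complete the S1 ADDCK trigger pulse: asserted above, deasserted below */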
rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x0); + udelay(1); + + rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1); + dack->addck_timeout[1] = false; + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, + 1, 10000, false, + rtwdev, R_ADDCKR1, BIT(0)); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n"); + dack->addck_timeout[1] = true; + } + + rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(4), 0x0); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1); + + rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0); + dack->addck_d[1][0] = + rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A0); + dack->addck_d[1][1] = + rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A1); + + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0); +} + +static void _addck_reload(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + + rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1, dack->addck_d[0][0]); + rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0, dack->addck_d[0][1]); + + rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x3); + + rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL1, dack->addck_d[1][0]); + rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL0, dack->addck_d[1][1]); + + rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x3); +} + +static void _dack_manual_off(struct rtw89_dev *rtwdev) +{ + rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x0); + + rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL, B_DACKN0_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DACKN1_CTL, B_DACKN1_ON, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DACKN2_CTL, B_DACKN2_ON, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DACKN3_CTL, B_DACKN3_ON, 0x0); +} + +static void _dac_cal(struct rtw89_dev *rtwdev, bool force) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + + dack->dack_done = false; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n"); + + _drck(rtwdev); + _dack_manual_off(rtwdev); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, 0x0); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RFREG_MASK, 0x0); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1); + _rxck_force(rtwdev, RF_PATH_A, true, ADC_960M); + _rxck_force(rtwdev, RF_PATH_B, true, ADC_960M); + _addck_ori(rtwdev); + + _rxck_force(rtwdev, RF_PATH_A, false, ADC_960M); + _rxck_force(rtwdev, RF_PATH_B, false, ADC_960M); + _addck_reload(rtwdev); + + rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0); + + _dack(rtwdev); + _dack_dump(rtwdev); + dack->dack_done = true; + rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, 0x1); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RFREG_MASK, 0x1); + + dack->dack_cnt++; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n"); +} + +static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype) +{ + bool notready = false; + u32 val; + int ret; + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, + 10, 8200, false, + rtwdev, R_RFK_ST, MASKBYTE0); + if (ret) + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]NCTL1 IQK timeout!!!\n"); + + udelay(10); + + ret = 
read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000, + 10, 400, false, + rtwdev, R_RPT_COM, B_RPT_COM_RDY); + if (ret) { + notready = true; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]NCTL2 IQK timeout!!!\n"); + } + + udelay(10); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0); + + return notready; +} + +static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + u8 path, u8 ktype) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u32 iqk_cmd; + bool fail; + + switch (ktype) { + case ID_TXAGC: + iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1); + break; + case ID_FLOK_COARSE: + iqk_cmd = 0x108 | (1 << (4 + path)); + break; + case ID_FLOK_FINE: + iqk_cmd = 0x208 | (1 << (4 + path)); + break; + case ID_FLOK_VBUFFER: + iqk_cmd = 0x308 | (1 << (4 + path)); + break; + case ID_TXK: + iqk_cmd = 0x008 | (1 << (path + 4)) | + (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8); + break; + case ID_RXAGC: + iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1); + break; + case ID_RXK: + iqk_cmd = 0x008 | (1 << (path + 4)) | + (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8); + break; + case ID_NBTXK: + iqk_cmd = 0x408 | (1 << (4 + path)); + break; + case ID_NBRXK: + iqk_cmd = 0x608 | (1 << (4 + path)); + break; + default: + return false; + } + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s, iqk_cmd = %x\n", + __func__, iqk_cmd + 1); + + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1); + fail = _iqk_check_cal(rtwdev, path, ktype); + + return fail; +} + +static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1); + rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0); + rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1); + rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x00); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x5); + udelay(1); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x1); + rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0); + rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1); + rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x80); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x4); + udelay(1); + break; + default: + break; + } +} + +static bool _iqk_2g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) +{ + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000119 + (path << 4)); + + _iqk_check_cal(rtwdev, path, ID_FLOK_COARSE); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); + 
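	/* final LOK pass: repeat the VBUFFER measurement at TX gain 0x12 */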
rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4)); + + _iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000219 + (path << 4)); + + _iqk_check_cal(rtwdev, path, ID_FLOK_COARSE); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4)); + + _iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER); + + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + return false; +} + +static bool _iqk_5g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) +{ + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000119 + (path << 4)); + + _iqk_check_cal(rtwdev, path, ID_FLOK_COARSE); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4)); + + _iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000219 + (path << 4)); + + _iqk_check_cal(rtwdev, path, ID_FLOK_COARSE); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4)); + + _iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + return false; +} + +static bool _iqk_2g_tx(struct 
rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) +{ + static const u32 g_power_range[4] = {0x0, 0x0, 0x0, 0x0}; + static const u32 g_track_range[4] = {0x4, 0x4, 0x6, 0x6}; + static const u32 g_gain_bb[4] = {0x08, 0x0e, 0x08, 0x0e}; + static const u32 g_itqt[4] = {0x09, 0x12, 0x1b, 0x24}; + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + bool notready = false; + bool kfail = false; + u8 gp; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + + for (gp = 0x0; gp < 0x4; gp++) { + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, + g_power_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, + g_track_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, + g_gain_bb[gp]); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000100, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000010, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000004, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000003, gp); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, + 0x009); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, g_itqt[gp]); + notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); + iqk_info->nb_txcfir[path] = + rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD); + + if (iqk_info->is_nbiqk) + break; + + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, g_itqt[gp]); + notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]S%x, gp = 0x%x, 0x8%x38 = 0x%x\n", + path, gp, 1 << path, iqk_info->nb_txcfir[path]); + } + + if (!notready) + kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); + + if (kfail) { + iqk_info->nb_txcfir[path] = 0x40000002; + rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), + B_IQK_RES_TXCFIR, 0x0); + } + + return kfail; +} + +static bool _iqk_5g_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) +{ + static const u32 a_power_range[4] = {0x0, 0x0, 0x0, 0x0}; + static const u32 a_track_range[4] = {0x3, 0x3, 0x6, 0x6}; + static const u32 a_gain_bb[4] = {0x08, 0x10, 0x08, 0x0e}; + static const u32 a_itqt[4] = {0x09, 0x12, 0x1b, 0x24}; + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + bool notready = false; + bool kfail = false; + u8 gp; + + for (gp = 0x0; gp < 0x4; gp++) { + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]); + + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + MASKDWORD, a_itqt[gp]); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000100, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000010, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000004, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000003, gp); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, + 0x009); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, a_itqt[gp]); + + notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); + iqk_info->nb_txcfir[path] = + rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD); + + if (iqk_info->is_nbiqk) + break; + + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, a_itqt[gp]); + notready = 
_iqk_one_shot(rtwdev, phy_idx, path, ID_TXK); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]S%x, gp = 0x%x, 0x8%x38 = 0x%x\n", + path, gp, 1 << path, iqk_info->nb_txcfir[path]); + } + + if (!notready) + kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); + + if (kfail) { + iqk_info->nb_txcfir[path] = 0x40000002; + rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), + B_IQK_RES_TXCFIR, 0x0); + } + + return kfail; +} + +static void _iqk_adc_fifo_rst(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303); + udelay(10); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x3333); +} + +static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303); + + if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) { + _rxck_force(rtwdev, RF_PATH_A, true, ADC_960M); + _rxck_force(rtwdev, RF_PATH_B, true, ADC_960M); + udelay(1); + + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, + B_UPD_CLK_ADC_ON, 0x1); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, + B_UPD_CLK_ADC_VAL, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, + B_PATH0_SAMPL_DLY_T_MSK_V1, 0x2); + rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, + B_PATH1_SAMPL_DLY_T_MSK_V1, 0x2); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_BW1, 0x8); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, + B_PATH1_BW_SEL_MSK_V1, 0x8); + } else { + _rxck_force(rtwdev, RF_PATH_A, true, ADC_480M); + _rxck_force(rtwdev, RF_PATH_B, true, ADC_480M); + udelay(1); + + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, + B_UPD_CLK_ADC_ON, 0x1); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, + B_UPD_CLK_ADC_VAL, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, + B_PATH0_SAMPL_DLY_T_MSK_V1, 0x3); + rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, + B_PATH1_SAMPL_DLY_T_MSK_V1, 0x3); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_BW1, 0xf); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, + B_PATH1_BW_SEL_MSK_V1, 0xf); + } + + rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, 0x00000780, 0x8); + rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, 0x00000780, 0x8); + rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, 0x00007800, 0x2); + rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, 0x00007800, 0x2); + rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_MUL, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1); + udelay(1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x0f); + udelay(1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x03); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa001); + udelay(1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x3333); +} + +static bool _iqk_2g_rx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) +{ + static const u32 g_idxrxgain[2] = {0x212, 0x310}; + static const u32 g_idxattc2[2] = {0x00, 0x20}; + static const u32 g_idxattc1[2] = {0x3, 0x2}; + static const u32 g_idxrxagc[2] = {0x0, 0x2}; + static const u32 g_idx[2] = {0x0, 0x2}; + struct rtw89_iqk_info *iqk_info = 
&rtwdev->iqk; + bool notready = false; + bool kfail = false; + u32 rf_18, tmp; + u8 gp; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); + rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1); + rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK); + rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18); + + for (gp = 0x0; gp < 0x2; gp++) { + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, g_idxrxgain[gp]); + rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, g_idxattc2[gp]); + rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, g_idxattc1[gp]); + + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000100, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000010, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000007, g_idx[gp]); + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013); + udelay(100); + udelay(100); + + tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, tmp); + rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, g_idxrxagc[gp]); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11); + + notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, rf rxbb = %x\n", path, + rtw89_read_rf(rtwdev, path, RR_MOD, 0x003c0)); + + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013); + udelay(100); + udelay(100); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011); + + notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK); + iqk_info->nb_rxcfir[path] = + rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), + MASKDWORD) | 0x2; + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]S%x, gp = 0x%x, 0x8%x3c = 0x%x\n", path, + g_idx[gp], 1 << path, iqk_info->nb_rxcfir[path]); + + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + if (iqk_info->is_nbiqk) + break; + + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + } + + if (!notready) + kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); + + if (kfail) { + iqk_info->nb_rxcfir[path] = 0x40000002; + rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), + B_IQK_RES_RXCFIR, 0x0); + } + rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0); + + return kfail; +} + +static bool _iqk_5g_rx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) +{ + static const u32 a_idxrxgain[2] = {0x110, 0x290}; + static const u32 a_idxattc2[2] = {0x0f, 0x0f}; + static const u32 a_idxattc1[2] = {0x2, 0x2}; + static const u32 a_idxrxagc[2] = {0x4, 0x6}; + static const u32 a_idx[2] = {0x0, 0x2}; + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + bool notready = false; + bool kfail = false; + u32 rf_18, tmp; + u8 gp; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); + rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1); + rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK); + 
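/* As in the 2 GHz RX-K above: RF 0x18 (RR_CFGCH) carries the current
 * synthesizer/channel word, so it is read back once and parked in
 * RR_RSV4, presumably so the RX-K one-shots in the gain loop below see
 * the original channel setting while RR_MOD and the RXK PLL are retuned.
 */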
rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18); + + for (gp = 0x0; gp < 0x2; gp++) { + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, a_idxrxgain[gp]); + rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_HATT, a_idxattc2[gp]); + rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_CC2, a_idxattc1[gp]); + + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000100, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000010, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000007, a_idx[gp]); + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013); + udelay(100); + udelay(100); + + tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, tmp); + rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11); + + notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, rf rxbb = %x\n", path, + rtw89_read_rf(rtwdev, path, RR_MOD, 0x003c0)); + + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013); + udelay(200); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011); + notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK); + iqk_info->nb_rxcfir[path] = + rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), + MASKDWORD) | 0x2; + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]S%x, gp = 0x%x, 0x8%x3c = 0x%x\n", + path, a_idx[gp], 1 << path, iqk_info->nb_rxcfir[path]); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + if (iqk_info->is_nbiqk) + break; + + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + } + + if (!notready) + kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); + + if (kfail) { + iqk_info->nb_rxcfir[path] = 0x40000002; + rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), + B_IQK_RES_RXCFIR, 0x0); + } + rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0); + + return kfail; +} + +static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + bool lok_result = false; + bool txk_result = false; + bool rxk_result = false; + u8 i; + + for (i = 0; i < 3; i++) { + _iqk_txk_setting(rtwdev, path); + if (iqk_info->iqk_band[path] == RTW89_BAND_2G) + lok_result = _iqk_2g_lok(rtwdev, phy_idx, path); + else + lok_result = _iqk_5g_lok(rtwdev, phy_idx, path); + + if (!lok_result) + break; + } + + if (lok_result) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]!!!!!!!!!!LOK by Pass !!!!!!!!!!!\n"); + rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200); + rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200); + rtw89_write_rf(rtwdev, path, RR_LOKVB, RFREG_MASK, 0x80200); + } + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RF_0x08[00:19] = 0x%x\n", + rtw89_read_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK)); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RF_0x09[00:19] = 0x%x\n", + rtw89_read_rf(rtwdev, path, RR_RSV2, RFREG_MASK)); + rtw89_debug(rtwdev, 
RTW89_DBG_RFK, "[IQK]RF_0x0a[00:19] = 0x%x\n", + rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK)); + + if (iqk_info->iqk_band[path] == RTW89_BAND_2G) + txk_result = _iqk_2g_tx(rtwdev, phy_idx, path); + else + txk_result = _iqk_5g_tx(rtwdev, phy_idx, path); + + _iqk_rxclk_setting(rtwdev, path); + _iqk_adc_fifo_rst(rtwdev, phy_idx, path); + + if (iqk_info->iqk_band[path] == RTW89_BAND_2G) + rxk_result = _iqk_2g_rx(rtwdev, phy_idx, path); + else + rxk_result = _iqk_5g_rx(rtwdev, phy_idx, path); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]result : lok_= %x, txk_= %x, rxk_= %x\n", + lok_result, txk_result, rxk_result); +} + +static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u8 get_empty_table = false; + u32 reg_rf18; + u32 reg_35c; + u8 idx; + + for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) { + if (iqk_info->iqk_mcc_ch[idx][path] == 0) { + get_empty_table = true; + break; + } + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (1)idx = %x\n", idx); + + if (!get_empty_table) { + idx = iqk_info->iqk_table_idx[path] + 1; + if (idx > 1) + idx = 0; + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (2)idx = %x\n", idx); + + reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK); + reg_35c = rtw89_phy_read32_mask(rtwdev, R_CIRST, B_CIRST_SYN); + + iqk_info->iqk_band[path] = chan->band_type; + iqk_info->iqk_bw[path] = chan->band_width; + iqk_info->iqk_ch[path] = chan->channel; + iqk_info->iqk_mcc_ch[idx][path] = chan->channel; + iqk_info->iqk_table_idx[path] = idx; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x, idx = %x\n", + path, reg_rf18, idx); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x\n", + path, reg_rf18); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x35c= 0x%x\n", + path, reg_35c); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]times = 0x%x, ch =%x\n", + iqk_info->iqk_times, idx); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_mcc_ch[%x][%x] = 0x%x\n", + idx, path, iqk_info->iqk_mcc_ch[idx][path]); +} + +static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) +{ + _iqk_by_path(rtwdev, phy_idx, path); +} + +static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__); + + if (iqk_info->is_nbiqk) { + rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), + MASKDWORD, iqk_info->nb_txcfir[path]); + rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), + MASKDWORD, iqk_info->nb_rxcfir[path]); + } else { + rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), + MASKDWORD, 0x40000000); + rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), + MASKDWORD, 0x40000000); + } + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, + 0x00000e19 + (path << 4)); + + _iqk_check_cal(rtwdev, path, 0x0); + + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000); + rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000); + + rtw89_phy_write32_mask(rtwdev, R_KIP_CLK, MASKDWORD, 0x0); + rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K2, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), BIT(28), 0x0); + + rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0); + rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0); + rtw89_write_rf(rtwdev, path, RR_MOD, 
RR_MOD_MASK, 0x3); + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1); + rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1); +} + +static void _iqk_afebb_restore(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x0); + rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x0000000); + rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x0); + rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, 0x0000001f, 0x03); + rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, 0x000003e0, 0x03); + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_TXCK_ALL, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_TXCK_ALL, 0x00); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, + B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0); + rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0000); + rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x0); +} + +static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path) +{ + u8 idx = 0; + + rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), 0x00000001, idx); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 0x00000008, idx); + rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD, 0x40000000); + rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 0x40000000); + + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); + rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0); + rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080); + rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a); +} + +static void _iqk_macbb_setting(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_GOT_TXRX, 0x3); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_GOT_TXRX, 0x3); + rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x3); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P0_CLKG_FORCE, 0x3); + rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x1ffffff); + rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x1); + rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, 0x3ff); + rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN, 0x3); + rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_PATH_RST, B_P0_PATH_RST, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_PATH_RST, B_P1_PATH_RST, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1); + rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x1); + rtw89_phy_write32_mask(rtwdev, R_DCFO_WEIGHT, 
B_DAC_CLK_IDX, 0x1); + + _txck_force(rtwdev, RF_PATH_A, true, DAC_960M); + _txck_force(rtwdev, RF_PATH_B, true, DAC_960M); + _rxck_force(rtwdev, RF_PATH_A, true, ADC_1920M); + _rxck_force(rtwdev, RF_PATH_B, true, ADC_1920M); + + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON, 0x1); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x2); + + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1); + udelay(10); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f); + udelay(10); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001); + udelay(10); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_RSTB, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x3333); +} + +static void _iqk_init(struct rtw89_dev *rtwdev) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u8 idx, path; + + rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, 0x0); + + if (iqk_info->is_iqk_init) + return; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + iqk_info->is_iqk_init = true; + iqk_info->is_nbiqk = false; + iqk_info->iqk_fft_en = false; + iqk_info->iqk_sram_en = false; + iqk_info->iqk_cfir_en = false; + iqk_info->iqk_xym_en = false; + iqk_info->iqk_times = 0x0; + + for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) { + iqk_info->iqk_channel[idx] = 0x0; + for (path = 0; path < RTW8852BT_SS; path++) { + iqk_info->lok_cor_fail[idx][path] = false; + iqk_info->lok_fin_fail[idx][path] = false; + iqk_info->iqk_tx_fail[idx][path] = false; + iqk_info->iqk_rx_fail[idx][path] = false; + iqk_info->iqk_mcc_ch[idx][path] = 0x0; + iqk_info->iqk_table_idx[path] = 0x0; + } + } +} + +static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath) +{ + u32 rf_mode; + u8 path; + int ret; + + for (path = 0; path < RF_PATH_MAX; path++) { + if (!(kpath & BIT(path))) + continue; + + ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, + rf_mode != 2, 2, 5000, false, + rtwdev, path, RR_MOD, RR_MOD_MASK); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RFK] Wait S%d to Rx mode!! 
(ret = %d)\n", path, ret); + } +} + +static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx, + bool is_pause) +{ + if (!is_pause) + return; + + _wait_rx_mode(rtwdev, _kpath(rtwdev, band_idx)); +} + +static void _doiqk(struct rtw89_dev *rtwdev, bool force, + enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u32 backup_bb_val[BACKUP_BB_REGS_NR]; + u32 backup_rf_val[RTW8852BT_SS][BACKUP_RF_REGS_NR]; + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]==========IQK start!!!!!==========\n"); + iqk_info->iqk_times++; + iqk_info->version = RTW8852BT_IQK_VER; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version); + _iqk_get_ch_info(rtwdev, phy_idx, path); + + _rfk_backup_bb_reg(rtwdev, backup_bb_val); + _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path); + _iqk_macbb_setting(rtwdev, phy_idx, path); + _iqk_preset(rtwdev, path); + _iqk_start_iqk(rtwdev, phy_idx, path); + _iqk_restore(rtwdev, path); + _iqk_afebb_restore(rtwdev, phy_idx, path); + _rfk_reload_bb_reg(rtwdev, backup_bb_val); + _rfk_reload_rf_reg(rtwdev, backup_rf_val[path], path); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP); +} + +static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force) +{ + u8 kpath = _kpath(rtwdev, phy_idx); + + switch (kpath) { + case RF_A: + _doiqk(rtwdev, force, phy_idx, RF_PATH_A); + break; + case RF_B: + _doiqk(rtwdev, force, phy_idx, RF_PATH_B); + break; + case RF_AB: + _doiqk(rtwdev, force, phy_idx, RF_PATH_A); + _doiqk(rtwdev, force, phy_idx, RF_PATH_B); + break; + default: + break; + } +} + +static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool off) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 val, kidx = dpk->cur_idx[path]; + bool off_reverse = !off; + + val = dpk->is_dpk_enable & off_reverse & dpk->bp[path][kidx].path_ok; + + rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), + BIT(24), val); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path, + kidx, dpk->is_dpk_enable & off_reverse ? "enable" : "disable"); +} + +static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, enum rtw8852bt_dpk_id id) +{ + u16 dpk_cmd; + u32 val; + int ret; + + dpk_cmd = (id << 8) | (0x19 + (path << 4)); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd); + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, + 1, 30000, false, + rtwdev, R_RFK_ST, MASKBYTE0); + if (ret) + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot 1 over 30ms!!!!\n"); + + udelay(1); + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00030000); + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000, + 1, 2000, false, + rtwdev, R_RPT_COM, MASKLWORD); + if (ret) + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot 2 over 2ms!!!!\n"); + + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] one-shot for %s = 0x%04x\n", + id == 0x06 ? "LBK_RXIQK" : + id == 0x10 ? "SYNC" : + id == 0x11 ? "MDPK_IDL" : + id == 0x12 ? "MDPK_MPA" : + id == 0x13 ? "GAIN_LOSS" : + id == 0x14 ? "PWR_CAL" : + id == 0x15 ? 
"DPK_RXAGC" : + id == 0x16 ? "KIP_PRESET" : + id == 0x17 ? "KIP_RESOTRE" : + "DPK_TXAGC", dpk_cmd); +} + +static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0); + rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1); + + udelay(600); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d RXDCK\n", path); +} + +static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + u8 kidx = dpk->cur_idx[path]; + + dpk->bp[path][kidx].band = chan->band_type; + dpk->bp[path][kidx].ch = chan->channel; + dpk->bp[path][kidx].bw = chan->band_width; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n", + path, dpk->cur_idx[path], phy, + rtwdev->is_tssi_mode[path] ? "on" : "off", + rtwdev->dbcc_en ? "on" : "off", + dpk->bp[path][kidx].band == 0 ? "2G" : + dpk->bp[path][kidx].band == 1 ? "5G" : "6G", + dpk->bp[path][kidx].ch, + dpk->bp[path][kidx].bw == 0 ? "20M" : + dpk->bp[path][kidx].bw == 1 ? "40M" : "80M"); +} + +static void _dpk_tssi_pause(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, bool is_pause) +{ + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13), + B_P0_TSSI_TRK_EN, is_pause); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path, + is_pause ? "pause" : "resume"); +} + +static void _dpk_kip_restore(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path) +{ + rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000); + rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000); + + if (rtwdev->hal.cv > CHIP_CAV) + rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8), + B_DPD_COM_OF, 0x1); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path); +} + +static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 cur_rxbb, u32 rf_18) +{ + rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR, 0x0); + + rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18); + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0xd); + rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1); + + if (cur_rxbb >= 0x11) + rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x13); + else if (cur_rxbb <= 0xa) + rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x00); + else + rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x05); + + rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80014); + + udelay(100); + + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x025); + + _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK); + + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0); + rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1); + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0x5); +} + +static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain, + enum 
rtw89_rf_path path, u8 kidx) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + if (dpk->bp[path][kidx].band == RTW89_BAND_2G) { + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220); + rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_FATT, 0xf2); + rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1); + rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1); + } else { + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220); + rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RAA2_SWATT, 0x5); + rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1); + rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1); + rtw89_write_rf(rtwdev, path, RR_RXA_LNA, RFREG_MASK, 0x920FC); + rtw89_write_rf(rtwdev, path, RR_XALNA2, RFREG_MASK, 0x002C0); + rtw89_write_rf(rtwdev, path, RR_IQGEN, RFREG_MASK, 0x38800); + } + + rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1); + rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1); + rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0); +} + +static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, bool is_bypass) +{ + if (is_bypass) { + rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), + B_RXIQC_BYPASS2, 0x1); + rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), + B_RXIQC_BYPASS, 0x1); + } else { + rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), + B_RXIQC_BYPASS2, 0x0); + rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), + B_RXIQC_BYPASS, 0x0); + } +} + +static +void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80) + rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x0); + else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40) + rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2); + else + rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n", + dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" : + dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? 
"40M" : "20M"); +} + +static void _dpk_table_select(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, u8 kidx, u8 gain) +{ + u8 val; + + val = 0x80 + kidx * 0x20 + gain * 0x10; + rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, val); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx, + gain, val); +} + +static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) +{ +#define DPK_SYNC_TH_DC_I 200 +#define DPK_SYNC_TH_DC_Q 200 +#define DPK_SYNC_TH_CORR 170 + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 corr_val, corr_idx; + u16 dc_i, dc_q; + u32 corr, dc; + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0); + + corr = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD); + corr_idx = u32_get_bits(corr, B_PRT_COM_CORI); + corr_val = u32_get_bits(corr, B_PRT_COM_CORV); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] S%d Corr_idx / Corr_val = %d / %d\n", + path, corr_idx, corr_val); + + dpk->corr_idx[path][kidx] = corr_idx; + dpk->corr_val[path][kidx] = corr_val; + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9); + + dc = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD); + dc_i = u32_get_bits(dc, B_PRT_COM_DCI); + dc_q = u32_get_bits(dc, B_PRT_COM_DCQ); + + dc_i = abs(sign_extend32(dc_i, 11)); + dc_q = abs(sign_extend32(dc_q, 11)); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n", + path, dc_i, dc_q); + + dpk->dc_i[path][kidx] = dc_i; + dpk->dc_q[path][kidx] = dc_q; + + if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q || + corr_val < DPK_SYNC_TH_CORR) + return true; + else + return false; +} + +static void _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx) +{ + _dpk_one_shot(rtwdev, phy, path, SYNC); +} + +static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev) +{ + u16 dgain; + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0); + + dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x\n", dgain); + + return dgain; +} + +static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain) +{ + static const u16 bnd[15] = { + 0xbf1, 0xaa5, 0x97d, 0x875, 0x789, 0x6b7, 0x5fc, 0x556, + 0x4c1, 0x43d, 0x3c7, 0x35e, 0x2ac, 0x262, 0x220 + }; + s8 offset; + + if (dgain >= bnd[0]) + offset = 0x6; + else if (bnd[0] > dgain && dgain >= bnd[1]) + offset = 0x6; + else if (bnd[1] > dgain && dgain >= bnd[2]) + offset = 0x5; + else if (bnd[2] > dgain && dgain >= bnd[3]) + offset = 0x4; + else if (bnd[3] > dgain && dgain >= bnd[4]) + offset = 0x3; + else if (bnd[4] > dgain && dgain >= bnd[5]) + offset = 0x2; + else if (bnd[5] > dgain && dgain >= bnd[6]) + offset = 0x1; + else if (bnd[6] > dgain && dgain >= bnd[7]) + offset = 0x0; + else if (bnd[7] > dgain && dgain >= bnd[8]) + offset = 0xff; + else if (bnd[8] > dgain && dgain >= bnd[9]) + offset = 0xfe; + else if (bnd[9] > dgain && dgain >= bnd[10]) + offset = 0xfd; + else if (bnd[10] > dgain && dgain >= bnd[11]) + offset = 0xfc; + else if (bnd[11] > dgain && dgain >= bnd[12]) + offset = 0xfb; + else if (bnd[12] > dgain && dgain >= bnd[13]) + offset = 0xfa; + else if (bnd[13] > dgain && dgain >= bnd[14]) + offset = 0xf9; + else if (bnd[14] > dgain) + offset = 0xf8; + else + offset = 0x0; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain offset = %d\n", offset); + + return offset; +} + +static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev) +{ + rtw89_phy_write32_mask(rtwdev, 
R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6); + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1); + + return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL); +} + +static void _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx) +{ + _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS); + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6); + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1); +} + +static void _dpk_kip_preset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx) +{ + _dpk_tpg_sel(rtwdev, path, kidx); + _dpk_one_shot(rtwdev, phy, path, KIP_PRESET); +} + +static void _dpk_kip_pwr_clk_on(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path) +{ + rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080); + rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a); + rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP Power/CLK on\n"); +} + +static +u8 _dpk_txagc_check_8852bt(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 txagc) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + if (txagc >= dpk->max_dpk_txagc[path]) + txagc = dpk->max_dpk_txagc[path]; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set TxAGC = 0x%x\n", txagc); + + return txagc; +} + +static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 txagc) +{ + u8 val; + + val = _dpk_txagc_check_8852bt(rtwdev, path, txagc); + rtw89_write_rf(rtwdev, path, RR_TXAGC, RFREG_MASK, val); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + _dpk_one_shot(rtwdev, phy, path, DPK_TXAGC); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set TXAGC = 0x%x\n", txagc); +} + +static void _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, 0x50220); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + _dpk_one_shot(rtwdev, phy, path, DPK_RXAGC); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); +} + +static u8 _dpk_set_offset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 txagc, s8 gain_offset) +{ + txagc = rtw89_read_rf(rtwdev, path, RR_TXAGC, RFREG_MASK); + + if ((txagc - gain_offset) < DPK_TXAGC_LOWER) + txagc = DPK_TXAGC_LOWER; + else if ((txagc - gain_offset) > DPK_TXAGC_UPPER) + txagc = DPK_TXAGC_UPPER; + else + txagc = txagc - gain_offset; + + _dpk_kip_set_txagc(rtwdev, phy, path, txagc); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n", + gain_offset, txagc); + return txagc; +} + +static bool _dpk_pas_read(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, + u8 is_check) +{ + u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0; + u8 i; + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06); + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08); + + if (is_check) { + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00); + val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); + val1_i = abs(sign_extend32(val1_i, 11)); + val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); + val1_q = abs(sign_extend32(val1_q, 11)); + + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f); + val2_i = 
rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); + val2_i = abs(sign_extend32(val2_i, 11)); + val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); + val2_q = abs(sign_extend32(val2_q, 11)); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n", + phy_div(val1_i * val1_i + val1_q * val1_q, + val2_i * val2_i + val2_q * val2_q)); + } else { + for (i = 0; i < 32; i++) { + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] PAS_Read[%02d]= 0x%08x\n", i, + rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD)); + } + } + + if (val1_i * val1_i + val1_q * val1_q >= + (val2_i * val2_i + val2_q * val2_q) * 8 / 5) + return true; + + return false; +} + +static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx, u8 init_txagc, + bool loss_only) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 goout = 0, agc_cnt = 0, limited_rxbb = 0, gl_cnt = 0; + u8 tmp_txagc, tmp_rxbb, tmp_gl_idx = 0; + u8 step = DPK_AGC_STEP_SYNC_DGAIN; + int limit = 200; + s8 offset = 0; + u16 dgain = 0; + u32 rf_18; + + tmp_txagc = init_txagc; + + tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB); + rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK); + + do { + switch (step) { + case DPK_AGC_STEP_SYNC_DGAIN: + _dpk_sync(rtwdev, phy, path, kidx); + if (agc_cnt == 0) { + if (chan->band_width < 2) + _dpk_bypass_rxcfir(rtwdev, path, true); + else + _dpk_lbk_rxiqk(rtwdev, phy, path, + tmp_rxbb, rf_18); + } + + if (_dpk_sync_check(rtwdev, path, kidx) == true) { + tmp_txagc = 0xff; + goout = 1; + break; + } + + dgain = _dpk_dgain_read(rtwdev); + offset = _dpk_dgain_mapping(rtwdev, dgain); + + if (loss_only == 1 || limited_rxbb == 1 || offset == 0) + step = DPK_AGC_STEP_GAIN_LOSS_IDX; + else + step = DPK_AGC_STEP_GAIN_ADJ; + break; + case DPK_AGC_STEP_GAIN_ADJ: + tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB); + + if (tmp_rxbb + offset > 0x1f) { + tmp_rxbb = 0x1f; + limited_rxbb = 1; + } else if (tmp_rxbb + offset < 0) { + tmp_rxbb = 0; + limited_rxbb = 1; + } else { + tmp_rxbb = tmp_rxbb + offset; + } + + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB, tmp_rxbb); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Adjust RXBB (%d) = 0x%x\n", offset, tmp_rxbb); + + if (chan->band_width == RTW89_CHANNEL_WIDTH_80) + _dpk_lbk_rxiqk(rtwdev, phy, path, tmp_rxbb, rf_18); + if (dgain > 1922 || dgain < 342) + step = DPK_AGC_STEP_SYNC_DGAIN; + else + step = DPK_AGC_STEP_GAIN_LOSS_IDX; + + agc_cnt++; + break; + case DPK_AGC_STEP_GAIN_LOSS_IDX: + _dpk_gainloss(rtwdev, phy, path, kidx); + + tmp_gl_idx = _dpk_gainloss_read(rtwdev); + + if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, path, true)) || + tmp_gl_idx >= 7) + step = DPK_AGC_STEP_GL_GT_CRITERION; + else if (tmp_gl_idx == 0) + step = DPK_AGC_STEP_GL_LT_CRITERION; + else + step = DPK_AGC_STEP_SET_TX_GAIN; + + gl_cnt++; + break; + case DPK_AGC_STEP_GL_GT_CRITERION: + if (tmp_txagc == 0x2e || + tmp_txagc == dpk->max_dpk_txagc[path]) { + goout = 1; + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Txagc@lower bound!!\n"); + } else { + tmp_txagc = _dpk_set_offset(rtwdev, phy, path, + tmp_txagc, 0x3); + } + step = DPK_AGC_STEP_GAIN_LOSS_IDX; + agc_cnt++; + break; + + case DPK_AGC_STEP_GL_LT_CRITERION: + if (tmp_txagc == 0x3f || tmp_txagc == dpk->max_dpk_txagc[path]) { + goout = 1; + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Txagc@upper bound!!\n"); + } else { + 
tmp_txagc = _dpk_set_offset(rtwdev, phy, path, + tmp_txagc, 0xfe); + } + step = DPK_AGC_STEP_GAIN_LOSS_IDX; + agc_cnt++; + break; + + case DPK_AGC_STEP_SET_TX_GAIN: + tmp_txagc = _dpk_set_offset(rtwdev, phy, path, tmp_txagc, + tmp_gl_idx); + goout = 1; + agc_cnt++; + break; + + default: + goout = 1; + break; + } + } while (!goout && agc_cnt < 6 && limit-- > 0); + + if (gl_cnt >= 6) + _dpk_pas_read(rtwdev, path, false); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc, tmp_rxbb); + + return tmp_txagc; +} + +static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, u8 order) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + switch (order) { + case 0: /* (5,3,1) */ + rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order); + rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3); + rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1); + dpk->dpk_order[path] = 0x3; + break; + case 1: /* (5,3,0) */ + rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order); + rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x0); + dpk->dpk_order[path] = 0x1; + break; + case 2: /* (5,0,0) */ + rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order); + rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x0); + dpk->dpk_order[path] = 0x0; + break; + default: + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Wrong MDPD order!!(0x%x)\n", order); + break; + } + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set %s for IDL\n", + order == 0x0 ? "(5,3,1)" : + order == 0x1 ? "(5,3,0)" : "(5,0,0)"); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Set MDPD order to 0x%x for IDL\n", order); +} + +static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx, u8 gain) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + if (dpk->bp[path][kidx].bw < RTW89_CHANNEL_WIDTH_80 && + dpk->bp[path][kidx].band == RTW89_BAND_5G) + _dpk_set_mdpd_para(rtwdev, path, 0x2); + else + _dpk_set_mdpd_para(rtwdev, path, 0x0); + + _dpk_one_shot(rtwdev, phy, path, MDPK_IDL); +} + +static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx, u8 gain, u8 txagc) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 gs = dpk->dpk_gs[phy]; + u16 pwsf = 0x78; + + rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), BIT(8), kidx); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n", + txagc, pwsf, gs); + + dpk->bp[path][kidx].txagc_dpk = txagc; + rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8), + 0x3F << ((gain << 3) + (kidx << 4)), txagc); + + dpk->bp[path][kidx].pwsf = pwsf; + rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2), + 0x1FF << (gain << 4), pwsf); + + rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1); + rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0); + + dpk->bp[path][kidx].gs = gs; + if (dpk->dpk_gs[phy] == 0x7f) + rtw89_phy_write32_mask(rtwdev, + R_DPD_CH0A + (path << 8) + (kidx << 2), + MASKDWORD, 0x007f7f7f); + else + rtw89_phy_write32_mask(rtwdev, + R_DPD_CH0A + (path << 8) + (kidx << 2), + MASKDWORD, 0x005b5b5b); + + rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), + B_DPD_ORDER_V1, dpk->dpk_order[path]); + + 
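/* Result packing above: the 6-bit DPK TXAGC is stored in R_TXAGC_RFK at
 * bit offset (gain * 8 + kidx * 16), and the 9-bit pwsf (fixed 0x78
 * here) in R_DPD_BND at bit offset (gain * 16), with (kidx << 2) moving
 * the register address per table index. The gain scaler gs is 0x7f or
 * 0x5b depending on whether _set_dpd_backoff() moved the DPD backoff
 * into the BB; B_LOAD_COEF_MDPD is pulsed high then low around the
 * write-back.
 */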
rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD, 0x0); + rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL, 0x0); +} + +static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 idx, cur_band, cur_ch; + bool is_reload = false; + + cur_band = chan->band_type; + cur_ch = chan->channel; + + for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) { + if (cur_band != dpk->bp[path][idx].band || + cur_ch != dpk->bp[path][idx].ch) + continue; + + rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), + B_COEF_SEL_MDPD, idx); + dpk->cur_idx[path] = idx; + is_reload = true; + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] reload S%d[%d] success\n", path, idx); + } + + return is_reload; +} + +static +void _rf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool is_bybb) +{ + if (is_bybb) + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1); + else + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); +} + +static +void _drf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool is_bybb) +{ + if (is_bybb) + rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1); + else + rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0); +} + +static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 gain) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 txagc = 0x38, kidx = dpk->cur_idx[path]; + bool is_fail = false; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx); + + _rf_direct_cntrl(rtwdev, path, false); + _drf_direct_cntrl(rtwdev, path, false); + + _dpk_kip_pwr_clk_on(rtwdev, path); + _dpk_kip_set_txagc(rtwdev, phy, path, txagc); + _dpk_rf_setting(rtwdev, gain, path, kidx); + _dpk_rx_dck(rtwdev, phy, path); + _dpk_kip_preset(rtwdev, phy, path, kidx); + _dpk_kip_set_rxagc(rtwdev, phy, path); + _dpk_table_select(rtwdev, path, kidx, gain); + + txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false); + + _rfk_get_thermal(rtwdev, kidx, path); + + if (txagc == 0xff) { + is_fail = true; + goto _error; + } + + _dpk_idl_mpa(rtwdev, phy, path, kidx, gain); + + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, RF_RX); + _dpk_fill_result(rtwdev, phy, path, kidx, gain, txagc); + +_error: + if (!is_fail) + dpk->bp[path][kidx].path_ok = 1; + else + dpk->bp[path][kidx].path_ok = 0; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx, + is_fail ? "Check" : "Success"); + + _dpk_onoff(rtwdev, path, is_fail); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx, + is_fail ? 
"Check" : "Success"); + + return is_fail; +} + +static void _dpk_cal_select(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, u8 kpath) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u32 backup_kip_val[BACKUP_KIP_REGS_NR]; + u32 backup_bb_val[BACKUP_BB_REGS_NR]; + u32 backup_rf_val[RTW8852BT_SS][BACKUP_RF_REGS_NR]; + bool reloaded[2] = {false}; + u8 path; + + for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) { + reloaded[path] = _dpk_reload_check(rtwdev, phy, path); + if (!reloaded[path] && dpk->bp[path][0].ch != 0) + dpk->cur_idx[path] = !dpk->cur_idx[path]; + else + _dpk_onoff(rtwdev, path, false); + } + + _rfk_backup_bb_reg(rtwdev, backup_bb_val); + _rfk_backup_kip_reg(rtwdev, backup_kip_val); + + for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) { + _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path); + _dpk_information(rtwdev, phy, path); + if (rtwdev->is_tssi_mode[path]) + _dpk_tssi_pause(rtwdev, path, true); + } + + _rfk_bb_afe_setting(rtwdev, phy, path, kpath); + + for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) + _dpk_main(rtwdev, phy, path, 1); + + _rfk_bb_afe_restore(rtwdev, phy, path, kpath); + + _dpk_kip_restore(rtwdev, path); + _rfk_reload_bb_reg(rtwdev, backup_bb_val); + _rfk_reload_kip_reg(rtwdev, backup_kip_val); + + for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) { + _rfk_reload_rf_reg(rtwdev, backup_rf_val[path], path); + if (rtwdev->is_tssi_mode[path]) + _dpk_tssi_pause(rtwdev, path, false); + } +} + +static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + struct rtw89_fem_info *fem = &rtwdev->fem; + + if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Skip DPK due to 2G_ext_PA exist!!\n"); + return true; + } else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Skip DPK due to 5G_ext_PA exist!!\n"); + return true; + } else if (fem->epa_6g && chan->band_type == RTW89_BAND_6G) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Skip DPK due to 6G_ext_PA exist!!\n"); + return true; + } + + return false; +} + +static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + u8 path, kpath; + + kpath = _kpath(rtwdev, phy); + + for (path = 0; path < RTW8852BT_SS; path++) { + if (kpath & BIT(path)) + _dpk_onoff(rtwdev, path, true); + } +} + +static void _dpk_track(struct rtw89_dev *rtwdev) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + s8 txagc_bb, txagc_bb_tp, ini_diff = 0, txagc_ofst; + s8 delta_ther[2] = {}; + u8 trk_idx, txagc_rf; + u8 path, kidx; + u16 pwsf[2]; + u8 cur_ther; + u32 tmp; + + for (path = 0; path < RF_PATH_NUM_8852BT; path++) { + kidx = dpk->cur_idx[path]; + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n", + path, kidx, dpk->bp[path][kidx].ch); + + cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] thermal now = %d\n", cur_ther); + + if (dpk->bp[path][kidx].ch && cur_ther) + delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther; + + if (dpk->bp[path][kidx].band == RTW89_BAND_2G) + delta_ther[path] = delta_ther[path] * 3 / 2; + else + delta_ther[path] = delta_ther[path] * 5 / 2; + + txagc_rf = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), + B_TXAGC_RF); + + if (rtwdev->is_tssi_mode[path]) { + trk_idx = rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK); + + 
rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n", + txagc_rf, trk_idx); + + txagc_bb = + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), + MASKBYTE2); + txagc_bb_tp = + rtw89_phy_read32_mask(rtwdev, R_TXAGC_TP + (path << 13), + B_TXAGC_TP); + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n", + txagc_bb_tp, txagc_bb); + + txagc_ofst = + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), + MASKBYTE3); + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n", + txagc_ofst, delta_ther[path]); + tmp = rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8), + B_DPD_COM_OF); + if (tmp == 0x1) { + txagc_ofst = 0; + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] HW txagc offset mode\n"); + } + + if (txagc_rf && cur_ther) + ini_diff = txagc_ofst + (delta_ther[path]); + + tmp = rtw89_phy_read32_mask(rtwdev, + R_P0_TXDPD + (path << 13), + B_P0_TXDPD); + if (tmp == 0x0) { + pwsf[0] = dpk->bp[path][kidx].pwsf + + txagc_bb_tp - txagc_bb + ini_diff; + pwsf[1] = dpk->bp[path][kidx].pwsf + + txagc_bb_tp - txagc_bb + ini_diff; + } else { + pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff; + pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff; + } + } else { + pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff; + pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff; + } + + tmp = rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS); + if (!tmp && txagc_rf) { + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n", + pwsf[0], pwsf[1]); + + rtw89_phy_write32_mask(rtwdev, + R_DPD_BND + (path << 8) + (kidx << 2), + B_DPD_BND_0, pwsf[0]); + rtw89_phy_write32_mask(rtwdev, + R_DPD_BND + (path << 8) + (kidx << 2), + B_DPD_BND_1, pwsf[1]); + } + } +} + +static void _set_dpd_backoff(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 tx_scale, ofdm_bkof, path, kpath; + + kpath = _kpath(rtwdev, phy); + + ofdm_bkof = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_OFDM); + tx_scale = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_SCA); + + if (ofdm_bkof + tx_scale >= 44) { + /* move dpd backoff to bb, and set dpd backoff to 0 */ + dpk->dpk_gs[phy] = 0x7f; + for (path = 0; path < RF_PATH_NUM_8852BT; path++) { + if (!(kpath & BIT(path))) + continue; + + rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8), + B_DPD_CFG, 0x7f7f7f); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RFK] Set S%d DPD backoff to 0dB\n", path); + } + } else { + dpk->dpk_gs[phy] = 0x5b; + } +} + +static void _tssi_dpk_off(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A, BIT(24), 0x0); + rtw89_phy_write32_mask(rtwdev, R_DPD_CH0B, BIT(24), 0x0); +} + +static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + enum rtw89_band band = chan->band_type; + + if (band == RTW89_BAND_2G) + rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1); + else + rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1); +} + +static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + enum rtw89_band band = chan->band_type; + + rtw89_rfk_parser(rtwdev, &rtw8852bt_tssi_sys_defs_tbl); + + 
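/* The TSSI bring-up is table driven: rtw89_rfk_parser() replays a
 * static list of register/mask/value defs, and rtw89_rfk_parser_by_cond()
 * selects between two such tables (here 2G vs 5G flavours per RF path).
 * Only the channel-dependent bits, such as the BW80 async-reset flag
 * right after this, are written out explicitly.
 */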
if (chan->band_width == RTW89_CHANNEL_WIDTH_80) + rtw89_phy_write32_mask(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_BW80, 0x1); + else + rtw89_phy_write32_mask(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_BW80, 0x0); + + if (path == RF_PATH_A) + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852bt_tssi_sys_a_defs_2g_tbl, + &rtw8852bt_tssi_sys_a_defs_5g_tbl); + else + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852bt_tssi_sys_b_defs_2g_tbl, + &rtw8852bt_tssi_sys_b_defs_5g_tbl); +} + +static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852bt_tssi_init_txpwr_defs_a_tbl, + &rtw8852bt_tssi_init_txpwr_defs_b_tbl); +} + +static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852bt_tssi_init_txpwr_he_tb_defs_a_tbl, + &rtw8852bt_tssi_init_txpwr_he_tb_defs_b_tbl); +} + +static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852bt_tssi_dck_defs_a_tbl, + &rtw8852bt_tssi_dck_defs_b_tbl); +} + +static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ +#define RTW8852BT_TSSI_GET_VAL(ptr, idx) \ +({ \ + s8 *__ptr = (ptr); \ + u8 __idx = (idx), __i, __v; \ + u32 __val = 0; \ + for (__i = 0; __i < 4; __i++) { \ + __v = (__ptr[__idx + __i]); \ + __val |= (__v << (8 * __i)); \ + } \ + __val; \ +}) + struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk; + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + u8 ch = chan->channel; + u8 subband = chan->subband_type; + const s8 *thm_up_a = NULL; + const s8 *thm_down_a = NULL; + const s8 *thm_up_b = NULL; + const s8 *thm_down_b = NULL; + u8 thermal = 0xff; + s8 thm_ofst[64] = {0}; + u32 tmp = 0; + u8 i, j; + + switch (subband) { + default: + case RTW89_CH_2G: + thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0]; + thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0]; + thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0]; + thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0]; + break; + case RTW89_CH_5G_BAND_1: + thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0]; + thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0]; + thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0]; + thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0]; + break; + case RTW89_CH_5G_BAND_3: + thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1]; + thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1]; + thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1]; + thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1]; + break; + case RTW89_CH_5G_BAND_4: + thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2]; + thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2]; + thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2]; + thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2]; + break; + } + + if (path == RF_PATH_A) { + thermal = tssi_info->thermal[RF_PATH_A]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal); + + rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1); + + if (thermal == 
0xff) { + rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32); + + for (i = 0; i < 64; i += 4) { + rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] write 0x%x val=0x%08x\n", + R_P0_TSSI_BASE + i, 0x0); + } + + } else { + rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, + thermal); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, + thermal); + + i = 0; + for (j = 0; j < 32; j++) + thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? + -thm_down_a[i++] : + -thm_down_a[DELTA_SWINGIDX_SIZE - 1]; + + i = 1; + for (j = 63; j >= 32; j--) + thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? + thm_up_a[i++] : + thm_up_a[DELTA_SWINGIDX_SIZE - 1]; + + for (i = 0; i < 64; i += 4) { + tmp = RTW8852BT_TSSI_GET_VAL(thm_ofst, i); + rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] write 0x%x val=0x%08x\n", + 0x5c00 + i, tmp); + } + } + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0); + + } else { + thermal = tssi_info->thermal[RF_PATH_B]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal); + + rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1); + + if (thermal == 0xff) { + rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32); + + for (i = 0; i < 64; i += 4) { + rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] write 0x%x val=0x%08x\n", + 0x7c00 + i, 0x0); + } + + } else { + rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, + thermal); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, + thermal); + + i = 0; + for (j = 0; j < 32; j++) + thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? + -thm_down_b[i++] : + -thm_down_b[DELTA_SWINGIDX_SIZE - 1]; + + i = 1; + for (j = 63; j >= 32; j--) + thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 
+ thm_up_b[i++] : + thm_up_b[DELTA_SWINGIDX_SIZE - 1]; + + for (i = 0; i < 64; i += 4) { + tmp = RTW8852BT_TSSI_GET_VAL(thm_ofst, i); + rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] write 0x%x val=0x%08x\n", + 0x7c00 + i, tmp); + } + } + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0); + } +#undef RTW8852BT_TSSI_GET_VAL +} + +static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852bt_tssi_dac_gain_defs_a_tbl, + &rtw8852bt_tssi_dac_gain_defs_b_tbl); +} + +static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + enum rtw89_band band = chan->band_type; + + if (path == RF_PATH_A) + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852bt_tssi_slope_a_defs_2g_tbl, + &rtw8852bt_tssi_slope_a_defs_5g_tbl); + else + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852bt_tssi_slope_b_defs_2g_tbl, + &rtw8852bt_tssi_slope_b_defs_5g_tbl); +} + +static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, bool all) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + enum rtw89_band band = chan->band_type; + const struct rtw89_rfk_tbl *tbl = NULL; + u8 ch = chan->channel; + + if (path == RF_PATH_A) { + if (band == RTW89_BAND_2G) + tbl = &rtw8852bt_tssi_align_a_2g_all_defs_tbl; + else if (ch >= 36 && ch <= 64) + tbl = &rtw8852bt_tssi_align_a_5g1_all_defs_tbl; + else if (ch >= 100 && ch <= 144) + tbl = &rtw8852bt_tssi_align_a_5g2_all_defs_tbl; + else if (ch >= 149 && ch <= 177) + tbl = &rtw8852bt_tssi_align_a_5g3_all_defs_tbl; + } else { + if (ch >= 1 && ch <= 14) + tbl = &rtw8852bt_tssi_align_b_2g_all_defs_tbl; + else if (ch >= 36 && ch <= 64) + tbl = &rtw8852bt_tssi_align_b_5g1_all_defs_tbl; + else if (ch >= 100 && ch <= 144) + tbl = &rtw8852bt_tssi_align_b_5g2_all_defs_tbl; + else if (ch >= 149 && ch <= 177) + tbl = &rtw8852bt_tssi_align_b_5g3_all_defs_tbl; + } + + if (tbl) + rtw89_rfk_parser(rtwdev, tbl); +} + +static void _tssi_set_tssi_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852bt_tssi_slope_defs_a_tbl, + &rtw8852bt_tssi_slope_defs_b_tbl); +} + +static void _tssi_set_tssi_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + if (path == RF_PATH_A) + rtw89_phy_write32_mask(rtwdev, R_P0_TSSIC, B_P0_TSSIC_BYPASS, 0x0); + else + rtw89_phy_write32_mask(rtwdev, R_P1_TSSIC, B_P1_TSSIC_BYPASS, 0x0); +} + +static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "======>%s path=%d\n", __func__, + path); + + if (path == RF_PATH_A) + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, + B_P0_TSSI_MV_MIX, 0x010); + else + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, + B_P1_RFCTM_DEL, 0x010); +} + +static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + u8 i; + + for (i = 0; i < RF_PATH_NUM_8852BT; i++) { + _tssi_set_tssi_track(rtwdev, phy, i); + _tssi_set_txagc_offset_mv_avg(rtwdev, phy, i); + + if (i == RF_PATH_A) { + rtw89_phy_write32_mask(rtwdev, 
R_P0_TSSI_MV_AVG, + B_P0_TSSI_MV_CLR, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, + B_P0_TSSI_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, + B_P0_TSSI_EN, 0x1); + rtw89_write_rf(rtwdev, i, RR_TXGA_V1, + RR_TXGA_V1_TRK_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, + B_P0_TSSI_RFC, 0x3); + + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, + B_P0_TSSI_OFT, 0xc0); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, + B_P0_TSSI_OFT_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, + B_P0_TSSI_OFT_EN, 0x1); + + rtwdev->is_tssi_mode[RF_PATH_A] = true; + } else { + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, + B_P1_TSSI_MV_CLR, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, + B_P1_TSSI_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, + B_P1_TSSI_EN, 0x1); + rtw89_write_rf(rtwdev, i, RR_TXGA_V1, + RR_TXGA_V1_TRK_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, + B_P1_TSSI_RFC, 0x3); + + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, + B_P1_TSSI_OFT, 0xc0); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, + B_P1_TSSI_OFT_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, + B_P1_TSSI_OFT_EN, 0x1); + + rtwdev->is_tssi_mode[RF_PATH_B] = true; + } + } +} + +static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_RFC, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_RFC, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_CLR, 0x1); + + rtwdev->is_tssi_mode[RF_PATH_A] = false; + rtwdev->is_tssi_mode[RF_PATH_B] = false; +} + +static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch) +{ + switch (ch) { + case 1 ... 2: + return 0; + case 3 ... 5: + return 1; + case 6 ... 8: + return 2; + case 9 ... 11: + return 3; + case 12 ... 13: + return 4; + case 14: + return 5; + } + + return 0; +} + +#define TSSI_EXTRA_GROUP_BIT (BIT(31)) +#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx)) +#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT) +#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT) +#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1) + +static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch) +{ + switch (ch) { + case 1 ... 2: + return 0; + case 3 ... 5: + return 1; + case 6 ... 8: + return 2; + case 9 ... 11: + return 3; + case 12 ... 14: + return 4; + case 36 ... 40: + return 5; + case 41 ... 43: + return TSSI_EXTRA_GROUP(5); + case 44 ... 48: + return 6; + case 49 ... 51: + return TSSI_EXTRA_GROUP(6); + case 52 ... 56: + return 7; + case 57 ... 59: + return TSSI_EXTRA_GROUP(7); + case 60 ... 64: + return 8; + case 100 ... 104: + return 9; + case 105 ... 107: + return TSSI_EXTRA_GROUP(9); + case 108 ... 112: + return 10; + case 113 ... 115: + return TSSI_EXTRA_GROUP(10); + case 116 ... 120: + return 11; + case 121 ... 123: + return TSSI_EXTRA_GROUP(11); + case 124 ... 128: + return 12; + case 129 ... 131: + return TSSI_EXTRA_GROUP(12); + case 132 ... 136: + return 13; + case 137 ... 139: + return TSSI_EXTRA_GROUP(13); + case 140 ... 144: + return 14; + case 149 ... 153: + return 15; + case 154 ... 156: + return TSSI_EXTRA_GROUP(15); + case 157 ... 161: + return 16; + case 162 ... 
164: + return TSSI_EXTRA_GROUP(16); + case 165 ... 169: + return 17; + case 170 ... 172: + return TSSI_EXTRA_GROUP(17); + case 173 ... 177: + return 18; + } + + return 0; +} + +static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch) +{ + switch (ch) { + case 1 ... 8: + return 0; + case 9 ... 14: + return 1; + case 36 ... 48: + return 2; + case 52 ... 64: + return 3; + case 100 ... 112: + return 4; + case 116 ... 128: + return 5; + case 132 ... 144: + return 6; + case 149 ... 177: + return 7; + } + + return 0; +} + +static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + u8 ch = chan->channel; + u32 gidx, gidx_1st, gidx_2nd; + s8 de_1st; + s8 de_2nd; + s8 val; + + gidx = _tssi_get_ofdm_group(rtwdev, ch); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", path, gidx); + + if (IS_TSSI_EXTRA_GROUP(gidx)) { + gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx); + gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx); + de_1st = tssi_info->tssi_mcs[path][gidx_1st]; + de_2nd = tssi_info->tssi_mcs[path][gidx_2nd]; + val = (de_1st + de_2nd) / 2; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n", + path, val, de_1st, de_2nd); + } else { + val = tssi_info->tssi_mcs[path][gidx]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val); + } + + return val; +} + +static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + u8 ch = chan->channel; + u32 tgidx, tgidx_1st, tgidx_2nd; + s8 tde_1st; + s8 tde_2nd; + s8 val; + + tgidx = _tssi_get_trim_group(rtwdev, ch); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n", + path, tgidx); + + if (IS_TSSI_EXTRA_GROUP(tgidx)) { + tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx); + tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx); + tde_1st = tssi_info->tssi_trim[path][tgidx_1st]; + tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd]; + val = (tde_1st + tde_2nd) / 2; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n", + path, val, tde_1st, tde_2nd); + } else { + val = tssi_info->tssi_trim[path][tgidx]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs trim_de=%d\n", + path, val); + } + + return val; +} + +static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + u8 ch = chan->channel; + u8 gidx; + s8 ofdm_de; + s8 trim_de; + s32 val; + u32 i; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n", + phy, ch); + + for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) { + gidx = _tssi_get_cck_group(rtwdev, ch); + trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); + val = tssi_info->tssi_cck[i][gidx] + trim_de; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n", + i, gidx, tssi_info->tssi_cck[i][gidx], trim_de); + + rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + 
"[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n", + _tssi_de_cck_long[i], + rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i], + _TSSI_DE_MASK)); + + ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i); + trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); + val = ofdm_de + trim_de; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n", + i, ofdm_de, trim_de); + + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], + _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n", + _tssi_de_mcs_20m[i], + rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i], + _TSSI_DE_MASK)); + } +} + +static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) +{ + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K]\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n" + "0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n", + R_TSSI_PA_K1 + (path << 13), + rtw89_phy_read32(rtwdev, R_TSSI_PA_K1 + (path << 13)), + R_TSSI_PA_K2 + (path << 13), + rtw89_phy_read32(rtwdev, R_TSSI_PA_K2 + (path << 13)), + R_P0_TSSI_ALIM1 + (path << 13), + rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM1 + (path << 13)), + R_P0_TSSI_ALIM3 + (path << 13), + rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM3 + (path << 13)), + R_TSSI_PA_K5 + (path << 13), + rtw89_phy_read32(rtwdev, R_TSSI_PA_K5 + (path << 13)), + R_P0_TSSI_ALIM2 + (path << 13), + rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM2 + (path << 13)), + R_P0_TSSI_ALIM4 + (path << 13), + rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM4 + (path << 13)), + R_TSSI_PA_K8 + (path << 13), + rtw89_phy_read32(rtwdev, R_TSSI_PA_K8 + (path << 13))); +} + +static void _tssi_alimentk_done(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, enum rtw89_rf_path path) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + u8 channel = chan->channel; + u8 band; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "======>%s phy=%d path=%d\n", __func__, phy, path); + + if (channel >= 1 && channel <= 14) + band = TSSI_ALIMK_2G; + else if (channel >= 36 && channel <= 64) + band = TSSI_ALIMK_5GL; + else if (channel >= 100 && channel <= 144) + band = TSSI_ALIMK_5GM; + else if (channel >= 149 && channel <= 177) + band = TSSI_ALIMK_5GH; + else + band = TSSI_ALIMK_2G; + + if (tssi_info->alignment_done[path][band]) { + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD, + tssi_info->alignment_value[path][band][0]); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD, + tssi_info->alignment_value[path][band][1]); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD, + tssi_info->alignment_value[path][band][2]); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD, + tssi_info->alignment_value[path][band][3]); + } + + _tssi_alimentk_dump_result(rtwdev, path); +} + +static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm, + u8 enable) +{ + enum rtw89_rf_path_bit rx_path; + + if (path == RF_PATH_A) + rx_path = RF_A; + else if 
(path == RF_PATH_B) + rx_path = RF_B; + else if (path == RF_PATH_AB) + rx_path = RF_AB; + else + rx_path = RF_ABCD; /* don't change path, but still set others */ + + if (enable) { + rtw8852bx_bb_set_plcp_tx(rtwdev); + rtw8852bx_bb_cfg_tx_path(rtwdev, path); + rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path); + rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy); + } + + rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy); +} + +static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, const u32 reg[], + u32 reg_backup[], u32 reg_num) +{ + u32 i; + + for (i = 0; i < reg_num; i++) { + reg_backup[i] = rtw89_phy_read32_mask(rtwdev, reg[i], MASKDWORD); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI] Backup BB 0x%x = 0x%x\n", reg[i], + reg_backup[i]); + } +} + +static void _tssi_reload_bb_registers(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, const u32 reg[], + u32 reg_backup[], u32 reg_num) + +{ + u32 i; + + for (i = 0; i < reg_num; i++) { + rtw89_phy_write32_mask(rtwdev, reg[i], MASKDWORD, reg_backup[i]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI] Reload BB 0x%x = 0x%x\n", reg[i], + reg_backup[i]); + } +} + +static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel) +{ + u8 channel_index; + + if (channel >= 1 && channel <= 14) + channel_index = channel - 1; + else if (channel >= 36 && channel <= 64) + channel_index = (channel - 36) / 2 + 14; + else if (channel >= 100 && channel <= 144) + channel_index = ((channel - 100) / 2) + 15 + 14; + else if (channel >= 149 && channel <= 177) + channel_index = ((channel - 149) / 2) + 38 + 14; + else + channel_index = 0; + + return channel_index; +} + +static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, const s16 *power, + u32 *tssi_cw_rpt) +{ + u32 tx_counter, tx_counter_tmp; + const int retry = 100; + u32 tmp; + int j, k; + + for (j = 0; j < RTW8852BT_TSSI_PATH_NR; j++) { + rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x1); + + tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD); + + tmp = rtw89_phy_read32_mask(rtwdev, _tssi_trigger[path], MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] 0x%x = 0x%08x path=%d\n", + _tssi_trigger[path], tmp, path); + + if (j == 0) + _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true); + else + _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true); + + tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD); + tx_counter_tmp -= tx_counter; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] First HWTXcounter=%d path=%d\n", + tx_counter_tmp, path); + + for (k = 0; k < retry; k++) { + tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path], + B_TSSI_CWRPT_RDY); + if (tmp) + break; + + udelay(30); + + tx_counter_tmp = + rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD); + tx_counter_tmp -= tx_counter; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] Flow k = %d HWTXcounter=%d path=%d\n", + k, tx_counter_tmp, path); + } + + if (k >= retry) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n", + k, path); + + _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false); + return false; + } + + tssi_cw_rpt[j] = + rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path], + B_TSSI_CWRPT); + + _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false); + + tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, 
R_TX_COUNTER, MASKLWORD); + tx_counter_tmp -= tx_counter; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] Final HWTXcounter=%d path=%d\n", + tx_counter_tmp, path); + } + + return true; +} + +static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4, + 0x78e4, 0x49c0, 0x0d18, 0x0d80}; + static const s16 power_2g[4] = {48, 20, 4, -8}; + static const s16 power_5g[4] = {48, 20, 4, 4}; + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3; + u32 tssi_cw_rpt[RTW8852BT_TSSI_PATH_NR] = {}; + u8 channel = chan->channel; + u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel); + struct rtw8852bx_bb_tssi_bak tssi_bak; + s32 aliment_diff, tssi_cw_default; + u32 start_time, finish_time; + u32 bb_reg_backup[8] = {}; + const s16 *power; + u8 band; + bool ok; + u32 tmp; + u8 j; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "======> %s channel=%d path=%d\n", __func__, channel, + path); + + start_time = ktime_get_ns(); + + if (chan->band_type == RTW89_BAND_2G) + power = power_2g; + else + power = power_5g; + + if (channel >= 1 && channel <= 14) + band = TSSI_ALIMK_2G; + else if (channel >= 36 && channel <= 64) + band = TSSI_ALIMK_5GL; + else if (channel >= 100 && channel <= 144) + band = TSSI_ALIMK_5GM; + else if (channel >= 149 && channel <= 177) + band = TSSI_ALIMK_5GH; + else + band = TSSI_ALIMK_2G; + + rtw8852bx_bb_backup_tssi(rtwdev, phy, &tssi_bak); + _tssi_backup_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, + ARRAY_SIZE(bb_reg_backup)); + + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x8); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x8); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2); + + ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt); + if (!ok) + goto out; + + for (j = 0; j < RTW8852BT_TSSI_PATH_NR; j++) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] power[%d]=%d tssi_cw_rpt[%d]=%d\n", j, + power[j], j, tssi_cw_rpt[j]); + } + + tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][1], + _tssi_cw_default_mask[1]); + tssi_cw_default = sign_extend32(tmp, 8); + tssi_alim_offset_1 = tssi_cw_rpt[0] - ((power[0] - power[1]) * 2) - + tssi_cw_rpt[1] + tssi_cw_default; + aliment_diff = tssi_alim_offset_1 - tssi_cw_default; + + tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][2], + _tssi_cw_default_mask[2]); + tssi_cw_default = sign_extend32(tmp, 8); + tssi_alim_offset_2 = tssi_cw_default + aliment_diff; + + tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][3], + _tssi_cw_default_mask[3]); + tssi_cw_default = sign_extend32(tmp, 8); + tssi_alim_offset_3 = tssi_cw_default + aliment_diff; + + if (path == RF_PATH_A) { + tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) | + FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) | + FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3); + + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM1, tmp); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2, B_P0_TSSI_ALIM2, tmp); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n", + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3, B_P0_TSSI_ALIM31), + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM11), + 
rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM12), + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM13)); + } else { + tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) | + FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) | + FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3); + + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM1, tmp); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM2, B_P1_TSSI_ALIM2, tmp); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n", + rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM3, B_P1_TSSI_ALIM31), + rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM11), + rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM12), + rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM13)); + } + + tssi_info->alignment_done[path][band] = true; + tssi_info->alignment_value[path][band][0] = + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD); + tssi_info->alignment_value[path][band][1] = + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD); + tssi_info->alignment_value[path][band][2] = + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD); + tssi_info->alignment_value[path][band][3] = + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD); + + tssi_info->check_backup_aligmk[path][ch_idx] = true; + tssi_info->alignment_backup_by_ch[path][ch_idx][0] = + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD); + tssi_info->alignment_backup_by_ch[path][ch_idx][1] = + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD); + tssi_info->alignment_backup_by_ch[path][ch_idx][2] = + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD); + tssi_info->alignment_backup_by_ch[path][ch_idx][3] = + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][0], 0x%x = 0x%08x\n", + path, band, R_P0_TSSI_ALIM1 + (path << 13), + tssi_info->alignment_value[path][band][0]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][1], 0x%x = 0x%08x\n", + path, band, R_P0_TSSI_ALIM3 + (path << 13), + tssi_info->alignment_value[path][band][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][2], 0x%x = 0x%08x\n", + path, band, R_P0_TSSI_ALIM2 + (path << 13), + tssi_info->alignment_value[path][band][2]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][3], 0x%x = 0x%08x\n", + path, band, R_P0_TSSI_ALIM4 + (path << 13), + tssi_info->alignment_value[path][band][3]); + +out: + _tssi_reload_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, + ARRAY_SIZE(bb_reg_backup)); + rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak); + rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0); + + finish_time = ktime_get_ns(); + tssi_info->tssi_alimk_time += finish_time - start_time; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] %s processing time = %d ms\n", __func__, + tssi_info->tssi_alimk_time); +} + +void rtw8852bt_dpk_init(struct rtw89_dev *rtwdev) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + u8 path; + + for (path = 0; path < 2; path++) { + dpk->cur_idx[path] = 0; + dpk->max_dpk_txagc[path] = 0x3F; + } + + dpk->is_dpk_enable = true; + dpk->is_dpk_reload_en = false; + _set_dpd_backoff(rtwdev, RTW89_PHY_0); +} + +void 
rtw8852bt_rck(struct rtw89_dev *rtwdev) +{ + u8 path; + + for (path = 0; path < RF_PATH_NUM_8852BT; path++) + _rck(rtwdev, path); +} + +void rtw8852bt_dack(struct rtw89_dev *rtwdev) +{ + u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START); + _dac_cal(rtwdev, false); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP); +} + +void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + u32 tx_en; + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START); + rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); + _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); + + _iqk_init(rtwdev); + _iqk(rtwdev, phy_idx, false); + + rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP); +} + +void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + u32 tx_en; + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START); + rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); + _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); + + _rx_dck(rtwdev, phy_idx); + + rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP); +} + +void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] ****** DPK Start (Ver: 0x%x) ******\n", RTW8852BT_DPK_VER); + + if (_dpk_bypass_check(rtwdev, phy_idx)) + _dpk_force_bypass(rtwdev, phy_idx); + else + _dpk_cal_select(rtwdev, phy_idx, RF_AB); +} + +void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev) +{ + _dpk_track(rtwdev); +} + +void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en) +{ + static const u32 reg[2] = {R_DPD_CH0A, R_DPD_CH0B}; + u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB); + u32 reg_backup[2] = {}; + u32 tx_en; + u8 i; + + _tssi_backup_bb_registers(rtwdev, phy, reg, reg_backup, 2); + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START); + + _tssi_dpk_off(rtwdev, phy); + _tssi_disable(rtwdev, phy); + + for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) { + _tssi_rf_setting(rtwdev, phy, i); + _tssi_set_sys(rtwdev, phy, i); + _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i); + _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i); + _tssi_set_dck(rtwdev, phy, i); + _tssi_set_tmeter_tbl(rtwdev, phy, i); + _tssi_set_dac_gain_tbl(rtwdev, phy, i); + _tssi_slope_cal_org(rtwdev, phy, i); + _tssi_alignment_default(rtwdev, phy, i, true); + _tssi_set_tssi_slope(rtwdev, phy, i); + + rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL); + _tmac_tx_pause(rtwdev, phy, true); + if (hwtx_en) + _tssi_alimentk(rtwdev, phy, i); + _tmac_tx_pause(rtwdev, phy, false); + rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en); + } + + _tssi_enable(rtwdev, phy); + _tssi_set_efuse_to_de(rtwdev, phy); + + _tssi_reload_bb_registers(rtwdev, phy, reg, reg_backup, 2); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP); +} + +void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + u8 channel = chan->channel; + 
u8 band; + u32 i; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "======>%s phy=%d channel=%d\n", __func__, phy, channel); + + if (channel >= 1 && channel <= 14) + band = TSSI_ALIMK_2G; + else if (channel >= 36 && channel <= 64) + band = TSSI_ALIMK_5GL; + else if (channel >= 100 && channel <= 144) + band = TSSI_ALIMK_5GM; + else if (channel >= 149 && channel <= 177) + band = TSSI_ALIMK_5GH; + else + band = TSSI_ALIMK_2G; + + _tssi_disable(rtwdev, phy); + + for (i = RF_PATH_A; i < RTW8852BT_TSSI_PATH_NR; i++) { + _tssi_rf_setting(rtwdev, phy, i); + _tssi_set_sys(rtwdev, phy, i); + _tssi_set_tmeter_tbl(rtwdev, phy, i); + + if (tssi_info->alignment_done[i][band]) + _tssi_alimentk_done(rtwdev, phy, i); + else + _tssi_alignment_default(rtwdev, phy, i, true); + } + + _tssi_enable(rtwdev, phy); + _tssi_set_efuse_to_de(rtwdev, phy); +} + +static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, bool enable) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + u8 channel = chan->channel; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n", + __func__, channel); + + if (enable) + return; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "======>%s 1 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n", + __func__, + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT), + rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT)); + + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, 0xc0); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT, 0xc0); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1); + + _tssi_alimentk_done(rtwdev, phy, RF_PATH_A); + _tssi_alimentk_done(rtwdev, phy, RF_PATH_B); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n", + __func__, + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT), + rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT)); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "======> %s SCAN_END\n", __func__); +} + +void rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start, + enum rtw89_phy_idx phy_idx) +{ + if (scan_start) + rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, true); + else + rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, false); +} diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h new file mode 100644 index 000000000000..09918835c6e8 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2024 Realtek Corporation + */ + +#ifndef __RTW89_8852BT_RFK_H__ +#define __RTW89_8852BT_RFK_H__ + +#include "core.h" + +void rtw8852bt_rck(struct rtw89_dev *rtwdev); +void rtw8852bt_dack(struct rtw89_dev *rtwdev); +void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); +void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); +void rtw8852bt_dpk_init(struct rtw89_dev *rtwdev); +void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev); +void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en); +void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void 
rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start, + enum rtw89_phy_idx phy_idx); + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.c new file mode 100644 index 000000000000..782144bb7f49 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.c @@ -0,0 +1,490 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2024 Realtek Corporation + */ + +#include "rtw8852bt_rfk_table.h" + +static const struct rtw89_reg5_def rtw8852bt_tssi_sys_defs[] = { + RTW89_DECL_RFK_WM(0x12a8, 0x0000000f, 0x4), + RTW89_DECL_RFK_WM(0x32a8, 0x0000000f, 0x4), + RTW89_DECL_RFK_WM(0x12bc, 0x000ffff0, 0x5555), + RTW89_DECL_RFK_WM(0x32bc, 0x000ffff0, 0x5555), + RTW89_DECL_RFK_WM(0x0300, 0xff000000, 0x16), + RTW89_DECL_RFK_WM(0x0304, 0x000000ff, 0x19), + RTW89_DECL_RFK_WM(0x0314, 0xffff0000, 0x2041), + RTW89_DECL_RFK_WM(0x0318, 0xffffffff, 0x2041), + RTW89_DECL_RFK_WM(0x0318, 0xffffffff, 0x20012041), + RTW89_DECL_RFK_WM(0x0020, 0x00006000, 0x3), + RTW89_DECL_RFK_WM(0x0024, 0x00006000, 0x3), + RTW89_DECL_RFK_WM(0x0704, 0xffff0000, 0x601e), + RTW89_DECL_RFK_WM(0x2704, 0xffff0000, 0x601e), + RTW89_DECL_RFK_WM(0x0700, 0xf0000000, 0x4), + RTW89_DECL_RFK_WM(0x2700, 0xf0000000, 0x4), + RTW89_DECL_RFK_WM(0x0650, 0x3c000000, 0x0), + RTW89_DECL_RFK_WM(0x2650, 0x3c000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_sys_defs); + +static const struct rtw89_reg5_def rtw8852bt_tssi_sys_a_defs_2g[] = { + RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x33), + RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x33), + RTW89_DECL_RFK_WM(0x58f8, 0x40000000, 0x1), + RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x1e), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_sys_a_defs_2g); + +static const struct rtw89_reg5_def rtw8852bt_tssi_sys_a_defs_5g[] = { + RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x44), + RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x44), + RTW89_DECL_RFK_WM(0x58f8, 0x40000000, 0x0), + RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x1d), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_sys_a_defs_5g); + +static const struct rtw89_reg5_def rtw8852bt_tssi_sys_b_defs_2g[] = { + RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x33), + RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x33), + RTW89_DECL_RFK_WM(0x78f8, 0x40000000, 0x1), + RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x1e), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_sys_b_defs_2g); + +static const struct rtw89_reg5_def rtw8852bt_tssi_sys_b_defs_5g[] = { + RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x44), + RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x44), + RTW89_DECL_RFK_WM(0x78f8, 0x40000000, 0x0), + RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x1d), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_sys_b_defs_5g); + +static const struct rtw89_reg5_def rtw8852bt_tssi_init_txpwr_defs_a[] = { + RTW89_DECL_RFK_WM(0x566c, 0x00001000, 0x0), + RTW89_DECL_RFK_WM(0x5800, 0xffffffff, 0x003f807f), + RTW89_DECL_RFK_WM(0x580c, 0x0000007f, 0x40), + RTW89_DECL_RFK_WM(0x580c, 0x0fffff00, 0x00040), + RTW89_DECL_RFK_WM(0x5810, 0xffffffff, 0x59010000), + RTW89_DECL_RFK_WM(0x5814, 0x01ffffff, 0x002d000), + RTW89_DECL_RFK_WM(0x5814, 0xf8000000, 0x00), + RTW89_DECL_RFK_WM(0x5818, 0xffffffff, 0x002c1800), + RTW89_DECL_RFK_WM(0x581c, 0x3fffffff, 0x1dc80280), + RTW89_DECL_RFK_WM(0x5820, 0xffffffff, 0x00002080), + RTW89_DECL_RFK_WM(0x580c, 0x10000000, 0x1), + RTW89_DECL_RFK_WM(0x580c, 0x40000000, 0x1), + RTW89_DECL_RFK_WM(0x5834, 0x3fffffff, 0x000115f2), + RTW89_DECL_RFK_WM(0x5838, 0x7fffffff, 0x0000121), + RTW89_DECL_RFK_WM(0x5854, 
0x3fffffff, 0x000115f2), + RTW89_DECL_RFK_WM(0x5858, 0x7fffffff, 0x0000121), + RTW89_DECL_RFK_WM(0x5860, 0x80000000, 0x0), + RTW89_DECL_RFK_WM(0x5864, 0x07ffffff, 0x00801ff), + RTW89_DECL_RFK_WM(0x5898, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x589c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x58a4, 0x000000ff, 0x16), + RTW89_DECL_RFK_WM(0x58b0, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x58b4, 0x7fffffff, 0x0a002000), + RTW89_DECL_RFK_WM(0x58b8, 0x7fffffff, 0x00007628), + RTW89_DECL_RFK_WM(0x58bc, 0x07ffffff, 0x7a7807f), + RTW89_DECL_RFK_WM(0x58c0, 0xfffe0000, 0x003f), + RTW89_DECL_RFK_WM(0x58c4, 0xffffffff, 0x0003ffff), + RTW89_DECL_RFK_WM(0x58c8, 0x00ffffff, 0x000000), + RTW89_DECL_RFK_WM(0x58c8, 0xf0000000, 0x0), + RTW89_DECL_RFK_WM(0x58cc, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x58d0, 0x07ffffff, 0x2008101), + RTW89_DECL_RFK_WM(0x58d4, 0x000000ff, 0x00), + RTW89_DECL_RFK_WM(0x58d4, 0x0003fe00, 0x0ff), + RTW89_DECL_RFK_WM(0x58d4, 0x07fc0000, 0x100), + RTW89_DECL_RFK_WM(0x58d8, 0xffffffff, 0x8008016c), + RTW89_DECL_RFK_WM(0x58dc, 0x0001ffff, 0x0807f), + RTW89_DECL_RFK_WM(0x58dc, 0xfff00000, 0xc00), + RTW89_DECL_RFK_WM(0x58f0, 0x0003ffff, 0x001ff), + RTW89_DECL_RFK_WM(0x58f4, 0x000fffff, 0x000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_init_txpwr_defs_a); + +static const struct rtw89_reg5_def rtw8852bt_tssi_init_txpwr_defs_b[] = { + RTW89_DECL_RFK_WM(0x566c, 0x00001000, 0x0), + RTW89_DECL_RFK_WM(0x7800, 0xffffffff, 0x003f807f), + RTW89_DECL_RFK_WM(0x780c, 0x0000007f, 0x40), + RTW89_DECL_RFK_WM(0x780c, 0x0fffff00, 0x00040), + RTW89_DECL_RFK_WM(0x7810, 0xffffffff, 0x59010000), + RTW89_DECL_RFK_WM(0x7814, 0x01ffffff, 0x002d000), + RTW89_DECL_RFK_WM(0x7814, 0xf8000000, 0x00), + RTW89_DECL_RFK_WM(0x7818, 0xffffffff, 0x002c1800), + RTW89_DECL_RFK_WM(0x781c, 0x3fffffff, 0x1dc80280), + RTW89_DECL_RFK_WM(0x7820, 0xffffffff, 0x00002080), + RTW89_DECL_RFK_WM(0x780c, 0x10000000, 0x1), + RTW89_DECL_RFK_WM(0x780c, 0x40000000, 0x1), + RTW89_DECL_RFK_WM(0x7834, 0x3fffffff, 0x000115f2), + RTW89_DECL_RFK_WM(0x7838, 0x7fffffff, 0x0000121), + RTW89_DECL_RFK_WM(0x7854, 0x3fffffff, 0x000115f2), + RTW89_DECL_RFK_WM(0x7858, 0x7fffffff, 0x0000121), + RTW89_DECL_RFK_WM(0x7860, 0x80000000, 0x0), + RTW89_DECL_RFK_WM(0x7864, 0x07ffffff, 0x00801ff), + RTW89_DECL_RFK_WM(0x7898, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x789c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x78a4, 0x000000ff, 0x16), + RTW89_DECL_RFK_WM(0x78b0, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x78b4, 0x7fffffff, 0x0a002000), + RTW89_DECL_RFK_WM(0x78b8, 0x7fffffff, 0x00007628), + RTW89_DECL_RFK_WM(0x78bc, 0x07ffffff, 0x7a7807f), + RTW89_DECL_RFK_WM(0x78c0, 0xfffe0000, 0x003f), + RTW89_DECL_RFK_WM(0x78c4, 0xffffffff, 0x0003ffff), + RTW89_DECL_RFK_WM(0x78c8, 0x00ffffff, 0x000000), + RTW89_DECL_RFK_WM(0x78c8, 0xf0000000, 0x0), + RTW89_DECL_RFK_WM(0x78cc, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x78d0, 0x07ffffff, 0x2008101), + RTW89_DECL_RFK_WM(0x78d4, 0x000000ff, 0x00), + RTW89_DECL_RFK_WM(0x78d4, 0x0003fe00, 0x0ff), + RTW89_DECL_RFK_WM(0x78d4, 0x07fc0000, 0x100), + RTW89_DECL_RFK_WM(0x78d8, 0xffffffff, 0x8008016c), + RTW89_DECL_RFK_WM(0x78dc, 0x0001ffff, 0x0807f), + RTW89_DECL_RFK_WM(0x78dc, 0xfff00000, 0xc00), + RTW89_DECL_RFK_WM(0x78f0, 0x0003ffff, 0x001ff), + RTW89_DECL_RFK_WM(0x78f4, 0x000fffff, 0x000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_init_txpwr_defs_b); + +static const struct rtw89_reg5_def rtw8852bt_tssi_init_txpwr_he_tb_defs_a[] = { + RTW89_DECL_RFK_WM(0x58a0, 0xffffffff, 0x000000fe), + 
RTW89_DECL_RFK_WM(0x58e4, 0x0000007f, 0x1f), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_init_txpwr_he_tb_defs_a); + +static const struct rtw89_reg5_def rtw8852bt_tssi_init_txpwr_he_tb_defs_b[] = { + RTW89_DECL_RFK_WM(0x78a0, 0xffffffff, 0x000000fe), + RTW89_DECL_RFK_WM(0x78e4, 0x0000007f, 0x1f), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_init_txpwr_he_tb_defs_b); + +static const struct rtw89_reg5_def rtw8852bt_tssi_dck_defs_a[] = { + RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x000), + RTW89_DECL_RFK_WM(0x5814, 0x003ff000, 0x0ef), + RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_dck_defs_a); + +static const struct rtw89_reg5_def rtw8852bt_tssi_dck_defs_b[] = { + RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x000), + RTW89_DECL_RFK_WM(0x7814, 0x003ff000, 0x0ef), + RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_dck_defs_b); + +static const struct rtw89_reg5_def rtw8852bt_tssi_dac_gain_defs_a[] = { + RTW89_DECL_RFK_WM(0x58b0, 0x00000400, 0x1), + RTW89_DECL_RFK_WM(0x58b0, 0x00000fff, 0x000), + RTW89_DECL_RFK_WM(0x58b0, 0x00000800, 0x1), + RTW89_DECL_RFK_WM(0x5a00, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a04, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a08, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a0c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a10, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a14, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a18, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a1c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a20, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a24, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a28, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a2c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a30, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a34, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a38, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a3c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a40, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a44, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a48, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a4c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a50, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a54, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a58, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a5c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a60, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a64, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a68, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a6c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a70, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a74, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a78, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a7c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a80, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a84, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a88, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a8c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a90, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a94, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a98, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a9c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5aa0, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5aa4, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5aa8, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5aac, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5ab0, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5ab4, 0xffffffff, 
0x00000000), + RTW89_DECL_RFK_WM(0x5ab8, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5abc, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5ac0, 0xffffffff, 0x00000000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_dac_gain_defs_a); + +static const struct rtw89_reg5_def rtw8852bt_tssi_dac_gain_defs_b[] = { + RTW89_DECL_RFK_WM(0x78b0, 0x00000fff, 0x000), + RTW89_DECL_RFK_WM(0x78b0, 0x00000800, 0x1), + RTW89_DECL_RFK_WM(0x7a00, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a04, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a08, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a0c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a10, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a14, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a18, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a1c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a20, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a24, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a28, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a2c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a30, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a34, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a38, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a3c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a40, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a44, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a48, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a4c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a50, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a54, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a58, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a5c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a60, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a64, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a68, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a6c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a70, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a74, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a78, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a7c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a80, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a84, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a88, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a8c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a90, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a94, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a98, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a9c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7aa0, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7aa4, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7aa8, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7aac, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7ab0, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7ab4, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7ab8, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7abc, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7ac0, 0xffffffff, 0x00000000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_dac_gain_defs_b); + +static const struct rtw89_reg5_def rtw8852bt_tssi_slope_a_defs_2g[] = { + RTW89_DECL_RFK_WM(0x5608, 0x07ffffff, 0x0801008), + RTW89_DECL_RFK_WM(0x560c, 0x07ffffff, 0x0201020), + RTW89_DECL_RFK_WM(0x5610, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x5614, 0x07ffffff, 0x0804008), + RTW89_DECL_RFK_WM(0x5618, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x561c, 0x000001ff, 0x008), + RTW89_DECL_RFK_WM(0x561c, 0xffff0000, 0x0808), + RTW89_DECL_RFK_WM(0x5620, 0xffffffff, 0x08081e28), + 
RTW89_DECL_RFK_WM(0x5624, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x5628, 0xffffffff, 0x08081e28), + RTW89_DECL_RFK_WM(0x562c, 0x0000ffff, 0x0808), + RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_a_defs_2g); + +static const struct rtw89_reg5_def rtw8852bt_tssi_slope_a_defs_5g[] = { + RTW89_DECL_RFK_WM(0x5608, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x560c, 0x07ffffff, 0x0201019), + RTW89_DECL_RFK_WM(0x5610, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x5614, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x5618, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x561c, 0x000001ff, 0x008), + RTW89_DECL_RFK_WM(0x561c, 0xffff0000, 0x0808), + RTW89_DECL_RFK_WM(0x5620, 0xffffffff, 0x08081808), + RTW89_DECL_RFK_WM(0x5624, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x5628, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x562c, 0x0000ffff, 0x0808), + RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_a_defs_5g); + +static const struct rtw89_reg5_def rtw8852bt_tssi_slope_b_defs_2g[] = { + RTW89_DECL_RFK_WM(0x7608, 0x07ffffff, 0x0801008), + RTW89_DECL_RFK_WM(0x760c, 0x07ffffff, 0x0201020), + RTW89_DECL_RFK_WM(0x7610, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x7614, 0x07ffffff, 0x0804008), + RTW89_DECL_RFK_WM(0x7618, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x761c, 0x000001ff, 0x008), + RTW89_DECL_RFK_WM(0x761c, 0xffff0000, 0x0808), + RTW89_DECL_RFK_WM(0x7620, 0xffffffff, 0x08081e28), + RTW89_DECL_RFK_WM(0x7624, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x7628, 0xffffffff, 0x08081e28), + RTW89_DECL_RFK_WM(0x762c, 0x0000ffff, 0x0808), + RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_b_defs_2g); + +static const struct rtw89_reg5_def rtw8852bt_tssi_slope_b_defs_5g[] = { + RTW89_DECL_RFK_WM(0x7608, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x760c, 0x07ffffff, 0x0201019), + RTW89_DECL_RFK_WM(0x7610, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x7614, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x7618, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x761c, 0x000001ff, 0x008), + RTW89_DECL_RFK_WM(0x761c, 0xffff0000, 0x0808), + RTW89_DECL_RFK_WM(0x7620, 0xffffffff, 0x08081808), + RTW89_DECL_RFK_WM(0x7624, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x7628, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x762c, 0x0000ffff, 0x0808), + RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_b_defs_5g); + +static const struct rtw89_reg5_def rtw8852bt_tssi_align_a_2g_all_defs[] = { + RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x3f2d2721), + RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x010101), + RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x029f57c0), + RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000077), + RTW89_DECL_RFK_WM(0x5638, 0x000fffff, 0x00000), + RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x029f5bc0), + RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000076), + RTW89_DECL_RFK_WM(0x5644, 0x000fffff, 0x00000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_a_2g_all_defs); + +static const struct rtw89_reg5_def rtw8852bt_tssi_align_a_5g1_all_defs[] = { + RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x3f2d2721), + RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x010101), + RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x007ff3d7), + RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000068), + RTW89_DECL_RFK_WM(0x5638, 0x000fffff, 0x00000), + RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000), + 
RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5644, 0x000fffff, 0x00000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_a_5g1_all_defs); + +static const struct rtw89_reg5_def rtw8852bt_tssi_align_a_5g2_all_defs[] = { + RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x3f2d2721), + RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x010101), + RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x00a003db), + RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000065), + RTW89_DECL_RFK_WM(0x5638, 0x000fffff, 0x00000), + RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5644, 0x000fffff, 0x00000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_a_5g2_all_defs); + +static const struct rtw89_reg5_def rtw8852bt_tssi_align_a_5g3_all_defs[] = { + RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x3f2d2721), + RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x010101), + RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x01101be2), + RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000065), + RTW89_DECL_RFK_WM(0x5638, 0x000fffff, 0x00000), + RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5644, 0x000fffff, 0x00000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_a_5g3_all_defs); + +static const struct rtw89_reg5_def rtw8852bt_tssi_align_b_2g_all_defs[] = { + RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x3f2d2721), + RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x010101), + RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x023f3fb9), + RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000075), + RTW89_DECL_RFK_WM(0x7638, 0x000fffff, 0x00000), + RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x01df3fb8), + RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000074), + RTW89_DECL_RFK_WM(0x7644, 0x000fffff, 0x00000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_b_2g_all_defs); + +static const struct rtw89_reg5_def rtw8852bt_tssi_align_b_5g1_all_defs[] = { + RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x3f2d2721), + RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x010101), + RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x010017e0), + RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000069), + RTW89_DECL_RFK_WM(0x7638, 0x000fffff, 0x00000), + RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7644, 0x000fffff, 0x00000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_b_5g1_all_defs); + +static const struct rtw89_reg5_def rtw8852bt_tssi_align_b_5g2_all_defs[] = { + RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x3f2d2721), + RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x010101), + RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x01201fe2), + RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000066), + RTW89_DECL_RFK_WM(0x7638, 0x000fffff, 0x00000), + RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7644, 0x000fffff, 0x00000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_b_5g2_all_defs); + +static const struct rtw89_reg5_def rtw8852bt_tssi_align_b_5g3_all_defs[] = { + RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x3f2d2721), + RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x010101), + RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x01602fe5), + RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000068), + 
RTW89_DECL_RFK_WM(0x7638, 0x000fffff, 0x00000), + RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7644, 0x000fffff, 0x00000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_b_5g3_all_defs); + +static const struct rtw89_reg5_def rtw8852bt_tssi_slope_defs_a[] = { + RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x1), + RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x1), + RTW89_DECL_RFK_WM(0x5814, 0x20000000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_defs_a); + +static const struct rtw89_reg5_def rtw8852bt_tssi_slope_defs_b[] = { + RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x1), + RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x1), + RTW89_DECL_RFK_WM(0x7814, 0x20000000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_defs_b); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.h new file mode 100644 index 000000000000..beb246237d17 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2024 Realtek Corporation + */ + +#ifndef __RTW89_8852BT_RFK_TABLE_H__ +#define __RTW89_8852BT_RFK_TABLE_H__ + +#include "phy.h" + +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_sys_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_sys_a_defs_2g_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_sys_a_defs_5g_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_sys_b_defs_2g_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_sys_b_defs_5g_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_init_txpwr_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_init_txpwr_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_init_txpwr_he_tb_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_init_txpwr_he_tb_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_dck_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_dck_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_dac_gain_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_dac_gain_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_a_defs_2g_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_a_defs_5g_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_b_defs_2g_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_b_defs_5g_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_a_2g_all_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_a_5g1_all_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_a_5g2_all_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_a_5g3_all_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_b_2g_all_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_b_5g1_all_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_b_5g2_all_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_b_5g3_all_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_defs_b_tbl; + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 544ea1d3b577..193168dc7b6c 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -73,6 +73,10 @@ static const u32 
rtw8852c_c2h_regs[RTW89_H2CREG_MAX] = { R_AX_C2HREG_DATA3_V1 }; +static const u32 rtw8852c_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = { + R_AX_C2HREG_DATA3_V1 + 3, R_AX_DBG_WOW, +}; + static const struct rtw89_page_regs rtw8852c_page_regs = { .hci_fc_ctrl = R_AX_HCI_FC_CTRL_V1, .ch_page_ctrl = R_AX_CH_PAGE_CTRL_V1, @@ -3007,7 +3011,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8}, .c2h_regs = rtw8852c_c2h_regs, .page_regs = &rtw8852c_page_regs, - .wow_reason_reg = R_AX_C2HREG_DATA3_V1 + 3, + .wow_reason_reg = rtw8852c_wow_wakeup_regs, .cfo_src_fd = false, .cfo_hw_comp = false, .dcfo_comp = &rtw8852c_dcfo_comp, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c index 654e3e5507cb..743f7014bf3e 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c @@ -4070,12 +4070,11 @@ void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev, void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) { struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; - DECLARE_BITMAP(map, RTW89_IQK_CHS_NR) = {}; + struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V0] = {}; const struct rtw89_chan *chan; enum rtw89_entity_mode mode; u8 chan_idx; u8 idx; - u8 i; mode = rtw89_get_entity_mode(rtwdev); switch (mode) { @@ -4087,34 +4086,21 @@ void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_i break; } - for (i = 0; i <= chan_idx; i++) { - chan = rtw89_chan_get(rtwdev, i); + chan = rtw89_chan_get(rtwdev, chan_idx); - for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) { - if (rfk_mcc->ch[idx] == chan->channel && - rfk_mcc->band[idx] == chan->band_type) { - if (i != chan_idx) { - set_bit(idx, map); - break; - } + for (idx = 0; idx < ARRAY_SIZE(desc); idx++) { + struct rtw89_rfk_chan_desc *p = &desc[idx]; - goto bottom; - } - } - } + p->ch = rfk_mcc->ch[idx]; - idx = find_first_zero_bit(map, RTW89_IQK_CHS_NR); - if (idx == RTW89_IQK_CHS_NR) { - rtw89_debug(rtwdev, RTW89_DBG_RFK, - "%s: no empty rfk table; force replace the first\n", - __func__); - idx = 0; + p->has_band = true; + p->band = rfk_mcc->band[idx]; } + idx = rtw89_rfk_chan_lookup(rtwdev, desc, ARRAY_SIZE(desc), chan); + rfk_mcc->ch[idx] = chan->channel; rfk_mcc->band[idx] = chan->band_type; - -bottom: rfk_mcc->table_idx = idx; } diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c index e07c7f3ade41..8aaad7d58c0d 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c @@ -55,6 +55,7 @@ static const struct rtw89_pci_info rtw8852c_pci_info = { .rpwm_addr = R_AX_PCIE_HRPWM_V1, .cpwm_addr = R_AX_PCIE_CRPWM, .mit_addr = R_AX_INT_MIT_RX_V1, + .wp_sel_addr = R_AX_WP_ADDR_H_SEL0_3, .tx_dma_ch_mask = 0, .bd_idx_addr_low_power = &rtw8852c_bd_idx_addr_low_power, .dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c index cc4f251caadd..2af568a3264d 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c @@ -85,6 +85,10 @@ static const u32 rtw8922a_c2h_regs[RTW89_H2CREG_MAX] = { R_BE_C2HREG_DATA3 }; +static const u32 rtw8922a_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = { + R_AX_C2HREG_DATA3_V1 + 3, R_BE_DBG_WOW, +}; + static const struct rtw89_page_regs 
rtw8922a_page_regs = { .hci_fc_ctrl = R_BE_HCI_FC_CTRL, .ch_page_ctrl = R_BE_CH_PAGE_CTRL, @@ -2609,7 +2613,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = { .c2h_counter_reg = {R_BE_UDM1 + 1, B_BE_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8}, .c2h_regs = rtw8922a_c2h_regs, .page_regs = &rtw8922a_page_regs, - .wow_reason_reg = R_AX_C2HREG_DATA3_V1 + 3, + .wow_reason_reg = rtw8922a_wow_wakeup_regs, .cfo_src_fd = true, .cfo_hw_comp = true, .dcfo_comp = NULL, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c index 2a371829268c..0ebcb06ae848 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c @@ -255,6 +255,7 @@ static void rtw8922a_chlk_ktbl_sel(struct rtw89_dev *rtwdev, u8 kpath, u8 idx) static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev) { struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; + struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V1] = {}; enum rtw89_sub_entity_idx sub_entity_idx; const struct rtw89_chan *chan; enum rtw89_entity_mode mode; @@ -265,16 +266,28 @@ static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev) switch (mode) { case RTW89_ENTITY_MODE_MCC_PREPARE: sub_entity_idx = RTW89_SUB_ENTITY_1; - tbl_sel = 1; break; default: sub_entity_idx = RTW89_SUB_ENTITY_0; - tbl_sel = 0; break; } chan = rtw89_chan_get(rtwdev, sub_entity_idx); + for (tbl_sel = 0; tbl_sel < ARRAY_SIZE(desc); tbl_sel++) { + struct rtw89_rfk_chan_desc *p = &desc[tbl_sel]; + + p->ch = rfk_mcc->ch[tbl_sel]; + + p->has_band = true; + p->band = rfk_mcc->band[tbl_sel]; + + p->has_bw = true; + p->bw = rfk_mcc->bw[tbl_sel]; + } + + tbl_sel = rtw89_rfk_chan_lookup(rtwdev, desc, ARRAY_SIZE(desc), chan); + rfk_mcc->ch[tbl_sel] = chan->channel; rfk_mcc->band[tbl_sel] = chan->band_type; rfk_mcc->bw[tbl_sel] = chan->band_width; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c index ce8aaa9501e1..47f855a7a268 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c @@ -46,6 +46,7 @@ static const struct rtw89_pci_info rtw8922a_pci_info = { .rpwm_addr = R_BE_PCIE_HRPWM, .cpwm_addr = R_BE_PCIE_CRPWM, .mit_addr = R_BE_PCIE_MIT_CH_EN, + .wp_sel_addr = R_BE_WP_ADDR_H_SEL0_3_V1, .tx_dma_ch_mask = 0, .bd_idx_addr_low_power = NULL, .dma_addr_set = &rtw89_pci_ch_dma_addr_set_be, diff --git a/drivers/net/wireless/realtek/rtw89/util.c b/drivers/net/wireless/realtek/rtw89/util.c new file mode 100644 index 000000000000..e71956ce9853 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/util.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2024 Realtek Corporation + */ + +#include "util.h" + +#define FRAC_ROWS 3 +#define FRAC_ROW_MAX (FRAC_ROWS - 1) +#define NORM_ROW_MIN FRAC_ROWS + +static const u32 db_invert_table[12][8] = { + /* rows 0~2 in unit of U(32,3) */ + {10, 13, 16, 20, 25, 32, 40, 50}, + {64, 80, 101, 128, 160, 201, 256, 318}, + {401, 505, 635, 800, 1007, 1268, 1596, 2010}, + /* rows 3~11 in unit of U(32,0) */ + {316, 398, 501, 631, 794, 1000, 1259, 1585}, + {1995, 2512, 3162, 3981, 5012, 6310, 7943, 10000}, + {12589, 15849, 19953, 25119, 31623, 39811, 50119, 63098}, + {79433, 100000, 125893, 158489, 199526, 251189, 316228, 398107}, + {501187, 630957, 794328, 1000000, 1258925, 1584893, 1995262, 2511886}, + {3162278, 3981072, 5011872, 6309573, 7943282, 10000000, 12589254, + 15848932}, + {19952623, 25118864, 31622777, 
39810717, 50118723, 63095734, 79432823, + 100000000}, + {125892541, 158489319, 199526232, 251188643, 316227766, 398107171, + 501187234, 630957345}, + {794328235, 1000000000, 1258925412, 1584893192, 1995262315, 2511886432U, + 3162277660U, 3981071706U}, +}; + +u32 rtw89_linear_2_db(u64 val) +{ + u8 i, j; + u32 dB; + + for (i = 0; i < 12; i++) { + for (j = 0; j < 8; j++) { + if (i <= FRAC_ROW_MAX && + (val << RTW89_LINEAR_FRAC_BITS) <= db_invert_table[i][j]) + goto cnt; + else if (i > FRAC_ROW_MAX && val <= db_invert_table[i][j]) + goto cnt; + } + } + + return 96; /* maximum 96 dB */ + +cnt: + /* special cases */ + if (j == 0 && i == 0) + goto end; + + if (i == NORM_ROW_MIN && j == 0) { + if (db_invert_table[NORM_ROW_MIN][0] - val > + val - (db_invert_table[FRAC_ROW_MAX][7] >> RTW89_LINEAR_FRAC_BITS)) { + i = FRAC_ROW_MAX; + j = 7; + } + goto end; + } + + if (i <= FRAC_ROW_MAX) + val <<= RTW89_LINEAR_FRAC_BITS; + + /* compare difference to get precise dB */ + if (j == 0) { + if (db_invert_table[i][j] - val > + val - db_invert_table[i - 1][7]) { + i--; + j = 7; + } + } else { + if (db_invert_table[i][j] - val > + val - db_invert_table[i][j - 1]) { + j--; + } + } +end: + dB = (i << 3) + j + 1; + + return dB; +} +EXPORT_SYMBOL(rtw89_linear_2_db); + +u64 rtw89_db_2_linear(u32 db) +{ + u64 linear; + u8 i, j; + + if (db > 96) + db = 96; + else if (db < 1) + return 1; + + i = (db - 1) >> 3; + j = (db - 1) & 0x7; + + linear = db_invert_table[i][j]; + + if (i >= NORM_ROW_MIN) + linear = linear << RTW89_LINEAR_FRAC_BITS; + + return linear; +} +EXPORT_SYMBOL(rtw89_db_2_linear); diff --git a/drivers/net/wireless/realtek/rtw89/util.h b/drivers/net/wireless/realtek/rtw89/util.h index e2ed4565025d..e82e7df052d8 100644 --- a/drivers/net/wireless/realtek/rtw89/util.h +++ b/drivers/net/wireless/realtek/rtw89/util.h @@ -6,6 +6,8 @@ #include "core.h" +#define RTW89_LINEAR_FRAC_BITS 3 + #define rtw89_iterate_vifs_bh(rtwdev, iterator, data) \ ieee80211_iterate_active_interfaces_atomic((rtwdev)->hw, \ IEEE80211_IFACE_ITER_NORMAL, iterator, data) @@ -55,4 +57,7 @@ static inline void ether_addr_copy_mask(u8 *dst, const u8 *src, u8 mask) } } +u32 rtw89_linear_2_db(u64 linear); +u64 rtw89_db_2_linear(u32 db); + #endif diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c index fa61484c3839..9882064ef68d 100644 --- a/drivers/net/wireless/realtek/rtw89/wow.c +++ b/drivers/net/wireless/realtek/rtw89/wow.c @@ -27,17 +27,23 @@ void rtw89_wow_parse_akm(struct rtw89_dev *rtwdev, struct sk_buff *skb) rtw_wow->akm = rsn_ie->akm_cipher_suite.type; } +#define RTW89_CIPHER_INFO_DEF(cipher) \ + {WLAN_CIPHER_SUITE_ ## cipher, .fw_alg = RTW89_WOW_FW_ALG_ ## cipher, \ + .len = WLAN_KEY_LEN_ ## cipher} + static const struct rtw89_cipher_info rtw89_cipher_info_defs[] = { - {WLAN_CIPHER_SUITE_WEP40, .fw_alg = 1, .len = WLAN_KEY_LEN_WEP40,}, - {WLAN_CIPHER_SUITE_WEP104, .fw_alg = 2, .len = WLAN_KEY_LEN_WEP104,}, - {WLAN_CIPHER_SUITE_TKIP, .fw_alg = 3, .len = WLAN_KEY_LEN_TKIP,}, - {WLAN_CIPHER_SUITE_CCMP, .fw_alg = 6, .len = WLAN_KEY_LEN_CCMP,}, - {WLAN_CIPHER_SUITE_GCMP, .fw_alg = 8, .len = WLAN_KEY_LEN_GCMP,}, - {WLAN_CIPHER_SUITE_CCMP_256, .fw_alg = 7, .len = WLAN_KEY_LEN_CCMP_256,}, - {WLAN_CIPHER_SUITE_GCMP_256, .fw_alg = 23, .len = WLAN_KEY_LEN_GCMP_256,}, - {WLAN_CIPHER_SUITE_AES_CMAC, .fw_alg = 32, .len = WLAN_KEY_LEN_AES_CMAC,}, + RTW89_CIPHER_INFO_DEF(WEP40), + RTW89_CIPHER_INFO_DEF(WEP104), + RTW89_CIPHER_INFO_DEF(TKIP), + RTW89_CIPHER_INFO_DEF(CCMP), + RTW89_CIPHER_INFO_DEF(GCMP), + 
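/* A quick sanity check of the util.c helpers above, as a worked example
 * using only values from db_invert_table (the first three rows carry
 * RTW89_LINEAR_FRAC_BITS = 3 fractional bits, i.e. U(32,3) format):
 * rtw89_db_2_linear(30): i = (30 - 1) >> 3 = 3, j = (30 - 1) & 0x7 = 5,
 * db_invert_table[3][5] = 1000, and row 3 >= NORM_ROW_MIN, so the result
 * is 1000 << 3 = 8000, i.e. 1000.0 in U(32,3). Conversely,
 * rtw89_linear_2_db(1000) stops at row 3, column 5 and returns
 * (3 << 3) + 5 + 1 = 30 dB, inverting the mapping.
 */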
RTW89_CIPHER_INFO_DEF(CCMP_256), + RTW89_CIPHER_INFO_DEF(GCMP_256), + RTW89_CIPHER_INFO_DEF(AES_CMAC), }; +#undef RTW89_CIPHER_INFO_DEF + static const struct rtw89_cipher_info *rtw89_cipher_alg_recognize(u32 cipher) { @@ -717,13 +723,18 @@ static void rtw89_wow_show_wakeup_reason(struct rtw89_dev *rtwdev) { struct rtw89_wow_param *rtw_wow = &rtwdev->wow; struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt; - u32 wow_reason_reg = rtwdev->chip->wow_reason_reg; struct cfg80211_wowlan_nd_info nd_info; struct cfg80211_wowlan_wakeup wakeup = { .pattern_idx = -1, }; + u32 wow_reason_reg; u8 reason; + if (RTW89_CHK_FW_FEATURE(WOW_REASON_V1, &rtwdev->fw)) + wow_reason_reg = rtwdev->chip->wow_reason_reg[RTW89_WOW_REASON_V1]; + else + wow_reason_reg = rtwdev->chip->wow_reason_reg[RTW89_WOW_REASON_V0]; + reason = rtw89_read8(rtwdev, wow_reason_reg); switch (reason) { case RTW89_WOW_RSN_RX_DEAUTH: @@ -1284,12 +1295,16 @@ static int rtw89_wow_disable_trx_pre(struct rtw89_dev *rtwdev) static int rtw89_wow_disable_trx_post(struct rtw89_dev *rtwdev) { + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct ieee80211_vif *vif = rtw_wow->wow_vif; int ret; ret = rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true); if (ret) rtw89_err(rtwdev, "cfg ppdu status\n"); + rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true); + return ret; } diff --git a/drivers/net/wireless/realtek/rtw89/wow.h b/drivers/net/wireless/realtek/rtw89/wow.h index e595aee0196d..0d90add0e88d 100644 --- a/drivers/net/wireless/realtek/rtw89/wow.h +++ b/drivers/net/wireless/realtek/rtw89/wow.h @@ -35,6 +35,17 @@ enum rtw89_wake_reason { RTW89_WOW_RSN_RX_NLO = 0x55, }; +enum rtw89_fw_alg { + RTW89_WOW_FW_ALG_WEP40 = 0x1, + RTW89_WOW_FW_ALG_WEP104 = 0x2, + RTW89_WOW_FW_ALG_TKIP = 0x3, + RTW89_WOW_FW_ALG_CCMP = 0x6, + RTW89_WOW_FW_ALG_CCMP_256 = 0x7, + RTW89_WOW_FW_ALG_GCMP = 0x8, + RTW89_WOW_FW_ALG_GCMP_256 = 0x17, + RTW89_WOW_FW_ALG_AES_CMAC = 0x20, +}; + struct rtw89_cipher_suite { u8 oui[3]; u8 type; @@ -64,6 +75,25 @@ struct rtw89_set_key_info_iter_data { bool error; }; +static inline int rtw89_wow_get_sec_hdr_len(struct rtw89_dev *rtwdev) +{ + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + + if (!(rtwdev->chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))) + return 0; + + switch (rtw_wow->ptk_alg) { + case RTW89_WOW_FW_ALG_WEP40: + return 4; + case RTW89_WOW_FW_ALG_TKIP: + case RTW89_WOW_FW_ALG_CCMP: + case RTW89_WOW_FW_ALG_GCMP_256: + return 8; + default: + return 0; + } +} + #ifdef CONFIG_PM int rtw89_wow_suspend(struct rtw89_dev *rtwdev, struct cfg80211_wowlan *wowlan); int rtw89_wow_resume(struct rtw89_dev *rtwdev); diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c index 8491eb32f760..714e1f04b0cb 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.c +++ b/drivers/net/wireless/virtual/mac80211_hwsim.c @@ -69,6 +69,10 @@ static bool mlo; module_param(mlo, bool, 0444); MODULE_PARM_DESC(mlo, "Support MLO"); +static bool multi_radio; +module_param(multi_radio, bool, 0444); +MODULE_PARM_DESC(multi_radio, "Support Multiple Radios per wiphy"); + /** * enum hwsim_regtest - the type of regulatory tests we offer * @@ -669,6 +673,10 @@ struct mac80211_hwsim_data { struct ieee80211_iface_limit if_limits[3]; int n_if_limits; + struct ieee80211_iface_combination if_combination_radio; + struct wiphy_radio_freq_range radio_range[NUM_NL80211_BANDS]; + struct wiphy_radio radio[NUM_NL80211_BANDS]; + u32 ciphers[ARRAY_SIZE(hwsim_ciphers)]; struct mac_address addresses[2]; @@ -917,6 +925,7 
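For reference, the RTW89_CIPHER_INFO_DEF() macro above is a behaviour-preserving shorthand only because each enum rtw89_fw_alg value equals the literal it replaces (0x17 == 23 for GCMP_256, 0x20 == 32 for AES_CMAC). For example, RTW89_CIPHER_INFO_DEF(CCMP) expands to

{WLAN_CIPHER_SUITE_CCMP, .fw_alg = RTW89_WOW_FW_ALG_CCMP, .len = WLAN_KEY_LEN_CCMP}

which is exactly the removed {WLAN_CIPHER_SUITE_CCMP, .fw_alg = 6, .len = WLAN_KEY_LEN_CCMP,} initializer.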
@@ static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = { [HWSIM_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG }, [HWSIM_ATTR_PMSR_SUPPORT] = NLA_POLICY_NESTED(hwsim_pmsr_capa_policy), [HWSIM_ATTR_PMSR_RESULT] = NLA_POLICY_NESTED(hwsim_pmsr_peers_result_policy), + [HWSIM_ATTR_MULTI_RADIO] = { .type = NLA_FLAG }, }; #if IS_REACHABLE(CONFIG_VIRTIO) @@ -3261,7 +3270,7 @@ static int mac80211_hwsim_switch_vif_chanctx(struct ieee80211_hw *hw, hwsim_clear_chanctx_magic(vifs[i].old_ctx); break; default: - WARN_ON("Invalid mode"); + WARN(1, "Invalid mode %d\n", mode); } } return 0; @@ -4018,6 +4027,7 @@ struct hwsim_new_radio_params { bool reg_strict; bool p2p_device; bool use_chanctx; + bool multi_radio; bool destroy_on_close; const char *hwname; bool no_vif; @@ -4094,6 +4104,12 @@ static int append_radio_msg(struct sk_buff *skb, int id, return ret; } + if (param->multi_radio) { + ret = nla_put_flag(skb, HWSIM_ATTR_MULTI_RADIO); + if (ret < 0) + return ret; + } + if (param->hwname) { ret = nla_put(skb, HWSIM_ATTR_RADIO_NAME, strlen(param->hwname), param->hwname); @@ -5114,6 +5130,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, struct net *net; int idx, i; int n_limits = 0; + int n_bands = 0; if (WARN_ON(param->channels > 1 && !param->use_chanctx)) return -EINVAL; @@ -5217,22 +5234,22 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, n_limits++; } + data->if_combination.radar_detect_widths = + BIT(NL80211_CHAN_WIDTH_5) | + BIT(NL80211_CHAN_WIDTH_10) | + BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80) | + BIT(NL80211_CHAN_WIDTH_160); + if (data->use_chanctx) { hw->wiphy->max_scan_ssids = 255; hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; hw->wiphy->max_remain_on_channel_duration = 1000; - data->if_combination.radar_detect_widths = 0; data->if_combination.num_different_channels = data->channels; } else { data->if_combination.num_different_channels = 1; - data->if_combination.radar_detect_widths = - BIT(NL80211_CHAN_WIDTH_5) | - BIT(NL80211_CHAN_WIDTH_10) | - BIT(NL80211_CHAN_WIDTH_20_NOHT) | - BIT(NL80211_CHAN_WIDTH_20) | - BIT(NL80211_CHAN_WIDTH_40) | - BIT(NL80211_CHAN_WIDTH_80) | - BIT(NL80211_CHAN_WIDTH_160); } if (!n_limits) { @@ -5350,6 +5367,9 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband = &data->bands[band]; + struct wiphy_radio_freq_range *radio_range; + const struct ieee80211_channel *c; + struct wiphy_radio *radio; sband->band = band; @@ -5423,8 +5443,36 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, mac80211_hwsim_sband_capab(sband); hw->wiphy->bands[band] = sband; + + if (!param->multi_radio) + continue; + + c = sband->channels; + radio_range = &data->radio_range[n_bands]; + radio_range->start_freq = ieee80211_channel_to_khz(c) - 10000; + + c += sband->n_channels - 1; + radio_range->end_freq = ieee80211_channel_to_khz(c) + 10000; + + radio = &data->radio[n_bands++]; + radio->freq_range = radio_range; + radio->n_freq_range = 1; + radio->iface_combinations = &data->if_combination_radio; + radio->n_iface_combinations = 1; } + if (param->multi_radio) { + hw->wiphy->radio = data->radio; + hw->wiphy->n_radio = n_bands; + + memcpy(&data->if_combination_radio, &data->if_combination, + sizeof(data->if_combination)); + data->if_combination.num_different_channels *= n_bands; + } + + if (data->use_chanctx) + 
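/* Worked example for the per-band ranges registered above (illustrative
 * numbers, not driver output): hwsim's 2.4 GHz band spans channel 1
 * (2412 MHz) to channel 14 (2484 MHz), so with the +/- 10 MHz padding the
 * per-band radio advertises
 *	start_freq = 2412000 - 10000 = 2402000 kHz
 *	end_freq   = 2484000 + 10000 = 2494000 kHz
 */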
data->if_combination.radar_detect_widths = 0; + /* By default all radios belong to the first group */ data->group = 1; mutex_init(&data->mutex); @@ -6042,6 +6090,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) else param.use_chanctx = (param.channels > 1); + if (info->attrs[HWSIM_ATTR_MULTI_RADIO]) + param.multi_radio = true; + if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]) param.reg_alpha2 = nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]); @@ -6122,7 +6173,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) param.mlo = info->attrs[HWSIM_ATTR_MLO_SUPPORT]; - if (param.mlo) + if (param.mlo || param.multi_radio) param.use_chanctx = true; if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { @@ -6815,7 +6866,8 @@ static int __init init_mac80211_hwsim(void) param.p2p_device = support_p2p_device; param.mlo = mlo; - param.use_chanctx = channels > 1 || mlo; + param.multi_radio = multi_radio; + param.use_chanctx = channels > 1 || mlo || multi_radio; param.iftypes = HWSIM_IFTYPE_SUPPORT_MASK; if (param.p2p_device) param.iftypes |= BIT(NL80211_IFTYPE_P2P_DEVICE); diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.h b/drivers/net/wireless/virtual/mac80211_hwsim.h index 21b1afd83dc1..f32fc3a492b0 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.h +++ b/drivers/net/wireless/virtual/mac80211_hwsim.h @@ -157,6 +157,9 @@ enum hwsim_commands { * to provide details about peer measurement request (nl80211_peer_measurement_attrs) * @HWSIM_ATTR_PMSR_RESULT: nested attributed used with %HWSIM_CMD_REPORT_PMSR * to provide peer measurement result (nl80211_peer_measurement_attrs) + * @HWSIM_ATTR_MULTI_RADIO: Register multiple wiphy radios (flag). + * Adds one radio for each band. Number of supported channels will be set for + * each radio instead of for the wiphy. * @__HWSIM_ATTR_MAX: enum limit */ enum hwsim_attrs { @@ -189,6 +192,7 @@ enum hwsim_attrs { HWSIM_ATTR_PMSR_SUPPORT, HWSIM_ATTR_PMSR_REQUEST, HWSIM_ATTR_PMSR_RESULT, + HWSIM_ATTR_MULTI_RADIO, __HWSIM_ATTR_MAX, }; #define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1) @@ -257,7 +261,7 @@ enum hwsim_tx_rate_flags { }; /** - * struct hwsim_tx_rate - rate selection/status + * struct hwsim_tx_rate_flag - rate selection/status * * @idx: rate index to attempt to send with * @flags: the rate flags according to &enum hwsim_tx_rate_flags @@ -295,7 +299,7 @@ enum hwsim_vqs { }; /** - * enum hwsim_rate_info -- bitrate information. + * enum hwsim_rate_info_attributes - bitrate information. * * Information about a receiving or transmitting bitrate * that can be mapped to struct rate_info diff --git a/drivers/net/wireless/virtual/virt_wifi.c b/drivers/net/wireless/virtual/virt_wifi.c index 6a84ec58d618..4ee374080466 100644 --- a/drivers/net/wireless/virtual/virt_wifi.c +++ b/drivers/net/wireless/virtual/virt_wifi.c @@ -136,6 +136,9 @@ static struct ieee80211_supported_band band_5ghz = { /* Assigned at module init. Guaranteed locally-administered and unicast. 
*/ static u8 fake_router_bssid[ETH_ALEN] __ro_after_init = {}; +#define VIRT_WIFI_SSID "VirtWifi" +#define VIRT_WIFI_SSID_LEN 8 + static void virt_wifi_inform_bss(struct wiphy *wiphy) { u64 tsf = div_u64(ktime_get_boottime_ns(), 1000); @@ -146,8 +149,8 @@ static void virt_wifi_inform_bss(struct wiphy *wiphy) u8 ssid[8]; } __packed ssid = { .tag = WLAN_EID_SSID, - .len = 8, - .ssid = "VirtWifi", + .len = VIRT_WIFI_SSID_LEN, + .ssid = VIRT_WIFI_SSID, }; informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz, @@ -213,6 +216,8 @@ struct virt_wifi_netdev_priv { struct net_device *upperdev; u32 tx_packets; u32 tx_failed; + u32 connect_requested_ssid_len; + u8 connect_requested_ssid[IEEE80211_MAX_SSID_LEN]; u8 connect_requested_bss[ETH_ALEN]; bool is_up; bool is_connected; @@ -229,6 +234,12 @@ static int virt_wifi_connect(struct wiphy *wiphy, struct net_device *netdev, if (priv->being_deleted || !priv->is_up) return -EBUSY; + if (!sme->ssid) + return -EINVAL; + + priv->connect_requested_ssid_len = sme->ssid_len; + memcpy(priv->connect_requested_ssid, sme->ssid, sme->ssid_len); + could_schedule = schedule_delayed_work(&priv->connect, HZ * 2); if (!could_schedule) return -EBUSY; @@ -252,12 +263,15 @@ static void virt_wifi_connect_complete(struct work_struct *work) container_of(work, struct virt_wifi_netdev_priv, connect.work); u8 *requested_bss = priv->connect_requested_bss; bool right_addr = ether_addr_equal(requested_bss, fake_router_bssid); + bool right_ssid = priv->connect_requested_ssid_len == VIRT_WIFI_SSID_LEN && + !memcmp(priv->connect_requested_ssid, VIRT_WIFI_SSID, + priv->connect_requested_ssid_len); u16 status = WLAN_STATUS_SUCCESS; if (is_zero_ether_addr(requested_bss)) requested_bss = NULL; - if (!priv->is_up || (requested_bss && !right_addr)) + if (!priv->is_up || (requested_bss && !right_addr) || !right_ssid) status = WLAN_STATUS_UNSPECIFIED_FAILURE; else priv->is_connected = true; diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index e1ec3b7200d7..f8dd7eb40fbe 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -396,10 +396,9 @@ static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem, if (!config->base_dev) return -EINVAL; - if (config->type == NVMEM_TYPE_FRAM) - bin_attr_nvmem_eeprom_compat.attr.name = "fram"; - nvmem->eeprom = bin_attr_nvmem_eeprom_compat; + if (config->type == NVMEM_TYPE_FRAM) + nvmem->eeprom.attr.name = "fram"; nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem); nvmem->eeprom.size = nvmem->size; #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -463,7 +462,7 @@ static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem) "%s@%x,%x", entry->name, entry->offset, entry->bit_offset); - attrs[i].attr.mode = 0444; + attrs[i].attr.mode = 0444 & nvmem_bin_attr_get_umode(nvmem); attrs[i].size = entry->bytes; attrs[i].read = &nvmem_cell_attr_read; attrs[i].private = entry; diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c index 33678d0af2c2..6c2f80e166e2 100644 --- a/drivers/nvmem/meson-efuse.c +++ b/drivers/nvmem/meson-efuse.c @@ -18,18 +18,24 @@ static int meson_efuse_read(void *context, unsigned int offset, void *val, size_t bytes) { struct meson_sm_firmware *fw = context; + int ret; - return meson_sm_call_read(fw, (u8 *)val, bytes, SM_EFUSE_READ, offset, - bytes, 0, 0, 0); + ret = meson_sm_call_read(fw, (u8 *)val, bytes, SM_EFUSE_READ, offset, + bytes, 0, 0, 0); + + return ret < 0 ? 
ret : 0; } static int meson_efuse_write(void *context, unsigned int offset, void *val, size_t bytes) { struct meson_sm_firmware *fw = context; + int ret; + + ret = meson_sm_call_write(fw, (u8 *)val, bytes, SM_EFUSE_WRITE, offset, + bytes, 0, 0, 0); - return meson_sm_call_write(fw, (u8 *)val, bytes, SM_EFUSE_WRITE, offset, - bytes, 0, 0, 0); + return ret < 0 ? ret : 0; } static const struct of_device_id meson_efuse_match[] = { diff --git a/drivers/nvmem/rmem.c b/drivers/nvmem/rmem.c index 752d0bf4445e..7f907c5a445e 100644 --- a/drivers/nvmem/rmem.c +++ b/drivers/nvmem/rmem.c @@ -46,7 +46,10 @@ static int rmem_read(void *context, unsigned int offset, memunmap(addr); - return count; + if (count < 0) + return count; + + return count == bytes ? 0 : -EIO; } static int rmem_probe(struct platform_device *pdev) diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 462375b293e4..c94203ce65bb 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -81,7 +81,8 @@ EXPORT_SYMBOL_GPL(of_irq_find_parent); /* * These interrupt controllers abuse interrupt-map for unspeakable * reasons and rely on the core code to *ignore* it (the drivers do - * their own parsing of the property). + * their own parsing of the property). The PAsemi entry covers a + * non-sensical interrupt-map that is better left ignored. * * If you think of adding to the list for something *new*, think * again. There is a high chance that you will be sent back to the @@ -95,6 +96,7 @@ static const char * const of_irq_imap_abusers[] = { "fsl,ls1043a-extirq", "fsl,ls1088a-extirq", "renesas,rza1-irqc", + "pasemi,rootbus", NULL, }; @@ -293,20 +295,8 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) imaplen -= imap - oldimap; pr_debug(" -> imaplen=%d\n", imaplen); } - if (!match) { - if (intc) { - /* - * The PASEMI Nemo is a known offender, so - * let's only warn for anyone else. 
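The meson-efuse and rmem fixes above enforce the same nvmem contract: a reg_read()/reg_write() callback reports success as 0, never as a byte count, because the core treats any non-zero return as an error. A minimal conforming read callback might look like this (a sketch; foo_hw_read() is a hypothetical hardware accessor returning a negative errno or the number of bytes read):

static int foo_nvmem_read(void *context, unsigned int offset,
			  void *val, size_t bytes)
{
	int ret = foo_hw_read(context, offset, val, bytes);

	return ret < 0 ? ret : 0;	/* success is 0, not the byte count */
}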
- */ - WARN(!IS_ENABLED(CONFIG_PPC_PASEMI), - "%pOF interrupt-map failed, using interrupt-controller\n", - ipar); - return 0; - } - + if (!match) goto fail; - } /* * Successfully parsed an interrupt-map translation; copy new diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c index 78c490e0505a..0a02e85a8951 100644 --- a/drivers/perf/riscv_pmu.c +++ b/drivers/perf/riscv_pmu.c @@ -167,7 +167,7 @@ u64 riscv_pmu_event_update(struct perf_event *event) unsigned long cmask; u64 oldval, delta; - if (!rvpmu->ctr_read) + if (!rvpmu->ctr_read || (hwc->state & PERF_HES_UPTODATE)) return 0; cmask = riscv_pmu_ctr_get_width_mask(event); diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index a2e4005e1fd0..4e842dcedfba 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -20,6 +20,7 @@ #include <linux/cpu_pm.h> #include <linux/sched/clock.h> #include <linux/soc/andes/irq.h> +#include <linux/workqueue.h> #include <asm/errata_list.h> #include <asm/sbi.h> @@ -114,7 +115,7 @@ struct sbi_pmu_event_data { }; }; -static const struct sbi_pmu_event_data pmu_hw_event_map[] = { +static struct sbi_pmu_event_data pmu_hw_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = {.hw_gen_event = { SBI_PMU_HW_CPU_CYCLES, SBI_PMU_EVENT_TYPE_HW, 0}}, @@ -148,7 +149,7 @@ static const struct sbi_pmu_event_data pmu_hw_event_map[] = { }; #define C(x) PERF_COUNT_HW_CACHE_##x -static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX] +static struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { [C(L1D)] = { @@ -293,6 +294,34 @@ static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_M }, }; +static void pmu_sbi_check_event(struct sbi_pmu_event_data *edata) +{ + struct sbiret ret; + + ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, + 0, cmask, 0, edata->event_idx, 0, 0); + if (!ret.error) { + sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, + ret.value, 0x1, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0); + } else if (ret.error == SBI_ERR_NOT_SUPPORTED) { + /* This event cannot be monitored by any counter */ + edata->event_idx = -EINVAL; + } +} + +static void pmu_sbi_check_std_events(struct work_struct *work) +{ + for (int i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++) + pmu_sbi_check_event(&pmu_hw_event_map[i]); + + for (int i = 0; i < ARRAY_SIZE(pmu_cache_event_map); i++) + for (int j = 0; j < ARRAY_SIZE(pmu_cache_event_map[i]); j++) + for (int k = 0; k < ARRAY_SIZE(pmu_cache_event_map[i][j]); k++) + pmu_sbi_check_event(&pmu_cache_event_map[i][j][k]); +} + +static DECLARE_WORK(check_std_events_work, pmu_sbi_check_std_events); + static int pmu_sbi_ctr_get_width(int idx) { return pmu_ctr_list[idx].width; @@ -478,6 +507,12 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig) u64 raw_config_val; int ret; + /* + * Ensure we are finished checking standard hardware events for + * validity before allowing userspace to configure any events. + */ + flush_work(&check_std_events_work); + switch (type) { case PERF_TYPE_HARDWARE: if (config >= PERF_COUNT_HW_MAX) @@ -762,7 +797,7 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu) * which may include counters that are not enabled yet. 
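 * (Aside: the deferred-validation pattern added above, shown in isolation
 *  with a placeholder worker name check_events():
 *	static DECLARE_WORK(check_work, check_events);
 *	schedule_work(&check_work);	from probe: validate asynchronously
 *	flush_work(&check_work);	from first event_map(): wait for it
 *  so userspace cannot configure an event before its event_idx has been
 *  checked against the SBI implementation.)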
*/ sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, - 0, pmu->cmask, 0, 0, 0, 0); + 0, pmu->cmask, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0); } static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu) @@ -1359,6 +1394,9 @@ static int pmu_sbi_device_probe(struct platform_device *pdev) if (ret) goto out_unregister; + /* Asynchronously check which standard events are available */ + schedule_work(&check_std_events_work); + return 0; out_unregister: diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 10d0ce6c8342..78a5aac2dcfd 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -3299,6 +3299,7 @@ static const struct dmi_system_id toshiba_dmi_quirks[] __initconst = { }, .driver_data = (void *)(QUIRK_TURN_ON_PANEL_ON_RESUME | QUIRK_HCI_HOTKEY_QUICKSTART), }, + { } }; static int toshiba_acpi_add(struct acpi_device *acpi_dev) diff --git a/drivers/pmdomain/qcom/rpmhpd.c b/drivers/pmdomain/qcom/rpmhpd.c index de9121ef4216..d2cb4271a1ca 100644 --- a/drivers/pmdomain/qcom/rpmhpd.c +++ b/drivers/pmdomain/qcom/rpmhpd.c @@ -40,6 +40,7 @@ * @addr: Resource address as looped up using resource name from * cmd-db * @state_synced: Indicator that sync_state has been invoked for the rpmhpd resource + * @skip_retention_level: Indicate that retention level should not be used for the power domain */ struct rpmhpd { struct device *dev; @@ -56,6 +57,7 @@ struct rpmhpd { const char *res_name; u32 addr; bool state_synced; + bool skip_retention_level; }; struct rpmhpd_desc { @@ -173,6 +175,7 @@ static struct rpmhpd mxc = { .pd = { .name = "mxc", }, .peer = &mxc_ao, .res_name = "mxc.lvl", + .skip_retention_level = true, }; static struct rpmhpd mxc_ao = { @@ -180,6 +183,7 @@ static struct rpmhpd mxc_ao = { .active_only = true, .peer = &mxc, .res_name = "mxc.lvl", + .skip_retention_level = true, }; static struct rpmhpd nsp = { @@ -819,6 +823,9 @@ static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd) return -EINVAL; for (i = 0; i < rpmhpd->level_count; i++) { + if (rpmhpd->skip_retention_level && buf[i] == RPMH_REGULATOR_LEVEL_RETENTION) + continue; + rpmhpd->level[i] = buf[i]; /* Remember the first corner with non-zero level */ diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 6b64af7d4927..1b7561abe05d 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -4119,8 +4119,6 @@ static int sd_resume(struct device *dev) { struct scsi_disk *sdkp = dev_get_drvdata(dev); - sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); - if (opal_unlock_from_suspend(sdkp->opal_dev)) { sd_printk(KERN_NOTICE, sdkp, "OPAL unlock failed\n"); return -EIO; @@ -4137,12 +4135,13 @@ static int sd_resume_common(struct device *dev, bool runtime) if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ return 0; + sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); + if (!sd_do_start_stop(sdkp->device, runtime)) { sdkp->suspended = false; return 0; } - sd_printk(KERN_NOTICE, sdkp, "Starting disk\n"); ret = sd_start_stop_device(sdkp, 1); if (!ret) { sd_resume(dev); diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c index 40fb09d69014..65279243072c 100644 --- a/drivers/soc/qcom/pmic_glink.c +++ b/drivers/soc/qcom/pmic_glink.c @@ -348,11 +348,15 @@ static void pmic_glink_remove(struct platform_device *pdev) mutex_unlock(&__pmic_glink_lock); } +static const unsigned long pmic_glink_sc8280xp_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) | + BIT(PMIC_GLINK_CLIENT_ALTMODE); + static const unsigned long pmic_glink_sm8450_client_mask = 
BIT(PMIC_GLINK_CLIENT_BATT) | BIT(PMIC_GLINK_CLIENT_ALTMODE) | BIT(PMIC_GLINK_CLIENT_UCSI); static const struct of_device_id pmic_glink_of_match[] = { + { .compatible = "qcom,sc8280xp-pmic-glink", .data = &pmic_glink_sc8280xp_client_mask }, { .compatible = "qcom,pmic-glink", .data = &pmic_glink_sm8450_client_mask }, {} }; diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c index e358ac5b4509..96a524772549 100644 --- a/drivers/spi/spi-axi-spi-engine.c +++ b/drivers/spi/spi-axi-spi-engine.c @@ -164,16 +164,20 @@ static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry, } static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry, - int delay_ns, u32 sclk_hz) + int delay_ns, int inst_ns, u32 sclk_hz) { unsigned int t; - /* negative delay indicates error, e.g. from spi_delay_to_ns() */ - if (delay_ns <= 0) + /* + * Negative delay indicates error, e.g. from spi_delay_to_ns(). And if + * delay is less than the instruction execution time, there is no need + * for an extra sleep instruction since the instruction execution time + * will already cover the required delay. + */ + if (delay_ns < 0 || delay_ns <= inst_ns) return; - /* rounding down since executing the instruction adds a couple of ticks delay */ - t = DIV_ROUND_DOWN_ULL((u64)delay_ns * sclk_hz, NSEC_PER_SEC); + t = DIV_ROUND_UP_ULL((u64)(delay_ns - inst_ns) * sclk_hz, NSEC_PER_SEC); while (t) { unsigned int n = min(t, 256U); @@ -220,10 +224,16 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry, struct spi_device *spi = msg->spi; struct spi_controller *host = spi->controller; struct spi_transfer *xfer; - int clk_div, new_clk_div; + int clk_div, new_clk_div, inst_ns; bool keep_cs = false; u8 bits_per_word = 0; + /* + * Take into account instruction execution time for more accurate sleep + * times, especially when the delay is small. 
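 * A worked example with illustrative numbers (not from this patch): with
 * host->max_speed_hz = 50 MHz, inst_ns = DIV_ROUND_UP(NSEC_PER_SEC,
 * 50000000) = 20, so a requested 1000 ns delay on a 10 MHz SCLK costs
 * t = DIV_ROUND_UP((1000 - 20) * 10000000ULL, NSEC_PER_SEC) = 10 ticks,
 * while a delay of 20 ns or less now emits no sleep instruction at all.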
+ */ + inst_ns = DIV_ROUND_UP(NSEC_PER_SEC, host->max_speed_hz); + clk_div = 1; spi_engine_program_add_cmd(p, dry, @@ -252,7 +262,7 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry, spi_engine_gen_xfer(p, dry, xfer); spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer), - xfer->effective_speed_hz); + inst_ns, xfer->effective_speed_hz); if (xfer->cs_change) { if (list_is_last(&xfer->transfer_list, &msg->transfers)) { keep_cs = true; } else { spi_engine_gen_cs(p, dry, spi, false); spi_engine_gen_sleep(p, dry, spi_delay_to_ns( - &xfer->cs_change_delay, xfer), + &xfer->cs_change_delay, xfer), inst_ns, xfer->effective_speed_hz); if (!list_next_entry(xfer, transfer_list)->cs_off) diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index be3998104bfb..f7e8b5efa50e 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -984,6 +984,9 @@ static int davinci_spi_probe(struct platform_device *pdev) return ret; free_dma: + /* This bit needs to be cleared to disable dspi->clk */ + clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); + if (dspi->dma_rx) { dma_release_channel(dspi->dma_rx); dma_release_channel(dspi->dma_tx); @@ -1013,6 +1016,9 @@ static void davinci_spi_remove(struct platform_device *pdev) spi_bitbang_stop(&dspi->bitbang); + /* This bit needs to be cleared to disable dspi->clk */ + clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); + if (dspi->dma_rx) { dma_release_channel(dspi->dma_rx); dma_release_channel(dspi->dma_tx); diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 33164ebdb583..1439883326cf 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -1050,7 +1050,7 @@ static struct spi_imx_devtype_data imx35_cspi_devtype_data = { .rx_available = mx31_rx_available, .reset = mx31_reset, .fifo_size = 8, - .has_dmamode = true, + .has_dmamode = false, .dynamic_burst = false, .has_targetmode = false, .devtype = IMX35_CSPI, diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c index 5d72e3d59df8..c02c4204442f 100644 --- a/drivers/spi/spi-mux.c +++ b/drivers/spi/spi-mux.c @@ -158,12 +158,14 @@ static int spi_mux_probe(struct spi_device *spi) /* supported modes are the same as our parent's */ ctlr->mode_bits = spi->controller->mode_bits; ctlr->flags = spi->controller->flags; + ctlr->bits_per_word_mask = spi->controller->bits_per_word_mask; ctlr->transfer_one_message = spi_mux_transfer_one_message; ctlr->setup = spi_mux_setup; ctlr->num_chipselect = mux_control_states(priv->mux); ctlr->bus_num = -1; ctlr->dev.of_node = spi->dev.of_node; ctlr->must_async = true; + ctlr->defer_optimize_message = true; ret = devm_spi_register_controller(&spi->dev, ctlr); if (ret) diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 7e3083b83534..002f29dbcea6 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -1277,24 +1277,11 @@ static int omap2_mcspi_prepare_message(struct spi_controller *ctlr, /* * Check if this transfer contains only one word; - * OR contains 1 to 4 words, with bits_per_word == 8 and no delay between each word - * OR contains 1 to 2 words, with bits_per_word == 16 and no delay between each word - * - * If one of the two last case is true, this also change the bits_per_word of this - * transfer to make it a bit faster. 
- * It's not an issue to change the bits_per_word here even if the multi-mode is not - * applicable for this message, the signal on the wire will be the same. */ if (bits_per_word < 8 && tr->len == 1) { /* multi-mode is applicable, only one word (1..7 bits) */ - } else if (tr->word_delay.value == 0 && bits_per_word == 8 && tr->len <= 4) { - /* multi-mode is applicable, only one "bigger" word (8,16,24,32 bits) */ - tr->bits_per_word = tr->len * bits_per_word; - } else if (tr->word_delay.value == 0 && bits_per_word == 16 && tr->len <= 2) { - /* multi-mode is applicable, only one "bigger" word (16,32 bits) */ - tr->bits_per_word = tr->len * bits_per_word / 2; } else if (bits_per_word >= 8 && tr->len == bits_per_word / 8) { - /* multi-mode is applicable, only one word (9..15,17..32 bits) */ + /* multi-mode is applicable, only one word (8..32 bits) */ } else { /* multi-mode is not applicable: more than one word in the transfer */ mcspi->use_multi_mode = false; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index fc13fa192189..0f04e832f9ec 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -2151,7 +2151,8 @@ static void __spi_unoptimize_message(struct spi_message *msg) */ static void spi_maybe_unoptimize_message(struct spi_message *msg) { - if (!msg->pre_optimized && msg->optimized) + if (!msg->pre_optimized && msg->optimized && + !msg->spi->controller->defer_optimize_message) __spi_unoptimize_message(msg); } @@ -4294,6 +4295,11 @@ static int __spi_optimize_message(struct spi_device *spi, static int spi_maybe_optimize_message(struct spi_device *spi, struct spi_message *msg) { + if (spi->controller->defer_optimize_message) { + msg->spi = spi; + return 0; + } + if (msg->pre_optimized) return 0; @@ -4324,6 +4330,13 @@ int spi_optimize_message(struct spi_device *spi, struct spi_message *msg) { int ret; + /* + * Pre-optimization is not supported and optimization is deferred e.g. + * when using spi-mux. 
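 * (With defer_optimize_message set, spi_optimize_message() and
 * spi_unoptimize_message() return early and spi_maybe_optimize_message()
 * only records msg->spi; the message is then optimized when the mux
 * driver resubmits it to the parent controller that actually executes
 * it. This is a reading of the call flow visible in this hunk, not a
 * statement about the core's other paths.)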
+ */ + if (spi->controller->defer_optimize_message) + return 0; + ret = __spi_optimize_message(spi, msg); if (ret) return ret; @@ -4350,6 +4363,9 @@ EXPORT_SYMBOL_GPL(spi_optimize_message); */ void spi_unoptimize_message(struct spi_message *msg) { + if (msg->spi->controller->defer_optimize_message) + return; + __spi_unoptimize_message(msg); msg->pre_optimized = false; } @@ -4432,8 +4448,6 @@ int spi_async(struct spi_device *spi, struct spi_message *message) spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); - spi_maybe_unoptimize_message(message); - return ret; } EXPORT_SYMBOL_GPL(spi_async); diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c index 45f04a25255a..1b2345a697c5 100644 --- a/drivers/thermal/gov_power_allocator.c +++ b/drivers/thermal/gov_power_allocator.c @@ -759,6 +759,9 @@ static void power_allocator_manage(struct thermal_zone_device *tz) return; } + if (!params->trip_max) + return; + allocate_power(tz, params->trip_max->temperature); params->update_cdevs = true; } diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 1b0ab2790860..ecc748d15eb7 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -300,6 +300,8 @@ static void monitor_thermal_zone(struct thermal_zone_device *tz) thermal_zone_device_set_polling(tz, tz->passive_delay_jiffies); else if (tz->polling_delay_jiffies) thermal_zone_device_set_polling(tz, tz->polling_delay_jiffies); + else if (tz->temperature == THERMAL_TEMP_INVALID) + thermal_zone_device_set_polling(tz, msecs_to_jiffies(THERMAL_RECHECK_DELAY_MS)); } static struct thermal_governor *thermal_get_tz_governor(struct thermal_zone_device *tz) @@ -482,16 +484,14 @@ static void thermal_trip_crossed(struct thermal_zone_device *tz, thermal_governor_trip_crossed(governor, tz, trip, crossed_up); } -static int thermal_trip_notify_cmp(void *ascending, const struct list_head *a, +static int thermal_trip_notify_cmp(void *not_used, const struct list_head *a, const struct list_head *b) { struct thermal_trip_desc *tda = container_of(a, struct thermal_trip_desc, notify_list_node); struct thermal_trip_desc *tdb = container_of(b, struct thermal_trip_desc, notify_list_node); - int ret = tdb->notify_temp - tda->notify_temp; - - return ascending ? 
ret : -ret; + return tda->notify_temp - tdb->notify_temp; } void __thermal_zone_device_update(struct thermal_zone_device *tz, @@ -511,7 +511,7 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz, update_temperature(tz); if (tz->temperature == THERMAL_TEMP_INVALID) - return; + goto monitor; __thermal_zone_set_trips(tz); @@ -520,12 +520,12 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz, for_each_trip_desc(tz, td) handle_thermal_trip(tz, td, &way_up_list, &way_down_list); - list_sort(&way_up_list, &way_up_list, thermal_trip_notify_cmp); + list_sort(NULL, &way_up_list, thermal_trip_notify_cmp); list_for_each_entry(td, &way_up_list, notify_list_node) thermal_trip_crossed(tz, &td->trip, governor, true); list_sort(NULL, &way_down_list, thermal_trip_notify_cmp); - list_for_each_entry(td, &way_down_list, notify_list_node) + list_for_each_entry_reverse(td, &way_down_list, notify_list_node) thermal_trip_crossed(tz, &td->trip, governor, false); if (governor->manage) @@ -533,6 +533,7 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz, thermal_debug_update_trip_stats(tz); +monitor: monitor_thermal_zone(tz); } diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 66f67e54e0c8..94eeb4011a48 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -133,6 +133,12 @@ struct thermal_zone_device { struct thermal_trip_desc trips[] __counted_by(num_trips); }; +/* + * Default delay after a failing thermal zone temperature check before + * attempting to check it again. + */ +#define THERMAL_RECHECK_DELAY_MS 250 + /* Default Thermal Governor */ #if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE) #define DEFAULT_THERMAL_GOVERNOR "step_wise" diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index ddac0a13cf84..1af9aed99c65 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -672,7 +672,8 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id) * https://www.ti.com/lit/pdf/sprz536 */ if (priv->habit & UART_RX_TIMEOUT_QUIRK && - (iir & UART_IIR_RX_TIMEOUT) == UART_IIR_RX_TIMEOUT) { + (iir & UART_IIR_RX_TIMEOUT) == UART_IIR_RX_TIMEOUT && + serial_port_in(port, UART_OMAP_RX_LVL) == 0) { unsigned char efr2, timeout_h, timeout_l; efr2 = serial_in(up, UART_OMAP_EFR2); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index f4f40c9373c2..ff32cd2d2863 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -120,6 +120,7 @@ #define UCR4_OREN (1<<1) /* Receiver overrun interrupt enable */ #define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */ #define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */ +#define UFCR_RXTL_MASK 0x3F /* Receiver trigger 6 bits wide */ #define UFCR_DCEDTE (1<<6) /* DCE/DTE mode select */ #define UFCR_RFDIV (7<<7) /* Reference freq divider mask */ #define UFCR_RFDIV_REG(x) (((x) < 7 ? 6 - (x) : 6) << 7) @@ -1551,6 +1552,7 @@ static void imx_uart_shutdown(struct uart_port *port) struct imx_port *sport = (struct imx_port *)port; unsigned long flags; u32 ucr1, ucr2, ucr4, uts; + int loops; if (sport->dma_is_enabled) { dmaengine_terminate_sync(sport->dma_chan_tx); @@ -1613,6 +1615,56 @@ static void imx_uart_shutdown(struct uart_port *port) ucr4 &= ~UCR4_TCEN; imx_uart_writel(sport, ucr4, UCR4); + /* + * We have to ensure the tx state machine ends up in OFF. This + * is especially important for rs485 where we must not leave + * the RTS signal high, blocking the bus indefinitely. 
+ * + * All interrupts are now disabled, so imx_uart_stop_tx() will + * no longer be called from imx_uart_transmit_buffer(). It may + * still be called via the hrtimers, and if those are in play, + * we have to honour the delays. + */ + if (sport->tx_state == WAIT_AFTER_RTS || sport->tx_state == SEND) + imx_uart_stop_tx(port); + + /* + * In many cases (rs232 mode, or if tx_state was + * WAIT_AFTER_RTS, or if tx_state was SEND and there is no + * delay_rts_after_send), this will have moved directly to + * OFF. In rs485 mode, tx_state might already have been + * WAIT_AFTER_SEND and the hrtimer thus already started, or + * the above imx_uart_stop_tx() call could have started it. In + * those cases, we have to wait for the hrtimer to fire and + * complete the transition to OFF. + */ + loops = port->rs485.flags & SER_RS485_ENABLED ? + port->rs485.delay_rts_after_send : 0; + while (sport->tx_state != OFF && loops--) { + uart_port_unlock_irqrestore(&sport->port, flags); + msleep(1); + uart_port_lock_irqsave(&sport->port, &flags); + } + + if (sport->tx_state != OFF) { + dev_warn(sport->port.dev, "unexpected tx_state %d\n", + sport->tx_state); + /* + * This machine may be busted, but ensure the RTS + * signal is inactive in order not to block other + * devices. + */ + if (port->rs485.flags & SER_RS485_ENABLED) { + ucr2 = imx_uart_readl(sport, UCR2); + if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) + imx_uart_rts_active(sport, &ucr2); + else + imx_uart_rts_inactive(sport, &ucr2); + imx_uart_writel(sport, ucr2, UCR2); + } + sport->tx_state = OFF; + } + uart_port_unlock_irqrestore(&sport->port, flags); clk_disable_unprepare(sport->clk_per); @@ -1933,7 +1985,7 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio struct serial_rs485 *rs485conf) { struct imx_port *sport = (struct imx_port *)port; - u32 ucr2; + u32 ucr2, ufcr; if (rs485conf->flags & SER_RS485_ENABLED) { /* Enable receiver if low-active RTS signal is requested */ @@ -1953,7 +2005,10 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio /* Make sure Rx is enabled in case Tx is active with Rx disabled */ if (!(rs485conf->flags & SER_RS485_ENABLED) || rs485conf->flags & SER_RS485_RX_DURING_TX) { - imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT); + /* If the receiver trigger is 0, set it to a default value */ + ufcr = imx_uart_readl(sport, UFCR); + if ((ufcr & UFCR_RXTL_MASK) == 0) + imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT); imx_uart_start_rx(port); } diff --git a/drivers/tty/serial/ma35d1_serial.c b/drivers/tty/serial/ma35d1_serial.c index 19f0a305cc43..3b4206e815fe 100644 --- a/drivers/tty/serial/ma35d1_serial.c +++ b/drivers/tty/serial/ma35d1_serial.c @@ -688,12 +688,13 @@ static int ma35d1serial_probe(struct platform_device *pdev) struct uart_ma35d1_port *up; int ret = 0; - if (pdev->dev.of_node) { - ret = of_alias_get_id(pdev->dev.of_node, "serial"); - if (ret < 0) { - dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n", ret); - return ret; - } + if (!pdev->dev.of_node) + return -ENODEV; + + ret = of_alias_get_id(pdev->dev.of_node, "serial"); + if (ret < 0) { + dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n", ret); + return ret; } up = &ma35d1serial_ports[ret]; up->port.line = ret; diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 2bd25afe0d92..69a632fefc41 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -649,15 +649,25 @@ static void 
qcom_geni_serial_start_tx_dma(struct uart_port *uport) static void qcom_geni_serial_start_tx_fifo(struct uart_port *uport) { + unsigned char c; u32 irq_en; - if (qcom_geni_serial_main_active(uport) || - !qcom_geni_serial_tx_empty(uport)) - return; + /* + * Start a new transfer in case the previous command was cancelled and + * left data in the FIFO which may prevent the watermark interrupt + * from triggering. Note that the stale data is discarded. + */ + if (!qcom_geni_serial_main_active(uport) && + !qcom_geni_serial_tx_empty(uport)) { + if (uart_fifo_out(uport, &c, 1) == 1) { + writel(M_CMD_DONE_EN, uport->membase + SE_GENI_M_IRQ_CLEAR); + qcom_geni_serial_setup_tx(uport, 1); + writel(c, uport->membase + SE_GENI_TX_FIFOn); + } + } irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN); irq_en |= M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN; - writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG); writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN); } @@ -665,13 +675,17 @@ static void qcom_geni_serial_start_tx_fifo(struct uart_port *uport) static void qcom_geni_serial_stop_tx_fifo(struct uart_port *uport) { u32 irq_en; - struct qcom_geni_serial_port *port = to_dev_port(uport); irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN); irq_en &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN); writel(0, uport->membase + SE_GENI_TX_WATERMARK_REG); writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN); - /* Possible stop tx is called multiple times. */ +} + +static void qcom_geni_serial_cancel_tx_cmd(struct uart_port *uport) +{ + struct qcom_geni_serial_port *port = to_dev_port(uport); + if (!qcom_geni_serial_main_active(uport)) return; @@ -684,6 +698,8 @@ static void qcom_geni_serial_stop_tx_fifo(struct uart_port *uport) writel(M_CMD_ABORT_EN, uport->membase + SE_GENI_M_IRQ_CLEAR); } writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR); + + port->tx_remaining = 0; } static void qcom_geni_serial_handle_rx_fifo(struct uart_port *uport, bool drop) @@ -862,7 +878,7 @@ static void qcom_geni_serial_send_chunk_fifo(struct uart_port *uport, memset(buf, 0, sizeof(buf)); tx_bytes = min(remaining, BYTES_PER_FIFO_WORD); - tx_bytes = uart_fifo_out(uport, buf, tx_bytes); + uart_fifo_out(uport, buf, tx_bytes); iowrite32_rep(uport->membase + SE_GENI_TX_FIFOn, buf, 1); @@ -890,13 +906,17 @@ static void qcom_geni_serial_handle_tx_fifo(struct uart_port *uport, else pending = kfifo_len(&tport->xmit_fifo); - /* All data has been transmitted and acknowledged as received */ - if (!pending && !status && done) { + /* All data has been transmitted or command has been cancelled */ + if (!pending && done) { qcom_geni_serial_stop_tx_fifo(uport); goto out_write_wakeup; } - avail = port->tx_fifo_depth - (status & TX_FIFO_WC); + if (active) + avail = port->tx_fifo_depth - (status & TX_FIFO_WC); + else + avail = port->tx_fifo_depth; + avail *= BYTES_PER_FIFO_WORD; chunk = min(avail, pending); @@ -1069,11 +1089,15 @@ static void qcom_geni_serial_shutdown(struct uart_port *uport) { disable_irq(uport->irq); - if (uart_console(uport)) - return; - qcom_geni_serial_stop_tx(uport); qcom_geni_serial_stop_rx(uport); + + qcom_geni_serial_cancel_tx_cmd(uport); +} + +static void qcom_geni_serial_flush_buffer(struct uart_port *uport) +{ + qcom_geni_serial_cancel_tx_cmd(uport); } static int qcom_geni_serial_port_setup(struct uart_port *uport) @@ -1532,6 +1556,7 @@ static const struct uart_ops qcom_geni_console_pops = { .request_port = qcom_geni_serial_request_port, .config_port = qcom_geni_serial_config_port, .shutdown = qcom_geni_serial_shutdown, + 
.flush_buffer = qcom_geni_serial_flush_buffer, .type = qcom_geni_serial_get_type, .set_mctrl = qcom_geni_serial_set_mctrl, .get_mctrl = qcom_geni_serial_get_mctrl, diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 8944548c30fa..c532416aec22 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -105,16 +105,15 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac); * @hba: per adapter instance * @req: pointer to the request to be issued * - * Return: the hardware queue instance on which the request would - * be queued. + * Return: the hardware queue instance on which the request will be or has + * been queued. %NULL if the request has already been freed. */ struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba, struct request *req) { - u32 utag = blk_mq_unique_tag(req); - u32 hwq = blk_mq_unique_tag_to_hwq(utag); + struct blk_mq_hw_ctx *hctx = READ_ONCE(req->mq_hctx); - return &hba->uhq[hwq]; + return hctx ? &hba->uhq[hctx->queue_num] : NULL; } /** @@ -515,6 +514,8 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag) if (!cmd) return -EINVAL; hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); + if (!hwq) + return 0; } else { hwq = hba->dev_cmd_queue; } diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 1b65e6ae4137..46433ecf0c4d 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -6456,6 +6456,8 @@ static bool ufshcd_abort_one(struct request *rq, void *priv) /* Release cmd in MCQ mode if abort succeeds */ if (is_mcq_enabled(hba) && (*ret == 0)) { hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd)); + if (!hwq) + return 0; spin_lock_irqsave(&hwq->cq_lock, flags); if (ufshcd_cmd_inflight(lrbp->cmd)) ufshcd_release_scsi_cmd(hba, lrbp); diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 3362af165ef5..880d52c0949d 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -291,6 +291,20 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, if (ifp->desc.bNumEndpoints >= num_ep) goto skip_to_next_endpoint_or_interface_descriptor; + /* Save a copy of the descriptor and use it instead of the original */ + endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; + memcpy(&endpoint->desc, d, n); + d = &endpoint->desc; + + /* Clear the reserved bits in bEndpointAddress */ + i = d->bEndpointAddress & + (USB_ENDPOINT_DIR_MASK | USB_ENDPOINT_NUMBER_MASK); + if (i != d->bEndpointAddress) { + dev_notice(ddev, "config %d interface %d altsetting %d has an endpoint descriptor with address 0x%X, changing to 0x%X\n", + cfgno, inum, asnum, d->bEndpointAddress, i); + endpoint->desc.bEndpointAddress = i; + } + /* Check for duplicate endpoint addresses */ if (config_endpoint_is_duplicate(config, inum, asnum, d)) { dev_notice(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n", @@ -308,10 +322,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, } } - endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; + /* Accept this endpoint */ ++ifp->desc.bNumEndpoints; - - memcpy(&endpoint->desc, d, n); INIT_LIST_HEAD(&endpoint->urb_list); /* diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c index f1a499ee482c..763e4122ed5b 100644 --- a/drivers/usb/core/of.c +++ b/drivers/usb/core/of.c @@ -84,9 +84,12 @@ static bool usb_of_has_devices_or_graph(const struct usb_device *hub) if (of_graph_is_present(np)) return true; - for_each_child_of_node(np, child) - if (of_property_present(child, "reg")) + 
for_each_child_of_node(np, child) { + if (of_property_present(child, "reg")) { + of_node_put(child); return true; + } + } return false; } diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index b4783574b8e6..13171454f959 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -506,6 +506,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x1b1c, 0x1b38), .driver_info = USB_QUIRK_DELAY_INIT | USB_QUIRK_DELAY_CTRL_MSG }, + /* START BP-850k Printer */ + { USB_DEVICE(0x1bc3, 0x0003), .driver_info = USB_QUIRK_NO_SET_INTF }, + /* MIDI keyboard WORLDE MINI */ { USB_DEVICE(0x1c75, 0x0204), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 9ef821ca2fc7..052852f80146 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -54,6 +54,10 @@ #define PCI_DEVICE_ID_INTEL_MTL 0x7e7e #define PCI_DEVICE_ID_INTEL_ARLH_PCH 0x777e #define PCI_DEVICE_ID_INTEL_TGL 0x9a15 +#define PCI_DEVICE_ID_INTEL_PTLH 0xe332 +#define PCI_DEVICE_ID_INTEL_PTLH_PCH 0xe37e +#define PCI_DEVICE_ID_INTEL_PTLU 0xe432 +#define PCI_DEVICE_ID_INTEL_PTLU_PCH 0xe47e #define PCI_DEVICE_ID_AMD_MR 0x163a #define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" @@ -430,6 +434,10 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_DEVICE_DATA(INTEL, MTLS, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, ARLH_PCH, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(INTEL, TGL, &dwc3_pci_intel_swnode) }, + { PCI_DEVICE_DATA(INTEL, PTLH, &dwc3_pci_intel_swnode) }, + { PCI_DEVICE_DATA(INTEL, PTLH_PCH, &dwc3_pci_intel_swnode) }, + { PCI_DEVICE_DATA(INTEL, PTLU, &dwc3_pci_intel_swnode) }, + { PCI_DEVICE_DATA(INTEL, PTLU_PCH, &dwc3_pci_intel_swnode) }, { PCI_DEVICE_DATA(AMD, NL_USB, &dwc3_pci_amd_swnode) }, { PCI_DEVICE_DATA(AMD, MR, &dwc3_pci_amd_mr_swnode) }, diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index ce3cfa1f36f5..0e7c1e947c0a 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -115,9 +115,12 @@ static int usb_string_copy(const char *s, char **s_copy) int ret; char *str; char *copy = *s_copy; + ret = strlen(s); if (ret > USB_MAX_STRING_LEN) return -EOVERFLOW; + if (ret < 1) + return -EINVAL; if (copy) { str = copy; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 37eb37b0affa..0a8cf6c17f82 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1125,10 +1125,20 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) xhci_dbg(xhci, "Start the secondary HCD\n"); retval = xhci_run(xhci->shared_hcd); } - + if (retval) + return retval; + /* + * Resume roothubs unconditionally as PORTSC change bits are not + * immediately visible after xHC reset + */ hcd->state = HC_STATE_SUSPENDED; - if (xhci->shared_hcd) + + if (xhci->shared_hcd) { xhci->shared_hcd->state = HC_STATE_SUSPENDED; + usb_hcd_resume_root_hub(xhci->shared_hcd); + } + usb_hcd_resume_root_hub(hcd); + goto done; } @@ -1152,7 +1162,6 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) xhci_dbc_resume(xhci); - done: if (retval == 0) { /* * Resume roothubs only if there are pending events. @@ -1178,6 +1187,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) usb_hcd_resume_root_hub(hcd); } } +done: /* * If system is subject to the Quirk, Compliance Mode Timer needs to * be re-initialized Always after a system resume. 
Ports are subject diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 8b0308d84270..85697466b147 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -1737,6 +1737,49 @@ static void mos7840_port_remove(struct usb_serial_port *port) kfree(mos7840_port); } +static int mos7840_suspend(struct usb_serial *serial, pm_message_t message) +{ + struct moschip_port *mos7840_port; + struct usb_serial_port *port; + int i; + + for (i = 0; i < serial->num_ports; ++i) { + port = serial->port[i]; + if (!tty_port_initialized(&port->port)) + continue; + + mos7840_port = usb_get_serial_port_data(port); + + usb_kill_urb(mos7840_port->read_urb); + mos7840_port->read_urb_busy = false; + } + + return 0; +} + +static int mos7840_resume(struct usb_serial *serial) +{ + struct moschip_port *mos7840_port; + struct usb_serial_port *port; + int res; + int i; + + for (i = 0; i < serial->num_ports; ++i) { + port = serial->port[i]; + if (!tty_port_initialized(&port->port)) + continue; + + mos7840_port = usb_get_serial_port_data(port); + + mos7840_port->read_urb_busy = true; + res = usb_submit_urb(mos7840_port->read_urb, GFP_NOIO); + if (res) + mos7840_port->read_urb_busy = false; + } + + return 0; +} + static struct usb_serial_driver moschip7840_4port_device = { .driver = { .owner = THIS_MODULE, @@ -1764,6 +1807,8 @@ static struct usb_serial_driver moschip7840_4port_device = { .port_probe = mos7840_port_probe, .port_remove = mos7840_port_remove, .read_bulk_callback = mos7840_bulk_in_callback, + .suspend = mos7840_suspend, + .resume = mos7840_resume, }; static struct usb_serial_driver * const serial_drivers[] = { diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 8a5846d4adf6..311040f9b935 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1425,6 +1425,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */ .driver_info = NCTRL(0) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x3000, 0xff), /* Telit FN912 */ + .driver_info = RSVD(0) | NCTRL(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x3001, 0xff), /* Telit FN912 */ + .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7010, 0xff), /* Telit LE910-S1 (RNDIS) */ .driver_info = NCTRL(2) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */ @@ -1433,6 +1437,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(2) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff), /* Telit LE910R1 (ECM) */ .driver_info = NCTRL(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x9000, 0xff), /* Telit generic core-dump device */ + .driver_info = NCTRL(0) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */ .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */ @@ -2224,6 +2230,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7126, 0xff, 0x00, 0x00), + .driver_info = NCTRL(2) }, + { 
USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7127, 0xff, 0x00, 0x00), + .driver_info = NCTRL(2) | NCTRL(3) | NCTRL(4) }, { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MPL200), .driver_info = RSVD(1) | RSVD(4) }, @@ -2284,6 +2294,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0f0, 0xff), /* Foxconn T99W373 MBIM */ .driver_info = RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe145, 0xff), /* Foxconn T99W651 RNDIS */ + .driver_info = RSVD(5) | RSVD(6) }, { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) }, { USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */ @@ -2321,6 +2333,32 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0115, 0xff), /* Rolling RW135-GL (laptop MBIM) */ .driver_info = RSVD(5) }, + { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0802, 0xff), /* Rolling RW350-GL (laptop MBIM) */ + .driver_info = RSVD(5) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for Global */ + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WRD for Global SKU */ + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WRD for China SKU */ + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for SA */ + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for EU */ + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for NA */ + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for China EDU */ + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for Global EDU */ + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) }, diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index 987c7921affa..ba0ce0075b2f 100644 ---
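A note on the option.c entries above: RSVD() and NCTRL() pack per-interface policy bits into driver_info — RSVD(n) keeps the serial driver off interface n (it belongs to another function such as MBIM or RNDIS), and NCTRL(n) marks an interface that does not answer modem-control requests. A hedged sketch of the encoding, mirroring option.c's own macros (the test helper is illustrative):

#include <linux/bits.h>

#define RSVD(ifnum)	((BIT(ifnum) & 0xff) << 0)	/* interface is reserved */
#define NCTRL(ifnum)	((BIT(ifnum) & 0xff) << 8)	/* no modem-control requests */

static bool iface_is_reserved(unsigned long quirks, u8 ifnum)
{
	return quirks & RSVD(ifnum);
}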
a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -1260,7 +1260,7 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info( struct vfio_pci_hot_reset_info hdr; struct vfio_pci_fill_info fill = {}; bool slot = false; - int ret, count; + int ret, count = 0; if (copy_from_user(&hdr, arg, minsz)) return -EFAULT; diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c index 1de9fac3bcf4..658f11aebda1 100644 --- a/fs/bcachefs/alloc_background.c +++ b/fs/bcachefs/alloc_background.c @@ -3,6 +3,7 @@ #include "alloc_background.h" #include "alloc_foreground.h" #include "backpointers.h" +#include "bkey_buf.h" #include "btree_cache.h" #include "btree_io.h" #include "btree_key_cache.h" @@ -1553,13 +1554,13 @@ err: } static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans, - struct btree_iter *alloc_iter) + struct btree_iter *alloc_iter, + struct bkey_buf *last_flushed) { struct bch_fs *c = trans->c; - struct btree_iter lru_iter; struct bch_alloc_v4 a_convert; const struct bch_alloc_v4 *a; - struct bkey_s_c alloc_k, lru_k; + struct bkey_s_c alloc_k; struct printbuf buf = PRINTBUF; int ret; @@ -1573,6 +1574,14 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans, a = bch2_alloc_to_v4(alloc_k, &a_convert); + if (a->fragmentation_lru) { + ret = bch2_lru_check_set(trans, BCH_LRU_FRAGMENTATION_START, + a->fragmentation_lru, + alloc_k, last_flushed); + if (ret) + return ret; + } + if (a->data_type != BCH_DATA_cached) return 0; @@ -1597,41 +1606,30 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans, a = &a_mut->v; } - lru_k = bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru, - lru_pos(alloc_k.k->p.inode, - bucket_to_u64(alloc_k.k->p), - a->io_time[READ]), 0); - ret = bkey_err(lru_k); + ret = bch2_lru_check_set(trans, alloc_k.k->p.inode, a->io_time[READ], + alloc_k, last_flushed); if (ret) - return ret; - - if (fsck_err_on(lru_k.k->type != KEY_TYPE_set, c, - alloc_key_to_missing_lru_entry, - "missing lru entry\n" - " %s", - (printbuf_reset(&buf), - bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) { - ret = bch2_lru_set(trans, - alloc_k.k->p.inode, - bucket_to_u64(alloc_k.k->p), - a->io_time[READ]); - if (ret) - goto err; - } + goto err; err: fsck_err: - bch2_trans_iter_exit(trans, &lru_iter); printbuf_exit(&buf); return ret; } int bch2_check_alloc_to_lru_refs(struct bch_fs *c) { + struct bkey_buf last_flushed; + + bch2_bkey_buf_init(&last_flushed); + bkey_init(&last_flushed.k->k); + int ret = bch2_trans_run(c, for_each_btree_key_commit(trans, iter, BTREE_ID_alloc, POS_MIN, BTREE_ITER_prefetch, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - bch2_check_alloc_to_lru_ref(trans, &iter))); + bch2_check_alloc_to_lru_ref(trans, &iter, &last_flushed))); + + bch2_bkey_buf_exit(&last_flushed, c); bch_err_fn(c, ret); return ret; } diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c index 9d3d64746a5b..27d97c22ae27 100644 --- a/fs/bcachefs/alloc_foreground.c +++ b/fs/bcachefs/alloc_foreground.c @@ -1703,6 +1703,7 @@ void bch2_fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c) for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++) nr[c->open_buckets[i].data_type]++; + printbuf_tabstops_reset(out); printbuf_tabstop_push(out, 24); percpu_down_read(&c->mark_lock); @@ -1736,6 +1737,7 @@ void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca) for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++) nr[c->open_buckets[i].data_type]++; + printbuf_tabstops_reset(out); 
printbuf_tabstop_push(out, 12); printbuf_tabstop_push(out, 16); printbuf_tabstop_push(out, 16); diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c index 4321f9fb73bd..6d8b1bc90be0 100644 --- a/fs/bcachefs/backpointers.c +++ b/fs/bcachefs/backpointers.c @@ -434,13 +434,6 @@ int bch2_check_btree_backpointers(struct bch_fs *c) return ret; } -static inline bool bkey_and_val_eq(struct bkey_s_c l, struct bkey_s_c r) -{ - return bpos_eq(l.k->p, r.k->p) && - bkey_bytes(l.k) == bkey_bytes(r.k) && - !memcmp(l.v, r.v, bkey_val_bytes(l.k)); -} - struct extents_to_bp_state { struct bpos bucket_start; struct bpos bucket_end; @@ -536,11 +529,8 @@ static int check_bp_exists(struct btree_trans *trans, struct btree_iter other_extent_iter = {}; struct printbuf buf = PRINTBUF; struct bkey_s_c bp_k; - struct bkey_buf tmp; int ret = 0; - bch2_bkey_buf_init(&tmp); - struct bch_dev *ca = bch2_dev_bucket_tryget(c, bucket); if (!ca) { prt_str(&buf, "extent for nonexistent device:bucket "); @@ -565,22 +555,9 @@ static int check_bp_exists(struct btree_trans *trans, if (bp_k.k->type != KEY_TYPE_backpointer || memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp))) { - bch2_bkey_buf_reassemble(&tmp, c, orig_k); - - if (!bkey_and_val_eq(orig_k, bkey_i_to_s_c(s->last_flushed.k))) { - if (bp.level) { - bch2_trans_unlock(trans); - bch2_btree_interior_updates_flush(c); - } - - ret = bch2_btree_write_buffer_flush_sync(trans); - if (ret) - goto err; - - bch2_bkey_buf_copy(&s->last_flushed, c, tmp.k); - ret = -BCH_ERR_transaction_restart_write_buffer_flush; - goto out; - } + ret = bch2_btree_write_buffer_maybe_flush(trans, orig_k, &s->last_flushed); + if (ret) + goto err; goto check_existing_bp; } @@ -589,7 +566,6 @@ err: fsck_err: bch2_trans_iter_exit(trans, &other_extent_iter); bch2_trans_iter_exit(trans, &bp_iter); - bch2_bkey_buf_exit(&tmp, c); bch2_dev_put(ca); printbuf_exit(&buf); return ret; @@ -794,6 +770,8 @@ static int bch2_get_btree_in_memory_pos(struct btree_trans *trans, !((1U << btree) & btree_interior_mask)) continue; + bch2_trans_begin(trans); + __for_each_btree_node(trans, iter, btree, btree == start.btree ? start.pos : POS_MIN, 0, depth, BTREE_ITER_prefetch, b, ret) { @@ -905,7 +883,7 @@ static int check_one_backpointer(struct btree_trans *trans, struct bbpos start, struct bbpos end, struct bkey_s_c_backpointer bp, - struct bpos *last_flushed_pos) + struct bkey_buf *last_flushed) { struct bch_fs *c = trans->c; struct btree_iter iter; @@ -925,20 +903,18 @@ static int check_one_backpointer(struct btree_trans *trans, if (ret) return ret; - if (!k.k && !bpos_eq(*last_flushed_pos, bp.k->p)) { - *last_flushed_pos = bp.k->p; - ret = bch2_btree_write_buffer_flush_sync(trans) ?: - -BCH_ERR_transaction_restart_write_buffer_flush; - goto out; - } + if (!k.k) { + ret = bch2_btree_write_buffer_maybe_flush(trans, bp.s_c, last_flushed); + if (ret) + goto out; - if (fsck_err_on(!k.k, c, - backpointer_to_missing_ptr, - "backpointer for missing %s\n %s", - bp.v->level ? "btree node" : "extent", - (bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf))) { - ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p); - goto out; + if (fsck_err(c, backpointer_to_missing_ptr, + "backpointer for missing %s\n %s", + bp.v->level ? 
"btree node" : "extent", + (bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf))) { + ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p); + goto out; + } } out: fsck_err: @@ -951,14 +927,20 @@ static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans, struct bbpos start, struct bbpos end) { - struct bpos last_flushed_pos = SPOS_MAX; + struct bkey_buf last_flushed; - return for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers, + bch2_bkey_buf_init(&last_flushed); + bkey_init(&last_flushed.k->k); + + int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers, POS_MIN, BTREE_ITER_prefetch, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, check_one_backpointer(trans, start, end, bkey_s_c_to_backpointer(k), - &last_flushed_pos)); + &last_flushed)); + + bch2_bkey_buf_exit(&last_flushed, trans->c); + return ret; } int bch2_check_backpointers_to_extents(struct bch_fs *c) diff --git a/fs/bcachefs/bkey.c b/fs/bcachefs/bkey.c index 94a1d1982fa8..587d7318a2e8 100644 --- a/fs/bcachefs/bkey.c +++ b/fs/bcachefs/bkey.c @@ -660,8 +660,9 @@ int bch2_bkey_format_invalid(struct bch_fs *c, bch2_bkey_format_field_overflows(f, i)) { unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i]; u64 unpacked_max = ~((~0ULL << 1) << (unpacked_bits - 1)); - u64 packed_max = f->bits_per_field[i] - ? ~((~0ULL << 1) << (f->bits_per_field[i] - 1)) + unsigned packed_bits = min(64, f->bits_per_field[i]); + u64 packed_max = packed_bits + ? ~((~0ULL << 1) << (packed_bits - 1)) : 0; prt_printf(err, "field %u too large: %llu + %llu > %llu", diff --git a/fs/bcachefs/bkey.h b/fs/bcachefs/bkey.h index fcd43915df07..936357149cf0 100644 --- a/fs/bcachefs/bkey.h +++ b/fs/bcachefs/bkey.h @@ -194,6 +194,13 @@ static inline struct bpos bkey_max(struct bpos l, struct bpos r) return bkey_gt(l, r) ? 
l : r; } +static inline bool bkey_and_val_eq(struct bkey_s_c l, struct bkey_s_c r) +{ + return bpos_eq(l.k->p, r.k->p) && + bkey_bytes(l.k) == bkey_bytes(r.k) && + !memcmp(l.v, r.v, bkey_val_bytes(l.k)); +} + void bch2_bpos_swab(struct bpos *); void bch2_bkey_swab_key(const struct bkey_format *, struct bkey_packed *); diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c index 0e477a926579..a0deb8266011 100644 --- a/fs/bcachefs/btree_gc.c +++ b/fs/bcachefs/btree_gc.c @@ -641,16 +641,30 @@ static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree, bool in target_depth = 0; /* root */ - mutex_lock(&c->btree_root_lock); - struct btree *b = bch2_btree_id_root(c, btree)->b; - if (!btree_node_fake(b)) { + do { +retry_root: + bch2_trans_begin(trans); + + struct btree_iter iter; + bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN, + 0, bch2_btree_id_root(c, btree)->b->c.level, 0); + struct btree *b = bch2_btree_iter_peek_node(&iter); + ret = PTR_ERR_OR_ZERO(b); + if (ret) + goto err_root; + + if (b != btree_node_root(c, b)) { + bch2_trans_iter_exit(trans, &iter); + goto retry_root; + } + gc_pos_set(c, gc_pos_btree(btree, b->c.level + 1, SPOS_MAX)); - ret = lockrestart_do(trans, - bch2_gc_mark_key(trans, b->c.btree_id, b->c.level + 1, - NULL, NULL, bkey_i_to_s_c(&b->key), initial)); + struct bkey_s_c k = bkey_i_to_s_c(&b->key); + ret = bch2_gc_mark_key(trans, btree, b->c.level + 1, NULL, NULL, k, initial); level = b->c.level; - } - mutex_unlock(&c->btree_root_lock); +err_root: + bch2_trans_iter_exit(trans, &iter); + } while (bch2_err_matches(ret, BCH_ERR_transaction_restart)); if (ret) return ret; @@ -903,6 +917,8 @@ static int bch2_alloc_write_key(struct btree_trans *trans, bch2_dev_usage_update(c, ca, &old_gc, &gc, 0, true); percpu_up_read(&c->mark_lock); + gc.fragmentation_lru = alloc_lru_idx_fragmentation(gc, ca); + if (fsck_err_on(new.data_type != gc.data_type, c, alloc_key_data_type_wrong, "bucket %llu:%llu gen %u has wrong data_type" @@ -916,23 +932,19 @@ static int bch2_alloc_write_key(struct btree_trans *trans, #define copy_bucket_field(_errtype, _f) \ if (fsck_err_on(new._f != gc._f, c, _errtype, \ "bucket %llu:%llu gen %u data type %s has wrong " #_f \ - ": got %u, should be %u", \ + ": got %llu, should be %llu", \ iter->pos.inode, iter->pos.offset, \ gc.gen, \ bch2_data_type_str(gc.data_type), \ - new._f, gc._f)) \ + (u64) new._f, (u64) gc._f)) \ new._f = gc._f; \ - copy_bucket_field(alloc_key_gen_wrong, - gen); - copy_bucket_field(alloc_key_dirty_sectors_wrong, - dirty_sectors); - copy_bucket_field(alloc_key_cached_sectors_wrong, - cached_sectors); - copy_bucket_field(alloc_key_stripe_wrong, - stripe); - copy_bucket_field(alloc_key_stripe_redundancy_wrong, - stripe_redundancy); + copy_bucket_field(alloc_key_gen_wrong, gen); + copy_bucket_field(alloc_key_dirty_sectors_wrong, dirty_sectors); + copy_bucket_field(alloc_key_cached_sectors_wrong, cached_sectors); + copy_bucket_field(alloc_key_stripe_wrong, stripe); + copy_bucket_field(alloc_key_stripe_redundancy_wrong, stripe_redundancy); + copy_bucket_field(alloc_key_fragmentation_lru_wrong, fragmentation_lru); #undef copy_bucket_field if (!bch2_alloc_v4_cmp(*old, new)) @@ -946,7 +958,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans, a->v = new; /* - * The trigger normally makes sure this is set, but we're not running + * The trigger normally makes sure these are set, but we're not running * triggers: */ if (a->v.data_type == BCH_DATA_cached && !a->v.io_time[READ]) diff --git 
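A note on the bch2_gc_btree() hunk above: instead of marking the root under btree_root_lock, the walk now restarts from scratch whenever the transaction is restarted or the root moves underneath it. A condensed sketch of the retry idiom, assuming a hypothetical walk_root() body:

static int gc_root_with_retry(struct btree_trans *trans)
{
	int ret;

	do {
		bch2_trans_begin(trans);	/* reset state after any restart */
		ret = walk_root(trans);		/* hypothetical body */
	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));

	return ret;
}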
a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c index 0ed9e6574fcd..19352a08ea20 100644 --- a/fs/bcachefs/btree_iter.c +++ b/fs/bcachefs/btree_iter.c @@ -996,7 +996,7 @@ retry_all: bch2_trans_unlock(trans); cond_resched(); - trans->locked = true; + trans_set_locked(trans); if (unlikely(trans->memory_allocation_failure)) { struct closure cl; @@ -3089,7 +3089,8 @@ u32 bch2_trans_begin(struct btree_trans *trans) bch2_trans_srcu_unlock(trans); trans->last_begin_ip = _RET_IP_; - trans->locked = true; + + trans_set_locked(trans); if (trans->restarted) { bch2_btree_path_traverse_all(trans); @@ -3159,7 +3160,6 @@ got_trans: trans->last_begin_time = local_clock(); trans->fn_idx = fn_idx; trans->locking_wait.task = current; - trans->locked = true; trans->journal_replay_not_finished = unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)) && atomic_inc_not_zero(&c->journal_keys.ref); @@ -3193,6 +3193,7 @@ got_trans: trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier); trans->srcu_lock_time = jiffies; trans->srcu_held = true; + trans_set_locked(trans); closure_init_stack_release(&trans->ref); return trans; diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c index d66fff22109a..c51826fd557f 100644 --- a/fs/bcachefs/btree_locking.c +++ b/fs/bcachefs/btree_locking.c @@ -231,7 +231,7 @@ static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle) prt_newline(&buf); } - bch2_print_string_as_lines(KERN_ERR, buf.buf); + bch2_print_string_as_lines_nonblocking(KERN_ERR, buf.buf); printbuf_exit(&buf); BUG(); } @@ -792,7 +792,7 @@ static inline int __bch2_trans_relock(struct btree_trans *trans, bool trace) return bch2_trans_relock_fail(trans, path, &f, trace); } - trans->locked = true; + trans_set_locked(trans); out: bch2_trans_verify_locks(trans); return 0; @@ -812,16 +812,14 @@ void bch2_trans_unlock_noassert(struct btree_trans *trans) { __bch2_trans_unlock(trans); - trans->locked = false; - trans->last_unlock_ip = _RET_IP_; + trans_set_unlocked(trans); } void bch2_trans_unlock(struct btree_trans *trans) { __bch2_trans_unlock(trans); - trans->locked = false; - trans->last_unlock_ip = _RET_IP_; + trans_set_unlocked(trans); } void bch2_trans_unlock_long(struct btree_trans *trans) diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h index 7f41545b9147..75a6274c7d27 100644 --- a/fs/bcachefs/btree_locking.h +++ b/fs/bcachefs/btree_locking.h @@ -193,6 +193,28 @@ int bch2_six_check_for_deadlock(struct six_lock *lock, void *p); /* lock: */ +static inline void trans_set_locked(struct btree_trans *trans) +{ + if (!trans->locked) { + trans->locked = true; + trans->last_unlock_ip = 0; + + trans->pf_memalloc_nofs = (current->flags & PF_MEMALLOC_NOFS) != 0; + current->flags |= PF_MEMALLOC_NOFS; + } +} + +static inline void trans_set_unlocked(struct btree_trans *trans) +{ + if (trans->locked) { + trans->locked = false; + trans->last_unlock_ip = _RET_IP_; + + if (!trans->pf_memalloc_nofs) + current->flags &= ~PF_MEMALLOC_NOFS; + } +} + static inline int __btree_node_lock_nopath(struct btree_trans *trans, struct btree_bkey_cached_common *b, enum six_lock_type type, diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h index 87f485e9c552..48cb1a7d31c5 100644 --- a/fs/bcachefs/btree_types.h +++ b/fs/bcachefs/btree_types.h @@ -484,6 +484,7 @@ struct btree_trans { bool lock_may_not_fail:1; bool srcu_held:1; bool locked:1; + bool pf_memalloc_nofs:1; bool write_locked:1; bool used_mempool:1; bool in_traverse_all:1; diff --git 
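A note on the trans_set_locked()/trans_set_unlocked() hunk above: saving the task's PF_MEMALLOC_NOFS state and forcing the flag on while btree locks are held is the hand-rolled form of the kernel's scoped NOFS helpers. A sketch using those stock helpers (the worker body is illustrative):

#include <linux/sched/mm.h>

static void work_with_btree_locks_held(void)
{
	/* allocations in this scope implicitly become GFP_NOFS, so direct
	 * reclaim cannot re-enter the filesystem and deadlock on our locks */
	unsigned int nofs_flags = memalloc_nofs_save();

	do_locked_work();	/* hypothetical */

	memalloc_nofs_restore(nofs_flags);
}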
a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c index 75c8a196b3f6..d0e92d948002 100644 --- a/fs/bcachefs/btree_write_buffer.c +++ b/fs/bcachefs/btree_write_buffer.c @@ -1,11 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 #include "bcachefs.h" +#include "bkey_buf.h" #include "btree_locking.h" #include "btree_update.h" #include "btree_update_interior.h" #include "btree_write_buffer.h" #include "error.h" +#include "extents.h" #include "journal.h" #include "journal_io.h" #include "journal_reclaim.h" @@ -492,6 +494,41 @@ int bch2_btree_write_buffer_tryflush(struct btree_trans *trans) return ret; } +/** + * In check and repair code, when checking references to write buffer btrees we + * need to issue a flush before we have a definitive error: this issues a flush + * if this is a key we haven't yet checked. + */ +int bch2_btree_write_buffer_maybe_flush(struct btree_trans *trans, + struct bkey_s_c referring_k, + struct bkey_buf *last_flushed) +{ + struct bch_fs *c = trans->c; + struct bkey_buf tmp; + int ret = 0; + + bch2_bkey_buf_init(&tmp); + + if (!bkey_and_val_eq(referring_k, bkey_i_to_s_c(last_flushed->k))) { + bch2_bkey_buf_reassemble(&tmp, c, referring_k); + + if (bkey_is_btree_ptr(referring_k.k)) { + bch2_trans_unlock(trans); + bch2_btree_interior_updates_flush(c); + } + + ret = bch2_btree_write_buffer_flush_sync(trans); + if (ret) + goto err; + + bch2_bkey_buf_copy(last_flushed, c, tmp.k); + ret = -BCH_ERR_transaction_restart_write_buffer_flush; + } +err: + bch2_bkey_buf_exit(&tmp, c); + return ret; +} + static void bch2_btree_write_buffer_flush_work(struct work_struct *work) { struct bch_fs *c = container_of(work, struct bch_fs, btree_write_buffer.flush_work); diff --git a/fs/bcachefs/btree_write_buffer.h b/fs/bcachefs/btree_write_buffer.h index eebcd2b15249..dd5e64218b50 100644 --- a/fs/bcachefs/btree_write_buffer.h +++ b/fs/bcachefs/btree_write_buffer.h @@ -23,6 +23,9 @@ int bch2_btree_write_buffer_flush_sync(struct btree_trans *); int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *); int bch2_btree_write_buffer_tryflush(struct btree_trans *); +struct bkey_buf; +int bch2_btree_write_buffer_maybe_flush(struct btree_trans *, struct bkey_s_c, struct bkey_buf *); + struct journal_keys_to_wb { struct btree_write_buffer_keys *wb; size_t room; diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c index 743d57eba760..314ee3e0187f 100644 --- a/fs/bcachefs/buckets.c +++ b/fs/bcachefs/buckets.c @@ -805,7 +805,7 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca, "bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n" "while marking %s", ptr->dev, bucket_nr, b_gen, - *bucket_gen(ca, bucket_nr), + bucket_gen_get(ca, bucket_nr), bch2_data_type_str(bucket_data_type ?: ptr_data_type), ptr->gen, (printbuf_reset(&buf), diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h index 80ee0be9793e..8ad4be73860c 100644 --- a/fs/bcachefs/buckets.h +++ b/fs/bcachefs/buckets.h @@ -116,6 +116,14 @@ static inline u8 *bucket_gen(struct bch_dev *ca, size_t b) return gens->b + b; } +static inline u8 bucket_gen_get(struct bch_dev *ca, size_t b) +{ + rcu_read_lock(); + u8 gen = *bucket_gen(ca, b); + rcu_read_unlock(); + return gen; +} + static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca, const struct bch_extent_ptr *ptr) { diff --git a/fs/bcachefs/clock.c b/fs/bcachefs/clock.c index 363644451106..0f40b585ce2b 100644 --- a/fs/bcachefs/clock.c +++ b/fs/bcachefs/clock.c @@ -132,14 +132,9 @@ static struct io_timer 
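A note on the bucket_gen_get() hunk above: returning a copy of an RCU-protected value, taken inside a short read-side critical section, keeps callers from holding a pointer into RCU-managed memory after rcu_read_unlock(). A generic sketch of the pattern (the types are illustrative):

#include <linux/rcupdate.h>

struct gen_array { u8 b[]; };

static u8 gen_snapshot(struct gen_array __rcu *gens, size_t idx)
{
	u8 gen;

	rcu_read_lock();
	gen = rcu_dereference(gens)->b[idx];	/* copy the value, not the pointer */
	rcu_read_unlock();

	return gen;	/* safe after unlock: it is a plain copy */
}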
*get_expired_timer(struct io_clock *clock, { struct io_timer *ret = NULL; - spin_lock(&clock->timer_lock); - if (clock->timers.used && time_after_eq(now, clock->timers.data[0]->expire)) heap_pop(&clock->timers, ret, io_timer_cmp, NULL); - - spin_unlock(&clock->timer_lock); - return ret; } @@ -148,8 +143,10 @@ void __bch2_increment_clock(struct io_clock *clock, unsigned sectors) struct io_timer *timer; unsigned long now = atomic64_add_return(sectors, &clock->now); + spin_lock(&clock->timer_lock); while ((timer = get_expired_timer(clock, now))) timer->fn(timer); + spin_unlock(&clock->timer_lock); } void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock) diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c index 1a0072eef109..0087b8555ead 100644 --- a/fs/bcachefs/data_update.c +++ b/fs/bcachefs/data_update.c @@ -5,7 +5,9 @@ #include "bkey_buf.h" #include "btree_update.h" #include "buckets.h" +#include "compress.h" #include "data_update.h" +#include "disk_groups.h" #include "ec.h" #include "error.h" #include "extents.h" @@ -454,6 +456,38 @@ static void bch2_update_unwritten_extent(struct btree_trans *trans, } } +void bch2_data_update_opts_to_text(struct printbuf *out, struct bch_fs *c, + struct bch_io_opts *io_opts, + struct data_update_opts *data_opts) +{ + printbuf_tabstop_push(out, 20); + prt_str(out, "rewrite ptrs:\t"); + bch2_prt_u64_base2(out, data_opts->rewrite_ptrs); + prt_newline(out); + + prt_str(out, "kill ptrs:\t"); + bch2_prt_u64_base2(out, data_opts->kill_ptrs); + prt_newline(out); + + prt_str(out, "target:\t"); + bch2_target_to_text(out, c, data_opts->target); + prt_newline(out); + + prt_str(out, "compression:\t"); + bch2_compression_opt_to_text(out, background_compression(*io_opts)); + prt_newline(out); + + prt_str(out, "extra replicas:\t"); + prt_u64(out, data_opts->extra_replicas); +} + +void bch2_data_update_to_text(struct printbuf *out, struct data_update *m) +{ + bch2_bkey_val_to_text(out, m->op.c, bkey_i_to_s_c(m->k.k)); + prt_newline(out); + bch2_data_update_opts_to_text(out, m->op.c, &m->op.opts, &m->data_opts); +} + int bch2_extent_drop_ptrs(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k, @@ -643,6 +677,16 @@ int bch2_data_update_init(struct btree_trans *trans, if (!(durability_have + durability_removing)) m->op.nr_replicas = max((unsigned) m->op.nr_replicas, 1); + if (!m->op.nr_replicas) { + struct printbuf buf = PRINTBUF; + + bch2_data_update_to_text(&buf, m); + WARN(1, "trying to move an extent, but nr_replicas=0\n%s", buf.buf); + printbuf_exit(&buf); + ret = -BCH_ERR_data_update_done; + goto done; + } + m->op.nr_replicas_required = m->op.nr_replicas; if (reserve_sectors) { diff --git a/fs/bcachefs/data_update.h b/fs/bcachefs/data_update.h index 991095bbd469..8d36365bdea8 100644 --- a/fs/bcachefs/data_update.h +++ b/fs/bcachefs/data_update.h @@ -17,6 +17,9 @@ struct data_update_opts { unsigned write_flags; }; +void bch2_data_update_opts_to_text(struct printbuf *, struct bch_fs *, + struct bch_io_opts *, struct data_update_opts *); + struct data_update { /* extent being updated: */ enum btree_id btree_id; @@ -27,6 +30,8 @@ struct data_update { struct bch_write_op op; }; +void bch2_data_update_to_text(struct printbuf *, struct data_update *); + int bch2_data_update_index_update(struct bch_write_op *); void bch2_data_update_read_done(struct data_update *, diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c index f0d4727c4dc2..ebabab171fe5 100644 --- a/fs/bcachefs/debug.c +++ b/fs/bcachefs/debug.c @@ -610,7 
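A note on the fs/bcachefs/clock.c hunk above: hoisting the spinlock into the caller makes popping an expired timer and running its callback atomic with respect to concurrent re-arms, instead of locking around each individual pop. A condensed sketch of the resulting shape (the heap helper is illustrative):

static void run_expired_timers(struct io_clock *clock, unsigned long now)
{
	struct io_timer *timer;

	spin_lock(&clock->timer_lock);
	/* pop-and-run under one lock: no window in which a timer is
	 * neither queued nor running */
	while ((timer = pop_expired(clock, now)))	/* hypothetical */
		timer->fn(timer);
	spin_unlock(&clock->timer_lock);
}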
+610,7 @@ restart: list_sort(&c->btree_trans_list, list_ptr_order_cmp); list_for_each_entry(trans, &c->btree_trans_list, list) { - if ((ulong) trans < i->iter) + if ((ulong) trans <= i->iter) continue; i->iter = (ulong) trans; @@ -832,16 +832,16 @@ static const struct file_operations btree_transaction_stats_op = { static void btree_deadlock_to_text(struct printbuf *out, struct bch_fs *c) { struct btree_trans *trans; - pid_t iter = 0; + ulong iter = 0; restart: seqmutex_lock(&c->btree_trans_lock); - list_for_each_entry(trans, &c->btree_trans_list, list) { - struct task_struct *task = READ_ONCE(trans->locking_wait.task); + list_sort(&c->btree_trans_list, list_ptr_order_cmp); - if (!task || task->pid <= iter) + list_for_each_entry(trans, &c->btree_trans_list, list) { + if ((ulong) trans <= iter) continue; - iter = task->pid; + iter = (ulong) trans; if (!closure_get_not_zero(&trans->ref)) continue; diff --git a/fs/bcachefs/eytzinger.h b/fs/bcachefs/eytzinger.h index 24840aee335c..795f4fc0bab1 100644 --- a/fs/bcachefs/eytzinger.h +++ b/fs/bcachefs/eytzinger.h @@ -48,7 +48,7 @@ static inline unsigned eytzinger1_right_child(unsigned i) static inline unsigned eytzinger1_first(unsigned size) { - return rounddown_pow_of_two(size); + return size ? rounddown_pow_of_two(size) : 0; } static inline unsigned eytzinger1_last(unsigned size) @@ -101,7 +101,9 @@ static inline unsigned eytzinger1_prev(unsigned i, unsigned size) static inline unsigned eytzinger1_extra(unsigned size) { - return (size + 1 - rounddown_pow_of_two(size)) << 1; + return size + ? (size + 1 - rounddown_pow_of_two(size)) << 1 + : 0; } static inline unsigned __eytzinger1_to_inorder(unsigned i, unsigned size, diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c index f9c9a95d7d4c..fa1fee05cf8f 100644 --- a/fs/bcachefs/fs.c +++ b/fs/bcachefs/fs.c @@ -194,6 +194,12 @@ static struct bch_inode_info *bch2_inode_insert(struct bch_fs *c, struct bch_ino * discard_new_inode() expects it to be set... */ inode->v.i_flags |= I_NEW; + /* + * We don't want bch2_evict_inode() to delete the inode on disk, + * we just raced and had another inode in cache. Normally new + * inodes don't have nlink == 0 - except tmpfiles do... 
+ */ + set_nlink(&inode->v, 1); discard_new_inode(&inode->v); inode = old; } else { @@ -2026,6 +2032,8 @@ err_put_super: __bch2_fs_stop(c); deactivate_locked_super(sb); err: + if (ret) + pr_err("error: %s", bch2_err_str(ret)); /* * On an inconsistency error in recovery we might see an -EROFS derived * errorcode (from the journal), but we don't want to return that to diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c index 4ec979b4b23e..4583c9386e8c 100644 --- a/fs/bcachefs/io_misc.c +++ b/fs/bcachefs/io_misc.c @@ -125,7 +125,7 @@ err_noprint: bch2_bkey_buf_exit(&old, c); if (closure_nr_remaining(&cl) != 1) { - bch2_trans_unlock(trans); + bch2_trans_unlock_long(trans); closure_sync(&cl); } diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c index c97fa7002b06..ebf39ef72fb2 100644 --- a/fs/bcachefs/io_read.c +++ b/fs/bcachefs/io_read.c @@ -389,7 +389,6 @@ retry: bch2_bkey_buf_reassemble(&sk, c, k); k = bkey_i_to_s_c(sk.k); - bch2_trans_unlock(trans); if (!bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, @@ -1004,6 +1003,9 @@ get_bio: rbio->promote = promote; INIT_WORK(&rbio->work, NULL); + if (flags & BCH_READ_NODECODE) + orig->pick = pick; + rbio->bio.bi_opf = orig->bio.bi_opf; rbio->bio.bi_iter.bi_sector = pick.ptr.offset; rbio->bio.bi_end_io = bch2_read_endio; diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c index 13669dd0e375..10b19791ec98 100644 --- a/fs/bcachefs/journal.c +++ b/fs/bcachefs/journal.c @@ -1095,7 +1095,7 @@ unlock: return ret; } -int bch2_dev_journal_alloc(struct bch_dev *ca) +int bch2_dev_journal_alloc(struct bch_dev *ca, bool new_fs) { unsigned nr; int ret; @@ -1117,7 +1117,7 @@ int bch2_dev_journal_alloc(struct bch_dev *ca) min(1 << 13, (1 << 24) / ca->mi.bucket_size)); - ret = __bch2_set_nr_journal_buckets(ca, nr, true, NULL); + ret = __bch2_set_nr_journal_buckets(ca, nr, new_fs, NULL); err: bch_err_fn(ca, ret); return ret; @@ -1129,7 +1129,7 @@ int bch2_fs_journal_alloc(struct bch_fs *c) if (ca->journal.nr) continue; - int ret = bch2_dev_journal_alloc(ca); + int ret = bch2_dev_journal_alloc(ca, true); if (ret) { percpu_ref_put(&ca->io_ref); return ret; @@ -1184,9 +1184,11 @@ void bch2_fs_journal_stop(struct journal *j) journal_quiesce(j); cancel_delayed_work_sync(&j->write_work); - BUG_ON(!bch2_journal_error(j) && - test_bit(JOURNAL_replay_done, &j->flags) && - j->last_empty_seq != journal_cur_seq(j)); + WARN(!bch2_journal_error(j) && + test_bit(JOURNAL_replay_done, &j->flags) && + j->last_empty_seq != journal_cur_seq(j), + "journal shutdown error: cur seq %llu but last empty seq %llu", + journal_cur_seq(j), j->last_empty_seq); if (!bch2_journal_error(j)) clear_bit(JOURNAL_running, &j->flags); @@ -1418,8 +1420,8 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j) unsigned long now = jiffies; u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes; - if (!out->nr_tabstops) - printbuf_tabstop_push(out, 28); + printbuf_tabstops_reset(out); + printbuf_tabstop_push(out, 28); out->atomic++; rcu_read_lock(); diff --git a/fs/bcachefs/journal.h b/fs/bcachefs/journal.h index fd1f7cdaa8bc..bc6b9c39dcb4 100644 --- a/fs/bcachefs/journal.h +++ b/fs/bcachefs/journal.h @@ -433,7 +433,7 @@ bool bch2_journal_seq_pins_to_text(struct printbuf *, struct journal *, u64 *); int bch2_set_nr_journal_buckets(struct bch_fs *, struct bch_dev *, unsigned nr); -int bch2_dev_journal_alloc(struct bch_dev *); +int bch2_dev_journal_alloc(struct bch_dev *, bool); int bch2_fs_journal_alloc(struct bch_fs *); void bch2_dev_journal_stop(struct journal *, 
struct bch_dev *); diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c index db24ce21b2ac..2326e2cb9cd2 100644 --- a/fs/bcachefs/journal_io.c +++ b/fs/bcachefs/journal_io.c @@ -415,6 +415,8 @@ static int journal_entry_btree_keys_validate(struct bch_fs *c, flags|BCH_VALIDATE_journal); if (ret == FSCK_DELETED_KEY) continue; + else if (ret) + return ret; k = bkey_next(k); } @@ -1762,11 +1764,13 @@ static CLOSURE_CALLBACK(journal_write_preflush) if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) { spin_lock(&j->lock); - closure_wait(&j->async_wait, cl); + if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) { + closure_wait(&j->async_wait, cl); + spin_unlock(&j->lock); + continue_at(cl, journal_write_preflush, j->wq); + return; + } spin_unlock(&j->lock); - - continue_at(cl, journal_write_preflush, j->wq); - return; } if (w->separate_flush) { diff --git a/fs/bcachefs/lru.c b/fs/bcachefs/lru.c index a40d116224ed..b12894ef44f3 100644 --- a/fs/bcachefs/lru.c +++ b/fs/bcachefs/lru.c @@ -77,6 +77,45 @@ static const char * const bch2_lru_types[] = { NULL }; +int bch2_lru_check_set(struct btree_trans *trans, + u16 lru_id, u64 time, + struct bkey_s_c referring_k, + struct bkey_buf *last_flushed) +{ + struct bch_fs *c = trans->c; + struct printbuf buf = PRINTBUF; + struct btree_iter lru_iter; + struct bkey_s_c lru_k = + bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru, + lru_pos(lru_id, + bucket_to_u64(referring_k.k->p), + time), 0); + int ret = bkey_err(lru_k); + if (ret) + return ret; + + if (lru_k.k->type != KEY_TYPE_set) { + ret = bch2_btree_write_buffer_maybe_flush(trans, referring_k, last_flushed); + if (ret) + goto err; + + if (fsck_err(c, alloc_key_to_missing_lru_entry, + "missing %s lru entry\n" + " %s", + bch2_lru_types[lru_type(lru_k)], + (bch2_bkey_val_to_text(&buf, c, referring_k), buf.buf))) { + ret = bch2_lru_set(trans, lru_id, bucket_to_u64(referring_k.k->p), time); + if (ret) + goto err; + } + } +err: +fsck_err: + bch2_trans_iter_exit(trans, &lru_iter); + printbuf_exit(&buf); + return ret; +} + static int bch2_check_lru_key(struct btree_trans *trans, struct btree_iter *lru_iter, struct bkey_s_c lru_k, diff --git a/fs/bcachefs/lru.h b/fs/bcachefs/lru.h index bd71ba77de07..ed75bcf59d47 100644 --- a/fs/bcachefs/lru.h +++ b/fs/bcachefs/lru.h @@ -61,6 +61,9 @@ int bch2_lru_del(struct btree_trans *, u16, u64, u64); int bch2_lru_set(struct btree_trans *, u16, u64, u64); int bch2_lru_change(struct btree_trans *, u16, u64, u64, u64); +struct bkey_buf; +int bch2_lru_check_set(struct btree_trans *, u16, u64, struct bkey_s_c, struct bkey_buf *); + int bch2_check_lrus(struct bch_fs *); #endif /* _BCACHEFS_LRU_H */ diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c index 6e477fadaa2a..e714e3bd5bbb 100644 --- a/fs/bcachefs/move.c +++ b/fs/bcachefs/move.c @@ -36,31 +36,6 @@ const char * const bch2_data_ops_strs[] = { NULL }; -static void bch2_data_update_opts_to_text(struct printbuf *out, struct bch_fs *c, - struct bch_io_opts *io_opts, - struct data_update_opts *data_opts) -{ - printbuf_tabstop_push(out, 20); - prt_str(out, "rewrite ptrs:\t"); - bch2_prt_u64_base2(out, data_opts->rewrite_ptrs); - prt_newline(out); - - prt_str(out, "kill ptrs:\t"); - bch2_prt_u64_base2(out, data_opts->kill_ptrs); - prt_newline(out); - - prt_str(out, "target:\t"); - bch2_target_to_text(out, c, data_opts->target); - prt_newline(out); - - prt_str(out, "compression:\t"); - bch2_compression_opt_to_text(out, background_compression(*io_opts)); - prt_newline(out); - - prt_str(out, "extra replicas:\t"); - 
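A note on the journal_write_preflush() hunk above: the sequence test is repeated after taking j->lock, so a flush completion that lands between the unlocked check and closure_wait() can no longer strand the waiter. A generic sketch of the recheck-under-lock idiom (the state and the park helper are illustrative):

#include <linux/spinlock.h>

struct flush_state {
	spinlock_t	lock;
	u64		seq_ondisk;
};

/* returns true if the caller parked itself and will be woken later */
static bool park_unless_flushed(struct flush_state *s, u64 want_seq)
{
	bool parked = false;

	if (READ_ONCE(s->seq_ondisk) + 1 != want_seq) {
		spin_lock(&s->lock);
		/* recheck: the flusher may have advanced seq_ondisk and
		 * issued its wakeup while we were acquiring the lock */
		if (s->seq_ondisk + 1 != want_seq) {
			park_waiter(s);		/* hypothetical, e.g. closure_wait() */
			parked = true;
		}
		spin_unlock(&s->lock);
	}

	return parked;
}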
prt_u64(out, data_opts->extra_replicas); -} - static void trace_move_extent2(struct bch_fs *c, struct bkey_s_c k, struct bch_io_opts *io_opts, struct data_update_opts *data_opts) diff --git a/fs/bcachefs/sb-errors_format.h b/fs/bcachefs/sb-errors_format.h index d6f35a99c429..d54121ec093f 100644 --- a/fs/bcachefs/sb-errors_format.h +++ b/fs/bcachefs/sb-errors_format.h @@ -286,7 +286,8 @@ enum bch_fsck_flags { x(accounting_mismatch, 272, 0) \ x(accounting_replicas_not_marked, 273, 0) \ x(invalid_btree_id, 274, 0) \ - x(alloc_key_io_time_bad, 275, 0) + x(alloc_key_io_time_bad, 275, 0) \ + x(alloc_key_fragmentation_lru_wrong, 276, FSCK_AUTOFIX) enum bch_sb_error_id { #define x(t, n, ...) BCH_FSCK_ERR_##t = n, diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c index fb906467201e..da735608d47c 100644 --- a/fs/bcachefs/super.c +++ b/fs/bcachefs/super.c @@ -563,8 +563,11 @@ static void __bch2_fs_free(struct bch_fs *c) BUG_ON(atomic_read(&c->journal_keys.ref)); bch2_fs_btree_write_buffer_exit(c); percpu_free_rwsem(&c->mark_lock); - EBUG_ON(c->online_reserved && percpu_u64_get(c->online_reserved)); - free_percpu(c->online_reserved); + if (c->online_reserved) { + u64 v = percpu_u64_get(c->online_reserved); + WARN(v, "online_reserved not 0 at shutdown: %lli", v); + free_percpu(c->online_reserved); + } darray_exit(&c->btree_roots_extra); free_percpu(c->pcpu); @@ -1769,7 +1772,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path) if (ret) goto err; - ret = bch2_dev_journal_alloc(ca); + ret = bch2_dev_journal_alloc(ca, true); bch_err_msg(c, ret, "allocating journal"); if (ret) goto err; @@ -1929,7 +1932,7 @@ int bch2_dev_online(struct bch_fs *c, const char *path) } if (!ca->journal.nr) { - ret = bch2_dev_journal_alloc(ca); + ret = bch2_dev_journal_alloc(ca, false); bch_err_msg(ca, ret, "allocating journal"); if (ret) goto err; diff --git a/fs/bcachefs/util.c b/fs/bcachefs/util.c index de331dec2a99..4ec7e44d6e36 100644 --- a/fs/bcachefs/util.c +++ b/fs/bcachefs/util.c @@ -252,8 +252,10 @@ void bch2_prt_u64_base2(struct printbuf *out, u64 v) bch2_prt_u64_base2_nbits(out, v, fls64(v) ?: 1); } -void bch2_print_string_as_lines(const char *prefix, const char *lines) +static void __bch2_print_string_as_lines(const char *prefix, const char *lines, + bool nonblocking) { + bool locked = false; const char *p; if (!lines) { @@ -261,7 +263,13 @@ void bch2_print_string_as_lines(const char *prefix, const char *lines) return; } - console_lock(); + if (!nonblocking) { + console_lock(); + locked = true; + } else { + locked = console_trylock(); + } + while (1) { p = strchrnul(lines, '\n'); printk("%s%.*s\n", prefix, (int) (p - lines), lines); @@ -269,7 +277,18 @@ void bch2_print_string_as_lines(const char *prefix, const char *lines) break; lines = p + 1; } - console_unlock(); + if (locked) + console_unlock(); +} + +void bch2_print_string_as_lines(const char *prefix, const char *lines) +{ + return __bch2_print_string_as_lines(prefix, lines, false); +} + +void bch2_print_string_as_lines_nonblocking(const char *prefix, const char *lines) +{ + return __bch2_print_string_as_lines(prefix, lines, true); } int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task, unsigned skipnr, diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h index 5d2c470a49ac..5b0533ec4c7e 100644 --- a/fs/bcachefs/util.h +++ b/fs/bcachefs/util.h @@ -315,6 +315,7 @@ void bch2_prt_u64_base2_nbits(struct printbuf *, u64, unsigned); void bch2_prt_u64_base2(struct printbuf *, u64); void bch2_print_string_as_lines(const char *prefix, const 
char *lines); +void bch2_print_string_as_lines_nonblocking(const char *prefix, const char *lines); typedef DARRAY(unsigned long) bch_stacktrace; int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *, unsigned, gfp_t); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 38cdb8875e8e..cabb558dbdaa 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2856,6 +2856,8 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block if (ret) return ret; + spin_lock_init(&fs_info->extent_map_shrinker_lock); + ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL); if (ret) return ret; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index f688fab55251..958155cc43a8 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3553,7 +3553,7 @@ err: for (int i = 0; i < num_folios; i++) { if (eb->folios[i]) { detach_extent_buffer_folio(eb, eb->folios[i]); - __folio_put(eb->folios[i]); + folio_put(eb->folios[i]); } } __free_extent_buffer(eb); diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 744e8952abb0..b4c9a6aa118c 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -1028,7 +1028,14 @@ out_free_pre: return ret; } -static long btrfs_scan_inode(struct btrfs_inode *inode, long *scanned, long nr_to_scan) +struct btrfs_em_shrink_ctx { + long nr_to_scan; + long scanned; + u64 last_ino; + u64 last_root; +}; + +static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_ctx *ctx) { const u64 cur_fs_gen = btrfs_get_fs_generation(inode->root->fs_info); struct extent_map_tree *tree = &inode->extent_tree; @@ -1057,14 +1064,25 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, long *scanned, long nr_t if (!down_read_trylock(&inode->i_mmap_lock)) return 0; - write_lock(&tree->lock); + /* + * We want to be fast because we can be called from any path trying to + * allocate memory, so if the lock is busy we don't want to spend time + * waiting for it - either some task is about to do IO for the inode or + * we may have another task shrinking extent maps, here in this code, so + * skip this inode. + */ + if (!write_trylock(&tree->lock)) { + up_read(&inode->i_mmap_lock); + return 0; + } + node = rb_first_cached(&tree->map); while (node) { struct extent_map *em; em = rb_entry(node, struct extent_map, rb_node); node = rb_next(node); - (*scanned)++; + ctx->scanned++; if (em->flags & EXTENT_FLAG_PINNED) goto next; @@ -1085,16 +1103,18 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, long *scanned, long nr_t free_extent_map(em); nr_dropped++; next: - if (*scanned >= nr_to_scan) + if (ctx->scanned >= ctx->nr_to_scan) break; /* - * Restart if we had to reschedule, and any extent maps that were - * pinned before may have become unpinned after we released the - * lock and took it again. + * Stop if we need to reschedule or there's contention on the + * lock. This is to avoid slowing other tasks trying to take the + * lock and because the shrinker might be called during a memory + * allocation path and we want to avoid taking a very long time + * and slowing down all sorts of tasks. 
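A note on the btrfs_scan_inode() hunk above: a shrinker reachable from allocation context must stay polite — take locks with trylock and bail out on contention or a pending reschedule rather than stall the allocating task. A condensed sketch of that shape (the per-entry work is illustrative):

#include <linux/spinlock.h>
#include <linux/sched.h>

static long polite_scan(rwlock_t *lock, long *scanned, long nr_to_scan)
{
	long dropped = 0;

	if (!write_trylock(lock))
		return 0;	/* contended: skip instead of blocking the allocator */

	while (*scanned < nr_to_scan) {
		(*scanned)++;
		dropped += drop_one_entry();	/* hypothetical */

		if (need_resched() || rwlock_needbreak(lock))
			break;	/* yield instead of hogging the lock */
	}
	write_unlock(lock);

	return dropped;
}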
*/ - if (cond_resched_rwlock_write(&tree->lock)) - node = rb_first_cached(&tree->map); + if (need_resched() || rwlock_needbreak(&tree->lock)) + break; } write_unlock(&tree->lock); up_read(&inode->i_mmap_lock); @@ -1102,25 +1122,30 @@ next: return nr_dropped; } -static long btrfs_scan_root(struct btrfs_root *root, long *scanned, long nr_to_scan) +static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx *ctx) { - struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_inode *inode; long nr_dropped = 0; - u64 min_ino = fs_info->extent_map_shrinker_last_ino + 1; + u64 min_ino = ctx->last_ino + 1; inode = btrfs_find_first_inode(root, min_ino); while (inode) { - nr_dropped += btrfs_scan_inode(inode, scanned, nr_to_scan); + nr_dropped += btrfs_scan_inode(inode, ctx); min_ino = btrfs_ino(inode) + 1; - fs_info->extent_map_shrinker_last_ino = btrfs_ino(inode); - iput(&inode->vfs_inode); + ctx->last_ino = btrfs_ino(inode); + btrfs_add_delayed_iput(inode); - if (*scanned >= nr_to_scan) + if (ctx->scanned >= ctx->nr_to_scan) + break; + + /* + * We may be called from memory allocation paths, so we don't + * want to take too much time and slow down tasks. + */ + if (need_resched()) break; - cond_resched(); inode = btrfs_find_first_inode(root, min_ino); } @@ -1132,14 +1157,14 @@ * inode if there is one or we will find out this was the last * one and move to the next root. */ - fs_info->extent_map_shrinker_last_root = btrfs_root_id(root); + ctx->last_root = btrfs_root_id(root); } else { /* * No more inodes in this root, set extent_map_shrinker_last_ino to 0 so * that when processing the next root we start from its first inode. */ - fs_info->extent_map_shrinker_last_ino = 0; - fs_info->extent_map_shrinker_last_root = btrfs_root_id(root) + 1; + ctx->last_ino = 0; + ctx->last_root = btrfs_root_id(root) + 1; } return nr_dropped; @@ -1147,19 +1172,41 @@ long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan) { - const u64 start_root_id = fs_info->extent_map_shrinker_last_root; - u64 next_root_id = start_root_id; + struct btrfs_em_shrink_ctx ctx; + u64 start_root_id; + u64 next_root_id; bool cycled = false; long nr_dropped = 0; - long scanned = 0; + + ctx.scanned = 0; + ctx.nr_to_scan = nr_to_scan; + + /* + * In case we have multiple tasks running this shrinker, make the next + * one start from the next inode in case it starts before we finish. + */ + spin_lock(&fs_info->extent_map_shrinker_lock); + ctx.last_ino = fs_info->extent_map_shrinker_last_ino; + fs_info->extent_map_shrinker_last_ino++; + ctx.last_root = fs_info->extent_map_shrinker_last_root; + spin_unlock(&fs_info->extent_map_shrinker_lock); + + start_root_id = ctx.last_root; + next_root_id = ctx.last_root; if (trace_btrfs_extent_map_shrinker_scan_enter_enabled()) { s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps); - trace_btrfs_extent_map_shrinker_scan_enter(fs_info, nr_to_scan, nr); + trace_btrfs_extent_map_shrinker_scan_enter(fs_info, nr_to_scan, + nr, ctx.last_root, + ctx.last_ino); } - while (scanned < nr_to_scan) { + /* + * We may be called from memory allocation paths, so we don't want to + * take too much time and slow down tasks, so stop if we need to reschedule.
+ */ + while (ctx.scanned < ctx.nr_to_scan && !need_resched()) { struct btrfs_root *root; unsigned long count; @@ -1171,8 +1218,8 @@ long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan) spin_unlock(&fs_info->fs_roots_radix_lock); if (start_root_id > 0 && !cycled) { next_root_id = 0; - fs_info->extent_map_shrinker_last_root = 0; - fs_info->extent_map_shrinker_last_ino = 0; + ctx.last_root = 0; + ctx.last_ino = 0; cycled = true; continue; } @@ -1186,15 +1233,33 @@ long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan) continue; if (is_fstree(btrfs_root_id(root))) - nr_dropped += btrfs_scan_root(root, &scanned, nr_to_scan); + nr_dropped += btrfs_scan_root(root, &ctx); btrfs_put_root(root); } + /* + * In case of multiple tasks running this extent map shrinking code this + * isn't perfect but it's simple and silences things like KCSAN. It's + * not possible to know which task made more progress because we can + * cycle back to the first root and first inode if it's not the first + * time the shrinker ran, see the above logic. Also a task that started + * later may finish earlier than another task and made less progress. So + * make this simple and update to the progress of the last task that + * finished, with the occasional possibility of having two consecutive + * runs of the shrinker process the same inodes. + */ + spin_lock(&fs_info->extent_map_shrinker_lock); + fs_info->extent_map_shrinker_last_ino = ctx.last_ino; + fs_info->extent_map_shrinker_last_root = ctx.last_root; + spin_unlock(&fs_info->extent_map_shrinker_lock); + if (trace_btrfs_extent_map_shrinker_scan_exit_enabled()) { s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps); - trace_btrfs_extent_map_shrinker_scan_exit(fs_info, nr_dropped, nr); + trace_btrfs_extent_map_shrinker_scan_exit(fs_info, nr_dropped, + nr, ctx.last_root, + ctx.last_ino); } return nr_dropped; diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h index 89f0650631cd..833dc3fe0a38 100644 --- a/fs/btrfs/fs.h +++ b/fs/btrfs/fs.h @@ -630,6 +630,7 @@ struct btrfs_fs_info { s32 delalloc_batch; struct percpu_counter evictable_extent_maps; + spinlock_t extent_map_shrinker_lock; u64 extent_map_shrinker_last_root; u64 extent_map_shrinker_last_ino; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 753db965f7c0..3a2b902b2d1f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -10385,7 +10385,7 @@ out_unlock: out_folios: for (i = 0; i < nr_folios; i++) { if (folios[i]) - __folio_put(folios[i]); + folio_put(folios[i]); } kvfree(folios); out: diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index bf0f81d59b6b..39a15cca58ca 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -3062,8 +3062,6 @@ int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info, struct btrfs_qgroup_inherit *inherit, size_t size) { - if (!btrfs_qgroup_enabled(fs_info)) - return 0; if (inherit->flags & ~BTRFS_QGROUP_INHERIT_FLAGS_SUPP) return -EOPNOTSUPP; if (size < sizeof(*inherit) || size > PAGE_SIZE) @@ -3085,6 +3083,14 @@ int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info, return -EINVAL; /* + * Skip the inherit source qgroups check if qgroup is not enabled. + * Qgroup can still be later enabled causing problems, but in that case + * btrfs_qgroup_inherit() would just ignore those invalid ones.
+ */ + if (!btrfs_qgroup_enabled(fs_info)) + return 0; + + /* * Now check all the remaining qgroups, they should all: * * - Exist diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c index cf531255ab76..9522a8b79d22 100644 --- a/fs/btrfs/ref-verify.c +++ b/fs/btrfs/ref-verify.c @@ -441,7 +441,8 @@ static int process_extent_item(struct btrfs_fs_info *fs_info, u32 item_size = btrfs_item_size(leaf, slot); unsigned long end, ptr; u64 offset, flags, count; - int type, ret; + int type; + int ret = 0; ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item); flags = btrfs_extent_flags(leaf, ei); @@ -486,7 +487,11 @@ static int process_extent_item(struct btrfs_fs_info *fs_info, key->objectid, key->offset); break; case BTRFS_EXTENT_OWNER_REF_KEY: - WARN_ON(!btrfs_fs_incompat(fs_info, SIMPLE_QUOTA)); + if (!btrfs_fs_incompat(fs_info, SIMPLE_QUOTA)) { + btrfs_err(fs_info, + "found extent owner ref without simple quotas enabled"); + ret = -EINVAL; + } break; default: btrfs_err(fs_info, "invalid key type in iref"); diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c index d620323d08ea..ae8c56442549 100644 --- a/fs/btrfs/space-info.c +++ b/fs/btrfs/space-info.c @@ -373,11 +373,18 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info, * "optimal" chunk size based on the fs size. However when we actually * allocate the chunk we will strip this down further, making it no more * than 10% of the disk or 1G, whichever is smaller. + * + * On the zoned mode, we need to use zone_size (= + * data_sinfo->chunk_size) as it is. */ data_sinfo = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA); - data_chunk_size = min(data_sinfo->chunk_size, - mult_perc(fs_info->fs_devices->total_rw_bytes, 10)); - data_chunk_size = min_t(u64, data_chunk_size, SZ_1G); + if (!btrfs_is_zoned(fs_info)) { + data_chunk_size = min(data_sinfo->chunk_size, - mult_perc(fs_info->fs_devices->total_rw_bytes, 10)); + data_chunk_size = min_t(u64, data_chunk_size, SZ_1G); + } else { + data_chunk_size = data_sinfo->chunk_size; + } /* * Since data allocations immediately use block groups as part of the @@ -405,6 +412,17 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info, avail >>= 3; else avail >>= 1; + + /* + * On the zoned mode, we always allocate one zone as one chunk. + * Returning non-zone size aligned bytes here will result in + * less pressure for the async metadata reclaim process, and it + * will over-commit too much leading to ENOSPC. Align down to the + * zone size to avoid that. + */ + if (btrfs_is_zoned(fs_info)) + avail = ALIGN_DOWN(avail, fs_info->zone_size); + return avail; } diff --git a/fs/cachefiles/cache.c b/fs/cachefiles/cache.c index f449f7340aad..9fb06dc16520 100644 --- a/fs/cachefiles/cache.c +++ b/fs/cachefiles/cache.c @@ -8,6 +8,7 @@ #include <linux/slab.h> #include <linux/statfs.h> #include <linux/namei.h> +#include <trace/events/fscache.h> #include "internal.h" /* @@ -312,19 +313,59 @@ static void cachefiles_withdraw_objects(struct cachefiles_cache *cache) } /* - * Withdraw volumes. + * Withdraw fscache volumes.
+ */ +static void cachefiles_withdraw_fscache_volumes(struct cachefiles_cache *cache) +{ + struct list_head *cur; + struct cachefiles_volume *volume; + struct fscache_volume *vcookie; + + _enter(""); +retry: + spin_lock(&cache->object_list_lock); + list_for_each(cur, &cache->volumes) { + volume = list_entry(cur, struct cachefiles_volume, cache_link); + + if (atomic_read(&volume->vcookie->n_accesses) == 0) + continue; + + vcookie = fscache_try_get_volume(volume->vcookie, + fscache_volume_get_withdraw); + if (vcookie) { + spin_unlock(&cache->object_list_lock); + fscache_withdraw_volume(vcookie); + fscache_put_volume(vcookie, fscache_volume_put_withdraw); + goto retry; + } + } + spin_unlock(&cache->object_list_lock); + + _leave(""); +} + +/* + * Withdraw cachefiles volumes. */ static void cachefiles_withdraw_volumes(struct cachefiles_cache *cache) { _enter(""); for (;;) { + struct fscache_volume *vcookie = NULL; struct cachefiles_volume *volume = NULL; spin_lock(&cache->object_list_lock); if (!list_empty(&cache->volumes)) { volume = list_first_entry(&cache->volumes, struct cachefiles_volume, cache_link); + vcookie = fscache_try_get_volume(volume->vcookie, + fscache_volume_get_withdraw); + if (!vcookie) { + spin_unlock(&cache->object_list_lock); + cpu_relax(); + continue; + } list_del_init(&volume->cache_link); } spin_unlock(&cache->object_list_lock); @@ -332,6 +373,7 @@ static void cachefiles_withdraw_volumes(struct cachefiles_cache *cache) break; cachefiles_withdraw_volume(volume); + fscache_put_volume(vcookie, fscache_volume_put_withdraw); } _leave(""); @@ -371,6 +413,7 @@ void cachefiles_withdraw_cache(struct cachefiles_cache *cache) pr_info("File cache on %s unregistering\n", fscache->name); fscache_withdraw_cache(fscache); + cachefiles_withdraw_fscache_volumes(cache); /* we now have to destroy all the active objects pertaining to this * cache - which we do by passing them off to thread pool to be diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c index 06cdf1a8a16f..89b11336a836 100644 --- a/fs/cachefiles/daemon.c +++ b/fs/cachefiles/daemon.c @@ -366,14 +366,14 @@ static __poll_t cachefiles_daemon_poll(struct file *file, if (cachefiles_in_ondemand_mode(cache)) { if (!xa_empty(&cache->reqs)) { - rcu_read_lock(); + xas_lock(&xas); xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) { if (!cachefiles_ondemand_is_reopening_read(req)) { mask |= EPOLLIN; break; } } - rcu_read_unlock(); + xas_unlock(&xas); } } else { if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags)) diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index 6845a90cdfcc..7b99bd98de75 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -48,6 +48,7 @@ enum cachefiles_object_state { CACHEFILES_ONDEMAND_OBJSTATE_CLOSE, /* Anonymous fd closed by daemon or initial state */ CACHEFILES_ONDEMAND_OBJSTATE_OPEN, /* Anonymous fd associated with object is available */ CACHEFILES_ONDEMAND_OBJSTATE_REOPENING, /* Object that was closed and is being reopened. */ + CACHEFILES_ONDEMAND_OBJSTATE_DROPPING, /* Object is being dropped. 
*/ }; struct cachefiles_ondemand_info { @@ -128,6 +129,7 @@ struct cachefiles_cache { unsigned long req_id_next; struct xarray ondemand_ids; /* xarray for ondemand_id allocation */ u32 ondemand_id_next; + u32 msg_id_next; }; static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache) @@ -335,6 +337,7 @@ cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \ CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN); CACHEFILES_OBJECT_STATE_FUNCS(close, CLOSE); CACHEFILES_OBJECT_STATE_FUNCS(reopening, REOPENING); +CACHEFILES_OBJECT_STATE_FUNCS(dropping, DROPPING); static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req) { diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index bce005f2b456..470c96658385 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -517,7 +517,8 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, */ xas_lock(&xas); - if (test_bit(CACHEFILES_DEAD, &cache->flags)) { + if (test_bit(CACHEFILES_DEAD, &cache->flags) || + cachefiles_ondemand_object_is_dropping(object)) { xas_unlock(&xas); ret = -EIO; goto out; @@ -527,20 +528,32 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, smp_mb(); if (opcode == CACHEFILES_OP_CLOSE && - !cachefiles_ondemand_object_is_open(object)) { + !cachefiles_ondemand_object_is_open(object)) { WARN_ON_ONCE(object->ondemand->ondemand_id == 0); xas_unlock(&xas); ret = -EIO; goto out; } - xas.xa_index = 0; + /* + * Cyclically find a free xas to avoid msg_id reuse that would + * cause the daemon to successfully copen a stale msg_id. + */ + xas.xa_index = cache->msg_id_next; xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK); + if (xas.xa_node == XAS_RESTART) { + xas.xa_index = 0; + xas_find_marked(&xas, cache->msg_id_next - 1, XA_FREE_MARK); + } if (xas.xa_node == XAS_RESTART) xas_set_err(&xas, -EBUSY); + xas_store(&xas, req); - xas_clear_mark(&xas, XA_FREE_MARK); - xas_set_mark(&xas, CACHEFILES_REQ_NEW); + if (xas_valid(&xas)) { + cache->msg_id_next = xas.xa_index + 1; + xas_clear_mark(&xas, XA_FREE_MARK); + xas_set_mark(&xas, CACHEFILES_REQ_NEW); + } xas_unlock(&xas); } while (xas_nomem(&xas, GFP_KERNEL)); @@ -568,7 +581,8 @@ out: * If error occurs after creating the anonymous fd, * cachefiles_ondemand_fd_release() will set object to close. */ - if (opcode == CACHEFILES_OP_OPEN) + if (opcode == CACHEFILES_OP_OPEN && + !cachefiles_ondemand_object_is_dropping(object)) cachefiles_ondemand_set_object_close(object); kfree(req); return ret; @@ -667,8 +681,34 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object) void cachefiles_ondemand_clean_object(struct cachefiles_object *object) { + unsigned long index; + struct cachefiles_req *req; + struct cachefiles_cache *cache; + + if (!object->ondemand) + return; + cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0, cachefiles_ondemand_init_close_req, NULL); + + if (!object->ondemand->ondemand_id) + return; + + /* Cancel all requests for the object that is being dropped. */ + cache = object->volume->cache; + xa_lock(&cache->reqs); + cachefiles_ondemand_set_object_dropping(object); + xa_for_each(&cache->reqs, index, req) { + if (req->object == object) { + req->error = -EIO; + complete(&req->done); + __xa_erase(&cache->reqs, index); + } + } + xa_unlock(&cache->reqs); + + /* Wait for ondemand_object_worker() to finish to avoid UAF. 
*/ + cancel_work_sync(&object->ondemand->ondemand_work); } int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object, diff --git a/fs/cachefiles/volume.c b/fs/cachefiles/volume.c index 89df0ba8ba5e..781aac4ef274 100644 --- a/fs/cachefiles/volume.c +++ b/fs/cachefiles/volume.c @@ -133,7 +133,6 @@ void cachefiles_free_volume(struct fscache_volume *vcookie) void cachefiles_withdraw_volume(struct cachefiles_volume *volume) { - fscache_withdraw_volume(volume->vcookie); cachefiles_set_volume_xattr(volume); __cachefiles_free_volume(volume); } diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c index bcb6173943ee..4dd8a993c60a 100644 --- a/fs/cachefiles/xattr.c +++ b/fs/cachefiles/xattr.c @@ -110,9 +110,11 @@ int cachefiles_check_auxdata(struct cachefiles_object *object, struct file *file if (xlen == 0) xlen = vfs_getxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, buf, tlen); if (xlen != tlen) { - if (xlen < 0) + if (xlen < 0) { + ret = xlen; trace_cachefiles_vfs_error(object, file_inode(file), xlen, cachefiles_trace_getxattr_error); + } if (xlen == -EIO) cachefiles_io_error_obj( object, @@ -252,6 +254,7 @@ int cachefiles_check_volume_xattr(struct cachefiles_volume *volume) xlen = vfs_getxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, buf, len); if (xlen != len) { if (xlen < 0) { + ret = xlen; trace_cachefiles_vfs_error(NULL, d_inode(dentry), xlen, cachefiles_trace_getxattr_error); if (xlen == -EIO) diff --git a/fs/dcache.c b/fs/dcache.c index d58dc9e58f3b..4c144519aa70 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -355,7 +355,11 @@ static inline void __d_clear_type_and_inode(struct dentry *dentry) flags &= ~DCACHE_ENTRY_TYPE; WRITE_ONCE(dentry->d_flags, flags); dentry->d_inode = NULL; - if (flags & DCACHE_LRU_LIST) + /* + * The negative counter only tracks dentries on the LRU. Don't inc if + * d_lru is on another list. + */ + if ((flags & (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST) this_cpu_inc(nr_dentry_negative); } @@ -1844,9 +1848,11 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode) spin_lock(&dentry->d_lock); /* - * Decrement negative dentry count if it was in the LRU list. + * The negative counter only tracks dentries on the LRU. Don't dec if + * d_lru is on another list. */ - if (dentry->d_flags & DCACHE_LRU_LIST) + if ((dentry->d_flags & + (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST) this_cpu_dec(nr_dentry_negative); hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); raw_write_seqcount_begin(&dentry->d_seq); diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c index 5a400259ae74..9a1a93e3888b 100644 --- a/fs/hfsplus/xattr.c +++ b/fs/hfsplus/xattr.c @@ -696,7 +696,7 @@ ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size) return err; } - strbuf = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + + strbuf = kzalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN + XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL); if (!strbuf) { res = -ENOMEM; diff --git a/fs/locks.c b/fs/locks.c index c360d1992d21..bdd94c32256f 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1367,9 +1367,9 @@ retry: locks_wake_up_blocks(&left->c); } out: + trace_posix_lock_inode(inode, request, error); spin_unlock(&ctx->flc_lock); percpu_up_read(&file_rwsem); - trace_posix_lock_inode(inode, request, error); /* * Free any unused locks. 
*/ diff --git a/fs/minix/namei.c b/fs/minix/namei.c index d6031acc34f0..a944a0f17b53 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c @@ -213,8 +213,7 @@ static int minix_rename(struct mnt_idmap *idmap, if (!new_de) goto out_dir; err = minix_set_link(new_de, new_page, old_inode); - kunmap(new_page); - put_page(new_page); + unmap_and_put_page(new_page, new_de); if (err) goto out_dir; inode_set_ctime_current(new_inode); diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c index a6bb03bea920..4c0401dbbfcf 100644 --- a/fs/netfs/buffered_read.c +++ b/fs/netfs/buffered_read.c @@ -117,7 +117,7 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq) if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) { if (folio->index == rreq->no_unlock_folio && test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags)) - _debug("no unlock"); + kdebug("no unlock"); else folio_unlock(folio); } @@ -204,7 +204,7 @@ void netfs_readahead(struct readahead_control *ractl) struct netfs_inode *ctx = netfs_inode(ractl->mapping->host); int ret; - _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl)); + kenter("%lx,%x", readahead_index(ractl), readahead_count(ractl)); if (readahead_count(ractl) == 0) return; @@ -268,7 +268,7 @@ int netfs_read_folio(struct file *file, struct folio *folio) struct folio *sink = NULL; int ret; - _enter("%lx", folio->index); + kenter("%lx", folio->index); rreq = netfs_alloc_request(mapping, file, folio_file_pos(folio), folio_size(folio), @@ -508,7 +508,7 @@ retry: have_folio: *_folio = folio; - _leave(" = 0"); + kleave(" = 0"); return 0; error_put: @@ -518,7 +518,7 @@ error: folio_unlock(folio); folio_put(folio); } - _leave(" = %d", ret); + kleave(" = %d", ret); return ret; } EXPORT_SYMBOL(netfs_write_begin); @@ -536,7 +536,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio, size_t flen = folio_size(folio); int ret; - _enter("%zx @%llx", flen, start); + kenter("%zx @%llx", flen, start); ret = -ENOMEM; @@ -567,7 +567,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio, error_put: netfs_put_request(rreq, false, netfs_rreq_trace_put_discard); error: - _leave(" = %d", ret); + kleave(" = %d", ret); return ret; } diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c index d583af7a2209..ecbc99ec7d36 100644 --- a/fs/netfs/buffered_write.c +++ b/fs/netfs/buffered_write.c @@ -56,7 +56,7 @@ static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx, struct netfs_group *group = netfs_folio_group(folio); loff_t pos = folio_file_pos(folio); - _enter(""); + kenter(""); if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE) return NETFS_FLUSH_CONTENT; @@ -272,12 +272,12 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter, */ howto = netfs_how_to_modify(ctx, file, folio, netfs_group, flen, offset, part, maybe_trouble); - _debug("howto %u", howto); + kdebug("howto %u", howto); switch (howto) { case NETFS_JUST_PREFETCH: ret = netfs_prefetch_for_write(file, folio, offset, part); if (ret < 0) { - _debug("prefetch = %zd", ret); + kdebug("prefetch = %zd", ret); goto error_folio_unlock; } break; @@ -418,7 +418,7 @@ out: } iocb->ki_pos += written; - _leave(" = %zd [%zd]", written, ret); + kleave(" = %zd [%zd]", written, ret); return written ? 
written : ret; error_folio_unlock: @@ -491,7 +491,7 @@ ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) struct netfs_inode *ictx = netfs_inode(inode); ssize_t ret; - _enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode)); + kenter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode)); if (!iov_iter_count(from)) return 0; @@ -529,7 +529,7 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr vm_fault_t ret = VM_FAULT_RETRY; int err; - _enter("%lx", folio->index); + kenter("%lx", folio->index); sb_start_pagefault(inode->i_sb); diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c index 10a1e4da6bda..b6debac6205f 100644 --- a/fs/netfs/direct_read.c +++ b/fs/netfs/direct_read.c @@ -33,7 +33,7 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i size_t orig_count = iov_iter_count(iter); bool async = !is_sync_kiocb(iocb); - _enter(""); + kenter(""); if (!orig_count) return 0; /* Don't update atime */ diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c index 88f2adfab75e..792ef17bae21 100644 --- a/fs/netfs/direct_write.c +++ b/fs/netfs/direct_write.c @@ -37,7 +37,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter * size_t len = iov_iter_count(iter); bool async = !is_sync_kiocb(iocb); - _enter(""); + kenter(""); /* We're going to need a bounce buffer if what we transmit is going to * be different in some way to the source buffer, e.g. because it gets @@ -45,7 +45,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter * */ // TODO - _debug("uw %llx-%llx", start, end); + kdebug("uw %llx-%llx", start, end); wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp, start, iocb->ki_flags & IOCB_DIRECT ? 
@@ -96,7 +96,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter * wreq->cleanup = netfs_cleanup_dio_write; ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), wreq->len); if (ret < 0) { - _debug("begin = %zd", ret); + kdebug("begin = %zd", ret); goto out; } @@ -143,7 +143,7 @@ ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from) loff_t pos = iocb->ki_pos; unsigned long long end = pos + iov_iter_count(from) - 1; - _enter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode)); + kenter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode)); if (!iov_iter_count(from)) return 0; diff --git a/fs/netfs/fscache_cache.c b/fs/netfs/fscache_cache.c index 9397ed39b0b4..288a73c3072d 100644 --- a/fs/netfs/fscache_cache.c +++ b/fs/netfs/fscache_cache.c @@ -237,7 +237,7 @@ int fscache_add_cache(struct fscache_cache *cache, { int n_accesses; - _enter("{%s,%s}", ops->name, cache->name); + kenter("{%s,%s}", ops->name, cache->name); BUG_ON(fscache_cache_state(cache) != FSCACHE_CACHE_IS_PREPARING); @@ -257,7 +257,7 @@ int fscache_add_cache(struct fscache_cache *cache, up_write(&fscache_addremove_sem); pr_notice("Cache \"%s\" added (type %s)\n", cache->name, ops->name); - _leave(" = 0 [%s]", cache->name); + kleave(" = 0 [%s]", cache->name); return 0; } EXPORT_SYMBOL(fscache_add_cache); diff --git a/fs/netfs/fscache_cookie.c b/fs/netfs/fscache_cookie.c index bce2492186d0..4d1e8bf4c615 100644 --- a/fs/netfs/fscache_cookie.c +++ b/fs/netfs/fscache_cookie.c @@ -456,7 +456,7 @@ struct fscache_cookie *__fscache_acquire_cookie( { struct fscache_cookie *cookie; - _enter("V=%x", volume->debug_id); + kenter("V=%x", volume->debug_id); if (!index_key || !index_key_len || index_key_len > 255 || aux_data_len > 255) return NULL; @@ -484,7 +484,7 @@ struct fscache_cookie *__fscache_acquire_cookie( trace_fscache_acquire(cookie); fscache_stat(&fscache_n_acquires_ok); - _leave(" = c=%08x", cookie->debug_id); + kleave(" = c=%08x", cookie->debug_id); return cookie; } EXPORT_SYMBOL(__fscache_acquire_cookie); @@ -505,7 +505,7 @@ static void fscache_perform_lookup(struct fscache_cookie *cookie) enum fscache_access_trace trace = fscache_access_lookup_cookie_end_failed; bool need_withdraw = false; - _enter(""); + kenter(""); if (!cookie->volume->cache_priv) { fscache_create_volume(cookie->volume, true); @@ -519,7 +519,7 @@ static void fscache_perform_lookup(struct fscache_cookie *cookie) if (cookie->state != FSCACHE_COOKIE_STATE_FAILED) fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT); need_withdraw = true; - _leave(" [fail]"); + kleave(" [fail]"); goto out; } @@ -572,7 +572,7 @@ void __fscache_use_cookie(struct fscache_cookie *cookie, bool will_modify) bool queue = false; int n_active; - _enter("c=%08x", cookie->debug_id); + kenter("c=%08x", cookie->debug_id); if (WARN(test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags), "Trying to use relinquished cookie\n")) @@ -636,7 +636,7 @@ again: spin_unlock(&cookie->lock); if (queue) fscache_queue_cookie(cookie, fscache_cookie_get_use_work); - _leave(""); + kleave(""); } EXPORT_SYMBOL(__fscache_use_cookie); @@ -702,7 +702,7 @@ static void fscache_cookie_state_machine(struct fscache_cookie *cookie) enum fscache_cookie_state state; bool wake = false; - _enter("c=%x", cookie->debug_id); + kenter("c=%x", cookie->debug_id); again: spin_lock(&cookie->lock); @@ -820,7 +820,7 @@ out: spin_unlock(&cookie->lock); if (wake) wake_up_cookie_state(cookie); - _leave(""); + kleave(""); } static void 
fscache_cookie_worker(struct work_struct *work) @@ -867,7 +867,7 @@ static void fscache_cookie_lru_do_one(struct fscache_cookie *cookie) set_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags); spin_unlock(&cookie->lock); fscache_stat(&fscache_n_cookies_lru_expired); - _debug("lru c=%x", cookie->debug_id); + kdebug("lru c=%x", cookie->debug_id); __fscache_withdraw_cookie(cookie); } @@ -971,7 +971,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire) if (retire) fscache_stat(&fscache_n_relinquishes_retire); - _enter("c=%08x{%d},%d", + kenter("c=%08x{%d},%d", cookie->debug_id, atomic_read(&cookie->n_active), retire); if (WARN(test_and_set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags), @@ -1050,7 +1050,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie, { bool is_caching; - _enter("c=%x", cookie->debug_id); + kenter("c=%x", cookie->debug_id); fscache_stat(&fscache_n_invalidates); @@ -1072,7 +1072,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie, case FSCACHE_COOKIE_STATE_INVALIDATING: /* is_still_valid will catch it */ default: spin_unlock(&cookie->lock); - _leave(" [no %u]", cookie->state); + kleave(" [no %u]", cookie->state); return; case FSCACHE_COOKIE_STATE_LOOKING_UP: @@ -1081,7 +1081,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie, fallthrough; case FSCACHE_COOKIE_STATE_CREATING: spin_unlock(&cookie->lock); - _leave(" [look %x]", cookie->inval_counter); + kleave(" [look %x]", cookie->inval_counter); return; case FSCACHE_COOKIE_STATE_ACTIVE: @@ -1094,7 +1094,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie, if (is_caching) fscache_queue_cookie(cookie, fscache_cookie_get_inval_work); - _leave(" [inv]"); + kleave(" [inv]"); return; } } diff --git a/fs/netfs/fscache_io.c b/fs/netfs/fscache_io.c index 38637e5c9b57..bf4eaeec44fb 100644 --- a/fs/netfs/fscache_io.c +++ b/fs/netfs/fscache_io.c @@ -28,12 +28,12 @@ bool fscache_wait_for_operation(struct netfs_cache_resources *cres, again: if (!fscache_cache_is_live(cookie->volume->cache)) { - _leave(" [broken]"); + kleave(" [broken]"); return false; } state = fscache_cookie_state(cookie); - _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state); + kenter("c=%08x{%u},%x", cookie->debug_id, state, want_state); switch (state) { case FSCACHE_COOKIE_STATE_CREATING: @@ -52,7 +52,7 @@ again: case FSCACHE_COOKIE_STATE_DROPPED: case FSCACHE_COOKIE_STATE_RELINQUISHING: default: - _leave(" [not live]"); + kleave(" [not live]"); return false; } @@ -92,7 +92,7 @@ again: spin_lock(&cookie->lock); state = fscache_cookie_state(cookie); - _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state); + kenter("c=%08x{%u},%x", cookie->debug_id, state, want_state); switch (state) { case FSCACHE_COOKIE_STATE_LOOKING_UP: @@ -140,7 +140,7 @@ failed: cres->cache_priv = NULL; cres->ops = NULL; fscache_end_cookie_access(cookie, fscache_access_io_not_live); - _leave(" = -ENOBUFS"); + kleave(" = -ENOBUFS"); return -ENOBUFS; } @@ -224,7 +224,7 @@ void __fscache_write_to_cache(struct fscache_cookie *cookie, if (len == 0) goto abandon; - _enter("%llx,%zx", start, len); + kenter("%llx,%zx", start, len); wreq = kzalloc(sizeof(struct fscache_write_request), GFP_NOFS); if (!wreq) diff --git a/fs/netfs/fscache_main.c b/fs/netfs/fscache_main.c index 42e98bb523e3..bf9b33d26e31 100644 --- a/fs/netfs/fscache_main.c +++ b/fs/netfs/fscache_main.c @@ -99,7 +99,7 @@ error_wq: */ void __exit fscache_exit(void) { - _enter(""); + kenter(""); kmem_cache_destroy(fscache_cookie_jar); fscache_proc_cleanup(); diff 
--git a/fs/netfs/fscache_volume.c b/fs/netfs/fscache_volume.c index cdf991bdd9de..2e2a405ca9b0 100644 --- a/fs/netfs/fscache_volume.c +++ b/fs/netfs/fscache_volume.c @@ -27,6 +27,19 @@ struct fscache_volume *fscache_get_volume(struct fscache_volume *volume, return volume; } +struct fscache_volume *fscache_try_get_volume(struct fscache_volume *volume, + enum fscache_volume_trace where) +{ + int ref; + + if (!__refcount_inc_not_zero(&volume->ref, &ref)) + return NULL; + + trace_fscache_volume(volume->debug_id, ref + 1, where); + return volume; +} +EXPORT_SYMBOL(fscache_try_get_volume); + static void fscache_see_volume(struct fscache_volume *volume, enum fscache_volume_trace where) { @@ -251,7 +264,7 @@ static struct fscache_volume *fscache_alloc_volume(const char *volume_key, fscache_see_volume(volume, fscache_volume_new_acquire); fscache_stat(&fscache_n_volumes); up_write(&fscache_addremove_sem); - _leave(" = v=%x", volume->debug_id); + kleave(" = v=%x", volume->debug_id); return volume; err_vol: @@ -420,6 +433,7 @@ void fscache_put_volume(struct fscache_volume *volume, fscache_free_volume(volume); } } +EXPORT_SYMBOL(fscache_put_volume); /* * Relinquish a volume representation cookie. @@ -452,7 +466,7 @@ void fscache_withdraw_volume(struct fscache_volume *volume) { int n_accesses; - _debug("withdraw V=%x", volume->debug_id); + kdebug("withdraw V=%x", volume->debug_id); /* Allow wakeups on dec-to-0 */ n_accesses = atomic_dec_return(&volume->n_accesses); diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h index acd9ca14e264..21e46bc9aa49 100644 --- a/fs/netfs/internal.h +++ b/fs/netfs/internal.h @@ -34,7 +34,6 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync); /* * main.c */ -extern unsigned int netfs_debug; extern struct list_head netfs_io_requests; extern spinlock_t netfs_proc_lock; extern mempool_t netfs_request_pool; @@ -344,8 +343,6 @@ extern const struct seq_operations fscache_volumes_seq_ops; struct fscache_volume *fscache_get_volume(struct fscache_volume *volume, enum fscache_volume_trace where); -void fscache_put_volume(struct fscache_volume *volume, - enum fscache_volume_trace where); bool fscache_begin_volume_access(struct fscache_volume *volume, struct fscache_cookie *cookie, enum fscache_access_trace why); @@ -356,42 +353,12 @@ void fscache_create_volume(struct fscache_volume *volume, bool wait); * debug tracing */ #define dbgprintk(FMT, ...) \ - printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__) + pr_debug("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__) #define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__) #define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__) #define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__) -#ifdef __KDEBUG -#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__) -#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__) -#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__) - -#elif defined(CONFIG_NETFS_DEBUG) -#define _enter(FMT, ...) \ -do { \ - if (netfs_debug) \ - kenter(FMT, ##__VA_ARGS__); \ -} while (0) - -#define _leave(FMT, ...) \ -do { \ - if (netfs_debug) \ - kleave(FMT, ##__VA_ARGS__); \ -} while (0) - -#define _debug(FMT, ...) \ -do { \ - if (netfs_debug) \ - kdebug(FMT, ##__VA_ARGS__); \ -} while (0) - -#else -#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__) -#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__) -#define _debug(FMT, ...) 
no_printk(FMT, ##__VA_ARGS__) -#endif - /* * assertions */ diff --git a/fs/netfs/io.c b/fs/netfs/io.c index c93851b98368..c7576481c321 100644 --- a/fs/netfs/io.c +++ b/fs/netfs/io.c @@ -130,7 +130,7 @@ static void netfs_reset_subreq_iter(struct netfs_io_request *rreq, if (count == remaining) return; - _debug("R=%08x[%u] ITER RESUB-MISMATCH %zx != %zx-%zx-%llx %x\n", + kdebug("R=%08x[%u] ITER RESUB-MISMATCH %zx != %zx-%zx-%llx %x\n", rreq->debug_id, subreq->debug_index, iov_iter_count(&subreq->io_iter), subreq->transferred, subreq->len, rreq->i_size, @@ -326,7 +326,7 @@ void netfs_subreq_terminated(struct netfs_io_subrequest *subreq, struct netfs_io_request *rreq = subreq->rreq; int u; - _enter("R=%x[%x]{%llx,%lx},%zd", + kenter("R=%x[%x]{%llx,%lx},%zd", rreq->debug_id, subreq->debug_index, subreq->start, subreq->flags, transferred_or_error); @@ -435,7 +435,7 @@ netfs_rreq_prepare_read(struct netfs_io_request *rreq, struct netfs_inode *ictx = netfs_inode(rreq->inode); size_t lsize; - _enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size); + kenter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size); if (rreq->origin != NETFS_DIO_READ) { source = netfs_cache_prepare_read(subreq, rreq->i_size); @@ -518,7 +518,7 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq, subreq->start = rreq->start + rreq->submitted; subreq->len = io_iter->count; - _debug("slice %llx,%zx,%llx", subreq->start, subreq->len, rreq->submitted); + kdebug("slice %llx,%zx,%llx", subreq->start, subreq->len, rreq->submitted); list_add_tail(&subreq->rreq_link, &rreq->subrequests); /* Call out to the cache to find out what it can do with the remaining @@ -570,7 +570,7 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync) struct iov_iter io_iter; int ret; - _enter("R=%x %llx-%llx", + kenter("R=%x %llx-%llx", rreq->debug_id, rreq->start, rreq->start + rreq->len - 1); if (rreq->len == 0) { @@ -593,7 +593,7 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync) atomic_set(&rreq->nr_outstanding, 1); io_iter = rreq->io_iter; do { - _debug("submit %llx + %llx >= %llx", + kdebug("submit %llx + %llx >= %llx", rreq->start, rreq->submitted, rreq->i_size); if (rreq->origin == NETFS_DIO_READ && rreq->start + rreq->submitted >= rreq->i_size) diff --git a/fs/netfs/main.c b/fs/netfs/main.c index 5f0f438e5d21..db824c372842 100644 --- a/fs/netfs/main.c +++ b/fs/netfs/main.c @@ -20,10 +20,6 @@ MODULE_LICENSE("GPL"); EXPORT_TRACEPOINT_SYMBOL(netfs_sreq); -unsigned netfs_debug; -module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO); -MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask"); - static struct kmem_cache *netfs_request_slab; static struct kmem_cache *netfs_subrequest_slab; mempool_t netfs_request_pool; diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c index 83e644bd518f..172808e83ca8 100644 --- a/fs/netfs/misc.c +++ b/fs/netfs/misc.c @@ -26,7 +26,7 @@ bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio) struct fscache_cookie *cookie = netfs_i_cookie(ictx); bool need_use = false; - _enter(""); + kenter(""); if (!filemap_dirty_folio(mapping, folio)) return false; @@ -99,7 +99,7 @@ void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length) struct netfs_folio *finfo; size_t flen = folio_size(folio); - _enter("{%lx},%zx,%zx", folio->index, offset, length); + kenter("{%lx},%zx,%zx", folio->index, offset, length); if (!folio_test_private(folio)) return; diff --git a/fs/netfs/write_collect.c 
b/fs/netfs/write_collect.c index 426cf87aaf2e..488147439fe0 100644 --- a/fs/netfs/write_collect.c +++ b/fs/netfs/write_collect.c @@ -161,7 +161,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq, { struct list_head *next; - _enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr); + kenter("R=%x[%x:]", wreq->debug_id, stream->stream_nr); if (list_empty(&stream->subrequests)) return; @@ -374,7 +374,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq) unsigned int notes; int s; - _enter("%llx-%llx", wreq->start, wreq->start + wreq->len); + kenter("%llx-%llx", wreq->start, wreq->start + wreq->len); trace_netfs_collect(wreq); trace_netfs_rreq(wreq, netfs_rreq_trace_collect); @@ -409,7 +409,7 @@ reassess_streams: front = stream->front; while (front) { trace_netfs_collect_sreq(wreq, front); - //_debug("sreq [%x] %llx %zx/%zx", + //kdebug("sreq [%x] %llx %zx/%zx", // front->debug_index, front->start, front->transferred, front->len); /* Stall if there may be a discontinuity. */ @@ -598,7 +598,7 @@ reassess_streams: out: netfs_put_group_many(wreq->group, wreq->nr_group_rel); wreq->nr_group_rel = 0; - _leave(" = %x", notes); + kleave(" = %x", notes); return; need_retry: @@ -606,7 +606,7 @@ need_retry: * that any partially completed op will have had any wholly transferred * folios removed from it. */ - _debug("retry"); + kdebug("retry"); netfs_retry_writes(wreq); goto out; } @@ -621,7 +621,7 @@ void netfs_write_collection_worker(struct work_struct *work) size_t transferred; int s; - _enter("R=%x", wreq->debug_id); + kenter("R=%x", wreq->debug_id); netfs_see_request(wreq, netfs_rreq_trace_see_work); if (!test_bit(NETFS_RREQ_IN_PROGRESS, &wreq->flags)) { @@ -684,7 +684,7 @@ void netfs_write_collection_worker(struct work_struct *work) if (wreq->origin == NETFS_DIO_WRITE) inode_dio_end(wreq->inode); - _debug("finished"); + kdebug("finished"); trace_netfs_rreq(wreq, netfs_rreq_trace_wake_ip); clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &wreq->flags); wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS); @@ -744,7 +744,7 @@ void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error, struct netfs_io_request *wreq = subreq->rreq; struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr]; - _enter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error); + kenter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error); switch (subreq->source) { case NETFS_UPLOAD_TO_SERVER: diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c index ec6cf8707fb0..d7c971df8866 100644 --- a/fs/netfs/write_issue.c +++ b/fs/netfs/write_issue.c @@ -99,7 +99,7 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping, if (IS_ERR(wreq)) return wreq; - _enter("R=%x", wreq->debug_id); + kenter("R=%x", wreq->debug_id); ictx = netfs_inode(wreq->inode); if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &wreq->flags)) @@ -159,7 +159,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq, subreq->max_nr_segs = INT_MAX; subreq->stream_nr = stream->stream_nr; - _enter("R=%x[%x]", wreq->debug_id, subreq->debug_index); + kenter("R=%x[%x]", wreq->debug_id, subreq->debug_index); trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index, refcount_read(&subreq->ref), @@ -215,7 +215,7 @@ static void netfs_do_issue_write(struct netfs_io_stream *stream, { struct netfs_io_request *wreq = subreq->rreq; - _enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len); + kenter("R=%x[%x],%zx", wreq->debug_id, 
subreq->debug_index, subreq->len); if (test_bit(NETFS_SREQ_FAILED, &subreq->flags)) return netfs_write_subrequest_terminated(subreq, subreq->error, false); @@ -272,11 +272,11 @@ int netfs_advance_write(struct netfs_io_request *wreq, size_t part; if (!stream->avail) { - _leave("no write"); + kleave("no write"); return len; } - _enter("R=%x[%x]", wreq->debug_id, subreq ? subreq->debug_index : 0); + kenter("R=%x[%x]", wreq->debug_id, subreq ? subreq->debug_index : 0); if (subreq && start != subreq->start + subreq->len) { netfs_issue_write(wreq, stream); @@ -288,7 +288,7 @@ int netfs_advance_write(struct netfs_io_request *wreq, subreq = stream->construct; part = min(subreq->max_len - subreq->len, len); - _debug("part %zx/%zx %zx/%zx", subreq->len, subreq->max_len, part, len); + kdebug("part %zx/%zx %zx/%zx", subreq->len, subreq->max_len, part, len); subreq->len += part; subreq->nr_segs++; @@ -319,7 +319,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq, bool to_eof = false, streamw = false; bool debug = false; - _enter(""); + kenter(""); /* netfs_perform_write() may shift i_size around the page or from out * of the page to beyond it, but cannot move i_size into or through the @@ -329,7 +329,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq, if (fpos >= i_size) { /* mmap beyond eof. */ - _debug("beyond eof"); + kdebug("beyond eof"); folio_start_writeback(folio); folio_unlock(folio); wreq->nr_group_rel += netfs_folio_written_back(folio); @@ -363,7 +363,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq, } flen -= foff; - _debug("folio %zx %zx %zx", foff, flen, fsize); + kdebug("folio %zx %zx %zx", foff, flen, fsize); /* Deal with discontinuities in the stream of dirty pages. These can * arise from a number of sources: @@ -487,7 +487,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq, for (int s = 0; s < NR_IO_STREAMS; s++) netfs_issue_write(wreq, &wreq->io_streams[s]); - _leave(" = 0"); + kleave(" = 0"); return 0; } @@ -522,7 +522,7 @@ int netfs_writepages(struct address_space *mapping, netfs_stat(&netfs_n_wh_writepages); do { - _debug("wbiter %lx %llx", folio->index, wreq->start + wreq->submitted); + kdebug("wbiter %lx %llx", folio->index, wreq->start + wreq->submitted); /* It appears we don't have to handle cyclic writeback wrapping. 
*/ WARN_ON_ONCE(wreq && folio_pos(folio) < wreq->start + wreq->submitted); @@ -546,14 +546,14 @@ int netfs_writepages(struct address_space *mapping, mutex_unlock(&ictx->wb_lock); netfs_put_request(wreq, false, netfs_rreq_trace_put_return); - _leave(" = %d", error); + kleave(" = %d", error); return error; couldnt_start: netfs_kill_dirty_pages(mapping, wbc, folio); out: mutex_unlock(&ictx->wb_lock); - _leave(" = %d", error); + kleave(" = %d", error); return error; } EXPORT_SYMBOL(netfs_writepages); @@ -590,7 +590,7 @@ int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_c struct folio *folio, size_t copied, bool to_page_end, struct folio **writethrough_cache) { - _enter("R=%x ic=%zu ws=%u cp=%zu tp=%u", + kenter("R=%x ic=%zu ws=%u cp=%zu tp=%u", wreq->debug_id, wreq->iter.count, wreq->wsize, copied, to_page_end); if (!*writethrough_cache) { @@ -624,7 +624,7 @@ int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_contr struct netfs_inode *ictx = netfs_inode(wreq->inode); int ret; - _enter("R=%x", wreq->debug_id); + kenter("R=%x", wreq->debug_id); if (writethrough_cache) netfs_write_folio(wreq, wbc, writethrough_cache); @@ -657,7 +657,7 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t loff_t start = wreq->start; int error = 0; - _enter("%zx", len); + kenter("%zx", len); if (wreq->origin == NETFS_DIO_WRITE) inode_dio_begin(wreq->inode); @@ -665,7 +665,7 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t while (len) { // TODO: Prepare content encryption - _debug("unbuffered %zx", len); + kdebug("unbuffered %zx", len); part = netfs_advance_write(wreq, upload, start, len, false); start += part; len -= part; @@ -684,6 +684,6 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t if (list_empty(&upload->subrequests)) netfs_wake_write_collector(wreq, false); - _leave(" = %d", error); + kleave(" = %d", error); return error; } diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c index dddfa604491a..4a29b0138d75 100644 --- a/fs/nilfs2/dir.c +++ b/fs/nilfs2/dir.c @@ -383,11 +383,39 @@ found: struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct folio **foliop) { - struct nilfs_dir_entry *de = nilfs_get_folio(dir, 0, foliop); + struct folio *folio; + struct nilfs_dir_entry *de, *next_de; + size_t limit; + char *msg; + de = nilfs_get_folio(dir, 0, &folio); if (IS_ERR(de)) return NULL; - return nilfs_next_entry(de); + + limit = nilfs_last_byte(dir, 0); /* is a multiple of chunk size */ + if (unlikely(!limit || le64_to_cpu(de->inode) != dir->i_ino || + !nilfs_match(1, ".", de))) { + msg = "missing '.'"; + goto fail; + } + + next_de = nilfs_next_entry(de); + /* + * If "next_de" has not reached the end of the chunk, there is + * at least one more record. Check whether it matches "..". 
+ */
+	if (unlikely((char *)next_de == (char *)de + nilfs_chunk_size(dir) ||
+		     !nilfs_match(2, "..", next_de))) {
+		msg = "missing '..'";
+		goto fail;
+	}
+	*foliop = folio;
+	return next_de;
+
+fail:
+	nilfs_error(dir->i_sb, "directory #%lu %s", dir->i_ino, msg);
+	folio_release_kmap(folio, de);
+	return NULL;
 }
 
 ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index f1f2573bb18d..1374635e89fa 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -246,35 +246,6 @@ static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
 }
 
 /*
- * Expand the size of a readahead to the size of the rsize, if at least as
- * large as a page, allowing for the possibility that rsize is not pow-2
- * aligned.
- */
-static void cifs_expand_readahead(struct netfs_io_request *rreq)
-{
-	unsigned int rsize = rreq->rsize;
-	loff_t misalignment, i_size = i_size_read(rreq->inode);
-
-	if (rsize < PAGE_SIZE)
-		return;
-
-	if (rsize < INT_MAX)
-		rsize = roundup_pow_of_two(rsize);
-	else
-		rsize = ((unsigned int)INT_MAX + 1) / 2;
-
-	misalignment = rreq->start & (rsize - 1);
-	if (misalignment) {
-		rreq->start -= misalignment;
-		rreq->len += misalignment;
-	}
-
-	rreq->len = round_up(rreq->len, rsize);
-	if (rreq->start < i_size && rreq->len > i_size - rreq->start)
-		rreq->len = i_size - rreq->start;
-}
-
-/*
  * Completion of a request operation.
  */
 static void cifs_rreq_done(struct netfs_io_request *rreq)
@@ -329,7 +300,6 @@ const struct netfs_request_ops cifs_req_ops = {
 	.init_request		= cifs_init_request,
 	.free_request		= cifs_free_request,
 	.free_subrequest	= cifs_free_subrequest,
-	.expand_readahead	= cifs_expand_readahead,
 	.clamp_length		= cifs_clamp_length,
 	.issue_read		= cifs_req_issue_read,
 	.done			= cifs_rreq_done,
diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
index 8d10be1fe18a..c3ee42188d25 100644
--- a/fs/smb/common/smb2pdu.h
+++ b/fs/smb/common/smb2pdu.h
@@ -917,6 +917,40 @@ struct smb2_query_directory_rsp {
 	__u8   Buffer[];
 } __packed;
 
+/* DeviceType Flags */
+#define FILE_DEVICE_CD_ROM			0x00000002
+#define FILE_DEVICE_CD_ROM_FILE_SYSTEM		0x00000003
+#define FILE_DEVICE_DFS				0x00000006
+#define FILE_DEVICE_DISK			0x00000007
+#define FILE_DEVICE_DISK_FILE_SYSTEM		0x00000008
+#define FILE_DEVICE_FILE_SYSTEM			0x00000009
+#define FILE_DEVICE_NAMED_PIPE			0x00000011
+#define FILE_DEVICE_NETWORK			0x00000012
+#define FILE_DEVICE_NETWORK_FILE_SYSTEM		0x00000014
+#define FILE_DEVICE_NULL			0x00000015
+#define FILE_DEVICE_PARALLEL_PORT		0x00000016
+#define FILE_DEVICE_PRINTER			0x00000018
+#define FILE_DEVICE_SERIAL_PORT			0x0000001b
+#define FILE_DEVICE_STREAMS			0x0000001e
+#define FILE_DEVICE_TAPE			0x0000001f
+#define FILE_DEVICE_TAPE_FILE_SYSTEM		0x00000020
+#define FILE_DEVICE_VIRTUAL_DISK		0x00000024
+#define FILE_DEVICE_NETWORK_REDIRECTOR		0x00000028
+
+/* Device Characteristics */
+#define FILE_REMOVABLE_MEDIA			0x00000001
+#define FILE_READ_ONLY_DEVICE			0x00000002
+#define FILE_FLOPPY_DISKETTE			0x00000004
+#define FILE_WRITE_ONCE_MEDIA			0x00000008
+#define FILE_REMOTE_DEVICE			0x00000010
+#define FILE_DEVICE_IS_MOUNTED			0x00000020
+#define FILE_VIRTUAL_VOLUME			0x00000040
+#define FILE_DEVICE_SECURE_OPEN			0x00000100
+#define FILE_CHARACTERISTIC_TS_DEVICE		0x00001000
+#define FILE_CHARACTERISTIC_WEBDAV_DEVICE	0x00002000
+#define FILE_PORTABLE_DEVICE			0x00004000
+#define FILE_DEVICE_ALLOW_APPCONTAINER_TRAVERSAL 0x00020000
+
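The ksmbd hunk that follows stops echoing the raw statfs f_type into the SMB2 FS_DEVICE_INFORMATION reply and instead composes the reply from the DeviceType and Device Characteristics constants defined above. A minimal sketch of that composition, assuming only the filesystem_device_info layout visible in the hunk; the fill_device_info() helper and its share_writable parameter are invented for illustration:

	/* Sketch only; the FILE_* constants come from the smb2pdu.h additions above. */
	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct filesystem_device_info {
		__le32 DeviceType;
		__le32 DeviceCharacteristics;
	} __packed;

	static void fill_device_info(struct filesystem_device_info *info,
				     bool share_writable)
	{
		/* Report a mounted disk device rather than the raw statfs f_type. */
		u32 chars = FILE_DEVICE_IS_MOUNTED;

		/* Read-only shares advertise a read-only device to clients. */
		if (!share_writable)
			chars |= FILE_READ_ONLY_DEVICE;

		info->DeviceType = cpu_to_le32(FILE_DEVICE_DISK);
		info->DeviceCharacteristics = cpu_to_le32(chars);
	}

This mirrors the smb2_get_info_filesystem() change below, where the writable flag is taken from the tree connection (KSMBD_TREE_CONN_FLAG_WRITABLE).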
 /*
  * Maximum number of iovs we need for a set-info request.
  * The largest one is rename/hardlink
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index e7e07891781b..840c71c66b30 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -2051,15 +2051,22 @@ out_err1:
  * @access:		file access flags
  * @disposition:	file disposition flags
  * @may_flags:		set with MAY_ flags
+ * @is_dir:		whether the open flags are being created for a directory
  *
  * Return:      file open flags
  */
 static int smb2_create_open_flags(bool file_present, __le32 access,
 				  __le32 disposition,
-				  int *may_flags)
+				  int *may_flags,
+				  bool is_dir)
 {
 	int oflags = O_NONBLOCK | O_LARGEFILE;
 
+	if (is_dir) {
+		access &= ~FILE_WRITE_DESIRE_ACCESS_LE;
+		ksmbd_debug(SMB, "Discard write access to a directory\n");
+	}
+
 	if (access & FILE_READ_DESIRED_ACCESS_LE &&
 	    access & FILE_WRITE_DESIRE_ACCESS_LE) {
 		oflags |= O_RDWR;
@@ -3167,7 +3174,9 @@ int smb2_open(struct ksmbd_work *work)
 	open_flags = smb2_create_open_flags(file_present, daccess,
 					    req->CreateDisposition,
-					    &may_flags);
+					    &may_flags,
+					    req->CreateOptions & FILE_DIRECTORY_FILE_LE ||
+					    (file_present && S_ISDIR(d_inode(path.dentry)->i_mode)));
 
 	if (!test_tree_conn_flag(tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
 		if (open_flags & (O_CREAT | O_TRUNC)) {
@@ -5314,8 +5323,13 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
 
 		info = (struct filesystem_device_info *)rsp->Buffer;
 
-		info->DeviceType = cpu_to_le32(stfs.f_type);
-		info->DeviceCharacteristics = cpu_to_le32(0x00000020);
+		info->DeviceType = cpu_to_le32(FILE_DEVICE_DISK);
+		info->DeviceCharacteristics =
+			cpu_to_le32(FILE_DEVICE_IS_MOUNTED);
+		if (!test_tree_conn_flag(work->tcon,
+					 KSMBD_TREE_CONN_FLAG_WRITABLE))
+			info->DeviceCharacteristics |=
+				cpu_to_le32(FILE_READ_ONLY_DEVICE);
 		rsp->OutputBufferLength = cpu_to_le32(8);
 		break;
 	}
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index eee7320ab0b0..17e409ceaa33 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -2057,7 +2057,7 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
 		goto out;
 	features = uffdio_api.features;
 	ret = -EINVAL;
-	if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
+	if (uffdio_api.api != UFFD_API)
 		goto err_out;
 	ret = -EPERM;
 	if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
@@ -2081,6 +2081,11 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
 	uffdio_api.features &= ~UFFD_FEATURE_WP_UNPOPULATED;
 	uffdio_api.features &= ~UFFD_FEATURE_WP_ASYNC;
 #endif
+
+	ret = -EINVAL;
+	if (features & ~uffdio_api.features)
+		goto err_out;
+
 	uffdio_api.ioctls = UFFD_API_IOCTLS;
 	ret = -EFAULT;
 	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index e98ba5a5cf79..6503c85b10a3 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -856,7 +856,7 @@ static inline u32 type_flag(u32 type)
 /* only use after check_attach_btf_id() */
 static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
 {
-	return prog->type == BPF_PROG_TYPE_EXT ?
+	return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->dst_prog) ?
 		prog->aux->dst_prog->type : prog->type;
 }
 
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 0ecb17bb6883..ca2a05682a54 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -13,6 +13,32 @@
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
 #endif
 
+/**
+ * SMP_CACHE_ALIGN - align a value to the L2 cacheline size
+ * @x: value to align
+ *
+ * On some architectures, the L2 ("SMP") cacheline size is bigger than the
+ * L1 cacheline size, and sometimes this needs to be accounted for.
+ * + * Return: aligned value. + */ +#ifndef SMP_CACHE_ALIGN +#define SMP_CACHE_ALIGN(x) ALIGN(x, SMP_CACHE_BYTES) +#endif + +/* + * ``__aligned_largest`` aligns a field to the value most optimal for the + * target architecture to perform memory operations. Get the actual value + * to be able to use it anywhere else. + */ +#ifndef __LARGEST_ALIGN +#define __LARGEST_ALIGN sizeof(struct { long x; } __aligned_largest) +#endif + +#ifndef LARGEST_ALIGN +#define LARGEST_ALIGN(x) ALIGN(x, __LARGEST_ALIGN) +#endif + /* * __read_mostly is used to keep rarely changing variables out of frequently * updated cachelines. Its use should be reserved for data that is used @@ -95,6 +121,39 @@ __u8 __cacheline_group_end__##GROUP[0] #endif +/** + * __cacheline_group_begin_aligned - declare an aligned group start + * @GROUP: name of the group + * @...: optional group alignment + * + * The following block inside a struct: + * + * __cacheline_group_begin_aligned(grp); + * field a; + * field b; + * __cacheline_group_end_aligned(grp); + * + * will always be aligned to either the specified alignment or + * ``SMP_CACHE_BYTES``. + */ +#define __cacheline_group_begin_aligned(GROUP, ...) \ + __cacheline_group_begin(GROUP) \ + __aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES) + +/** + * __cacheline_group_end_aligned - declare an aligned group end + * @GROUP: name of the group + * @...: optional alignment (same as was in __cacheline_group_begin_aligned()) + * + * Note that the end marker is aligned to sizeof(long) to allow more precise + * size assertion. It also declares a padding at the end to avoid next field + * falling into this cacheline. + */ +#define __cacheline_group_end_aligned(GROUP, ...) \ + __cacheline_group_end(GROUP) __aligned(sizeof(long)); \ + struct { } __cacheline_group_pad__##GROUP \ + __aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES) + #ifndef CACHELINE_ASSERT_GROUP_MEMBER #define CACHELINE_ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER) \ BUILD_BUG_ON(!(offsetof(TYPE, MEMBER) >= \ diff --git a/include/linux/closure.h b/include/linux/closure.h index 59b8c06b11ff..2af44427107d 100644 --- a/include/linux/closure.h +++ b/include/linux/closure.h @@ -159,6 +159,7 @@ struct closure { #ifdef CONFIG_DEBUG_CLOSURES #define CLOSURE_MAGIC_DEAD 0xc054dead #define CLOSURE_MAGIC_ALIVE 0xc054a11e +#define CLOSURE_MAGIC_STACK 0xc05451cc unsigned int magic; struct list_head all; @@ -323,12 +324,18 @@ static inline void closure_init_stack(struct closure *cl) { memset(cl, 0, sizeof(struct closure)); atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER); +#ifdef CONFIG_DEBUG_CLOSURES + cl->magic = CLOSURE_MAGIC_STACK; +#endif } static inline void closure_init_stack_release(struct closure *cl) { memset(cl, 0, sizeof(struct closure)); atomic_set_release(&cl->remaining, CLOSURE_REMAINING_INITIALIZER); +#ifdef CONFIG_DEBUG_CLOSURES + cl->magic = CLOSURE_MAGIC_STACK; +#endif } /** diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index e213b5508da6..a1ee76936f53 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -181,6 +181,7 @@ struct ethtool_rxfh_context { /* private: driver private data, indirection table, and hash key are * stored sequentially in @data area. Use below helpers to access. 
*/ + u32 key_off; u8 data[] __aligned(sizeof(void *)); }; @@ -196,19 +197,10 @@ static inline u32 *ethtool_rxfh_context_indir(struct ethtool_rxfh_context *ctx) static inline u8 *ethtool_rxfh_context_key(struct ethtool_rxfh_context *ctx) { - return (u8 *)(ethtool_rxfh_context_indir(ctx) + ctx->indir_size); + return &ctx->data[ctx->key_off]; } -static inline size_t ethtool_rxfh_context_size(u32 indir_size, u32 key_size, - u16 priv_size) -{ - size_t indir_bytes = array_size(indir_size, sizeof(u32)); - size_t flex_len; - - flex_len = size_add(size_add(indir_bytes, key_size), - ALIGN(priv_size, sizeof(u32))); - return struct_size_t(struct ethtool_rxfh_context, data, flex_len); -} +void ethtool_rxfh_context_lost(struct net_device *dev, u32 context_id); /* declare a link mode bitmap */ #define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \ @@ -721,6 +713,10 @@ struct ethtool_rxfh_param { * contexts. * @cap_rss_sym_xor_supported: indicates if the driver supports symmetric-xor * RSS. + * @rxfh_indir_space: max size of RSS indirection tables, if indirection table + * size as returned by @get_rxfh_indir_size may change during lifetime + * of the device. Leave as 0 if the table size is constant. + * @rxfh_key_space: same as @rxfh_indir_space, but for the key. * @rxfh_priv_size: size of the driver private data area the core should * allocate for an RSS context (in &struct ethtool_rxfh_context). * @rxfh_max_context_id: maximum (exclusive) supported RSS context ID. If this @@ -938,6 +934,8 @@ struct ethtool_ops { u32 cap_link_lanes_supported:1; u32 cap_rss_ctx_supported:1; u32 cap_rss_sym_xor_supported:1; + u32 rxfh_indir_space; + u16 rxfh_key_space; u16 rxfh_priv_size; u32 rxfh_max_context_id; u32 supported_coalesce_params; diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index bdf7f3eddf0a..4c91a019972b 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -19,6 +19,7 @@ enum fscache_cache_trace; enum fscache_cookie_trace; enum fscache_access_trace; +enum fscache_volume_trace; enum fscache_cache_state { FSCACHE_CACHE_IS_NOT_PRESENT, /* No cache is present for this name */ @@ -97,6 +98,11 @@ extern void fscache_withdraw_cookie(struct fscache_cookie *cookie); extern void fscache_io_error(struct fscache_cache *cache); +extern struct fscache_volume * +fscache_try_get_volume(struct fscache_volume *volume, + enum fscache_volume_trace where); +extern void fscache_put_volume(struct fscache_volume *volume, + enum fscache_volume_trace where); extern void fscache_end_volume_access(struct fscache_volume *volume, struct fscache_cookie *cookie, enum fscache_access_trace why); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index fdad0071d599..360d42f041b0 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1994,7 +1994,9 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 migration_tracking_state[0x1]; u8 reserved_at_ca[0x6]; u8 migration_in_chunks[0x1]; - u8 reserved_at_d1[0xf]; + u8 reserved_at_d1[0x1]; + u8 sf_eq_usage[0x1]; + u8 reserved_at_d3[0xd]; u8 cross_vhca_object_to_object_supported[0x20]; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 586a8f0104d7..1dc6248feb83 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -1979,8 +1979,9 @@ static inline int subsection_map_index(unsigned long pfn) static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) { int idx = subsection_map_index(pfn); + struct mem_section_usage *usage = READ_ONCE(ms->usage); - return 
test_bit(idx, READ_ONCE(ms->usage)->subsection_map); + return usage ? test_bit(idx, usage->subsection_map) : 0; } #else static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 93558645c6d0..607009150b5f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1819,7 +1819,8 @@ enum netdev_reg_state { * @priv_flags: Like 'flags' but invisible to userspace, * see if.h for the definitions * @gflags: Global flags ( kept as legacy ) - * @padded: How much padding added by alloc_netdev() + * @priv_len: Size of the ->priv flexible array + * @priv: Flexible array containing private data * @operstate: RFC2863 operstate * @link_mode: Mapping policy to operstate * @if_port: Selectable AUI, TP, ... @@ -2199,10 +2200,10 @@ struct net_device { unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; - unsigned short padded; + int irq; + u32 priv_len; spinlock_t addr_list_lock; - int irq; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; @@ -2406,7 +2407,10 @@ struct net_device { /** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). */ struct dim_irq_moder *irq_moder; -}; + + u8 priv[] ____cacheline_aligned + __counted_by(priv_len); +} ____cacheline_aligned; #define to_net_dev(d) container_of(d, struct net_device, dev) /* @@ -2596,7 +2600,7 @@ void dev_net_set(struct net_device *dev, struct net *net) */ static inline void *netdev_priv(const struct net_device *dev) { - return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN); + return (void *)dev->priv; } /* Set the sysfs physical device reference for the network logical device @@ -3127,7 +3131,6 @@ static inline void unregister_netdevice(struct net_device *dev) int netdev_refcnt_read(const struct net_device *dev); void free_netdev(struct net_device *dev); -void netdev_freemem(struct net_device *dev); void init_dummy_netdev(struct net_device *dev); struct net_device *netdev_get_xmit_slave(struct net_device *dev, diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h index 1acf5bac7f50..8c236c651d1d 100644 --- a/include/linux/page_ref.h +++ b/include/linux/page_ref.h @@ -230,7 +230,13 @@ static inline int folio_ref_dec_return(struct folio *folio) static inline bool page_ref_add_unless(struct page *page, int nr, int u) { - bool ret = atomic_add_unless(&page->_refcount, nr, u); + bool ret = false; + + rcu_read_lock(); + /* avoid writing to the vmemmap area being remapped */ + if (!page_is_fake_head(page) && page_ref_count(page) != u) + ret = atomic_add_unless(&page->_refcount, nr, u); + rcu_read_unlock(); if (page_ref_tracepoint_active(page_ref_mod_unless)) __page_ref_mod_unless(page, nr, ret); @@ -258,54 +264,9 @@ static inline bool folio_try_get(struct folio *folio) return folio_ref_add_unless(folio, 1, 0); } -static inline bool folio_ref_try_add_rcu(struct folio *folio, int count) -{ -#ifdef CONFIG_TINY_RCU - /* - * The caller guarantees the folio will not be freed from interrupt - * context, so (on !SMP) we only need preemption to be disabled - * and TINY_RCU does that for us. - */ -# ifdef CONFIG_PREEMPT_COUNT - VM_BUG_ON(!in_atomic() && !irqs_disabled()); -# endif - VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio); - folio_ref_add(folio, count); -#else - if (unlikely(!folio_ref_add_unless(folio, count, 0))) { - /* Either the folio has been freed, or will be freed. 
*/ - return false; - } -#endif - return true; -} - -/** - * folio_try_get_rcu - Attempt to increase the refcount on a folio. - * @folio: The folio. - * - * This is a version of folio_try_get() optimised for non-SMP kernels. - * If you are still holding the rcu_read_lock() after looking up the - * page and know that the page cannot have its refcount decreased to - * zero in interrupt context, you can use this instead of folio_try_get(). - * - * Example users include get_user_pages_fast() (as pages are not unmapped - * from interrupt context) and the page cache lookups (as pages are not - * truncated from interrupt context). We also know that pages are not - * frozen in interrupt context for the purposes of splitting or migration. - * - * You can also use this function if you're holding a lock that prevents - * pages being frozen & removed; eg the i_pages lock for the page cache - * or the mmap_lock or page table lock for page tables. In this case, - * it will always succeed, and you could have used a plain folio_get(), - * but it's sometimes more convenient to have a common function called - * from both locked and RCU-protected contexts. - * - * Return: True if the reference count was successfully incremented. - */ -static inline bool folio_try_get_rcu(struct folio *folio) +static inline bool folio_ref_try_add(struct folio *folio, int count) { - return folio_ref_try_add_rcu(folio, 1); + return folio_ref_add_unless(folio, count, 0); } static inline int page_ref_freeze(struct page *page, int count) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 59f1df0cde5a..a0a026d2d244 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -354,11 +354,18 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) * a good order (that's 1MB if you're using 4kB pages) */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE -#define MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER +#define PREFERRED_MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER #else -#define MAX_PAGECACHE_ORDER 8 +#define PREFERRED_MAX_PAGECACHE_ORDER 8 #endif +/* + * xas_split_alloc() does not support arbitrary orders. This implies no + * 512MB THP on ARM64 with 64KB base page size. + */ +#define MAX_XAS_ORDER (XA_CHUNK_SHIFT * 2 - 1) +#define MAX_PAGECACHE_ORDER min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER) + /** * mapping_set_large_folios() - Indicate the file supports large folios. * @mapping: The file. 
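To see what the new MAX_PAGECACHE_ORDER clamp in pagemap.h evaluates to: XA_CHUNK_SHIFT is 6 on !CONFIG_BASE_SMALL kernels, so MAX_XAS_ORDER is 11, while HPAGE_PMD_ORDER on arm64 with 64KB pages is 13 (PMD_SHIFT 29 minus PAGE_SHIFT 16). The clamp therefore caps page-cache folios at order 11 (128MB) instead of the 512MB PMD size. A standalone sketch with those values hard-coded as assumptions:

	#include <stdio.h>

	#define XA_CHUNK_SHIFT	6	/* assumed: !CONFIG_BASE_SMALL */
	#define MAX_XAS_ORDER	(XA_CHUNK_SHIFT * 2 - 1)	/* 11 */
	#define HPAGE_PMD_ORDER	13	/* assumed: arm64 with 64KB pages */
	#define PAGE_SHIFT	16	/* assumed: 64KB pages */

	int main(void)
	{
		int max_order = MAX_XAS_ORDER < HPAGE_PMD_ORDER ?
				MAX_XAS_ORDER : HPAGE_PMD_ORDER;

		/* order 11 + page shift 16 -> 2^27 bytes = 128MB */
		printf("MAX_PAGECACHE_ORDER = %d (%lu MB folios)\n",
		       max_order, (1UL << (max_order + PAGE_SHIFT)) >> 20);
		return 0;
	}

This prints "MAX_PAGECACHE_ORDER = 11 (128 MB folios)", matching the comment in the hunk that 512MB THPs are no longer representable in the page cache on that configuration.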
diff --git a/include/linux/sched.h b/include/linux/sched.h index 5ff5e65a4627..d07d3645d2d5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2198,13 +2198,13 @@ static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); } extern void sched_set_stop_task(int cpu, struct task_struct *stop); #ifdef CONFIG_MEM_ALLOC_PROFILING -static inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag) +static __always_inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag) { swap(current->alloc_tag, tag); return tag; } -static inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old) +static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old) { #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG WARN(current->alloc_tag != tag, "current->alloc_tag was changed:\n"); diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 98fdef6e28f2..67b9a15a5330 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -533,6 +533,9 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @queue_empty: signal green light for opportunistically skipping the queue * for spi_sync transfers. * @must_async: disable all fast paths in the core + * @defer_optimize_message: set to true if controller cannot pre-optimize messages + * and needs to defer the optimization step until the message is actually + * being transferred * * Each SPI controller can communicate with one or more @spi_device * children. These make a small bus, sharing MOSI, MISO and SCK signals @@ -776,6 +779,7 @@ struct spi_controller { /* Flag for enabling opportunistic skipping of the queue in spi_sync */ bool queue_empty; bool must_async; + bool defer_optimize_message; }; static inline void *spi_controller_get_devdata(struct spi_controller *ctlr) diff --git a/include/linux/swap.h b/include/linux/swap.h index bd450023b9a4..e685e93ba354 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -354,7 +354,8 @@ static inline swp_entry_t page_swap_entry(struct page *page) } /* linux/mm/workingset.c */ -bool workingset_test_recent(void *shadow, bool file, bool *workingset); +bool workingset_test_recent(void *shadow, bool file, bool *workingset, + bool flush); void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages); void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg); void workingset_refault(struct folio *folio, void *shadow); diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 21a67dc9efe8..e93ee8d936a9 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -490,9 +490,16 @@ static inline void tpm_buf_append_empty_auth(struct tpm_buf *buf, u32 handle) { } #endif + +static inline struct tpm2_auth *tpm2_chip_auth(struct tpm_chip *chip) +{ #ifdef CONFIG_TCG_TPM2_HMAC + return chip->auth; +#else + return NULL; +#endif +} -int tpm2_start_auth_session(struct tpm_chip *chip); void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf, u32 handle, u8 *name); void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf, @@ -504,9 +511,27 @@ static inline void tpm_buf_append_hmac_session_opt(struct tpm_chip *chip, u8 *passphrase, int passphraselen) { - tpm_buf_append_hmac_session(chip, buf, attributes, passphrase, - passphraselen); + struct tpm_header *head; + int offset; + + if (tpm2_chip_auth(chip)) { + tpm_buf_append_hmac_session(chip, buf, attributes, passphrase, passphraselen); + } else { + offset = buf->handles * 4 + TPM_HEADER_SIZE; + head = 
(struct tpm_header *)buf->data; + + /* + * If the only sessions are optional, the command tag must change to + * TPM2_ST_NO_SESSIONS. + */ + if (tpm_buf_length(buf) == offset) + head->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS); + } } + +#ifdef CONFIG_TCG_TPM2_HMAC + +int tpm2_start_auth_session(struct tpm_chip *chip); void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf); int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf, int rc); @@ -521,56 +546,6 @@ static inline int tpm2_start_auth_session(struct tpm_chip *chip) static inline void tpm2_end_auth_session(struct tpm_chip *chip) { } -static inline void tpm_buf_append_name(struct tpm_chip *chip, - struct tpm_buf *buf, - u32 handle, u8 *name) -{ - tpm_buf_append_u32(buf, handle); - /* count the number of handles in the upper bits of flags */ - buf->handles++; -} -static inline void tpm_buf_append_hmac_session(struct tpm_chip *chip, - struct tpm_buf *buf, - u8 attributes, u8 *passphrase, - int passphraselen) -{ - /* offset tells us where the sessions area begins */ - int offset = buf->handles * 4 + TPM_HEADER_SIZE; - u32 len = 9 + passphraselen; - - if (tpm_buf_length(buf) != offset) { - /* not the first session so update the existing length */ - len += get_unaligned_be32(&buf->data[offset]); - put_unaligned_be32(len, &buf->data[offset]); - } else { - tpm_buf_append_u32(buf, len); - } - /* auth handle */ - tpm_buf_append_u32(buf, TPM2_RS_PW); - /* nonce */ - tpm_buf_append_u16(buf, 0); - /* attributes */ - tpm_buf_append_u8(buf, 0); - /* passphrase */ - tpm_buf_append_u16(buf, passphraselen); - tpm_buf_append(buf, passphrase, passphraselen); -} -static inline void tpm_buf_append_hmac_session_opt(struct tpm_chip *chip, - struct tpm_buf *buf, - u8 attributes, - u8 *passphrase, - int passphraselen) -{ - int offset = buf->handles * 4 + TPM_HEADER_SIZE; - struct tpm_header *head = (struct tpm_header *) buf->data; - - /* - * if the only sessions are optional, the command tag - * must change to TPM2_ST_NO_SESSIONS - */ - if (tpm_buf_length(buf) == offset) - head->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS); -} static inline void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf) { diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 6f992aff74ae..192d72c8b465 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1598,6 +1598,7 @@ struct cfg80211_color_change_settings { * * Used to pass interface combination parameters * + * @radio_idx: wiphy radio index or -1 for global * @num_different_channels: the number of different channels we want * to use for verification * @radar_detect: a bitmap where each bit corresponds to a channel @@ -1611,6 +1612,7 @@ struct cfg80211_color_change_settings { * the verification */ struct iface_combination_params { + int radio_idx; int num_different_channels; u8 radar_detect; int iftype_num[NUM_NL80211_IFTYPES]; @@ -4580,6 +4582,8 @@ struct mgmt_frame_regs { * * @set_hw_timestamp: Enable/disable HW timestamping of TM/FTM frames. * @set_ttlm: set the TID to link mapping. + * @get_radio_mask: get bitmask of radios in use. 
+ * (invoked with the wiphy mutex held) */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -4941,6 +4945,7 @@ struct cfg80211_ops { struct cfg80211_set_hw_timestamp *hwts); int (*set_ttlm)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ttlm_params *params); + u32 (*get_radio_mask)(struct wiphy *wiphy, struct net_device *dev); }; /* @@ -5046,7 +5051,9 @@ struct ieee80211_iface_limit { * struct ieee80211_iface_combination - possible interface combination * * With this structure the driver can describe which interface - * combinations it supports concurrently. + * combinations it supports concurrently. When set in a struct wiphy_radio, + * the combinations refer to combinations of interfaces currently active on + * that radio. * * Examples: * @@ -5406,6 +5413,38 @@ struct wiphy_iftype_akm_suites { int n_akm_suites; }; +/** + * struct wiphy_radio_freq_range - wiphy frequency range + * @start_freq: start range edge frequency (kHz) + * @end_freq: end range edge frequency (kHz) + */ +struct wiphy_radio_freq_range { + u32 start_freq; + u32 end_freq; +}; + + +/** + * struct wiphy_radio - physical radio of a wiphy + * This structure describes a physical radio belonging to a wiphy. + * It is used to describe concurrent-channel capabilities. Only one channel + * can be active on the radio described by struct wiphy_radio. + * + * @freq_range: frequency range that the radio can operate on. + * @n_freq_range: number of elements in @freq_range + * + * @iface_combinations: Valid interface combinations array, should not + * list single interface types. + * @n_iface_combinations: number of entries in @iface_combinations array. + */ +struct wiphy_radio { + const struct wiphy_radio_freq_range *freq_range; + int n_freq_range; + + const struct ieee80211_iface_combination *iface_combinations; + int n_iface_combinations; +}; + #define CFG80211_HW_TIMESTAMP_ALL_PEERS 0xffff /** @@ -5624,6 +5663,9 @@ struct wiphy_iftype_akm_suites { * A value of %CFG80211_HW_TIMESTAMP_ALL_PEERS indicates the driver * supports enabling HW timestamping for all peers (i.e. no need to * specify a mac address). 
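As a rough sketch of how the new struct wiphy_radio plumbing above fits together (all values here are illustrative assumptions, not taken from any real driver), a single 2.4 GHz radio could be described as:

static const struct wiphy_radio_freq_range demo_2g_range[] = {
	{ .start_freq = 2400000, .end_freq = 2500000 },	/* kHz */
};

static const struct ieee80211_iface_limit demo_2g_limits[] = {
	{ .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
	{ .max = 1, .types = BIT(NL80211_IFTYPE_STATION) },
};

/* per-radio combinations: a single active channel on this radio */
static const struct ieee80211_iface_combination demo_2g_comb = {
	.limits = demo_2g_limits,
	.n_limits = ARRAY_SIZE(demo_2g_limits),
	.max_interfaces = 2,
	.num_different_channels = 1,
};

static const struct wiphy_radio demo_radios[] = {
	{
		.freq_range = demo_2g_range,
		.n_freq_range = ARRAY_SIZE(demo_2g_range),
		.iface_combinations = &demo_2g_comb,
		.n_iface_combinations = 1,
	},
};

static void demo_set_radios(struct wiphy *wiphy)
{
	wiphy->radio = demo_radios;
	wiphy->n_radio = ARRAY_SIZE(demo_radios);
}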
+ * + * @radio: radios belonging to this wiphy + * @n_radio: number of radios */ struct wiphy { struct mutex mtx; @@ -5774,6 +5816,9 @@ struct wiphy { u16 hw_timestamp_max_peers; + int n_radio; + const struct wiphy_radio *radio; + char priv[] __aligned(NETDEV_ALIGN); }; @@ -6464,6 +6509,17 @@ static inline bool cfg80211_channel_is_psc(struct ieee80211_channel *chan) } /** + * cfg80211_radio_chandef_valid - Check if the radio supports the chandef + * + * @radio: wiphy radio + * @chandef: chandef for current channel + * + * Return: whether or not the given chandef is valid for the given radio + */ +bool cfg80211_radio_chandef_valid(const struct wiphy_radio *radio, + const struct cfg80211_chan_def *chandef); + +/** * ieee80211_get_response_rate - get basic rate for a given rate * * @sband: the band to look for rates in diff --git a/include/net/libeth/cache.h b/include/net/libeth/cache.h new file mode 100644 index 000000000000..bdb0c043ce61 --- /dev/null +++ b/include/net/libeth/cache.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2024 Intel Corporation */ + +#ifndef __LIBETH_CACHE_H +#define __LIBETH_CACHE_H + +#include <linux/cache.h> + +/** + * libeth_cacheline_group_assert - make sure cacheline group size is expected + * @type: type of the structure containing the group + * @grp: group name inside the struct + * @sz: expected group size + */ +#if defined(CONFIG_64BIT) && SMP_CACHE_BYTES == 64 +#define libeth_cacheline_group_assert(type, grp, sz) \ + static_assert(offsetof(type, __cacheline_group_end__##grp) - \ + offsetofend(type, __cacheline_group_begin__##grp) == \ + (sz)) +#define __libeth_cacheline_struct_assert(type, sz) \ + static_assert(sizeof(type) == (sz)) +#else /* !CONFIG_64BIT || SMP_CACHE_BYTES != 64 */ +#define libeth_cacheline_group_assert(type, grp, sz) \ + static_assert(offsetof(type, __cacheline_group_end__##grp) - \ + offsetofend(type, __cacheline_group_begin__##grp) <= \ + (sz)) +#define __libeth_cacheline_struct_assert(type, sz) \ + static_assert(sizeof(type) <= (sz)) +#endif /* !CONFIG_64BIT || SMP_CACHE_BYTES != 64 */ + +#define __libeth_cls1(sz1) SMP_CACHE_ALIGN(sz1) +#define __libeth_cls2(sz1, sz2) (SMP_CACHE_ALIGN(sz1) + SMP_CACHE_ALIGN(sz2)) +#define __libeth_cls3(sz1, sz2, sz3) \ + (SMP_CACHE_ALIGN(sz1) + SMP_CACHE_ALIGN(sz2) + SMP_CACHE_ALIGN(sz3)) +#define __libeth_cls(...) \ + CONCATENATE(__libeth_cls, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__) + +/** + * libeth_cacheline_struct_assert - make sure CL-based struct size is expected + * @type: type of the struct + * @...: from 1 to 3 CL group sizes (read-mostly, read-write, cold) + * + * When a struct contains several CL groups, it's difficult to predict its size + * on different architectures. The macro instead takes sizes of all of the + * groups the structure contains and generates the final struct size. + */ +#define libeth_cacheline_struct_assert(type, ...) \ + __libeth_cacheline_struct_assert(type, __libeth_cls(__VA_ARGS__)); \ + static_assert(__alignof(type) >= SMP_CACHE_BYTES) + +/** + * libeth_cacheline_set_assert - make sure CL-based struct layout is expected + * @type: type of the struct + * @ro: expected size of the read-mostly group + * @rw: expected size of the read-write group + * @c: expected size of the cold group + * + * Check that each group size is expected and then do final struct size check. 
+ */ +#define libeth_cacheline_set_assert(type, ro, rw, c) \ + libeth_cacheline_group_assert(type, read_mostly, ro); \ + libeth_cacheline_group_assert(type, read_write, rw); \ + libeth_cacheline_group_assert(type, cold, c); \ + libeth_cacheline_struct_assert(type, ro, rw, c) + +#endif /* __LIBETH_CACHE_H */ diff --git a/include/net/libeth/rx.h b/include/net/libeth/rx.h index f29ea3e34c6c..43574bd6612f 100644 --- a/include/net/libeth/rx.h +++ b/include/net/libeth/rx.h @@ -17,6 +17,8 @@ #define LIBETH_MAX_HEADROOM LIBETH_SKB_HEADROOM /* Link layer / L2 overhead: Ethernet, 2 VLAN tags (C + S), FCS */ #define LIBETH_RX_LL_LEN (ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN) +/* Maximum supported L2-L4 header length */ +#define LIBETH_MAX_HEAD roundup_pow_of_two(max(MAX_HEADER, 256)) /* Always use order-0 pages */ #define LIBETH_RX_PAGE_ORDER 0 @@ -44,12 +46,26 @@ struct libeth_fqe { } __aligned_largest; /** + * enum libeth_fqe_type - enum representing types of Rx buffers + * @LIBETH_FQE_MTU: buffer size is determined by MTU + * @LIBETH_FQE_SHORT: buffer size is smaller than MTU, for short frames + * @LIBETH_FQE_HDR: buffer size is ``LIBETH_MAX_HEAD``-sized, for headers + */ +enum libeth_fqe_type { + LIBETH_FQE_MTU = 0U, + LIBETH_FQE_SHORT, + LIBETH_FQE_HDR, +}; + +/** * struct libeth_fq - structure representing a buffer (fill) queue * @fp: hotpath part of the structure * @pp: &page_pool for buffer management * @fqes: array of Rx buffers * @truesize: size to allocate per buffer, w/overhead * @count: number of descriptors/buffers the queue has + * @type: type of the buffers this queue has + * @hsplit: flag whether header split is enabled * @buf_len: HW-writeable length per each buffer * @nid: ID of the closest NUMA node with memory */ @@ -63,6 +79,9 @@ struct libeth_fq { ); /* Cold fields */ + enum libeth_fqe_type type:2; + bool hsplit:1; + u32 buf_len; int nid; }; diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 5f36b3d979dd..0a04eaf5343c 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -250,6 +250,7 @@ struct ieee80211_chan_req { * @min_def: the minimum channel definition currently required. * @ap: the channel definition the AP actually is operating as, * for use with (wider bandwidth) OFDMA + * @radio_idx: index of the wiphy radio used for this channel * @rx_chains_static: The number of RX chains that must always be * active on the channel to receive MIMO transmissions * @rx_chains_dynamic: The number of RX chains that must be enabled @@ -264,6 +265,7 @@ struct ieee80211_chanctx_conf { struct cfg80211_chan_def min_def; struct cfg80211_chan_def ap; + int radio_idx; u8 rx_chains_static, rx_chains_dynamic; bool radar_enabled; @@ -2767,14 +2769,6 @@ struct ieee80211_txq { * @IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA: Hardware supports buffer STA on * TDLS links. * - * @IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP: The driver requires the - * mgd_prepare_tx() callback to be called before transmission of a - * deauthentication frame in case the association was completed but no - * beacon was heard. This is required in multi-channel scenarios, where the - * virtual interface might not be given air time for the transmission of - * the frame, as it is not synced with the AP/P2P GO yet, and thus the - * deauthentication frame might not be transmitted. - * * @IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP: The driver (or firmware) doesn't * support QoS NDP for AP probing - that's most likely a driver bug.
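The libeth_cacheline_*_assert() macros introduced above boil down to compile-time offset arithmetic. A standalone sketch of the same idea, with zero-size arrays standing in for the kernel's cacheline-group markers (GNU C, illustrative only):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define DEMO_SMP_CACHE_BYTES 64

struct demo {
	uint8_t __cacheline_group_begin__read_mostly[0];
	uint64_t a, b;	/* 16 bytes of read-mostly data */
	uint8_t __cacheline_group_end__read_mostly[0];
} __attribute__((aligned(DEMO_SMP_CACHE_BYTES)));

/* the strict ==-form used on 64-bit builds with 64-byte cachelines */
static_assert(offsetof(struct demo, __cacheline_group_end__read_mostly) -
	      offsetof(struct demo, __cacheline_group_begin__read_mostly) == 16,
	      "read-mostly group must stay exactly 16 bytes");

int main(void) { return 0; }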
* @@ -2874,7 +2868,6 @@ enum ieee80211_hw_flags { IEEE80211_HW_REPORTS_LOW_ACK, IEEE80211_HW_SUPPORTS_TX_FRAG, IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA, - IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP, IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP, IEEE80211_HW_BUFF_MMPDU_TXQ, IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW, @@ -3787,13 +3780,15 @@ enum ieee80211_reconfig_type { * @success: whether the frame exchange was successful, only * used with the mgd_complete_tx() method, and then only * valid for auth and (re)assoc. + * @was_assoc: set if this call is due to deauth/disassoc + * while just having been associated * @link_id: the link id on which the frame will be TX'ed. * Only used with the mgd_prepare_tx() method. */ struct ieee80211_prep_tx_info { u16 duration; u16 subtype; - u8 success:1; + u8 success:1, was_assoc:1; int link_id; }; @@ -4242,12 +4237,9 @@ struct ieee80211_prep_tx_info { * yet it need not necessarily be given airtime, in particular since any * transmission to a P2P GO needs to be synchronized against the GO's * powersave state. mac80211 will call this function before transmitting a - * management frame prior to having successfully associated to allow the - * driver to give it channel time for the transmission, to get a response - * and to be able to synchronize with the GO. - * For drivers that set %IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP, mac80211 - * would also call this function before transmitting a deauthentication - * frame in case that no beacon was heard from the AP/P2P GO. + * management frame prior to transmitting that frame to allow the driver + * to give it channel time for the transmission, to get a response and be + * able to synchronize with the GO. * The callback will be called before each transmission and upon return * mac80211 will transmit the frame right away. * Additional information is passed in the &struct ieee80211_prep_tx_info diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h index b70bcc14ceda..50569fed7868 100644 --- a/include/net/page_pool/types.h +++ b/include/net/page_pool/types.h @@ -129,6 +129,16 @@ struct page_pool_stats { }; #endif +/* The whole frag API block must stay within one cacheline. On 32-bit systems, + * sizeof(long) == sizeof(int), so that the block size is ``3 * sizeof(long)``. + * On 64-bit systems, the actual size is ``2 * sizeof(long) + sizeof(int)``. + * The closest pow-2 to both of them is ``4 * sizeof(long)``, so just use that + * one for simplicity. + * Having it aligned to a cacheline boundary may be excessive and doesn't bring + * any good. + */ +#define PAGE_POOL_FRAG_GROUP_ALIGN (4 * sizeof(long)) + struct page_pool { struct page_pool_params_fast p; @@ -142,19 +152,11 @@ struct page_pool { bool system:1; /* This is a global percpu pool */ #endif - /* The following block must stay within one cacheline. On 32-bit - * systems, sizeof(long) == sizeof(int), so that the block size is - * ``3 * sizeof(long)``. On 64-bit systems, the actual size is - * ``2 * sizeof(long) + sizeof(int)``. The closest pow-2 to both of - * them is ``4 * sizeof(long)``, so just use that one for simplicity. - * Having it aligned to a cacheline boundary may be excessive and - * doesn't bring any good. 
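The PAGE_POOL_FRAG_GROUP_ALIGN sizing argument from the comment above can be checked in isolation; a minimal standalone sketch (netmem_ref is modeled here as unsigned long, assumed to be pointer-sized):

#include <assert.h>

#define PAGE_POOL_FRAG_GROUP_ALIGN (4 * sizeof(long))

struct frag_group {
	long frag_users;
	unsigned long frag_page;	/* stand-in for netmem_ref */
	unsigned int frag_offset;
};

/* 3*sizeof(long) on 32-bit, 2*sizeof(long)+sizeof(int) (padded) on 64-bit */
static_assert(sizeof(struct frag_group) <= PAGE_POOL_FRAG_GROUP_ALIGN,
	      "frag API block must fit within one aligned group");

int main(void) { return 0; }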
- */ - __cacheline_group_begin(frag) __aligned(4 * sizeof(long)); + __cacheline_group_begin_aligned(frag, PAGE_POOL_FRAG_GROUP_ALIGN); long frag_users; netmem_ref frag_page; unsigned int frag_offset; - __cacheline_group_end(frag); + __cacheline_group_end_aligned(frag, PAGE_POOL_FRAG_GROUP_ALIGN); struct delayed_work release_dw; void (*disconnect)(void *pool); diff --git a/include/net/psample.h b/include/net/psample.h index c52e9ebd88dd..5071b5fc2b59 100644 --- a/include/net/psample.h +++ b/include/net/psample.h @@ -38,13 +38,15 @@ struct sk_buff; #if IS_ENABLED(CONFIG_PSAMPLE) -void psample_sample_packet(struct psample_group *group, struct sk_buff *skb, - u32 sample_rate, const struct psample_metadata *md); +void psample_sample_packet(struct psample_group *group, + const struct sk_buff *skb, u32 sample_rate, + const struct psample_metadata *md); #else static inline void psample_sample_packet(struct psample_group *group, - struct sk_buff *skb, u32 sample_rate, + const struct sk_buff *skb, + u32 sample_rate, const struct psample_metadata *md) { } diff --git a/include/net/tcx.h b/include/net/tcx.h index 72a3e75e539f..5ce0ce9e0c02 100644 --- a/include/net/tcx.h +++ b/include/net/tcx.h @@ -13,7 +13,7 @@ struct mini_Qdisc; struct tcx_entry { struct mini_Qdisc __rcu *miniq; struct bpf_mprog_bundle bundle; - bool miniq_active; + u32 miniq_active; struct rcu_head rcu; }; @@ -125,11 +125,16 @@ static inline void tcx_skeys_dec(bool ingress) tcx_dec(); } -static inline void tcx_miniq_set_active(struct bpf_mprog_entry *entry, - const bool active) +static inline void tcx_miniq_inc(struct bpf_mprog_entry *entry) { ASSERT_RTNL(); - tcx_entry(entry)->miniq_active = active; + tcx_entry(entry)->miniq_active++; +} + +static inline void tcx_miniq_dec(struct bpf_mprog_entry *entry) +{ + ASSERT_RTNL(); + tcx_entry(entry)->miniq_active--; } static inline bool tcx_entry_is_active(struct bpf_mprog_entry *entry) diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index fadf406b5260..c978fa2893a5 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -2556,9 +2556,10 @@ TRACE_EVENT(btrfs_extent_map_shrinker_count, TRACE_EVENT(btrfs_extent_map_shrinker_scan_enter, - TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_to_scan, long nr), + TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_to_scan, long nr, + u64 last_root_id, u64 last_ino), - TP_ARGS(fs_info, nr_to_scan, nr), + TP_ARGS(fs_info, nr_to_scan, nr, last_root_id, last_ino), TP_STRUCT__entry_btrfs( __field( long, nr_to_scan ) @@ -2570,8 +2571,8 @@ TRACE_EVENT(btrfs_extent_map_shrinker_scan_enter, TP_fast_assign_btrfs(fs_info, __entry->nr_to_scan = nr_to_scan; __entry->nr = nr; - __entry->last_root_id = fs_info->extent_map_shrinker_last_root; - __entry->last_ino = fs_info->extent_map_shrinker_last_ino; + __entry->last_root_id = last_root_id; + __entry->last_ino = last_ino; ), TP_printk_btrfs("nr_to_scan=%ld nr=%ld last_root=%llu(%s) last_ino=%llu", @@ -2581,9 +2582,10 @@ TRACE_EVENT(btrfs_extent_map_shrinker_scan_enter, TRACE_EVENT(btrfs_extent_map_shrinker_scan_exit, - TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_dropped, long nr), + TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_dropped, long nr, + u64 last_root_id, u64 last_ino), - TP_ARGS(fs_info, nr_dropped, nr), + TP_ARGS(fs_info, nr_dropped, nr, last_root_id, last_ino), TP_STRUCT__entry_btrfs( __field( long, nr_dropped ) @@ -2595,8 +2597,8 @@ TRACE_EVENT(btrfs_extent_map_shrinker_scan_exit, TP_fast_assign_btrfs(fs_info, 
__entry->nr_dropped = nr_dropped; __entry->nr = nr; - __entry->last_root_id = fs_info->extent_map_shrinker_last_root; - __entry->last_ino = fs_info->extent_map_shrinker_last_ino; + __entry->last_root_id = last_root_id; + __entry->last_ino = last_ino; ), TP_printk_btrfs("nr_dropped=%ld nr=%ld last_root=%llu(%s) last_ino=%llu", diff --git a/include/trace/events/fscache.h b/include/trace/events/fscache.h index a6190aa1b406..f1a73aa83fbb 100644 --- a/include/trace/events/fscache.h +++ b/include/trace/events/fscache.h @@ -35,12 +35,14 @@ enum fscache_volume_trace { fscache_volume_get_cookie, fscache_volume_get_create_work, fscache_volume_get_hash_collision, + fscache_volume_get_withdraw, fscache_volume_free, fscache_volume_new_acquire, fscache_volume_put_cookie, fscache_volume_put_create_work, fscache_volume_put_hash_collision, fscache_volume_put_relinquish, + fscache_volume_put_withdraw, fscache_volume_see_create_work, fscache_volume_see_hash_wake, fscache_volume_wait_create_work, @@ -120,12 +122,14 @@ enum fscache_access_trace { EM(fscache_volume_get_cookie, "GET cook ") \ EM(fscache_volume_get_create_work, "GET creat") \ EM(fscache_volume_get_hash_collision, "GET hcoll") \ + EM(fscache_volume_get_withdraw, "GET withd") \ EM(fscache_volume_free, "FREE ") \ EM(fscache_volume_new_acquire, "NEW acq ") \ EM(fscache_volume_put_cookie, "PUT cook ") \ EM(fscache_volume_put_create_work, "PUT creat") \ EM(fscache_volume_put_hash_collision, "PUT hcoll") \ EM(fscache_volume_put_relinquish, "PUT relnq") \ + EM(fscache_volume_put_withdraw, "PUT withd") \ EM(fscache_volume_see_create_work, "SEE creat") \ EM(fscache_volume_see_hash_wake, "SEE hwake") \ E_(fscache_volume_wait_create_work, "WAIT crea") diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h index aaed8e12ad0b..926b1deb1116 100644 --- a/include/uapi/drm/panthor_drm.h +++ b/include/uapi/drm/panthor_drm.h @@ -802,6 +802,9 @@ struct drm_panthor_queue_submit { * Must be 64-bit/8-byte aligned (the size of a CS instruction) * * Can be zero if stream_addr is zero too. + * + * When the stream size is zero, the queue submit serves as a + * synchronization point. */ __u32 stream_size; @@ -822,6 +825,8 @@ struct drm_panthor_queue_submit { * ensure the GPU doesn't get garbage when reading the indirect command * stream buffers. If you want the cache flush to happen * unconditionally, pass a zero here. + * + * Ignored when stream_size is zero. */ __u32 latest_flush; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 6ae3997061b6..f97f5adc8d51 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -2052,6 +2052,10 @@ enum nl80211_commands { * @NL80211_ATTR_INTERFACE_COMBINATIONS: Nested attribute listing the supported * interface combinations. In each nested item, it contains attributes * defined in &enum nl80211_if_combination_attrs. + * If the wiphy uses multiple radios (@NL80211_ATTR_WIPHY_RADIOS is set), + * this attribute contains the interface combinations of the first radio. + * See @NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS for the global wiphy + * combinations for the sum of all radios. 
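Userspace can walk the new NL80211_ATTR_WIPHY_RADIOS nesting with libnl; a sketch under the assumption that libnl-3 and kernel headers providing the new NL80211_WIPHY_RADIO_ATTR_* values are available (error handling elided):

#include <stdio.h>
#include <netlink/attr.h>
#include <linux/nl80211.h>

static void dump_wiphy_radios(struct nlattr *radios)
{
	struct nlattr *radio;
	int rem;

	nla_for_each_nested(radio, radios, rem) {
		struct nlattr *tb[NL80211_WIPHY_RADIO_ATTR_MAX + 1];

		if (nla_parse_nested(tb, NL80211_WIPHY_RADIO_ATTR_MAX,
				     radio, NULL) < 0)
			continue;
		if (tb[NL80211_WIPHY_RADIO_ATTR_INDEX])
			printf("radio %u\n",
			       nla_get_u32(tb[NL80211_WIPHY_RADIO_ATTR_INDEX]));
	}
}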
* @NL80211_ATTR_SOFTWARE_IFTYPES: Nested attribute (just like * %NL80211_ATTR_SUPPORTED_IFTYPES) containing the interface types that * are managed in software: interfaces of these types aren't subject to @@ -2856,6 +2860,14 @@ enum nl80211_commands { * %NL80211_CMD_ASSOCIATE indicating the SPP A-MSDUs * are used on this connection * + * @NL80211_ATTR_WIPHY_RADIOS: Nested attribute describing physical radios + * belonging to this wiphy. See &enum nl80211_wiphy_radio_attrs. + * + * @NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS: Nested attribute listing the + * supported interface combinations for all radios combined. In each + * nested item, it contains attributes defined in + * &enum nl80211_if_combination_attrs. + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -3401,6 +3413,9 @@ enum nl80211_attrs { NL80211_ATTR_ASSOC_SPP_AMSDU, + NL80211_ATTR_WIPHY_RADIOS, + NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -8005,4 +8020,54 @@ enum nl80211_ap_settings_flags { NL80211_AP_SETTINGS_SA_QUERY_OFFLOAD_SUPPORT = 1 << 1, }; +/** + * enum nl80211_wiphy_radio_attrs - wiphy radio attributes + * + * @__NL80211_WIPHY_RADIO_ATTR_INVALID: Invalid + * + * @NL80211_WIPHY_RADIO_ATTR_INDEX: Index of this radio (u32) + * @NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE: Frequency range supported by this + * radio. Attribute may be present multiple times. + * @NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION: Supported interface + * combination for this radio. Attribute may be present multiple times + * and contains attributes defined in &enum nl80211_if_combination_attrs. + * + * @__NL80211_WIPHY_RADIO_ATTR_LAST: Internal + * @NL80211_WIPHY_RADIO_ATTR_MAX: Highest attribute + */ +enum nl80211_wiphy_radio_attrs { + __NL80211_WIPHY_RADIO_ATTR_INVALID, + + NL80211_WIPHY_RADIO_ATTR_INDEX, + NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE, + NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION, + + /* keep last */ + __NL80211_WIPHY_RADIO_ATTR_LAST, + NL80211_WIPHY_RADIO_ATTR_MAX = __NL80211_WIPHY_RADIO_ATTR_LAST - 1, +}; + +/** + * enum nl80211_wiphy_radio_freq_range - wiphy radio frequency range + * + * @__NL80211_WIPHY_RADIO_FREQ_ATTR_INVALID: Invalid + * + * @NL80211_WIPHY_RADIO_FREQ_ATTR_START: Frequency range start (u32). + * The unit is kHz. + * @NL80211_WIPHY_RADIO_FREQ_ATTR_END: Frequency range end (u32). + * The unit is kHz. 
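Since both range edges are expressed in kHz, validating a channel against a radio reduces to a span check. A userspace model of the kind of test cfg80211_radio_chandef_valid() performs (simplified sketch, not the kernel implementation):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct freq_range { uint32_t start_khz, end_khz; };

static bool span_in_ranges(uint32_t lo_khz, uint32_t hi_khz,
			   const struct freq_range *r, int n)
{
	for (int i = 0; i < n; i++)
		if (lo_khz >= r[i].start_khz && hi_khz <= r[i].end_khz)
			return true;
	return false;
}

int main(void)
{
	const struct freq_range ranges[] = { { 2400000, 2500000 } };

	/* 20 MHz channel centered on 2437 MHz (channel 6): fits */
	printf("%d\n", span_in_ranges(2427000, 2447000, ranges, 1));
	return 0;
}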
+ * + * @__NL80211_WIPHY_RADIO_FREQ_ATTR_LAST: Internal + * @NL80211_WIPHY_RADIO_FREQ_ATTR_MAX: Highest attribute + */ +enum nl80211_wiphy_radio_freq_range { + __NL80211_WIPHY_RADIO_FREQ_ATTR_INVALID, + + NL80211_WIPHY_RADIO_FREQ_ATTR_START, + NL80211_WIPHY_RADIO_FREQ_ATTR_END, + + __NL80211_WIPHY_RADIO_FREQ_ATTR_LAST, + NL80211_WIPHY_RADIO_FREQ_ATTR_MAX = __NL80211_WIPHY_RADIO_FREQ_ATTR_LAST - 1, +}; + #endif /* __LINUX_NL80211_H */ diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h index f33d914d8f46..91583690bddc 100644 --- a/include/uapi/misc/fastrpc.h +++ b/include/uapi/misc/fastrpc.h @@ -8,11 +8,14 @@ #define FASTRPC_IOCTL_ALLOC_DMA_BUFF _IOWR('R', 1, struct fastrpc_alloc_dma_buf) #define FASTRPC_IOCTL_FREE_DMA_BUFF _IOWR('R', 2, __u32) #define FASTRPC_IOCTL_INVOKE _IOWR('R', 3, struct fastrpc_invoke) +/* This ioctl is only supported with secure device nodes */ #define FASTRPC_IOCTL_INIT_ATTACH _IO('R', 4) #define FASTRPC_IOCTL_INIT_CREATE _IOWR('R', 5, struct fastrpc_init_create) #define FASTRPC_IOCTL_MMAP _IOWR('R', 6, struct fastrpc_req_mmap) #define FASTRPC_IOCTL_MUNMAP _IOWR('R', 7, struct fastrpc_req_munmap) +/* This ioctl is only supported with secure device nodes */ #define FASTRPC_IOCTL_INIT_ATTACH_SNS _IO('R', 8) +/* This ioctl is only supported with secure device nodes */ #define FASTRPC_IOCTL_INIT_CREATE_STATIC _IOWR('R', 9, struct fastrpc_init_create_static) #define FASTRPC_IOCTL_MEM_MAP _IOWR('R', 10, struct fastrpc_mem_map) #define FASTRPC_IOCTL_MEM_UNMAP _IOWR('R', 11, struct fastrpc_mem_unmap) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 4ff11779699e..520f49f422fe 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -415,7 +415,7 @@ const char *btf_type_str(const struct btf_type *t) struct btf_show { u64 flags; void *target; /* target of show operation (seq file, buffer) */ - void (*showfn)(struct btf_show *show, const char *fmt, va_list args); + __printf(2, 0) void (*showfn)(struct btf_show *show, const char *fmt, va_list args); const struct btf *btf; /* below are used during iteration */ struct { @@ -7538,8 +7538,8 @@ static void btf_type_show(const struct btf *btf, u32 type_id, void *obj, btf_type_ops(t)->show(btf, t, type_id, obj, 0, show); } -static void btf_seq_show(struct btf_show *show, const char *fmt, - va_list args) +__printf(2, 0) static void btf_seq_show(struct btf_show *show, const char *fmt, + va_list args) { seq_vprintf((struct seq_file *)show->target, fmt, args); } @@ -7572,8 +7572,8 @@ struct btf_show_snprintf { int len; /* length we would have written */ }; -static void btf_snprintf_show(struct btf_show *show, const char *fmt, - va_list args) +__printf(2, 0) static void btf_snprintf_show(struct btf_show *show, const char *fmt, + va_list args) { struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show; int len; diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 5241ba671c5a..b5f0adae8293 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1084,7 +1084,10 @@ struct bpf_async_cb { struct bpf_prog *prog; void __rcu *callback_fn; void *value; - struct rcu_head rcu; + union { + struct rcu_head rcu; + struct work_struct delete_work; + }; u64 flags; }; @@ -1107,6 +1110,7 @@ struct bpf_async_cb { struct bpf_hrtimer { struct bpf_async_cb cb; struct hrtimer timer; + atomic_t cancelling; }; struct bpf_work { @@ -1219,6 +1223,21 @@ static void bpf_wq_delete_work(struct work_struct *work) kfree_rcu(w, cb.rcu); } +static void bpf_timer_delete_work(struct work_struct *work) +{ + struct 
bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, cb.delete_work); + + /* Cancel the timer and wait for callback to complete if it was running. + * If hrtimer_cancel() can be safely called it's safe to call + * kfree_rcu(t) right after for both preallocated and non-preallocated + * maps. The async->cb = NULL was already done and no code path can see + * address 't' anymore. A timer armed on this bpf_hrtimer before + * bpf_timer_cancel_and_free() will have been cancelled by the + * hrtimer_cancel() below. + */ + hrtimer_cancel(&t->timer); + kfree_rcu(t, cb.rcu); +} + static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags, enum bpf_async_type type) { @@ -1262,6 +1281,8 @@ static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u clockid = flags & (MAX_CLOCKS - 1); t = (struct bpf_hrtimer *)cb; + atomic_set(&t->cancelling, 0); + INIT_WORK(&t->cb.delete_work, bpf_timer_delete_work); hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT); t->timer.function = bpf_timer_cb; cb->value = (void *)async - map->record->timer_off; @@ -1440,7 +1461,8 @@ static void drop_prog_refcnt(struct bpf_async_cb *async) BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer) { - struct bpf_hrtimer *t; + struct bpf_hrtimer *t, *cur_t; + bool inc = false; int ret = 0; if (in_nmi()) @@ -1452,14 +1474,41 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer) ret = -EINVAL; goto out; } - if (this_cpu_read(hrtimer_running) == t) { + + cur_t = this_cpu_read(hrtimer_running); + if (cur_t == t) { /* If bpf callback_fn is trying to bpf_timer_cancel() * its own timer the hrtimer_cancel() will deadlock - * since it waits for callback_fn to finish + * since it waits for callback_fn to finish. */ ret = -EDEADLK; goto out; } + + /* Only account in-flight cancellations when invoked from a timer + * callback, since we want to avoid waiting only if other _callbacks_ + * are waiting on us, to avoid introducing lockups. Non-callback paths + * are ok, since nobody would synchronously wait for their completion. + */ + if (!cur_t) + goto drop; + atomic_inc(&t->cancelling); + /* Need full barrier after relaxed atomic_inc */ + smp_mb__after_atomic(); + inc = true; + if (atomic_read(&cur_t->cancelling)) { + /* We're cancelling timer t, while some other timer callback is + * attempting to cancel us. In such a case, it might be possible + * that timer t belongs to the other callback, or some other + * callback waiting upon it (creating transitive dependencies + * upon us), and we will enter a deadlock if we continue + * cancelling and waiting for it synchronously, since it might + * do the same. Bail! + */ + ret = -EDEADLK; + goto out; + } +drop: drop_prog_refcnt(&t->cb); out: __bpf_spin_unlock_irqrestore(&timer->lock); @@ -1467,6 +1516,8 @@ out: * if it was running. */ ret = ret ?: hrtimer_cancel(&t->timer); + if (inc) + atomic_dec(&t->cancelling); rcu_read_unlock(); return ret; } @@ -1512,25 +1563,39 @@ void bpf_timer_cancel_and_free(void *val) if (!t) return; - /* Cancel the timer and wait for callback to complete if it was running. - * If hrtimer_cancel() can be safely called it's safe to call kfree(t) - * right after for both preallocated and non-preallocated maps. - * The async->cb = NULL was already done and no code path can - * see address 't' anymore. - * - * Check that bpf_map_delete/update_elem() wasn't called from timer - * callback_fn. In such case don't call hrtimer_cancel() (since it will - * deadlock) and don't call hrtimer_try_to_cancel() (since it will just - * return -1).
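The in-flight accounting added above can be modeled in miniature: a callback that wants to cancel another timer first advertises itself in that timer's cancelling counter, then bails out if its own timer is concurrently being cancelled. A toy userspace model (illustrative only; the real code uses kernel atomics and per-CPU state):

#include <stdatomic.h>
#include <stddef.h>
#include <errno.h>

struct toy_timer { atomic_int cancelling; };

/* set by the framework while a timer callback runs on this thread */
static _Thread_local struct toy_timer *running_cb;

static int toy_timer_cancel(struct toy_timer *t)
{
	struct toy_timer *cur = running_cb;
	int ret = 0;

	if (cur == t)
		return -EDEADLK;	/* cancelling our own timer */
	if (!cur)
		return 0;		/* not in a callback: waiting is safe */

	atomic_fetch_add(&t->cancelling, 1);
	if (atomic_load(&cur->cancelling))
		ret = -EDEADLK;		/* mutual cancel: bail to avoid lockup */
	/* else: synchronously cancel t and wait for its callback here */
	atomic_fetch_sub(&t->cancelling, 1);
	return ret;
}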
Though callback_fn is still running on this cpu it's + /* We check that bpf_map_delete/update_elem() was called from timer + * callback_fn. In such a case we don't call hrtimer_cancel() (since it + * will deadlock) and don't call hrtimer_try_to_cancel() (since it will + * just return -1). Though callback_fn is still running on this cpu it's * safe to do kfree(t) because bpf_timer_cb() read everything it needed * from 't'. The bpf subprog callback_fn won't be able to access 't', * since async->cb = NULL was already done. The timer will be * effectively cancelled because bpf_timer_cb() will return * HRTIMER_NORESTART. + * + * However, it is possible the timer callback_fn calling us armed the + * timer _before_ calling us, such that failing to cancel it here will + * cause it to possibly use struct hrtimer after freeing bpf_hrtimer. + * Therefore, we _need_ to cancel any outstanding timers before we do + * kfree_rcu, even though no more timers can be armed. + * + * Moreover, we need to schedule work even if the timer does not belong + * to the calling callback_fn, as on two different CPUs, we can end up + * in a situation where both sides run in parallel, try to cancel one + * another, and we end up waiting on both sides in hrtimer_cancel + * without making forward progress, since timer1 depends on timer2's + * callback to finish, and vice versa. + * + * CPU 1 (timer1_cb) CPU 2 (timer2_cb) + * bpf_timer_cancel_and_free(timer2) bpf_timer_cancel_and_free(timer1) + * + * To avoid these issues, punt to workqueue context when we are in a + * timer callback. */ - if (this_cpu_read(hrtimer_running) != t) - hrtimer_cancel(&t->timer); - kfree_rcu(t, cb.rcu); + if (this_cpu_read(hrtimer_running)) + queue_work(system_unbound_wq, &t->cb.delete_work); + else + bpf_timer_delete_work(&t->cb.delete_work); } /* This function is called by map_delete/update_elem for individual element and diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 3d6306c363b7..8da132a1ef28 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -11335,7 +11335,9 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env, btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id)) strict_type_match = true; - WARN_ON_ONCE(is_kfunc_trusted_args(meta) && reg->off); + WARN_ON_ONCE(is_kfunc_release(meta) && + (reg->off || !tnum_is_const(reg->var_off) || + reg->var_off.value)); reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, &reg_ref_id); reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off); @@ -11917,12 +11919,8 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ return -EINVAL; } } - fallthrough; case KF_ARG_PTR_TO_CTX: - /* Trusted arguments have the same offset checks as release arguments */ - arg_type |= OBJ_RELEASE; - break; case KF_ARG_PTR_TO_DYNPTR: case KF_ARG_PTR_TO_ITER: case KF_ARG_PTR_TO_LIST_HEAD: @@ -11935,7 +11933,6 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ case KF_ARG_PTR_TO_REFCOUNTED_KPTR: case KF_ARG_PTR_TO_CONST_STR: case KF_ARG_PTR_TO_WORKQUEUE: - /* Trusted by default */ break; default: WARN_ON_ONCE(1); @@ -12729,56 +12726,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, return 0; } -static bool signed_add_overflows(s64 a, s64 b) -{ - /* Do the add in u64, where overflow is well-defined */ - s64 res = (s64)((u64)a + (u64)b); - - if (b < 0) - return res > a; - return res < a; -} - -static bool signed_add32_overflows(s32 a, s32 b) -{ - /* Do the add
in u32, where overflow is well-defined */ - s32 res = (s32)((u32)a + (u32)b); - - if (b < 0) - return res > a; - return res < a; -} - -static bool signed_add16_overflows(s16 a, s16 b) -{ - /* Do the add in u16, where overflow is well-defined */ - s16 res = (s16)((u16)a + (u16)b); - - if (b < 0) - return res > a; - return res < a; -} - -static bool signed_sub_overflows(s64 a, s64 b) -{ - /* Do the sub in u64, where overflow is well-defined */ - s64 res = (s64)((u64)a - (u64)b); - - if (b < 0) - return res < a; - return res > a; -} - -static bool signed_sub32_overflows(s32 a, s32 b) -{ - /* Do the sub in u32, where overflow is well-defined */ - s32 res = (s32)((u32)a - (u32)b); - - if (b < 0) - return res < a; - return res > a; -} - static bool check_reg_sane_offset(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, enum bpf_reg_type type) @@ -13260,21 +13207,15 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, * added into the variable offset, and we copy the fixed offset * from ptr_reg. */ - if (signed_add_overflows(smin_ptr, smin_val) || - signed_add_overflows(smax_ptr, smax_val)) { + if (check_add_overflow(smin_ptr, smin_val, &dst_reg->smin_value) || + check_add_overflow(smax_ptr, smax_val, &dst_reg->smax_value)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; - } else { - dst_reg->smin_value = smin_ptr + smin_val; - dst_reg->smax_value = smax_ptr + smax_val; } - if (umin_ptr + umin_val < umin_ptr || - umax_ptr + umax_val < umax_ptr) { + if (check_add_overflow(umin_ptr, umin_val, &dst_reg->umin_value) || + check_add_overflow(umax_ptr, umax_val, &dst_reg->umax_value)) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; - } else { - dst_reg->umin_value = umin_ptr + umin_val; - dst_reg->umax_value = umax_ptr + umax_val; } dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; @@ -13317,14 +13258,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, /* A new variable offset is created. If the subtrahend is known * nonnegative, then any reg->range we had before is still good. 
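The replacement pattern is easy to demo in isolation: check_add_overflow()/check_sub_overflow() wrap the compiler builtins, so a single call both computes the (wrapped) result and reports overflow, replacing the hand-rolled helpers deleted here. Standalone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t smin = 0, smax = 0;

	/* on overflow the destination holds the wrapped value and the
	 * caller widens the range to [S64_MIN, S64_MAX] instead
	 */
	if (__builtin_add_overflow(INT64_MAX, (int64_t)1, &smax) ||
	    __builtin_add_overflow(INT64_MIN, (int64_t)-1, &smin)) {
		smin = INT64_MIN;
		smax = INT64_MAX;
	}

	printf("smin=%lld smax=%lld\n", (long long)smin, (long long)smax);
	return 0;
}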
*/ - if (signed_sub_overflows(smin_ptr, smax_val) || - signed_sub_overflows(smax_ptr, smin_val)) { + if (check_sub_overflow(smin_ptr, smax_val, &dst_reg->smin_value) || + check_sub_overflow(smax_ptr, smin_val, &dst_reg->smax_value)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; - } else { - dst_reg->smin_value = smin_ptr - smax_val; - dst_reg->smax_value = smax_ptr - smin_val; } if (umin_ptr < umax_val) { /* Overflow possible, we know nothing */ @@ -13377,71 +13315,56 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, static void scalar32_min_max_add(struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg) { - s32 smin_val = src_reg->s32_min_value; - s32 smax_val = src_reg->s32_max_value; - u32 umin_val = src_reg->u32_min_value; - u32 umax_val = src_reg->u32_max_value; + s32 *dst_smin = &dst_reg->s32_min_value; + s32 *dst_smax = &dst_reg->s32_max_value; + u32 *dst_umin = &dst_reg->u32_min_value; + u32 *dst_umax = &dst_reg->u32_max_value; - if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) || - signed_add32_overflows(dst_reg->s32_max_value, smax_val)) { - dst_reg->s32_min_value = S32_MIN; - dst_reg->s32_max_value = S32_MAX; - } else { - dst_reg->s32_min_value += smin_val; - dst_reg->s32_max_value += smax_val; + if (check_add_overflow(*dst_smin, src_reg->s32_min_value, dst_smin) || + check_add_overflow(*dst_smax, src_reg->s32_max_value, dst_smax)) { + *dst_smin = S32_MIN; + *dst_smax = S32_MAX; } - if (dst_reg->u32_min_value + umin_val < umin_val || - dst_reg->u32_max_value + umax_val < umax_val) { - dst_reg->u32_min_value = 0; - dst_reg->u32_max_value = U32_MAX; - } else { - dst_reg->u32_min_value += umin_val; - dst_reg->u32_max_value += umax_val; + if (check_add_overflow(*dst_umin, src_reg->u32_min_value, dst_umin) || + check_add_overflow(*dst_umax, src_reg->u32_max_value, dst_umax)) { + *dst_umin = 0; + *dst_umax = U32_MAX; } } static void scalar_min_max_add(struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg) { - s64 smin_val = src_reg->smin_value; - s64 smax_val = src_reg->smax_value; - u64 umin_val = src_reg->umin_value; - u64 umax_val = src_reg->umax_value; + s64 *dst_smin = &dst_reg->smin_value; + s64 *dst_smax = &dst_reg->smax_value; + u64 *dst_umin = &dst_reg->umin_value; + u64 *dst_umax = &dst_reg->umax_value; - if (signed_add_overflows(dst_reg->smin_value, smin_val) || - signed_add_overflows(dst_reg->smax_value, smax_val)) { - dst_reg->smin_value = S64_MIN; - dst_reg->smax_value = S64_MAX; - } else { - dst_reg->smin_value += smin_val; - dst_reg->smax_value += smax_val; + if (check_add_overflow(*dst_smin, src_reg->smin_value, dst_smin) || + check_add_overflow(*dst_smax, src_reg->smax_value, dst_smax)) { + *dst_smin = S64_MIN; + *dst_smax = S64_MAX; } - if (dst_reg->umin_value + umin_val < umin_val || - dst_reg->umax_value + umax_val < umax_val) { - dst_reg->umin_value = 0; - dst_reg->umax_value = U64_MAX; - } else { - dst_reg->umin_value += umin_val; - dst_reg->umax_value += umax_val; + if (check_add_overflow(*dst_umin, src_reg->umin_value, dst_umin) || + check_add_overflow(*dst_umax, src_reg->umax_value, dst_umax)) { + *dst_umin = 0; + *dst_umax = U64_MAX; } } static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg) { - s32 smin_val = src_reg->s32_min_value; - s32 smax_val = src_reg->s32_max_value; + s32 *dst_smin = &dst_reg->s32_min_value; + s32 *dst_smax = &dst_reg->s32_max_value; u32 umin_val = src_reg->u32_min_value; u32 umax_val = 
src_reg->u32_max_value; - if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) || - signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) { + if (check_sub_overflow(*dst_smin, src_reg->s32_max_value, dst_smin) || + check_sub_overflow(*dst_smax, src_reg->s32_min_value, dst_smax)) { /* Overflow possible, we know nothing */ - dst_reg->s32_min_value = S32_MIN; - dst_reg->s32_max_value = S32_MAX; - } else { - dst_reg->s32_min_value -= smax_val; - dst_reg->s32_max_value -= smin_val; + *dst_smin = S32_MIN; + *dst_smax = S32_MAX; } if (dst_reg->u32_min_value < umax_val) { /* Overflow possible, we know nothing */ @@ -13457,19 +13380,16 @@ static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg, static void scalar_min_max_sub(struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg) { - s64 smin_val = src_reg->smin_value; - s64 smax_val = src_reg->smax_value; + s64 *dst_smin = &dst_reg->smin_value; + s64 *dst_smax = &dst_reg->smax_value; u64 umin_val = src_reg->umin_value; u64 umax_val = src_reg->umax_value; - if (signed_sub_overflows(dst_reg->smin_value, smax_val) || - signed_sub_overflows(dst_reg->smax_value, smin_val)) { + if (check_sub_overflow(*dst_smin, src_reg->smax_value, dst_smin) || + check_sub_overflow(*dst_smax, src_reg->smin_value, dst_smax)) { /* Overflow possible, we know nothing */ - dst_reg->smin_value = S64_MIN; - dst_reg->smax_value = S64_MAX; - } else { - dst_reg->smin_value -= smax_val; - dst_reg->smax_value -= smin_val; + *dst_smin = S64_MIN; + *dst_smax = S64_MAX; } if (dst_reg->umin_value < umax_val) { /* Overflow possible, we know nothing */ @@ -18838,6 +18758,8 @@ static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta) { struct bpf_insn *insn = prog->insnsi; u32 insn_cnt = prog->len, i; + s32 imm; + s16 off; for (i = 0; i < insn_cnt; i++, insn++) { u8 code = insn->code; @@ -18849,15 +18771,15 @@ static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta) if (insn->code == (BPF_JMP32 | BPF_JA)) { if (i + 1 + insn->imm != tgt_idx) continue; - if (signed_add32_overflows(insn->imm, delta)) + if (check_add_overflow(insn->imm, delta, &imm)) return -ERANGE; - insn->imm += delta; + insn->imm = imm; } else { if (i + 1 + insn->off != tgt_idx) continue; - if (signed_add16_overflows(insn->imm, delta)) + if (check_add_overflow(insn->off, delta, &off)) return -ERANGE; - insn->off += delta; + insn->off = off; } } return 0; diff --git a/lib/build_OID_registry b/lib/build_OID_registry index 56d8bafeb848..8267e8d71338 100755 --- a/lib/build_OID_registry +++ b/lib/build_OID_registry @@ -38,7 +38,9 @@ close IN_FILE || die; # open C_FILE, ">$ARGV[1]" or die; print C_FILE "/*\n"; -print C_FILE " * Automatically generated by ", $0 =~ s#^\Q$abs_srctree/\E##r, ". Do not edit\n"; +my $scriptname = $0; +$scriptname =~ s#^\Q$abs_srctree/\E##; +print C_FILE " * Automatically generated by ", $scriptname, ". Do not edit\n"; print C_FILE " */\n"; # diff --git a/lib/closure.c b/lib/closure.c index c971216d9d77..116afae2eed9 100644 --- a/lib/closure.c +++ b/lib/closure.c @@ -244,6 +244,9 @@ void closure_debug_destroy(struct closure *cl) { unsigned long flags; + if (cl->magic == CLOSURE_MAGIC_STACK) + return; + BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE); cl->magic = CLOSURE_MAGIC_DEAD; diff --git a/mm/damon/core.c b/mm/damon/core.c index 6392f1cc97a3..e66823d6b10b 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -1358,14 +1358,31 @@ static void damon_merge_regions_of(struct damon_target *t, unsigned int thres, * access frequencies are similar. 
This is for minimizing the monitoring * overhead under the dynamically changeable access pattern. If a merge was * unnecessarily made, later 'kdamond_split_regions()' will revert it. + * + * The total number of regions could be higher than the user-defined limit, + * max_nr_regions, in some cases. For example, the user can update + * max_nr_regions to a number lower than the current number of regions + * while DAMON is running. For such a case, repeat merging until the limit is + * met while increasing @threshold up to the possible maximum level. */ static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold, unsigned long sz_limit) { struct damon_target *t; - - damon_for_each_target(t, c) - damon_merge_regions_of(t, threshold, sz_limit); + unsigned int nr_regions; + unsigned int max_thres; + + max_thres = c->attrs.aggr_interval / + (c->attrs.sample_interval ? c->attrs.sample_interval : 1); + do { + nr_regions = 0; + damon_for_each_target(t, c) { + damon_merge_regions_of(t, threshold, sz_limit); + nr_regions += damon_nr_regions(t); + } + threshold = max(1, threshold * 2); + } while (nr_regions > c->attrs.max_nr_regions && + threshold / 2 < max_thres); } /* diff --git a/mm/filemap.c b/mm/filemap.c index 876cc64aadd7..657bcd887fdb 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1847,7 +1847,7 @@ repeat: if (!folio || xa_is_value(folio)) goto out; - if (!folio_try_get_rcu(folio)) + if (!folio_try_get(folio)) goto repeat; if (unlikely(folio != xas_reload(&xas))) { @@ -2001,7 +2001,7 @@ retry: if (!folio || xa_is_value(folio)) return folio; - if (!folio_try_get_rcu(folio)) + if (!folio_try_get(folio)) goto reset; if (unlikely(folio != xas_reload(xas))) { @@ -2181,7 +2181,7 @@ unsigned filemap_get_folios_contig(struct address_space *mapping, if (xa_is_value(folio)) goto update_start; - if (!folio_try_get_rcu(folio)) + if (!folio_try_get(folio)) goto retry; if (unlikely(folio != xas_reload(&xas))) @@ -2313,7 +2313,7 @@ static void filemap_get_read_batch(struct address_space *mapping, break; if (xa_is_sibling(folio)) break; - if (!folio_try_get_rcu(folio)) + if (!folio_try_get(folio)) goto retry; if (unlikely(folio != xas_reload(&xas))) @@ -3124,7 +3124,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* Use the readahead code, even if readahead is disabled */ - if (vm_flags & VM_HUGEPAGE) { + if ((vm_flags & VM_HUGEPAGE) && HPAGE_PMD_ORDER <= MAX_PAGECACHE_ORDER) { fpin = maybe_unlock_mmap_for_io(vmf, fpin); ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1); ra->size = HPAGE_PMD_NR; @@ -3231,7 +3231,8 @@ static vm_fault_t filemap_fault_recheck_pte_none(struct vm_fault *vmf) if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)) return 0; - ptep = pte_offset_map(vmf->pmd, vmf->address); + ptep = pte_offset_map_nolock(vma->vm_mm, vmf->pmd, vmf->address, + &vmf->ptl); if (unlikely(!ptep)) return VM_FAULT_NOPAGE; @@ -3472,7 +3473,7 @@ static struct folio *next_uptodate_folio(struct xa_state *xas, continue; if (folio_test_locked(folio)) continue; - if (!folio_try_get_rcu(folio)) + if (!folio_try_get(folio)) continue; /* Has the page moved or been split? */ if (unlikely(folio != xas_reload(xas))) @@ -4248,6 +4249,9 @@ static void filemap_cachestat(struct address_space *mapping, XA_STATE(xas, &mapping->i_pages, first_index); struct folio *folio; + /* Flush stats (and potentially sleep) outside the RCU read section.
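The retry loop added to kdamond_merge_regions() above is simple to model: keep merging with a doubled threshold until the region count fits the limit or the threshold tops out. A toy standalone version (the merge pass is a stand-in, not DAMON's real merging logic):

#include <stdio.h>

static unsigned int merge_pass(unsigned int nr_regions)
{
	return (nr_regions + 1) / 2;	/* pretend each pass halves the count */
}

int main(void)
{
	unsigned int nr_regions = 1000, max_nr_regions = 100;
	unsigned int threshold = 1, max_thres = 20;

	do {
		nr_regions = merge_pass(nr_regions);
		/* mirrors threshold = max(1, threshold * 2) */
		threshold = threshold ? threshold * 2 : 1;
	} while (nr_regions > max_nr_regions && threshold / 2 < max_thres);

	printf("%u regions, final threshold %u\n", nr_regions, threshold);
	return 0;
}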
*/ + mem_cgroup_flush_stats_ratelimited(NULL); + rcu_read_lock(); xas_for_each(&xas, folio, last_index) { int order; @@ -4311,7 +4315,7 @@ static void filemap_cachestat(struct address_space *mapping, goto resched; } #endif - if (workingset_test_recent(shadow, true, &workingset)) + if (workingset_test_recent(shadow, true, &workingset, false)) cs->nr_recently_evicted += nr_pages; goto resched; @@ -76,7 +76,7 @@ retry: folio = page_folio(page); if (WARN_ON_ONCE(folio_ref_count(folio) < 0)) return NULL; - if (unlikely(!folio_ref_try_add_rcu(folio, refs))) + if (unlikely(!folio_ref_try_add(folio, refs))) return NULL; /* @@ -97,95 +97,6 @@ retry: return folio; } -/** - * try_grab_folio() - Attempt to get or pin a folio. - * @page: pointer to page to be grabbed - * @refs: the value to (effectively) add to the folio's refcount - * @flags: gup flags: these are the FOLL_* flag values. - * - * "grab" names in this file mean, "look at flags to decide whether to use - * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount. - * - * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the - * same time. (That's true throughout the get_user_pages*() and - * pin_user_pages*() APIs.) Cases: - * - * FOLL_GET: folio's refcount will be incremented by @refs. - * - * FOLL_PIN on large folios: folio's refcount will be incremented by - * @refs, and its pincount will be incremented by @refs. - * - * FOLL_PIN on single-page folios: folio's refcount will be incremented by - * @refs * GUP_PIN_COUNTING_BIAS. - * - * Return: The folio containing @page (with refcount appropriately - * incremented) for success, or NULL upon failure. If neither FOLL_GET - * nor FOLL_PIN was set, that's considered failure, and furthermore, - * a likely bug in the caller, so a warning is also emitted. - */ -struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags) -{ - struct folio *folio; - - if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0)) - return NULL; - - if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page))) - return NULL; - - if (flags & FOLL_GET) - return try_get_folio(page, refs); - - /* FOLL_PIN is set */ - - /* - * Don't take a pin on the zero page - it's not going anywhere - * and it is used in a *lot* of places. - */ - if (is_zero_page(page)) - return page_folio(page); - - folio = try_get_folio(page, refs); - if (!folio) - return NULL; - - /* - * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a - * right zone, so fail and let the caller fall back to the slow - * path. - */ - if (unlikely((flags & FOLL_LONGTERM) && - !folio_is_longterm_pinnable(folio))) { - if (!put_devmap_managed_folio_refs(folio, refs)) - folio_put_refs(folio, refs); - return NULL; - } - - /* - * When pinning a large folio, use an exact count to track it. - * - * However, be sure to *also* increment the normal folio - * refcount field at least once, so that the folio really - * is pinned. That's why the refcount from the earlier - * try_get_folio() is left intact. - */ - if (folio_test_large(folio)) - atomic_add(refs, &folio->_pincount); - else - folio_ref_add(folio, - refs * (GUP_PIN_COUNTING_BIAS - 1)); - /* - * Adjust the pincount before re-checking the PTE for changes. - * This is essentially a smp_mb() and is paired with a memory - * barrier in folio_try_share_anon_rmap_*(). 
- */ - smp_mb__after_atomic(); - - node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs); - - return folio; -} - static void gup_put_folio(struct folio *folio, int refs, unsigned int flags) { if (flags & FOLL_PIN) { @@ -203,58 +114,59 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags) } /** - * try_grab_page() - elevate a page's refcount by a flag-dependent amount - * @page: pointer to page to be grabbed - * @flags: gup flags: these are the FOLL_* flag values. + * try_grab_folio() - add a folio's refcount by a flag-dependent amount + * @folio: pointer to folio to be grabbed + * @refs: the value to (effectively) add to the folio's refcount + * @flags: gup flags: these are the FOLL_* flag values * * This might not do anything at all, depending on the flags argument. * * "grab" names in this file mean, "look at flags to decide whether to use - * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount. + * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount. * * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same - * time. Cases: please see the try_grab_folio() documentation, with - * "refs=1". + * time. * * Return: 0 for success, or if no action was required (if neither FOLL_PIN * nor FOLL_GET was set, nothing is done). A negative error code for failure: * - * -ENOMEM FOLL_GET or FOLL_PIN was set, but the page could not + * -ENOMEM FOLL_GET or FOLL_PIN was set, but the folio could not * be grabbed. + * + * It is called when we have a stable reference for the folio, typically in + * GUP slow path. */ -int __must_check try_grab_page(struct page *page, unsigned int flags) +int __must_check try_grab_folio(struct folio *folio, int refs, + unsigned int flags) { - struct folio *folio = page_folio(page); - if (WARN_ON_ONCE(folio_ref_count(folio) <= 0)) return -ENOMEM; - if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page))) + if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(&folio->page))) return -EREMOTEIO; if (flags & FOLL_GET) - folio_ref_inc(folio); + folio_ref_add(folio, refs); else if (flags & FOLL_PIN) { /* * Don't take a pin on the zero page - it's not going anywhere * and it is used in a *lot* of places. */ - if (is_zero_page(page)) + if (is_zero_folio(folio)) return 0; /* - * Similar to try_grab_folio(): be sure to *also* - * increment the normal page refcount field at least once, + * Increment the normal page refcount field at least once, * so that the page really is pinned. */ if (folio_test_large(folio)) { - folio_ref_add(folio, 1); - atomic_add(1, &folio->_pincount); + folio_ref_add(folio, refs); + atomic_add(refs, &folio->_pincount); } else { - folio_ref_add(folio, GUP_PIN_COUNTING_BIAS); + folio_ref_add(folio, refs * GUP_PIN_COUNTING_BIAS); } - node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1); + node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs); } return 0; @@ -515,6 +427,102 @@ static int record_subpages(struct page *page, unsigned long sz, return nr; } + +/** + * try_grab_folio_fast() - Attempt to get or pin a folio in fast path. + * @page: pointer to page to be grabbed + * @refs: the value to (effectively) add to the folio's refcount + * @flags: gup flags: these are the FOLL_* flag values. + * + * "grab" names in this file mean, "look at flags to decide whether to use + * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount. + * + * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the + * same time. 
(That's true throughout the get_user_pages*() and + * pin_user_pages*() APIs.) Cases: + * + * FOLL_GET: folio's refcount will be incremented by @refs. + * + * FOLL_PIN on large folios: folio's refcount will be incremented by + * @refs, and its pincount will be incremented by @refs. + * + * FOLL_PIN on single-page folios: folio's refcount will be incremented by + * @refs * GUP_PIN_COUNTING_BIAS. + * + * Return: The folio containing @page (with refcount appropriately + * incremented) for success, or NULL upon failure. If neither FOLL_GET + * nor FOLL_PIN was set, that's considered failure, and furthermore, + * a likely bug in the caller, so a warning is also emitted. + * + * It uses add ref unless zero to elevate the folio refcount and must be called + * in fast path only. + */ +static struct folio *try_grab_folio_fast(struct page *page, int refs, + unsigned int flags) +{ + struct folio *folio; + + /* Raise warn if it is not called in fast GUP */ + VM_WARN_ON_ONCE(!irqs_disabled()); + + if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0)) + return NULL; + + if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page))) + return NULL; + + if (flags & FOLL_GET) + return try_get_folio(page, refs); + + /* FOLL_PIN is set */ + + /* + * Don't take a pin on the zero page - it's not going anywhere + * and it is used in a *lot* of places. + */ + if (is_zero_page(page)) + return page_folio(page); + + folio = try_get_folio(page, refs); + if (!folio) + return NULL; + + /* + * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a + * right zone, so fail and let the caller fall back to the slow + * path. + */ + if (unlikely((flags & FOLL_LONGTERM) && + !folio_is_longterm_pinnable(folio))) { + if (!put_devmap_managed_folio_refs(folio, refs)) + folio_put_refs(folio, refs); + return NULL; + } + + /* + * When pinning a large folio, use an exact count to track it. + * + * However, be sure to *also* increment the normal folio + * refcount field at least once, so that the folio really + * is pinned. That's why the refcount from the earlier + * try_get_folio() is left intact. + */ + if (folio_test_large(folio)) + atomic_add(refs, &folio->_pincount); + else + folio_ref_add(folio, + refs * (GUP_PIN_COUNTING_BIAS - 1)); + /* + * Adjust the pincount before re-checking the PTE for changes. + * This is essentially a smp_mb() and is paired with a memory + * barrier in folio_try_share_anon_rmap_*(). 
+ */ + smp_mb__after_atomic(); + + node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs); + + return folio; +} #endif /* CONFIG_ARCH_HAS_HUGEPD || CONFIG_HAVE_GUP_FAST */ #ifdef CONFIG_ARCH_HAS_HUGEPD @@ -535,7 +543,7 @@ static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end, */ static int gup_hugepte(struct vm_area_struct *vma, pte_t *ptep, unsigned long sz, unsigned long addr, unsigned long end, unsigned int flags, - struct page **pages, int *nr) + struct page **pages, int *nr, bool fast) { unsigned long pte_end; struct page *page; @@ -558,9 +566,15 @@ static int gup_hugepte(struct vm_area_struct *vma, pte_t *ptep, unsigned long sz page = pte_page(pte); refs = record_subpages(page, sz, addr, end, pages + *nr); - folio = try_grab_folio(page, refs, flags); - if (!folio) - return 0; + if (fast) { + folio = try_grab_folio_fast(page, refs, flags); + if (!folio) + return 0; + } else { + folio = page_folio(page); + if (try_grab_folio(folio, refs, flags)) + return 0; + } if (unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) { gup_put_folio(folio, refs, flags); @@ -588,7 +602,7 @@ static int gup_hugepte(struct vm_area_struct *vma, pte_t *ptep, unsigned long sz static int gup_hugepd(struct vm_area_struct *vma, hugepd_t hugepd, unsigned long addr, unsigned int pdshift, unsigned long end, unsigned int flags, - struct page **pages, int *nr) + struct page **pages, int *nr, bool fast) { pte_t *ptep; unsigned long sz = 1UL << hugepd_shift(hugepd); @@ -598,7 +612,8 @@ static int gup_hugepd(struct vm_area_struct *vma, hugepd_t hugepd, ptep = hugepte_offset(hugepd, addr, pdshift); do { next = hugepte_addr_end(addr, end, sz); - ret = gup_hugepte(vma, ptep, sz, addr, end, flags, pages, nr); + ret = gup_hugepte(vma, ptep, sz, addr, end, flags, pages, nr, + fast); if (ret != 1) return ret; } while (ptep++, addr = next, addr != end); @@ -625,7 +640,7 @@ static struct page *follow_hugepd(struct vm_area_struct *vma, hugepd_t hugepd, ptep = hugepte_offset(hugepd, addr, pdshift); ptl = huge_pte_lock(h, vma->vm_mm, ptep); ret = gup_hugepd(vma, hugepd, addr, pdshift, addr + PAGE_SIZE, - flags, &page, &nr); + flags, &page, &nr, false); spin_unlock(ptl); if (ret == 1) { @@ -642,7 +657,7 @@ static struct page *follow_hugepd(struct vm_area_struct *vma, hugepd_t hugepd, static inline int gup_hugepd(struct vm_area_struct *vma, hugepd_t hugepd, unsigned long addr, unsigned int pdshift, unsigned long end, unsigned int flags, - struct page **pages, int *nr) + struct page **pages, int *nr, bool fast) { return 0; } @@ -729,7 +744,7 @@ static struct page *follow_huge_pud(struct vm_area_struct *vma, gup_must_unshare(vma, flags, page)) return ERR_PTR(-EMLINK); - ret = try_grab_page(page, flags); + ret = try_grab_folio(page_folio(page), 1, flags); if (ret) page = ERR_PTR(ret); else @@ -806,7 +821,7 @@ static struct page *follow_huge_pmd(struct vm_area_struct *vma, VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) && !PageAnonExclusive(page), page); - ret = try_grab_page(page, flags); + ret = try_grab_folio(page_folio(page), 1, flags); if (ret) return ERR_PTR(ret); @@ -968,8 +983,8 @@ static struct page *follow_page_pte(struct vm_area_struct *vma, VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) && !PageAnonExclusive(page), page); - /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */ - ret = try_grab_page(page, flags); + /* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. 
*/ + ret = try_grab_folio(page_folio(page), 1, flags); if (unlikely(ret)) { page = ERR_PTR(ret); goto out; @@ -1233,7 +1248,7 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address, goto unmap; *page = pte_page(entry); } - ret = try_grab_page(*page, gup_flags); + ret = try_grab_folio(page_folio(*page), 1, gup_flags); if (unlikely(ret)) goto unmap; out: @@ -1636,20 +1651,19 @@ next_page: * pages. */ if (page_increm > 1) { - struct folio *folio; + struct folio *folio = page_folio(page); /* * Since we already hold refcount on the * large folio, this should never fail. */ - folio = try_grab_folio(page, page_increm - 1, - foll_flags); - if (WARN_ON_ONCE(!folio)) { + if (try_grab_folio(folio, page_increm - 1, + foll_flags)) { /* * Release the 1st page ref if the * folio is problematic, fail hard. */ - gup_put_folio(page_folio(page), 1, + gup_put_folio(folio, 1, foll_flags); ret = -EFAULT; goto out; @@ -2797,7 +2811,6 @@ EXPORT_SYMBOL(get_user_pages_unlocked); * This code is based heavily on the PowerPC implementation by Nick Piggin. */ #ifdef CONFIG_HAVE_GUP_FAST - /* * Used in the GUP-fast path to determine whether GUP is permitted to work on * a specific folio. @@ -2962,7 +2975,7 @@ static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, VM_BUG_ON(!pfn_valid(pte_pfn(pte))); page = pte_page(pte); - folio = try_grab_folio(page, 1, flags); + folio = try_grab_folio_fast(page, 1, flags); if (!folio) goto pte_unmap; @@ -3049,7 +3062,7 @@ static int gup_fast_devmap_leaf(unsigned long pfn, unsigned long addr, break; } - folio = try_grab_folio(page, 1, flags); + folio = try_grab_folio_fast(page, 1, flags); if (!folio) { gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages); break; @@ -3138,7 +3151,7 @@ static int gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr, page = pmd_page(orig); refs = record_subpages(page, PMD_SIZE, addr, end, pages + *nr); - folio = try_grab_folio(page, refs, flags); + folio = try_grab_folio_fast(page, refs, flags); if (!folio) return 0; @@ -3182,7 +3195,7 @@ static int gup_fast_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr, page = pud_page(orig); refs = record_subpages(page, PUD_SIZE, addr, end, pages + *nr); - folio = try_grab_folio(page, refs, flags); + folio = try_grab_folio_fast(page, refs, flags); if (!folio) return 0; @@ -3222,7 +3235,7 @@ static int gup_fast_pgd_leaf(pgd_t orig, pgd_t *pgdp, unsigned long addr, page = pgd_page(orig); refs = record_subpages(page, PGDIR_SIZE, addr, end, pages + *nr); - folio = try_grab_folio(page, refs, flags); + folio = try_grab_folio_fast(page, refs, flags); if (!folio) return 0; @@ -3276,7 +3289,8 @@ static int gup_fast_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, * pmd format and THP pmd format */ if (gup_hugepd(NULL, __hugepd(pmd_val(pmd)), addr, - PMD_SHIFT, next, flags, pages, nr) != 1) + PMD_SHIFT, next, flags, pages, nr, + true) != 1) return 0; } else if (!gup_fast_pte_range(pmd, pmdp, addr, next, flags, pages, nr)) @@ -3306,7 +3320,8 @@ static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, return 0; } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) { if (gup_hugepd(NULL, __hugepd(pud_val(pud)), addr, - PUD_SHIFT, next, flags, pages, nr) != 1) + PUD_SHIFT, next, flags, pages, nr, + true) != 1) return 0; } else if (!gup_fast_pmd_range(pudp, pud, addr, next, flags, pages, nr)) @@ -3333,7 +3348,8 @@ static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, BUILD_BUG_ON(p4d_leaf(p4d)); if 
(unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) { if (gup_hugepd(NULL, __hugepd(p4d_val(p4d)), addr, - P4D_SHIFT, next, flags, pages, nr) != 1) + P4D_SHIFT, next, flags, pages, nr, + true) != 1) return 0; } else if (!gup_fast_pud_range(p4dp, p4d, addr, next, flags, pages, nr)) @@ -3362,7 +3378,8 @@ static void gup_fast_pgd_range(unsigned long addr, unsigned long end, return; } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) { if (gup_hugepd(NULL, __hugepd(pgd_val(pgd)), addr, - PGDIR_SHIFT, next, flags, pages, nr) != 1) + PGDIR_SHIFT, next, flags, pages, nr, + true) != 1) return; } else if (!gup_fast_p4d_range(pgdp, pgd, addr, next, flags, pages, nr)) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index db7946a0a28c..2120f7478e55 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1331,7 +1331,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, if (!*pgmap) return ERR_PTR(-EFAULT); page = pfn_to_page(pfn); - ret = try_grab_page(page, flags); + ret = try_grab_folio(page_folio(page), 1, flags); if (ret) page = ERR_PTR(ret); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index f35abff8be60..43e1af868cfd 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1625,13 +1625,10 @@ static inline void destroy_compound_gigantic_folio(struct folio *folio, * folio appears as just a compound page. Otherwise, wait until after * allocating vmemmap to clear the flag. * - * A reference is held on the folio, except in the case of demote. - * * Must be called with hugetlb lock held. */ -static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio, - bool adjust_surplus, - bool demote) +static void remove_hugetlb_folio(struct hstate *h, struct folio *folio, + bool adjust_surplus) { int nid = folio_nid(folio); @@ -1645,6 +1642,7 @@ static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio, list_del(&folio->lru); if (folio_test_hugetlb_freed(folio)) { + folio_clear_hugetlb_freed(folio); h->free_huge_pages--; h->free_huge_pages_node[nid]--; } @@ -1661,33 +1659,13 @@ static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio, if (!folio_test_hugetlb_vmemmap_optimized(folio)) __folio_clear_hugetlb(folio); - /* - * In the case of demote we do not ref count the page as it will soon - * be turned into a page of smaller size. - */ - if (!demote) - folio_ref_unfreeze(folio, 1); - h->nr_huge_pages--; h->nr_huge_pages_node[nid]--; } -static void remove_hugetlb_folio(struct hstate *h, struct folio *folio, - bool adjust_surplus) -{ - __remove_hugetlb_folio(h, folio, adjust_surplus, false); -} - -static void remove_hugetlb_folio_for_demote(struct hstate *h, struct folio *folio, - bool adjust_surplus) -{ - __remove_hugetlb_folio(h, folio, adjust_surplus, true); -} - static void add_hugetlb_folio(struct hstate *h, struct folio *folio, bool adjust_surplus) { - int zeroed; int nid = folio_nid(folio); VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio); @@ -1711,21 +1689,6 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio, */ folio_set_hugetlb_vmemmap_optimized(folio); - /* - * This folio is about to be managed by the hugetlb allocator and - * should have no users. Drop our reference, and check for others - * just in case. - */ - zeroed = folio_put_testzero(folio); - if (unlikely(!zeroed)) - /* - * It is VERY unlikely soneone else has taken a ref - * on the folio. In this case, we simply return as - * free_huge_folio() will be called when this other ref - * is dropped. 
- */ - return; - arch_clear_hugetlb_flags(folio); enqueue_hugetlb_folio(h, folio); } @@ -1763,13 +1726,6 @@ static void __update_and_free_hugetlb_folio(struct hstate *h, } /* - * Move PageHWPoison flag from head page to the raw error pages, - * which makes any healthy subpages reusable. - */ - if (unlikely(folio_test_hwpoison(folio))) - folio_clear_hugetlb_hwpoison(folio); - - /* * If vmemmap pages were allocated above, then we need to clear the * hugetlb flag under the hugetlb lock. */ @@ -1780,6 +1736,15 @@ static void __update_and_free_hugetlb_folio(struct hstate *h, } /* + * Move PageHWPoison flag from head page to the raw error pages, + * which makes any healthy subpages reusable. + */ + if (unlikely(folio_test_hwpoison(folio))) + folio_clear_hugetlb_hwpoison(folio); + + folio_ref_unfreeze(folio, 1); + + /* * Non-gigantic pages demoted from CMA allocated gigantic pages * need to be given back to CMA in free_gigantic_folio. */ @@ -2197,6 +2162,9 @@ static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h, nid = numa_mem_id(); retry: folio = __folio_alloc(gfp_mask, order, nid, nmask); + /* Ensure hugetlb folio won't have large_rmappable flag set. */ + if (folio) + folio_clear_large_rmappable(folio); if (folio && !folio_ref_freeze(folio, 1)) { folio_put(folio); @@ -3079,11 +3047,8 @@ retry: free_new: spin_unlock_irq(&hugetlb_lock); - if (new_folio) { - /* Folio has a zero ref count, but needs a ref to be freed */ - folio_ref_unfreeze(new_folio, 1); + if (new_folio) update_and_free_hugetlb_folio(h, new_folio, false); - } return ret; } @@ -3938,7 +3903,7 @@ static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio) target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order); - remove_hugetlb_folio_for_demote(h, folio, false); + remove_hugetlb_folio(h, folio, false); spin_unlock_irq(&hugetlb_lock); /* @@ -3952,7 +3917,6 @@ static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio) if (rc) { /* Allocation of vmemmmap failed, we can not demote folio */ spin_lock_irq(&hugetlb_lock); - folio_ref_unfreeze(folio, 1); add_hugetlb_folio(h, folio, false); return rc; } diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c index b9a55322e52c..8193906515c6 100644 --- a/mm/hugetlb_vmemmap.c +++ b/mm/hugetlb_vmemmap.c @@ -446,6 +446,8 @@ static int __hugetlb_vmemmap_restore_folio(const struct hstate *h, unsigned long vmemmap_reuse; VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio); + VM_WARN_ON_ONCE_FOLIO(folio_ref_count(folio), folio); + if (!folio_test_hugetlb_vmemmap_optimized(folio)) return 0; @@ -481,6 +483,9 @@ static int __hugetlb_vmemmap_restore_folio(const struct hstate *h, */ int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio) { + /* avoid writes from page_ref_add_unless() while unfolding vmemmap */ + synchronize_rcu(); + return __hugetlb_vmemmap_restore_folio(h, folio, 0); } @@ -505,6 +510,9 @@ long hugetlb_vmemmap_restore_folios(const struct hstate *h, long restored = 0; long ret = 0; + /* avoid writes from page_ref_add_unless() while unfolding vmemmap */ + synchronize_rcu(); + list_for_each_entry_safe(folio, t_folio, folio_list, lru) { if (folio_test_hugetlb_vmemmap_optimized(folio)) { ret = __hugetlb_vmemmap_restore_folio(h, folio, @@ -550,6 +558,8 @@ static int __hugetlb_vmemmap_optimize_folio(const struct hstate *h, unsigned long vmemmap_reuse; VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio); + VM_WARN_ON_ONCE_FOLIO(folio_ref_count(folio), folio); + if (!vmemmap_should_optimize_folio(h, folio)) return 
ret; @@ -601,6 +611,9 @@ void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio) { LIST_HEAD(vmemmap_pages); + /* avoid writes from page_ref_add_unless() while folding vmemmap */ + synchronize_rcu(); + __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, 0); free_vmemmap_page_list(&vmemmap_pages); } @@ -644,6 +657,9 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l flush_tlb_all(); + /* avoid writes from page_ref_add_unless() while folding vmemmap */ + synchronize_rcu(); + list_for_each_entry(folio, folio_list, lru) { int ret; diff --git a/mm/internal.h b/mm/internal.h index 6902b7dd8509..cc2c5e07fad3 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1182,8 +1182,8 @@ int migrate_device_coherent_page(struct page *page); /* * mm/gup.c */ -struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags); -int __must_check try_grab_page(struct page *page, unsigned int flags); +int __must_check try_grab_folio(struct folio *folio, int refs, + unsigned int flags); /* * mm/huge_memory.c diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 71fe2a95b8bd..8f2f1bb18c9c 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -7823,17 +7823,6 @@ void mem_cgroup_migrate(struct folio *old, struct folio *new) /* Transfer the charge and the css ref */ commit_charge(new, memcg); - /* - * If the old folio is a large folio and is in the split queue, it needs - * to be removed from the split queue now, in case getting an incorrect - * split queue in destroy_large_folio() after the memcg of the old folio - * is cleared. - * - * In addition, the old folio is about to be freed after migration, so - * removing from the split queue a bit earlier seems reasonable. - */ - if (folio_test_large(old) && folio_test_large_rmappable(old)) - folio_undo_large_rmappable(old); old->memcg_data = 0; } diff --git a/mm/migrate.c b/mm/migrate.c index 20cb9f5f7446..a8c6f466e33a 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -415,6 +415,15 @@ int folio_migrate_mapping(struct address_space *mapping, if (folio_ref_count(folio) != expected_count) return -EAGAIN; + /* Take off deferred split queue while frozen and memcg set */ + if (folio_test_large(folio) && + folio_test_large_rmappable(folio)) { + if (!folio_ref_freeze(folio, expected_count)) + return -EAGAIN; + folio_undo_large_rmappable(folio); + folio_ref_unfreeze(folio, expected_count); + } + /* No turning back from here */ newfolio->index = folio->index; newfolio->mapping = folio->mapping; @@ -433,6 +442,10 @@ int folio_migrate_mapping(struct address_space *mapping, return -EAGAIN; } + /* Take off deferred split queue while frozen and memcg set */ + if (folio_test_large(folio) && folio_test_large_rmappable(folio)) + folio_undo_large_rmappable(folio); + /* * Now we know that no one else is looking at the folio: * no turning back from here. 
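The mm/gup.c rework above settles on two pin-accounting schemes: try_grab_folio() and try_grab_folio_fast() encode pins on an order-0 folio directly in its refcount via GUP_PIN_COUNTING_BIAS, while a large folio takes one plain reference per pin and keeps the exact total in _pincount. The userspace sketch below models that bookkeeping; it is illustrative only, and the toy_folio type and toy_* helpers are invented for the example, not kernel API.

#include <stdio.h>
#include <stdbool.h>

#define GUP_PIN_COUNTING_BIAS 1024	/* same value the kernel uses */

struct toy_folio {
	int refcount;	/* models folio_ref_count() */
	int pincount;	/* models folio->_pincount, large folios only */
	bool large;
};

/* models the FOLL_PIN leg of try_grab_folio() */
static void toy_pin(struct toy_folio *f, int refs)
{
	if (f->large) {
		f->refcount += refs;	/* one plain reference per pin */
		f->pincount += refs;	/* exact pin count */
	} else {
		f->refcount += refs * GUP_PIN_COUNTING_BIAS;
	}
}

/* models the heuristic behind folio_maybe_dma_pinned() */
static bool toy_maybe_pinned(const struct toy_folio *f)
{
	if (f->large)
		return f->pincount > 0;
	return f->refcount >= GUP_PIN_COUNTING_BIAS;
}

int main(void)
{
	struct toy_folio small = { .refcount = 1, .pincount = 0, .large = false };
	struct toy_folio big = { .refcount = 1, .pincount = 0, .large = true };

	toy_pin(&small, 1);
	toy_pin(&big, 3);
	printf("small: ref=%d maybe_pinned=%d\n",
	       small.refcount, toy_maybe_pinned(&small));
	printf("big: ref=%d pin=%d maybe_pinned=%d\n",
	       big.refcount, big.pincount, toy_maybe_pinned(&big));
	return 0;
}

The bias scheme avoids a second counter on order-0 folios at the price of false positives once a page gathers on the order of GUP_PIN_COUNTING_BIAS ordinary references; large folios have room for an exact counter, which is what the hunks above increment by @refs.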
diff --git a/mm/readahead.c b/mm/readahead.c index c1b23989d9ca..817b2a352d78 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -503,11 +503,11 @@ void page_cache_ra_order(struct readahead_control *ractl, limit = min(limit, index + ra->size - 1); - if (new_order < MAX_PAGECACHE_ORDER) { + if (new_order < MAX_PAGECACHE_ORDER) new_order += 2; - new_order = min_t(unsigned int, MAX_PAGECACHE_ORDER, new_order); - new_order = min_t(unsigned int, new_order, ilog2(ra->size)); - } + + new_order = min_t(unsigned int, MAX_PAGECACHE_ORDER, new_order); + new_order = min_t(unsigned int, new_order, ilog2(ra->size)); /* See comment in page_cache_ra_unbounded() */ nofs = memalloc_nofs_save(); diff --git a/mm/shmem.c b/mm/shmem.c index a8b181a63402..c1befe046c7e 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -541,8 +541,9 @@ static bool shmem_confirm_swap(struct address_space *mapping, static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER; -bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force, - struct mm_struct *mm, unsigned long vm_flags) +static bool __shmem_is_huge(struct inode *inode, pgoff_t index, + bool shmem_huge_force, struct mm_struct *mm, + unsigned long vm_flags) { loff_t i_size; @@ -573,6 +574,16 @@ bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force, } } +bool shmem_is_huge(struct inode *inode, pgoff_t index, + bool shmem_huge_force, struct mm_struct *mm, + unsigned long vm_flags) +{ + if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER) + return false; + + return __shmem_is_huge(inode, index, shmem_huge_force, mm, vm_flags); +} + #if defined(CONFIG_SYSFS) static int shmem_parse_huge(const char *str) { diff --git a/mm/vmalloc.c b/mm/vmalloc.c index d0cbdd7c1e5b..e34ea860153f 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2543,7 +2543,15 @@ static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue); static struct xarray * addr_to_vb_xa(unsigned long addr) { - int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus(); + int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids; + + /* + * Note that nr_cpu_ids is one more than the highest set bit + * in cpu_possible_mask: cpu_possible(nr_cpu_ids - 1) always + * holds, so we never invoke cpumask_next() for that index. + */ + if (!cpu_possible(index)) + index = cpumask_next(index, cpu_possible_mask); return &per_cpu(vmap_block_queue, index).vmap_blocks; } diff --git a/mm/workingset.c b/mm/workingset.c index c22adb93622a..a2b28e356e68 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -412,10 +412,12 @@ void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg) * @file: whether the corresponding folio is from the file lru. * @workingset: where the workingset value unpacked from shadow should * be stored. + * @flush: whether to flush cgroup rstat. * * Return: true if the shadow is for a recently evicted folio; false otherwise. */ -bool workingset_test_recent(void *shadow, bool file, bool *workingset) +bool workingset_test_recent(void *shadow, bool file, bool *workingset, + bool flush) { struct mem_cgroup *eviction_memcg; struct lruvec *eviction_lruvec; @@ -467,10 +469,16 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset) /* * Flush stats (and potentially sleep) outside the RCU read section. + * + * Note that workingset_test_recent() itself might be called in an RCU read + * section (e.g., in cachestat) - these callers need to skip flushing + * stats (via the flush argument). + * * XXX: With per-memcg flushing and thresholding, is ratelimiting * still needed here?
*/ - mem_cgroup_flush_stats_ratelimited(eviction_memcg); + if (flush) + mem_cgroup_flush_stats_ratelimited(eviction_memcg); eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat); refault = atomic_long_read(&eviction_lruvec->nonresident_age); @@ -558,7 +566,7 @@ void workingset_refault(struct folio *folio, void *shadow) mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr); - if (!workingset_test_recent(shadow, file, &workingset)) + if (!workingset_test_recent(shadow, file, &workingset, true)) return; folio_set_active(folio); diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c index 1daf95e17d67..3a5bd1cd1e99 100644 --- a/net/ceph/crush/mapper.c +++ b/net/ceph/crush/mapper.c @@ -429,7 +429,10 @@ static int is_out(const struct crush_map *map, /** * crush_choose_firstn - choose numrep distinct items of given type * @map: the crush_map + * @work: working space initialized by crush_init_workspace() * @bucket: the bucket we are choose an item from + * @weight: weight vector (for map leaves) + * @weight_max: size of weight vector * @x: crush input value * @numrep: the number of items to choose * @type: the type of item to choose @@ -445,6 +448,7 @@ static int is_out(const struct crush_map *map, * @vary_r: pass r to recursive calls * @out2: second output vector for leaf items (if @recurse_to_leaf) * @parent_r: r value passed from the parent + * @choose_args: weights and ids for each known bucket */ static int crush_choose_firstn(const struct crush_map *map, struct crush_work *work, @@ -636,9 +640,8 @@ reject: } -/** +/* * crush_choose_indep: alternative breadth-first positionally stable mapping - * */ static void crush_choose_indep(const struct crush_map *map, struct crush_work *work, diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index f263f7e91a21..ab66b599ac47 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c @@ -1085,13 +1085,19 @@ static void delayed_work(struct work_struct *work) struct ceph_mon_client *monc = container_of(work, struct ceph_mon_client, delayed_work.work); - dout("monc delayed_work\n"); mutex_lock(&monc->mutex); + dout("%s mon%d\n", __func__, monc->cur_mon); + if (monc->cur_mon < 0) { + goto out; + } + if (monc->hunting) { dout("%s continuing hunt\n", __func__); reopen_session(monc); } else { int is_auth = ceph_auth_is_authenticated(monc->auth); + + dout("%s is_authed %d\n", __func__, is_auth); if (ceph_con_keepalive_expired(&monc->con, CEPH_MONC_PING_TIMEOUT)) { dout("monc keepalive timeout\n"); @@ -1116,6 +1122,8 @@ static void delayed_work(struct work_struct *work) } } __schedule_delayed(monc); + +out: mutex_unlock(&monc->mutex); } @@ -1232,13 +1240,15 @@ EXPORT_SYMBOL(ceph_monc_init); void ceph_monc_stop(struct ceph_mon_client *monc) { dout("stop\n"); - cancel_delayed_work_sync(&monc->delayed_work); mutex_lock(&monc->mutex); __close_session(monc); + monc->hunting = false; monc->cur_mon = -1; mutex_unlock(&monc->mutex); + cancel_delayed_work_sync(&monc->delayed_work); + /* * flush msgr queue before we destroy ourselves to ensure that: * - any work that references our embedded con is finished. 
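The first net/core/datagram.c hunk below fixes an accumulation bug in __skb_datagram_iter(): the copy callback runs once per page of a fragment, and assigning its return value to n on each pass kept only the last page's byte count. A standalone sketch of the bug class follows, with invented names and figures, not kernel code:

#include <stdio.h>
#include <stddef.h>

/* stand-in for the per-page copy callback; pretend every byte is copied */
static size_t copy_chunk(size_t len)
{
	return len;
}

int main(void)
{
	size_t chunks[] = { 4096, 4096, 512 };	/* one fragment spanning three pages */
	size_t assigned = 0, accumulated = 0;
	size_t i;

	for (i = 0; i < sizeof(chunks) / sizeof(chunks[0]); i++) {
		assigned = copy_chunk(chunks[i]);	/* buggy: overwrites the total */
		accumulated += copy_chunk(chunks[i]);	/* fixed: n = 0 before, n += inside */
	}
	printf("assigned=%zu accumulated=%zu\n", assigned, accumulated);	/* 512 vs 8704 */
	return 0;
}

With a single page per fragment the two forms agree, which is why the bug only shows up for skbs whose fragments span multiple pages.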
diff --git a/net/core/datagram.c b/net/core/datagram.c index b2f391edb9e8..a40f733b37d7 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -423,11 +423,12 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset, if (copy > len) copy = len; + n = 0; skb_frag_foreach_page(frag, skb_frag_off(frag) + offset - start, copy, p, p_off, p_len, copied) { vaddr = kmap_local_page(p); - n = INDIRECT_CALL_1(cb, simple_copy_to_iter, + n += INDIRECT_CALL_1(cb, simple_copy_to_iter, vaddr + p_off, p_len, data, to); kunmap_local(vaddr); } diff --git a/net/core/dev.c b/net/core/dev.c index 73e5af6943c3..6ea1d20676fb 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -11006,13 +11006,6 @@ void netdev_sw_irq_coalesce_default_on(struct net_device *dev) } EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on); -void netdev_freemem(struct net_device *dev) -{ - char *addr = (char *)dev - dev->padded; - - kvfree(addr); -} - /** * alloc_netdev_mqs - allocate network device * @sizeof_priv: size of private data to allocate space for @@ -11032,8 +11025,6 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, unsigned int txqs, unsigned int rxqs) { struct net_device *dev; - unsigned int alloc_size; - struct net_device *p; BUG_ON(strlen(name) >= sizeof(dev->name)); @@ -11047,21 +11038,12 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, return NULL; } - alloc_size = sizeof(struct net_device); - if (sizeof_priv) { - /* ensure 32-byte alignment of private area */ - alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); - alloc_size += sizeof_priv; - } - /* ensure 32-byte alignment of whole construct */ - alloc_size += NETDEV_ALIGN - 1; - - p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); - if (!p) + dev = kvzalloc(struct_size(dev, priv, sizeof_priv), + GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); + if (!dev) return NULL; - dev = PTR_ALIGN(p, NETDEV_ALIGN); - dev->padded = (char *)dev - (char *)p; + dev->priv_len = sizeof_priv; ref_tracker_dir_init(&dev->refcnt_tracker, 128, name); #ifdef CONFIG_PCPU_DEV_REFCNT @@ -11148,7 +11130,7 @@ free_pcpu: free_percpu(dev->pcpu_refcnt); free_dev: #endif - netdev_freemem(dev); + kvfree(dev); return NULL; } EXPORT_SYMBOL(alloc_netdev_mqs); @@ -11203,7 +11185,7 @@ void free_netdev(struct net_device *dev) /* Compatibility with error handling in drivers */ if (dev->reg_state == NETREG_UNINITIALIZED || dev->reg_state == NETREG_DUMMY) { - netdev_freemem(dev); + kvfree(dev); return; } diff --git a/net/core/filter.c b/net/core/filter.c index d767880c276d..4cf1d34f7617 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -11053,7 +11053,6 @@ const struct bpf_verifier_ops lwt_seg6local_verifier_ops = { }; const struct bpf_prog_ops lwt_seg6local_prog_ops = { - .test_run = bpf_prog_test_run_skb, }; const struct bpf_verifier_ops cg_sock_verifier_ops = { diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 4c27a360c294..0e2084ce7b75 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -2028,7 +2028,7 @@ static void netdev_release(struct device *d) * device is dead and about to be freed. 
*/ kfree(rcu_access_pointer(dev->ifalias)); - netdev_freemem(dev); + kvfree(dev); } static const void *net_namespace(const struct device *d) diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 855271a6cad2..2abe6e919224 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -178,7 +178,8 @@ static void page_pool_struct_check(void) CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_users); CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_page); CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_offset); - CACHELINE_ASSERT_GROUP_SIZE(struct page_pool, frag, 4 * sizeof(long)); + CACHELINE_ASSERT_GROUP_SIZE(struct page_pool, frag, + PAGE_POOL_FRAG_GROUP_ALIGN); } static int page_pool_init(struct page_pool *pool, diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index eabfc8290f5e..87e67194f240 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -3969,22 +3969,28 @@ static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack); } -static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) +static noinline_for_stack u32 rtnl_calcit(struct sk_buff *skb, + struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); size_t min_ifinfo_dump_size = 0; - struct nlattr *tb[IFLA_MAX+1]; u32 ext_filter_mask = 0; struct net_device *dev; - int hdrlen; + struct nlattr *nla; + int hdrlen, rem; /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */ hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); - if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) { - if (tb[IFLA_EXT_MASK]) - ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); + if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) + return NLMSG_GOODSIZE; + + nla_for_each_attr_type(nla, IFLA_EXT_MASK, + nlmsg_attrdata(nlh, hdrlen), + nlmsg_attrlen(nlh, hdrlen), rem) { + if (nla_len(nla) == sizeof(u32)) + ext_filter_mask = nla_get_u32(nla); } if (!ext_filter_mask) diff --git a/net/core/skmsg.c b/net/core/skmsg.c index fd20aae30be2..bbf40b999713 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -434,7 +434,8 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg, page = sg_page(sge); if (copied + copy > len) copy = len - copied; - copy = copy_page_to_iter(page, sge->offset, copy, iter); + if (copy) + copy = copy_page_to_iter(page, sge->offset, copy, iter); if (!copy) { copied = copied ? 
copied : -EFAULT; goto out; diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c index 7b4bbd674bae..cee188da54f8 100644 --- a/net/ethtool/channels.c +++ b/net/ethtool/channels.c @@ -171,11 +171,9 @@ ethnl_set_channels(struct ethnl_req_info *req_info, struct genl_info *info) */ if (ethtool_get_max_rxnfc_channel(dev, &max_rxnfc_in_use)) max_rxnfc_in_use = 0; - if (!netif_is_rxfh_configured(dev) || - ethtool_get_max_rxfh_channel(dev, &max_rxfh_in_use)) - max_rxfh_in_use = 0; + max_rxfh_in_use = ethtool_get_max_rxfh_channel(dev); if (channels.combined_count + channels.rx_count <= max_rxfh_in_use) { - GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing indirection table settings"); + GENL_SET_ERR_MSG_FMT(info, "requested channel counts are too low for existing indirection table (%d)", max_rxfh_in_use); return -EINVAL; } if (channels.combined_count + channels.rx_count <= max_rxnfc_in_use) { diff --git a/net/ethtool/common.c b/net/ethtool/common.c index 6b2a360dcdf0..67d06cd002a5 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -587,35 +587,64 @@ err_free_info: return err; } -int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max) +static u32 ethtool_get_max_rss_ctx_channel(struct net_device *dev) +{ + struct ethtool_rxfh_context *ctx; + unsigned long context; + u32 max_ring = 0; + + mutex_lock(&dev->ethtool->rss_lock); + xa_for_each(&dev->ethtool->rss_ctx, context, ctx) { + u32 i, *tbl; + + tbl = ethtool_rxfh_context_indir(ctx); + for (i = 0; i < ctx->indir_size; i++) + max_ring = max(max_ring, tbl[i]); + } + mutex_unlock(&dev->ethtool->rss_lock); + + return max_ring; +} + +u32 ethtool_get_max_rxfh_channel(struct net_device *dev) { struct ethtool_rxfh_param rxfh = {}; - u32 dev_size, current_max = 0; + u32 dev_size, current_max; int ret; + /* While we do track whether RSS context has an indirection + * table explicitly set by the user, no driver looks at that bit. + * Assume drivers won't auto-regenerate the additional tables, + * to be safe. 
+ */ + current_max = ethtool_get_max_rss_ctx_channel(dev); + + if (!netif_is_rxfh_configured(dev)) + return current_max; + if (!dev->ethtool_ops->get_rxfh_indir_size || !dev->ethtool_ops->get_rxfh) - return -EOPNOTSUPP; + return current_max; dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev); if (dev_size == 0) - return -EOPNOTSUPP; + return current_max; rxfh.indir = kcalloc(dev_size, sizeof(rxfh.indir[0]), GFP_USER); if (!rxfh.indir) - return -ENOMEM; + return U32_MAX; ret = dev->ethtool_ops->get_rxfh(dev, &rxfh); - if (ret) - goto out; + if (ret) { + current_max = U32_MAX; + goto out_free; + } while (dev_size--) current_max = max(current_max, rxfh.indir[dev_size]); - *max = current_max; - -out: +out_free: kfree(rxfh.indir); - return ret; + return current_max; } int ethtool_check_ops(const struct ethtool_ops *ops) @@ -712,3 +741,17 @@ ethtool_forced_speed_maps_init(struct ethtool_forced_speed_map *maps, u32 size) } } EXPORT_SYMBOL_GPL(ethtool_forced_speed_maps_init); + +void ethtool_rxfh_context_lost(struct net_device *dev, u32 context_id) +{ + struct ethtool_rxfh_context *ctx; + + WARN_ONCE(!rtnl_is_locked() && + !lockdep_is_held_type(&dev->ethtool->rss_lock, -1), + "RSS context lock assertion failed\n"); + + netdev_err(dev, "device error, RSS context %d lost\n", context_id); + ctx = xa_erase(&dev->ethtool->rss_ctx, context_id); + kfree(ctx); +} +EXPORT_SYMBOL(ethtool_rxfh_context_lost); diff --git a/net/ethtool/common.h b/net/ethtool/common.h index 28b8aaaf9bcb..b55705a9ad5a 100644 --- a/net/ethtool/common.h +++ b/net/ethtool/common.h @@ -42,7 +42,7 @@ int __ethtool_get_link(struct net_device *dev); bool convert_legacy_settings_to_link_ksettings( struct ethtool_link_ksettings *link_ksettings, const struct ethtool_cmd *legacy_settings); -int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max); +u32 ethtool_get_max_rxfh_channel(struct net_device *dev); int ethtool_get_max_rxnfc_channel(struct net_device *dev, u64 *max); int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info); diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index d72b0fec89af..5ff128eacc9b 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -1290,6 +1290,40 @@ out: return ret; } +static struct ethtool_rxfh_context * +ethtool_rxfh_ctx_alloc(const struct ethtool_ops *ops, + u32 indir_size, u32 key_size) +{ + size_t indir_bytes, flex_len, key_off, size; + struct ethtool_rxfh_context *ctx; + u32 priv_bytes, indir_max; + u16 key_max; + + key_max = max(key_size, ops->rxfh_key_space); + indir_max = max(indir_size, ops->rxfh_indir_space); + + priv_bytes = ALIGN(ops->rxfh_priv_size, sizeof(u32)); + indir_bytes = array_size(indir_max, sizeof(u32)); + + key_off = size_add(priv_bytes, indir_bytes); + flex_len = size_add(key_off, key_max); + size = struct_size_t(struct ethtool_rxfh_context, data, flex_len); + + ctx = kzalloc(size, GFP_KERNEL_ACCOUNT); + if (!ctx) + return NULL; + + ctx->indir_size = indir_size; + ctx->key_size = key_size; + ctx->key_off = key_off; + ctx->priv_size = ops->rxfh_priv_size; + + ctx->hfunc = ETH_RSS_HASH_NO_CHANGE; + ctx->input_xfrm = RXH_XFRM_NO_CHANGE; + + return ctx; +} + static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, void __user *useraddr) { @@ -1328,7 +1362,8 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, if (rxfh.input_xfrm && rxfh.input_xfrm != RXH_XFRM_SYM_XOR && rxfh.input_xfrm != RXH_XFRM_NO_CHANGE) return -EINVAL; - if ((rxfh.input_xfrm & RXH_XFRM_SYM_XOR) && + if (rxfh.input_xfrm != 
RXH_XFRM_NO_CHANGE && + (rxfh.input_xfrm & RXH_XFRM_SYM_XOR) && !ops->cap_rss_sym_xor_supported) return -EOPNOTSUPP; create = rxfh.rss_context == ETH_RXFH_CONTEXT_ALLOC; @@ -1406,20 +1441,12 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, ret = -EINVAL; goto out; } - ctx = kzalloc(ethtool_rxfh_context_size(dev_indir_size, - dev_key_size, - ops->rxfh_priv_size), - GFP_KERNEL_ACCOUNT); + ctx = ethtool_rxfh_ctx_alloc(ops, dev_indir_size, dev_key_size); if (!ctx) { ret = -ENOMEM; goto out; } - ctx->indir_size = dev_indir_size; - ctx->key_size = dev_key_size; - ctx->priv_size = ops->rxfh_priv_size; - /* Initialise to an empty context */ - ctx->hfunc = ETH_RSS_HASH_NO_CHANGE; - ctx->input_xfrm = RXH_XFRM_NO_CHANGE; + if (ops->create_rxfh_context) { u32 limit = ops->rxfh_max_context_id ?: U32_MAX; u32 ctx_id; @@ -2049,9 +2076,7 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev, * indirection table/rxnfc settings */ if (ethtool_get_max_rxnfc_channel(dev, &max_rxnfc_in_use)) max_rxnfc_in_use = 0; - if (!netif_is_rxfh_configured(dev) || - ethtool_get_max_rxfh_channel(dev, &max_rxfh_in_use)) - max_rxfh_in_use = 0; + max_rxfh_in_use = ethtool_get_max_rxfh_channel(dev); if (channels.combined_count + channels.rx_count <= max_t(u64, max_rxnfc_in_use, max_rxfh_in_use)) return -EINVAL; diff --git a/net/ethtool/linkstate.c b/net/ethtool/linkstate.c index b2de2108b356..34d76e87847d 100644 --- a/net/ethtool/linkstate.c +++ b/net/ethtool/linkstate.c @@ -37,6 +37,8 @@ static int linkstate_get_sqi(struct net_device *dev) mutex_lock(&phydev->lock); if (!phydev->drv || !phydev->drv->get_sqi) ret = -EOPNOTSUPP; + else if (!phydev->link) + ret = -ENETDOWN; else ret = phydev->drv->get_sqi(phydev); mutex_unlock(&phydev->lock); @@ -55,6 +57,8 @@ static int linkstate_get_sqi_max(struct net_device *dev) mutex_lock(&phydev->lock); if (!phydev->drv || !phydev->drv->get_sqi_max) ret = -EOPNOTSUPP; + else if (!phydev->link) + ret = -ENETDOWN; else ret = phydev->drv->get_sqi_max(phydev); mutex_unlock(&phydev->lock); @@ -62,6 +66,17 @@ static int linkstate_get_sqi_max(struct net_device *dev) return ret; }; +static bool linkstate_sqi_critical_error(int sqi) +{ + return sqi < 0 && sqi != -EOPNOTSUPP && sqi != -ENETDOWN; +} + +static bool linkstate_sqi_valid(struct linkstate_reply_data *data) +{ + return data->sqi >= 0 && data->sqi_max >= 0 && + data->sqi <= data->sqi_max; +} + static int linkstate_get_link_ext_state(struct net_device *dev, struct linkstate_reply_data *data) { @@ -93,12 +108,12 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base, data->link = __ethtool_get_link(dev); ret = linkstate_get_sqi(dev); - if (ret < 0 && ret != -EOPNOTSUPP) + if (linkstate_sqi_critical_error(ret)) goto out; data->sqi = ret; ret = linkstate_get_sqi_max(dev); - if (ret < 0 && ret != -EOPNOTSUPP) + if (linkstate_sqi_critical_error(ret)) goto out; data->sqi_max = ret; @@ -136,11 +151,10 @@ static int linkstate_reply_size(const struct ethnl_req_info *req_base, len = nla_total_size(sizeof(u8)) /* LINKSTATE_LINK */ + 0; - if (data->sqi != -EOPNOTSUPP) - len += nla_total_size(sizeof(u32)); - - if (data->sqi_max != -EOPNOTSUPP) - len += nla_total_size(sizeof(u32)); + if (linkstate_sqi_valid(data)) { + len += nla_total_size(sizeof(u32)); /* LINKSTATE_SQI */ + len += nla_total_size(sizeof(u32)); /* LINKSTATE_SQI_MAX */ + } if (data->link_ext_state_provided) len += nla_total_size(sizeof(u8)); /* LINKSTATE_EXT_STATE */ @@ -164,13 +178,14 @@ static int 
linkstate_fill_reply(struct sk_buff *skb, nla_put_u8(skb, ETHTOOL_A_LINKSTATE_LINK, !!data->link)) return -EMSGSIZE; - if (data->sqi != -EOPNOTSUPP && - nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI, data->sqi)) - return -EMSGSIZE; + if (linkstate_sqi_valid(data)) { + if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI, data->sqi)) + return -EMSGSIZE; - if (data->sqi_max != -EOPNOTSUPP && - nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI_MAX, data->sqi_max)) - return -EMSGSIZE; + if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI_MAX, + data->sqi_max)) + return -EMSGSIZE; + } if (data->link_ext_state_provided) { if (nla_put_u8(skb, ETHTOOL_A_LINKSTATE_EXT_STATE, diff --git a/net/ethtool/module.c b/net/ethtool/module.c index aba78436d350..6988e07bdcd6 100644 --- a/net/ethtool/module.c +++ b/net/ethtool/module.c @@ -488,7 +488,7 @@ ethnl_module_fw_flash_ntf(struct net_device *dev, if (!skb) return; - hdr = ethnl_unicast_put(skb, ntf_params->portid, ntf_params->seq, + hdr = ethnl_unicast_put(skb, ntf_params->portid, ++ntf_params->seq, ETHTOOL_MSG_MODULE_FW_FLASH_NTF); if (!hdr) goto err_skb; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 47dacb575f74..ff9ab3d01ced 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2129,8 +2129,16 @@ void tcp_clear_retrans(struct tcp_sock *tp) static inline void tcp_init_undo(struct tcp_sock *tp) { tp->undo_marker = tp->snd_una; + /* Retransmission still in flight may cause DSACKs later. */ - tp->undo_retrans = tp->retrans_out ? : -1; + /* First, account for regular retransmits in flight: */ + tp->undo_retrans = tp->retrans_out; + /* Next, account for TLP retransmits in flight: */ + if (tp->tlp_high_seq && tp->tlp_retrans) + tp->undo_retrans++; + /* Finally, avoid 0, because undo_retrans==0 means "can undo now": */ + if (!tp->undo_retrans) + tp->undo_retrans = -1; } static bool tcp_is_rack(const struct sock *sk) @@ -2209,6 +2217,7 @@ void tcp_enter_loss(struct sock *sk) tcp_set_ca_state(sk, TCP_CA_Loss); tp->high_seq = tp->snd_nxt; + tp->tlp_high_seq = 0; tcp_ecn_queue_cwr(tp); /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous @@ -5989,6 +5998,11 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, * RFC 5961 4.2 : Send a challenge ack */ if (th->syn) { + if (sk->sk_state == TCP_SYN_RECV && sk->sk_socket && th->ack && + TCP_SKB_CB(skb)->seq + 1 == TCP_SKB_CB(skb)->end_seq && + TCP_SKB_CB(skb)->seq + 1 == tp->rcv_nxt && + TCP_SKB_CB(skb)->ack_seq == tp->snd_nxt) + goto pass; syn_challenge: if (syn_inerr) TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); @@ -5998,6 +6012,7 @@ syn_challenge: goto discard; } +pass: bpf_skops_parse_hdr(sk, skb); return true; @@ -6804,6 +6819,9 @@ tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) tcp_fast_path_on(tp); if (sk->sk_shutdown & SEND_SHUTDOWN) tcp_shutdown(sk, SEND_SHUTDOWN); + + if (sk->sk_socket) + goto consume; break; case TCP_FIN_WAIT1: { diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index ab4b6de0e069..4d40615dc8fc 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -479,15 +479,26 @@ static bool tcp_rtx_probe0_timed_out(const struct sock *sk, const struct sk_buff *skb, u32 rtx_delta) { + const struct inet_connection_sock *icsk = inet_csk(sk); + u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout); const struct tcp_sock *tp = tcp_sk(sk); - const int timeout = TCP_RTO_MAX * 2; + int timeout = TCP_RTO_MAX * 2; s32 rcv_delta; + if (user_timeout) { + /* If user application specified a TCP_USER_TIMEOUT, + * it does not want win 0 packets to 'reset 
the timer' + * while retransmits are not making progress. + */ + if (rtx_delta > user_timeout) + return true; + timeout = min_t(u32, timeout, msecs_to_jiffies(user_timeout)); + } /* Note: timer interrupt might have been delayed by at least one jiffy, * and tp->rcv_tstamp might very well have been written recently. * rcv_delta can thus be negative. */ - rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp; + rcv_delta = icsk->icsk_timeout - tp->rcv_tstamp; if (rcv_delta <= timeout) return false; @@ -532,8 +543,6 @@ void tcp_retransmit_timer(struct sock *sk) if (WARN_ON_ONCE(!skb)) return; - tp->tlp_high_seq = 0; - if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) && !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) { /* Receiver dastardly shrinks window. Our retransmits diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index ed97df6af14d..49c622e743e8 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -326,6 +326,8 @@ found: goto fail_unlock; } + sock_set_flag(sk, SOCK_RCU_FREE); + sk_add_node_rcu(sk, &hslot->head); hslot->count++; sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); @@ -342,7 +344,7 @@ found: hslot2->count++; spin_unlock(&hslot2->lock); } - sock_set_flag(sk, SOCK_RCU_FREE); + error = 0; fail_unlock: spin_unlock_bh(&hslot->lock); diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 29dfbd70c79c..1c1decce7f06 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -436,6 +436,7 @@ int l2tp_session_register(struct l2tp_session *session, struct l2tp_tunnel *tunnel) { struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); + struct l2tp_session *other_session = NULL; u32 session_key; int err; @@ -456,11 +457,10 @@ int l2tp_session_register(struct l2tp_session *session, * support existing userspace which depends on it. */ if (err == -ENOSPC && tunnel->encap == L2TP_ENCAPTYPE_UDP) { - struct l2tp_session *session2; - - session2 = idr_find(&pn->l2tp_v3_session_idr, - session_key); - err = l2tp_session_collision_add(pn, session, session2); + other_session = idr_find(&pn->l2tp_v3_session_idr, + session_key); + err = l2tp_session_collision_add(pn, session, + other_session); } spin_unlock_bh(&pn->l2tp_session_idr_lock); } else { @@ -484,10 +484,12 @@ int l2tp_session_register(struct l2tp_session *session, spin_unlock_bh(&tunnel->list_lock); spin_lock_bh(&pn->l2tp_session_idr_lock); - if (tunnel->version == L2TP_HDR_VER_3) - idr_replace(&pn->l2tp_v3_session_idr, session, session_key); - else + if (tunnel->version == L2TP_HDR_VER_3) { + if (!other_session) + idr_replace(&pn->l2tp_v3_session_idr, session, session_key); + } else { idr_replace(&pn->l2tp_v2_session_idr, session, session_key); + } spin_unlock_bh(&pn->l2tp_session_idr_lock); trace_register_session(session); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 7eb2e5bedb6f..85cb71de370f 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -263,7 +263,7 @@ static int ieee80211_start_p2p_device(struct wiphy *wiphy, lockdep_assert_wiphy(sdata->local->hw.wiphy); - ret = ieee80211_check_combinations(sdata, NULL, 0, 0); + ret = ieee80211_check_combinations(sdata, NULL, 0, 0, -1); if (ret < 0) return ret; @@ -285,7 +285,7 @@ static int ieee80211_start_nan(struct wiphy *wiphy, lockdep_assert_wiphy(sdata->local->hw.wiphy); - ret = ieee80211_check_combinations(sdata, NULL, 0, 0); + ret = ieee80211_check_combinations(sdata, NULL, 0, 0, -1); if (ret < 0) return ret; @@ -742,9 +742,6 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, break; } - params.key = key->conf.key; - 
params.key_len = key->conf.keylen; - callback(cookie, ¶ms); err = 0; @@ -4011,7 +4008,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, goto out; /* if reservation is invalid then this will fail */ - err = ieee80211_check_combinations(sdata, NULL, chanctx->mode, 0); + err = ieee80211_check_combinations(sdata, NULL, chanctx->mode, 0, -1); if (err) { ieee80211_link_unreserve_chanctx(link_data); goto out; @@ -5206,4 +5203,5 @@ const struct cfg80211_ops mac80211_config_ops = { .del_link_station = ieee80211_del_link_station, .set_hw_timestamp = ieee80211_set_hw_timestamp, .set_ttlm = ieee80211_set_ttlm, + .get_radio_mask = ieee80211_get_radio_mask, }; diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 06a65dc6f6c6..e8567723e94d 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -47,24 +47,29 @@ int ieee80211_chanctx_refcount(struct ieee80211_local *local, ieee80211_chanctx_num_reserved(local, ctx); } -static int ieee80211_num_chanctx(struct ieee80211_local *local) +static int ieee80211_num_chanctx(struct ieee80211_local *local, int radio_idx) { struct ieee80211_chanctx *ctx; int num = 0; lockdep_assert_wiphy(local->hw.wiphy); - list_for_each_entry(ctx, &local->chanctx_list, list) + list_for_each_entry(ctx, &local->chanctx_list, list) { + if (radio_idx >= 0 && ctx->conf.radio_idx != radio_idx) + continue; num++; + } return num; } -static bool ieee80211_can_create_new_chanctx(struct ieee80211_local *local) +static bool ieee80211_can_create_new_chanctx(struct ieee80211_local *local, + int radio_idx) { lockdep_assert_wiphy(local->hw.wiphy); - return ieee80211_num_chanctx(local) < ieee80211_max_num_channels(local); + return ieee80211_num_chanctx(local, radio_idx) < + ieee80211_max_num_channels(local, radio_idx); } static struct ieee80211_chanctx * @@ -656,7 +661,8 @@ ieee80211_chanctx_radar_required(struct ieee80211_local *local, static struct ieee80211_chanctx * ieee80211_alloc_chanctx(struct ieee80211_local *local, const struct ieee80211_chan_req *chanreq, - enum ieee80211_chanctx_mode mode) + enum ieee80211_chanctx_mode mode, + int radio_idx) { struct ieee80211_chanctx *ctx; @@ -674,6 +680,7 @@ ieee80211_alloc_chanctx(struct ieee80211_local *local, ctx->conf.rx_chains_dynamic = 1; ctx->mode = mode; ctx->conf.radar_enabled = false; + ctx->conf.radio_idx = radio_idx; _ieee80211_recalc_chanctx_min_def(local, ctx, NULL, false); return ctx; @@ -707,14 +714,15 @@ static struct ieee80211_chanctx * ieee80211_new_chanctx(struct ieee80211_local *local, const struct ieee80211_chan_req *chanreq, enum ieee80211_chanctx_mode mode, - bool assign_on_failure) + bool assign_on_failure, + int radio_idx) { struct ieee80211_chanctx *ctx; int err; lockdep_assert_wiphy(local->hw.wiphy); - ctx = ieee80211_alloc_chanctx(local, chanreq, mode); + ctx = ieee80211_alloc_chanctx(local, chanreq, mode, radio_idx); if (!ctx) return ERR_PTR(-ENOMEM); @@ -1082,6 +1090,107 @@ int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link) return 0; } +static struct ieee80211_chanctx * +ieee80211_replace_chanctx(struct ieee80211_local *local, + const struct ieee80211_chan_req *chanreq, + enum ieee80211_chanctx_mode mode, + struct ieee80211_chanctx *curr_ctx) +{ + struct ieee80211_chanctx *new_ctx, *ctx; + struct wiphy *wiphy = local->hw.wiphy; + const struct wiphy_radio *radio; + + if (!curr_ctx || (curr_ctx->replace_state == + IEEE80211_CHANCTX_WILL_BE_REPLACED) || + !list_empty(&curr_ctx->reserved_links)) { + /* + * Another link already requested this context for a + * 
reservation. Find another one hoping all links assigned + * to it will also switch soon enough. + * + * TODO: This needs a little more work as some cases + * (more than 2 chanctx capable devices) may fail which could + * otherwise succeed provided some channel context juggling was + * performed. + * + * Consider ctx1..3, link1..6, each ctx has 2 links. link1 and + * link2 from ctx1 request new different chandefs starting 2 + * in-place reservations with ctx4 and ctx5 replacing ctx1 and + * ctx2 respectively. Next link5 and link6 from ctx3 reserve + * ctx4. If link3 and link4 remain on ctx2 as they are then this + * fails unless `replace_ctx` from ctx5 is replaced with ctx3. + */ + list_for_each_entry(ctx, &local->chanctx_list, list) { + if (ctx->replace_state != + IEEE80211_CHANCTX_REPLACE_NONE) + continue; + + if (!list_empty(&ctx->reserved_links)) + continue; + + if (ctx->conf.radio_idx >= 0) { + radio = &wiphy->radio[ctx->conf.radio_idx]; + if (!cfg80211_radio_chandef_valid(radio, &chanreq->oper)) + continue; + } + + curr_ctx = ctx; + break; + } + } + + /* + * If that's true then all available contexts already have reservations + * and cannot be used. + */ + if (!curr_ctx || (curr_ctx->replace_state == + IEEE80211_CHANCTX_WILL_BE_REPLACED) || + !list_empty(&curr_ctx->reserved_links)) + return ERR_PTR(-EBUSY); + + new_ctx = ieee80211_alloc_chanctx(local, chanreq, mode, -1); + if (!new_ctx) + return ERR_PTR(-ENOMEM); + + new_ctx->replace_ctx = curr_ctx; + new_ctx->replace_state = IEEE80211_CHANCTX_REPLACES_OTHER; + + curr_ctx->replace_ctx = new_ctx; + curr_ctx->replace_state = IEEE80211_CHANCTX_WILL_BE_REPLACED; + + list_add_rcu(&new_ctx->list, &local->chanctx_list); + + return new_ctx; +} + +static bool +ieee80211_find_available_radio(struct ieee80211_local *local, + const struct ieee80211_chan_req *chanreq, + int *radio_idx) +{ + struct wiphy *wiphy = local->hw.wiphy; + const struct wiphy_radio *radio; + int i; + + *radio_idx = -1; + if (!wiphy->n_radio) + return true; + + for (i = 0; i < wiphy->n_radio; i++) { + radio = &wiphy->radio[i]; + if (!cfg80211_radio_chandef_valid(radio, &chanreq->oper)) + continue; + + if (!ieee80211_can_create_new_chanctx(local, i)) + continue; + + *radio_idx = i; + return true; + } + + return false; +} + int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link, const struct ieee80211_chan_req *chanreq, enum ieee80211_chanctx_mode mode, @@ -1089,7 +1198,8 @@ int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link, struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; - struct ieee80211_chanctx *new_ctx, *curr_ctx, *ctx; + struct ieee80211_chanctx *new_ctx, *curr_ctx; + int radio_idx; lockdep_assert_wiphy(local->hw.wiphy); @@ -1099,76 +1209,15 @@ int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link, new_ctx = ieee80211_find_reservation_chanctx(local, chanreq, mode); if (!new_ctx) { - if (ieee80211_can_create_new_chanctx(local)) { + if (ieee80211_can_create_new_chanctx(local, -1) && + ieee80211_find_available_radio(local, chanreq, &radio_idx)) new_ctx = ieee80211_new_chanctx(local, chanreq, mode, - false); - if (IS_ERR(new_ctx)) - return PTR_ERR(new_ctx); - } else { - if (!curr_ctx || - (curr_ctx->replace_state == - IEEE80211_CHANCTX_WILL_BE_REPLACED) || - !list_empty(&curr_ctx->reserved_links)) { - /* - * Another link already requested this context - * for a reservation. Find another one hoping - * all links assigned to it will also switch - * soon enough.
- * - * TODO: This needs a little more work as some - * cases (more than 2 chanctx capable devices) - * may fail which could otherwise succeed - * provided some channel context juggling was - * performed. - * - * Consider ctx1..3, link1..6, each ctx has 2 - * links. link1 and link2 from ctx1 request new - * different chandefs starting 2 in-place - * reserations with ctx4 and ctx5 replacing - * ctx1 and ctx2 respectively. Next link5 and - * link6 from ctx3 reserve ctx4. If link3 and - * link4 remain on ctx2 as they are then this - * fails unless `replace_ctx` from ctx5 is - * replaced with ctx3. - */ - list_for_each_entry(ctx, &local->chanctx_list, - list) { - if (ctx->replace_state != - IEEE80211_CHANCTX_REPLACE_NONE) - continue; - - if (!list_empty(&ctx->reserved_links)) - continue; - - curr_ctx = ctx; - break; - } - } - - /* - * If that's true then all available contexts already - * have reservations and cannot be used. - */ - if (!curr_ctx || - (curr_ctx->replace_state == - IEEE80211_CHANCTX_WILL_BE_REPLACED) || - !list_empty(&curr_ctx->reserved_links)) - return -EBUSY; - - new_ctx = ieee80211_alloc_chanctx(local, chanreq, mode); - if (!new_ctx) - return -ENOMEM; - - new_ctx->replace_ctx = curr_ctx; - new_ctx->replace_state = - IEEE80211_CHANCTX_REPLACES_OTHER; - - curr_ctx->replace_ctx = new_ctx; - curr_ctx->replace_state = - IEEE80211_CHANCTX_WILL_BE_REPLACED; - - list_add_rcu(&new_ctx->list, &local->chanctx_list); - } + false, radio_idx); + else + new_ctx = ieee80211_replace_chanctx(local, chanreq, + mode, curr_ctx); + if (IS_ERR(new_ctx)) + return PTR_ERR(new_ctx); } list_add(&link->reserved_chanctx_list, &new_ctx->reserved_links); @@ -1800,6 +1849,7 @@ int _ieee80211_link_use_channel(struct ieee80211_link_data *link, struct ieee80211_chanctx *ctx; u8 radar_detect_width = 0; bool reserved = false; + int radio_idx; int ret; lockdep_assert_wiphy(local->hw.wiphy); @@ -1820,7 +1870,7 @@ int _ieee80211_link_use_channel(struct ieee80211_link_data *link, link->radar_required = ret; ret = ieee80211_check_combinations(sdata, &chanreq->oper, mode, - radar_detect_width); + radar_detect_width, -1); if (ret < 0) goto out; @@ -1830,9 +1880,11 @@ int _ieee80211_link_use_channel(struct ieee80211_link_data *link, /* Note: context is now reserved */ if (ctx) reserved = true; + else if (!ieee80211_find_available_radio(local, chanreq, &radio_idx)) + ctx = ERR_PTR(-EBUSY); else ctx = ieee80211_new_chanctx(local, chanreq, mode, - assign_on_failure); + assign_on_failure, radio_idx); if (IS_ERR(ctx)) { ret = PTR_ERR(ctx); goto out; diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 98310188f330..02b5476a4376 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -483,7 +483,6 @@ static const char *hw_flag_names[] = { FLAG(REPORTS_LOW_ACK), FLAG(SUPPORTS_TX_FRAG), FLAG(SUPPORTS_TDLS_BUFFER_STA), - FLAG(DEAUTH_NEED_MGD_TX_PREP), FLAG(DOESNT_SUPPORT_QOS_NDP), FLAG(BUFF_MMPDU_TXQ), FLAG(SUPPORTS_VHT_EXT_NSS_BW), diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 7db4c3ee7e6d..3f74bbceeca5 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -1746,7 +1746,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, IEEE80211_CHANCTX_SHARED : IEEE80211_CHANCTX_EXCLUSIVE; ret = ieee80211_check_combinations(sdata, ¶ms->chandef, chanmode, - radar_detect_width); + radar_detect_width, -1); if (ret < 0) return ret; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 6349552e62a8..a3485e4c6132 100644 --- a/net/mac80211/ieee80211_i.h +++ 
b/net/mac80211/ieee80211_i.h @@ -2640,8 +2640,9 @@ void ieee80211_recalc_dtim(struct ieee80211_local *local, int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, const struct cfg80211_chan_def *chandef, enum ieee80211_chanctx_mode chanmode, - u8 radar_detect); -int ieee80211_max_num_channels(struct ieee80211_local *local); + u8 radar_detect, int radio_idx); +int ieee80211_max_num_channels(struct ieee80211_local *local, int radio_idx); +u32 ieee80211_get_radio_mask(struct wiphy *wiphy, struct net_device *dev); void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, struct ieee80211_chanctx *ctx); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 6d969d9f1ac9..b4ad66af3af3 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -397,7 +397,7 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata, } } - return ieee80211_check_combinations(sdata, NULL, 0, 0); + return ieee80211_check_combinations(sdata, NULL, 0, 0, -1); } static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata, @@ -689,8 +689,12 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do fallthrough; default: - if (going_down) - drv_remove_interface(local, sdata); + if (!going_down) + break; + drv_remove_interface(local, sdata); + + /* Clear private driver data to prevent reuse */ + memset(sdata->vif.drv_priv, 0, local->hw.vif_data_size); } ieee80211_recalc_ps(local); diff --git a/net/mac80211/link.c b/net/mac80211/link.c index 2e6e92defbca..1a211b8d4057 100644 --- a/net/mac80211/link.c +++ b/net/mac80211/link.c @@ -72,6 +72,8 @@ void ieee80211_link_stop(struct ieee80211_link_data *link) cancel_delayed_work_sync(&link->color_collision_detect_work); wiphy_work_cancel(link->sdata->local->hw.wiphy, + &link->color_change_finalize_work); + wiphy_work_cancel(link->sdata->local->hw.wiphy, &link->csa.finalize_work); ieee80211_link_release_channel(link); } diff --git a/net/mac80211/main.c b/net/mac80211/main.c index a9aefc83d30a..a3104b6ea6f0 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -148,7 +148,7 @@ static u32 ieee80211_calc_hw_conf_chan(struct ieee80211_local *local, offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; /* force it also for scanning, since drivers might config differently */ - if (offchannel_flag || local->scanning || + if (offchannel_flag || local->scanning || local->in_reconfig || !cfg80211_chandef_identical(&local->hw.conf.chandef, &chandef)) { local->hw.conf.chandef = chandef; changed |= IEEE80211_CONF_CHANGE_CHANNEL; @@ -1091,6 +1091,27 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local) return 0; } +static bool +ieee80211_ifcomb_check(const struct ieee80211_iface_combination *c, int n_comb) +{ + int i, j; + + for (i = 0; i < n_comb; i++, c++) { + /* DFS is not supported with multi-channel combinations yet */ + if (c->radar_detect_widths && + c->num_different_channels > 1) + return false; + + /* mac80211 doesn't support more than one IBSS interface */ + for (j = 0; j < c->n_limits; j++) + if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) && + c->limits[j].max > 1) + return false; + } + + return true; +} + int ieee80211_register_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); @@ -1161,9 +1182,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) if (WARN_ON(!ieee80211_hw_check(hw, AP_LINK_PS))) return -EINVAL; - - if (WARN_ON(ieee80211_hw_check(hw, DEAUTH_NEED_MGD_TX_PREP))) - return -EINVAL; } #ifdef 
CONFIG_PM @@ -1180,17 +1198,20 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) if (comb->num_different_channels > 1) return -EINVAL; } - } else { - /* DFS is not supported with multi-channel combinations yet */ - for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) { - const struct ieee80211_iface_combination *comb; + } - comb = &local->hw.wiphy->iface_combinations[i]; + if (hw->wiphy->n_radio) { + for (i = 0; i < hw->wiphy->n_radio; i++) { + const struct wiphy_radio *radio = &hw->wiphy->radio[i]; - if (comb->radar_detect_widths && - comb->num_different_channels > 1) + if (!ieee80211_ifcomb_check(radio->iface_combinations, + radio->n_iface_combinations)) return -EINVAL; } + } else { + if (!ieee80211_ifcomb_check(hw->wiphy->iface_combinations, + hw->wiphy->n_iface_combinations)) + return -EINVAL; } /* Only HW csum features are currently compatible with mac80211 */ @@ -1320,18 +1341,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR); - /* mac80211 doesn't support more than one IBSS interface right now */ - for (i = 0; i < hw->wiphy->n_iface_combinations; i++) { - const struct ieee80211_iface_combination *c; - int j; - - c = &hw->wiphy->iface_combinations[i]; - - for (j = 0; j < c->n_limits; j++) - if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) && - c->limits[j].max > 1) - return -EINVAL; - } local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) + sizeof(void *) * channels, GFP_KERNEL); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 3d207d79d11f..4779a18ab75d 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -3521,6 +3521,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, u64 changed = 0; struct ieee80211_prep_tx_info info = { .subtype = stype, + .was_assoc = true, + .link_id = ffs(sdata->vif.active_links) - 1, }; lockdep_assert_wiphy(local->hw.wiphy); @@ -3569,29 +3571,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, /* deauthenticate/disassociate now */ if (tx || frame_buf) { - /* - * In multi channel scenarios guarantee that the virtual - * interface is granted immediate airtime to transmit the - * deauthentication frame by calling mgd_prepare_tx, if the - * driver requested so. 
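Several hunks in this series depend on the new per-radio interface-combination data that ieee80211_register_hw() now validates via ieee80211_ifcomb_check(). A minimal sketch of the driver side may make the data flow easier to follow; the field names (radio, n_radio, freq_range, iface_combinations) are taken from the hunks in this patch, while the concrete limits and frequency range below are purely illustrative:

    /* Sketch: a driver advertising a single 5 GHz radio so that the
     * per-radio ieee80211_ifcomb_check() above validates it on its own.
     * All numbers are made up for illustration.
     */
    static const struct ieee80211_iface_limit demo_limits[] = {
            { .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
            { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) },
    };

    static const struct ieee80211_iface_combination demo_combs[] = {
            {
                    .limits = demo_limits,
                    .n_limits = ARRAY_SIZE(demo_limits),
                    .max_interfaces = 3,
                    /* must stay 1 whenever radar_detect_widths is set,
                     * or the DFS check in ieee80211_ifcomb_check()
                     * rejects the device */
                    .num_different_channels = 1,
            },
    };

    static const struct wiphy_radio_freq_range demo_5ghz = {
            .start_freq = 5170000, /* kHz, the unit consumed by
                                    * cfg80211_radio_chandef_valid() */
            .end_freq = 5835000,
    };

    static const struct wiphy_radio demo_radios[] = {
            {
                    .freq_range = &demo_5ghz,
                    .n_freq_range = 1,
                    .iface_combinations = demo_combs,
                    .n_iface_combinations = ARRAY_SIZE(demo_combs),
            },
    };

    /* in the driver's probe path, before ieee80211_register_hw(hw):
     *      hw->wiphy->radio = demo_radios;
     *      hw->wiphy->n_radio = ARRAY_SIZE(demo_radios);
     */

With n_radio set, registration validates each radio's combination list individually instead of only the wiphy-global list, matching the branch added to ieee80211_register_hw() above.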
- */ - if (ieee80211_hw_check(&local->hw, DEAUTH_NEED_MGD_TX_PREP)) { - for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); - link_id++) { - struct ieee80211_link_data *link; - - link = sdata_dereference(sdata->link[link_id], - sdata); - if (!link) - continue; - if (link->u.mgd.have_beacon) - break; - } - if (link_id == IEEE80211_MLD_MAX_NUM_LINKS) { - info.link_id = ffs(sdata->vif.active_links) - 1; - drv_mgd_prepare_tx(sdata->local, sdata, &info); - } - } + drv_mgd_prepare_tx(sdata->local, sdata, &info); ieee80211_send_deauth_disassoc(sdata, sdata->vif.cfg.ap_addr, sdata->vif.cfg.ap_addr, stype, @@ -6696,7 +6676,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; + struct ieee80211_bss_conf *bss_conf = link->conf; struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg; struct ieee80211_mgmt *mgmt = (void *) hdr; size_t baselen; @@ -6740,7 +6720,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, parse_params.len = len - baselen; rcu_read_lock(); - chanctx_conf = rcu_dereference(link->conf->chanctx_conf); + chanctx_conf = rcu_dereference(bss_conf->chanctx_conf); if (!chanctx_conf) { rcu_read_unlock(); return; @@ -6770,11 +6750,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, ifmgd->assoc_data->need_beacon = false; if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY) && !ieee80211_is_s1g_beacon(hdr->frame_control)) { - link->conf->sync_tsf = + bss_conf->sync_tsf = le64_to_cpu(mgmt->u.beacon.timestamp); - link->conf->sync_device_ts = + bss_conf->sync_device_ts = rx_status->device_timestamp; - link->conf->sync_dtim_count = elems->dtim_count; + bss_conf->sync_dtim_count = elems->dtim_count; } if (elems->mbssid_config_ie) @@ -6798,7 +6778,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, } if (!ifmgd->associated || - !ieee80211_rx_our_beacon(bssid, link->conf->bss)) + !ieee80211_rx_our_beacon(bssid, bss_conf->bss)) return; bssid = link->u.mgd.bssid; @@ -6825,7 +6805,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, */ if (!ieee80211_is_s1g_beacon(hdr->frame_control)) ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4); - parse_params.bss = link->conf->bss; + parse_params.bss = bss_conf->bss; parse_params.filter = care_about_ies; parse_params.crc = ncrc; elems = ieee802_11_parse_elems_full(&parse_params); @@ -6906,11 +6886,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, */ if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY) && !ieee80211_is_s1g_beacon(hdr->frame_control)) { - link->conf->sync_tsf = + bss_conf->sync_tsf = le64_to_cpu(mgmt->u.beacon.timestamp); - link->conf->sync_device_ts = + bss_conf->sync_device_ts = rx_status->device_timestamp; - link->conf->sync_dtim_count = elems->dtim_count; + bss_conf->sync_dtim_count = elems->dtim_count; } if ((ncrc == link->u.mgd.beacon_crc && link->u.mgd.beacon_crc_valid) || @@ -6973,10 +6953,10 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, goto free; } - if (WARN_ON(!link->conf->chanreq.oper.chan)) + if (WARN_ON(!bss_conf->chanreq.oper.chan)) goto free; - sband = local->hw.wiphy->bands[link->conf->chanreq.oper.chan->band]; + sband = local->hw.wiphy->bands[bss_conf->chanreq.oper.chan->band]; changed |= ieee80211_recalc_twt_req(sdata, sband, link, link_sta, elems); diff --git a/net/mac80211/rx.c 
b/net/mac80211/rx.c index 0ff9062a130c..59ad24a71141 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3358,6 +3358,7 @@ static void ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx) { struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; + struct ieee80211_bss_conf *bss_conf; const struct element *ie; size_t baselen; @@ -3368,7 +3369,9 @@ ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx) if (ieee80211_hw_check(&rx->local->hw, DETECTS_COLOR_COLLISION)) return; - if (rx->link->conf->csa_active) + bss_conf = rx->link->conf; + if (bss_conf->csa_active || bss_conf->color_change_active || + !bss_conf->he_bss_color.enabled) return; baselen = mgmt->u.beacon.variable - rx->skb->data; @@ -3380,7 +3383,6 @@ ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx) rx->skb->len - baselen); if (ie && ie->datalen >= sizeof(struct ieee80211_he_operation) && ie->datalen >= ieee80211_he_oper_size(ie->data + 1)) { - struct ieee80211_bss_conf *bss_conf = rx->link->conf; const struct ieee80211_he_operation *he_oper; u8 color; diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c index e91ca4ccdd37..073ff9e0f397 100644 --- a/net/mac80211/spectmgmt.c +++ b/net/mac80211/spectmgmt.c @@ -343,6 +343,9 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, break; } + /* capture the AP configuration */ + csa_ie->chanreq.ap = csa_ie->chanreq.oper; + /* parse one of the Elements to build a new chandef */ memset(&new_chandef, 0, sizeof(new_chandef)); new_chandef.chan = new_chan; @@ -369,11 +372,11 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, validate_chandef_by_ht_vht_oper(sdata, conn, vht_cap_info, &new_chandef); - /* capture the AP chandef before (potential) downgrading */ - csa_ie->chanreq.ap = new_chandef; - /* if data is there validate the bandwidth & use it */ if (new_chandef.chan) { + /* capture the AP chandef before (potential) downgrading */ + csa_ie->chanreq.ap = new_chandef; + if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_320 && new_chandef.width == NL80211_CHAN_WIDTH_320) ieee80211_chandef_downgrade(&new_chandef, NULL); diff --git a/net/mac80211/util.c b/net/mac80211/util.c index c6d5f73119d8..ced19ce7c51a 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -3932,19 +3932,103 @@ static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local, return radar_detect; } +static u32 +__ieee80211_get_radio_mask(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_bss_conf *link_conf; + struct ieee80211_chanctx_conf *conf; + unsigned int link_id; + u32 mask = 0; + + for_each_vif_active_link(&sdata->vif, link_conf, link_id) { + conf = sdata_dereference(link_conf->chanctx_conf, sdata); + if (!conf || conf->radio_idx < 0) + continue; + + mask |= BIT(conf->radio_idx); + } + + return mask; +} + +u32 ieee80211_get_radio_mask(struct wiphy *wiphy, struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + return __ieee80211_get_radio_mask(sdata); +} + +static bool +ieee80211_sdata_uses_radio(struct ieee80211_sub_if_data *sdata, int radio_idx) +{ + if (radio_idx < 0) + return true; + + return __ieee80211_get_radio_mask(sdata) & BIT(radio_idx); +} + +static int +ieee80211_fill_ifcomb_params(struct ieee80211_local *local, + struct iface_combination_params *params, + const struct cfg80211_chan_def *chandef, + struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_sub_if_data *sdata_iter; + struct ieee80211_chanctx *ctx; + int total = !!sdata; + + 
list_for_each_entry(ctx, &local->chanctx_list, list) { + if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) + continue; + + if (params->radio_idx >= 0 && + ctx->conf.radio_idx != params->radio_idx) + continue; + + params->radar_detect |= + ieee80211_chanctx_radar_detect(local, ctx); + + if (chandef && ctx->mode != IEEE80211_CHANCTX_EXCLUSIVE && + cfg80211_chandef_compatible(chandef, &ctx->conf.def)) + continue; + + params->num_different_channels++; + } + + list_for_each_entry(sdata_iter, &local->interfaces, list) { + struct wireless_dev *wdev_iter; + + wdev_iter = &sdata_iter->wdev; + + if (sdata_iter == sdata || + !ieee80211_sdata_running(sdata_iter) || + cfg80211_iftype_allowed(local->hw.wiphy, + wdev_iter->iftype, 0, 1)) + continue; + + if (!ieee80211_sdata_uses_radio(sdata_iter, params->radio_idx)) + continue; + + params->iftype_num[wdev_iter->iftype]++; + total++; + } + + return total; +} + int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, const struct cfg80211_chan_def *chandef, enum ieee80211_chanctx_mode chanmode, - u8 radar_detect) + u8 radar_detect, int radio_idx) { + bool shared = chanmode == IEEE80211_CHANCTX_SHARED; struct ieee80211_local *local = sdata->local; - struct ieee80211_sub_if_data *sdata_iter; enum nl80211_iftype iftype = sdata->wdev.iftype; - struct ieee80211_chanctx *ctx; - int total = 1; struct iface_combination_params params = { .radar_detect = radar_detect, + .radio_idx = radio_idx, }; + int total; lockdep_assert_wiphy(local->hw.wiphy); @@ -3981,37 +4065,9 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, if (iftype != NL80211_IFTYPE_UNSPECIFIED) params.iftype_num[iftype] = 1; - list_for_each_entry(ctx, &local->chanctx_list, list) { - if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) - continue; - params.radar_detect |= - ieee80211_chanctx_radar_detect(local, ctx); - if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) { - params.num_different_channels++; - continue; - } - if (chandef && chanmode == IEEE80211_CHANCTX_SHARED && - cfg80211_chandef_compatible(chandef, - &ctx->conf.def)) - continue; - params.num_different_channels++; - } - - list_for_each_entry_rcu(sdata_iter, &local->interfaces, list) { - struct wireless_dev *wdev_iter; - - wdev_iter = &sdata_iter->wdev; - - if (sdata_iter == sdata || - !ieee80211_sdata_running(sdata_iter) || - cfg80211_iftype_allowed(local->hw.wiphy, - wdev_iter->iftype, 0, 1)) - continue; - - params.iftype_num[wdev_iter->iftype]++; - total++; - } - + total = ieee80211_fill_ifcomb_params(local, &params, + shared ? 
chandef : NULL, + sdata); if (total == 1 && !params.radar_detect) return 0; @@ -4028,28 +4084,17 @@ ieee80211_iter_max_chans(const struct ieee80211_iface_combination *c, c->num_different_channels); } -int ieee80211_max_num_channels(struct ieee80211_local *local) +int ieee80211_max_num_channels(struct ieee80211_local *local, int radio_idx) { - struct ieee80211_sub_if_data *sdata; - struct ieee80211_chanctx *ctx; u32 max_num_different_channels = 1; int err; - struct iface_combination_params params = {0}; + struct iface_combination_params params = { + .radio_idx = radio_idx, + }; lockdep_assert_wiphy(local->hw.wiphy); - list_for_each_entry(ctx, &local->chanctx_list, list) { - if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) - continue; - - params.num_different_channels++; - - params.radar_detect |= - ieee80211_chanctx_radar_detect(local, ctx); - } - - list_for_each_entry_rcu(sdata, &local->interfaces, list) - params.iftype_num[sdata->wdev.iftype]++; + ieee80211_fill_ifcomb_params(local, &params, NULL, NULL); err = cfg80211_iter_combinations(local->hw.wiphy, &params, ieee80211_iter_max_chans, diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 4a07834809bb..481ee78e77bc 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -3866,6 +3866,15 @@ static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *r nf_tables_rule_destroy(ctx, rule); } +/** nft_chain_validate - loop detection and hook validation + * + * @ctx: context containing call depth and base chain + * @chain: chain to validate + * + * Walk through the rules of the given chain and chase all jumps/gotos + * and set lookups until either the jump limit is hit or all reachable + * chains have been validated. + */ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain) { struct nft_expr *expr, *last; @@ -3887,6 +3896,9 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain) if (!expr->ops->validate) continue; + /* This may call nft_chain_validate() recursively, + * callers that do so must increment ctx->level. + */ err = expr->ops->validate(ctx, expr, &data); if (err < 0) return err; @@ -10879,150 +10891,6 @@ int nft_chain_validate_hooks(const struct nft_chain *chain, } EXPORT_SYMBOL_GPL(nft_chain_validate_hooks); -/* - * Loop detection - walk through the ruleset beginning at the destination chain - * of a new jump until either the source chain is reached (loop) or all - * reachable chains have been traversed. - * - * The loop check is performed whenever a new jump verdict is added to an - * expression or verdict map or a verdict map is bound to a new chain. 
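The nf_tables hunks above retire the dedicated nf_tables_check_loops() walker and route jump/goto targets through nft_chain_validate() instead, relying on the call-depth convention spelled out in the new comment. A compressed sketch of that shape follows; the depth limit and errno are shown for illustration, not copied from the patch:

    /* Sketch: a verdict expression's ->validate() re-enters
     * nft_chain_validate() on the jump target with ctx->level
     * incremented, so a cyclic ruleset runs into the depth limit
     * instead of recursing forever.
     */
    static int demo_verdict_validate(struct nft_ctx *ctx,
                                     const struct nft_chain *target)
    {
            int err;

            if (ctx->level + 1 >= NFT_JUMP_STACK_SIZE) /* assumed limit */
                    return -EMLINK; /* too deep: treat as a loop */

            ctx->level++;
            err = nft_chain_validate(ctx, target);
            ctx->level--;

            return err;
    }

This trades the removed walker's explicit source-chain comparison for a bounded recursion depth, which also covers loops that are reachable only through set lookups.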
- */ - -static int nf_tables_check_loops(const struct nft_ctx *ctx, - const struct nft_chain *chain); - -static int nft_check_loops(const struct nft_ctx *ctx, - const struct nft_set_ext *ext) -{ - const struct nft_data *data; - int ret; - - data = nft_set_ext_data(ext); - switch (data->verdict.code) { - case NFT_JUMP: - case NFT_GOTO: - ret = nf_tables_check_loops(ctx, data->verdict.chain); - break; - default: - ret = 0; - break; - } - - return ret; -} - -static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx, - struct nft_set *set, - const struct nft_set_iter *iter, - struct nft_elem_priv *elem_priv) -{ - const struct nft_set_ext *ext = nft_set_elem_ext(set, elem_priv); - - if (!nft_set_elem_active(ext, iter->genmask)) - return 0; - - if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) && - *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END) - return 0; - - return nft_check_loops(ctx, ext); -} - -static int nft_set_catchall_loops(const struct nft_ctx *ctx, - struct nft_set *set) -{ - u8 genmask = nft_genmask_next(ctx->net); - struct nft_set_elem_catchall *catchall; - struct nft_set_ext *ext; - int ret = 0; - - list_for_each_entry_rcu(catchall, &set->catchall_list, list) { - ext = nft_set_elem_ext(set, catchall->elem); - if (!nft_set_elem_active(ext, genmask)) - continue; - - ret = nft_check_loops(ctx, ext); - if (ret < 0) - return ret; - } - - return ret; -} - -static int nf_tables_check_loops(const struct nft_ctx *ctx, - const struct nft_chain *chain) -{ - const struct nft_rule *rule; - const struct nft_expr *expr, *last; - struct nft_set *set; - struct nft_set_binding *binding; - struct nft_set_iter iter; - - if (ctx->chain == chain) - return -ELOOP; - - if (fatal_signal_pending(current)) - return -EINTR; - - list_for_each_entry(rule, &chain->rules, list) { - nft_rule_for_each_expr(expr, last, rule) { - struct nft_immediate_expr *priv; - const struct nft_data *data; - int err; - - if (strcmp(expr->ops->type->name, "immediate")) - continue; - - priv = nft_expr_priv(expr); - if (priv->dreg != NFT_REG_VERDICT) - continue; - - data = &priv->data; - switch (data->verdict.code) { - case NFT_JUMP: - case NFT_GOTO: - err = nf_tables_check_loops(ctx, - data->verdict.chain); - if (err < 0) - return err; - break; - default: - break; - } - } - } - - list_for_each_entry(set, &ctx->table->sets, list) { - if (!nft_is_active_next(ctx->net, set)) - continue; - if (!(set->flags & NFT_SET_MAP) || - set->dtype != NFT_DATA_VERDICT) - continue; - - list_for_each_entry(binding, &set->bindings, list) { - if (!(binding->flags & NFT_SET_MAP) || - binding->chain != chain) - continue; - - iter.genmask = nft_genmask_next(ctx->net); - iter.type = NFT_ITER_UPDATE; - iter.skip = 0; - iter.count = 0; - iter.err = 0; - iter.fn = nf_tables_loop_check_setelem; - - set->ops->walk(ctx, set, &iter); - if (!iter.err) - iter.err = nft_set_catchall_loops(ctx, set); - - if (iter.err < 0) - return iter.err; - } - } - - return 0; -} - /** * nft_parse_u32_check - fetch u32 attribute and check for maximum value * @@ -11135,7 +11003,7 @@ static int nft_validate_register_store(const struct nft_ctx *ctx, if (data != NULL && (data->verdict.code == NFT_GOTO || data->verdict.code == NFT_JUMP)) { - err = nf_tables_check_loops(ctx, data->verdict.chain); + err = nft_chain_validate(ctx, data->verdict.chain); if (err < 0) return err; } diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index f1c31757e496..55e28e1da66e 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -325,7 
+325,7 @@ static void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) hooks = nf_hook_entries_head(net, pf, entry->state.hook); i = entry->hook_index; - if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) { + if (!hooks || i >= hooks->num_hook_entries) { kfree_skb_reason(skb, SKB_DROP_REASON_NETFILTER_DROP); nf_queue_entry_free(entry); return; diff --git a/net/psample/psample.c b/net/psample/psample.c index f48b5b9cd409..a0ddae8a65f9 100644 --- a/net/psample/psample.c +++ b/net/psample/psample.c @@ -360,8 +360,9 @@ static int psample_tunnel_meta_len(struct ip_tunnel_info *tun_info) } #endif -void psample_sample_packet(struct psample_group *group, struct sk_buff *skb, - u32 sample_rate, const struct psample_metadata *md) +void psample_sample_packet(struct psample_group *group, + const struct sk_buff *skb, u32 sample_rate, + const struct psample_metadata *md) { ktime_t tstamp = ktime_get_real(); int out_ifindex = md->out_ifindex; @@ -498,7 +499,7 @@ void psample_sample_packet(struct psample_group *group, struct sk_buff *skb, goto error; if (md->rate_as_probability) - nla_put_flag(skb, PSAMPLE_ATTR_SAMPLE_PROBABILITY); + nla_put_flag(nl_skb, PSAMPLE_ATTR_SAMPLE_PROBABILITY); genlmsg_end(nl_skb, data); genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0, diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index a6b7c514a181..113b907da0f7 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -1081,6 +1081,14 @@ do_nat: err = nf_conntrack_confirm(skb); if (err != NF_ACCEPT) goto nf_error; + + /* The ct may be dropped if a clash has been resolved, + * so it's necessary to retrieve it from skb again to + * prevent UAF. + */ + ct = nf_ct_get(skb, &ctinfo); + if (!ct) + skip_add = true; } if (!skip_add) diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index cd0accaf844a..dc0229693461 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -246,7 +246,7 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, memset(&opt, 0, sizeof(opt)); opt.index = d->tcf_index; - opt.refcnt = refcount_read(&d->tcf_refcnt) - ref, + opt.refcnt = refcount_read(&d->tcf_refcnt) - ref; opt.bindcnt = atomic_read(&d->tcf_bindcnt) - bind; spin_lock_bh(&d->tcf_lock); opt.action = d->tcf_action; diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index c2ef9dcf91d2..cc6051d4f2ef 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c @@ -91,7 +91,7 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt, entry = tcx_entry_fetch_or_create(dev, true, &created); if (!entry) return -ENOMEM; - tcx_miniq_set_active(entry, true); + tcx_miniq_inc(entry); mini_qdisc_pair_init(&q->miniqp, sch, &tcx_entry(entry)->miniq); if (created) tcx_entry_update(dev, entry, true); @@ -121,7 +121,7 @@ static void ingress_destroy(struct Qdisc *sch) tcf_block_put_ext(q->block, sch, &q->block_info); if (entry) { - tcx_miniq_set_active(entry, false); + tcx_miniq_dec(entry); if (!tcx_entry_is_active(entry)) { tcx_entry_update(dev, NULL, true); tcx_entry_free(entry); @@ -257,7 +257,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt, entry = tcx_entry_fetch_or_create(dev, true, &created); if (!entry) return -ENOMEM; - tcx_miniq_set_active(entry, true); + tcx_miniq_inc(entry); mini_qdisc_pair_init(&q->miniqp_ingress, sch, &tcx_entry(entry)->miniq); if (created) tcx_entry_update(dev, entry, true); @@ -276,7 +276,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt, entry = tcx_entry_fetch_or_create(dev, false, 
&created); if (!entry) return -ENOMEM; - tcx_miniq_set_active(entry, true); + tcx_miniq_inc(entry); mini_qdisc_pair_init(&q->miniqp_egress, sch, &tcx_entry(entry)->miniq); if (created) tcx_entry_update(dev, entry, false); @@ -302,7 +302,7 @@ static void clsact_destroy(struct Qdisc *sch) tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info); if (ingress_entry) { - tcx_miniq_set_active(ingress_entry, false); + tcx_miniq_dec(ingress_entry); if (!tcx_entry_is_active(ingress_entry)) { tcx_entry_update(dev, NULL, true); tcx_entry_free(ingress_entry); @@ -310,7 +310,7 @@ static void clsact_destroy(struct Qdisc *sch) } if (egress_entry) { - tcx_miniq_set_active(egress_entry, false); + tcx_miniq_dec(egress_entry); if (!tcx_entry_is_active(egress_entry)) { tcx_entry_update(dev, NULL, false); tcx_entry_free(egress_entry); diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index dfc353eea8ed..0e1691316f42 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2441,6 +2441,13 @@ static void xs_tcp_setup_socket(struct work_struct *work) transport->srcport = 0; status = -EAGAIN; break; + case -EPERM: + /* Happens, for instance, if a BPF program is preventing + * the connect. Remap the error so upper layers can better + * deal with it. + */ + status = -ECONNREFUSED; + fallthrough; case -EINVAL: /* Happens, for instance, if the user specified a link * local IPv6 address without a scope-id. diff --git a/net/tipc/core.h b/net/tipc/core.h index 7eccd97e0609..7f3fe3401c45 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h @@ -72,7 +72,6 @@ struct tipc_node; struct tipc_bearer; struct tipc_bc_base; struct tipc_link; -struct tipc_name_table; struct tipc_topsrv; struct tipc_monitor; #ifdef CONFIG_TIPC_CRYPTO diff --git a/net/tipc/link.c b/net/tipc/link.c index 0716eb5c8a31..5c2088a469ce 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -241,13 +241,6 @@ enum { LINK_SYNCHING = 0xc << 24 }; -/* Link FSM state checking routines - */ -static int link_is_up(struct tipc_link *l) -{ - return l->state & (LINK_ESTABLISHED | LINK_SYNCHING); -} - static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, struct sk_buff_head *xmitq); static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, @@ -274,7 +267,7 @@ static void tipc_link_update_cwin(struct tipc_link *l, int released, */ bool tipc_link_is_up(struct tipc_link *l) { - return link_is_up(l); + return l->state & (LINK_ESTABLISHED | LINK_SYNCHING); } bool tipc_link_peer_is_down(struct tipc_link *l) @@ -1790,7 +1783,7 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb, rcv_nxt = l->rcv_nxt; win_lim = rcv_nxt + TIPC_MAX_LINK_WIN; - if (unlikely(!link_is_up(l))) { + if (unlikely(!tipc_link_is_up(l))) { if (l->state == LINK_ESTABLISHING) rc = TIPC_LINK_UP_EVT; kfree_skb(skb); @@ -1848,7 +1841,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, struct tipc_link *bcl = l->bc_rcvlink; struct tipc_msg *hdr; struct sk_buff *skb; - bool node_up = link_is_up(bcl); + bool node_up = tipc_link_is_up(bcl); u16 glen = 0, bc_rcvgap = 0; int dlen = 0; void *data; @@ -2163,7 +2156,7 @@ bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr) if (session != curr_session) return false; /* Extra sanity check */ - if (!link_is_up(l) && msg_ack(hdr)) + if (!tipc_link_is_up(l) && msg_ack(hdr)) return false; if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO)) return true; @@ -2261,7 +2254,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, } /* 
ACTIVATE_MSG serves as PEER_RESET if link is already down */ - if (mtyp == RESET_MSG || !link_is_up(l)) + if (mtyp == RESET_MSG || !tipc_link_is_up(l)) rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); /* ACTIVATE_MSG takes up link if it was already locally reset */ @@ -2300,7 +2293,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, if (msg_probe(hdr)) l->stats.recv_probes++; - if (!link_is_up(l)) { + if (!tipc_link_is_up(l)) { if (l->state == LINK_ESTABLISHING) rc = TIPC_LINK_UP_EVT; break; @@ -2387,7 +2380,7 @@ void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr) int mtyp = msg_type(hdr); u16 peers_snd_nxt = msg_bc_snd_nxt(hdr); - if (link_is_up(l)) + if (tipc_link_is_up(l)) return; if (msg_user(hdr) == BCAST_PROTOCOL) { @@ -2415,7 +2408,7 @@ int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr, u16 peers_snd_nxt = msg_bc_snd_nxt(hdr); int rc = 0; - if (!link_is_up(l)) + if (!tipc_link_is_up(l)) return rc; if (!msg_peer_node_is_up(hdr)) @@ -2475,7 +2468,7 @@ int tipc_link_bc_ack_rcv(struct tipc_link *r, u16 acked, u16 gap, bool unused = false; int rc = 0; - if (!link_is_up(r) || !r->bc_peer_is_up) + if (!tipc_link_is_up(r) || !r->bc_peer_is_up) return 0; if (gap) { @@ -2873,7 +2866,7 @@ void tipc_link_set_tolerance(struct tipc_link *l, u32 tol, l->tolerance = tol; if (l->bc_rcvlink) l->bc_rcvlink->tolerance = tol; - if (link_is_up(l)) + if (tipc_link_is_up(l)) tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq); } diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 19d81200a2a2..7397a372c78e 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1631,71 +1631,87 @@ nla_put_failure: return -ENOBUFS; } -static int nl80211_put_iface_combinations(struct wiphy *wiphy, - struct sk_buff *msg, - bool large) +static int nl80211_put_ifcomb_data(struct sk_buff *msg, bool large, int idx, + const struct ieee80211_iface_combination *c, + u16 nested) { - struct nlattr *nl_combis; - int i, j; + struct nlattr *nl_combi, *nl_limits; + int i; - nl_combis = nla_nest_start_noflag(msg, - NL80211_ATTR_INTERFACE_COMBINATIONS); - if (!nl_combis) + nl_combi = nla_nest_start_noflag(msg, idx | nested); + if (!nl_combi) goto nla_put_failure; - for (i = 0; i < wiphy->n_iface_combinations; i++) { - const struct ieee80211_iface_combination *c; - struct nlattr *nl_combi, *nl_limits; + nl_limits = nla_nest_start_noflag(msg, NL80211_IFACE_COMB_LIMITS | + nested); + if (!nl_limits) + goto nla_put_failure; - c = &wiphy->iface_combinations[i]; + for (i = 0; i < c->n_limits; i++) { + struct nlattr *nl_limit; - nl_combi = nla_nest_start_noflag(msg, i + 1); - if (!nl_combi) + nl_limit = nla_nest_start_noflag(msg, i + 1); + if (!nl_limit) goto nla_put_failure; - - nl_limits = nla_nest_start_noflag(msg, - NL80211_IFACE_COMB_LIMITS); - if (!nl_limits) + if (nla_put_u32(msg, NL80211_IFACE_LIMIT_MAX, c->limits[i].max)) + goto nla_put_failure; + if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES, + c->limits[i].types)) goto nla_put_failure; + nla_nest_end(msg, nl_limit); + } - for (j = 0; j < c->n_limits; j++) { - struct nlattr *nl_limit; + nla_nest_end(msg, nl_limits); - nl_limit = nla_nest_start_noflag(msg, j + 1); - if (!nl_limit) - goto nla_put_failure; - if (nla_put_u32(msg, NL80211_IFACE_LIMIT_MAX, - c->limits[j].max)) - goto nla_put_failure; - if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES, - c->limits[j].types)) - goto nla_put_failure; - nla_nest_end(msg, nl_limit); - } + if (c->beacon_int_infra_match && + 
nla_put_flag(msg, NL80211_IFACE_COMB_STA_AP_BI_MATCH)) + goto nla_put_failure; + if (nla_put_u32(msg, NL80211_IFACE_COMB_NUM_CHANNELS, + c->num_different_channels) || + nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM, + c->max_interfaces)) + goto nla_put_failure; + if (large && + (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS, + c->radar_detect_widths) || + nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS, + c->radar_detect_regions))) + goto nla_put_failure; + if (c->beacon_int_min_gcd && + nla_put_u32(msg, NL80211_IFACE_COMB_BI_MIN_GCD, + c->beacon_int_min_gcd)) + goto nla_put_failure; - nla_nest_end(msg, nl_limits); + nla_nest_end(msg, nl_combi); - if (c->beacon_int_infra_match && - nla_put_flag(msg, NL80211_IFACE_COMB_STA_AP_BI_MATCH)) - goto nla_put_failure; - if (nla_put_u32(msg, NL80211_IFACE_COMB_NUM_CHANNELS, - c->num_different_channels) || - nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM, - c->max_interfaces)) - goto nla_put_failure; - if (large && - (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS, - c->radar_detect_widths) || - nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS, - c->radar_detect_regions))) - goto nla_put_failure; - if (c->beacon_int_min_gcd && - nla_put_u32(msg, NL80211_IFACE_COMB_BI_MIN_GCD, - c->beacon_int_min_gcd)) - goto nla_put_failure; + return 0; +nla_put_failure: + return -ENOBUFS; +} + +static int nl80211_put_iface_combinations(struct wiphy *wiphy, + struct sk_buff *msg, + int attr, int radio, + bool large, u16 nested) +{ + const struct ieee80211_iface_combination *c; + struct nlattr *nl_combis; + int i, n; - nla_nest_end(msg, nl_combi); + nl_combis = nla_nest_start_noflag(msg, attr | nested); + if (!nl_combis) + goto nla_put_failure; + + if (radio >= 0) { + c = wiphy->radio[0].iface_combinations; + n = wiphy->radio[0].n_iface_combinations; + } else { + c = wiphy->iface_combinations; + n = wiphy->n_iface_combinations; } + for (i = 0; i < n; i++) + if (nl80211_put_ifcomb_data(msg, large, i + 1, &c[i], nested)) + goto nla_put_failure; nla_nest_end(msg, nl_combis); @@ -2401,6 +2417,80 @@ fail: return -ENOBUFS; } +static int nl80211_put_radio(struct wiphy *wiphy, struct sk_buff *msg, int idx) +{ + const struct wiphy_radio *r = &wiphy->radio[idx]; + struct nlattr *radio, *freq; + int i; + + radio = nla_nest_start(msg, idx); + if (!radio) + return -ENOBUFS; + + if (nla_put_u32(msg, NL80211_WIPHY_RADIO_ATTR_INDEX, idx)) + goto nla_put_failure; + + for (i = 0; i < r->n_freq_range; i++) { + const struct wiphy_radio_freq_range *range = &r->freq_range[i]; + + freq = nla_nest_start(msg, NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE); + if (!freq) + goto nla_put_failure; + + if (nla_put_u32(msg, NL80211_WIPHY_RADIO_FREQ_ATTR_START, + range->start_freq) || + nla_put_u32(msg, NL80211_WIPHY_RADIO_FREQ_ATTR_END, + range->end_freq)) + goto nla_put_failure; + + nla_nest_end(msg, freq); + } + + for (i = 0; i < r->n_iface_combinations; i++) + if (nl80211_put_ifcomb_data(msg, true, + NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION, + &r->iface_combinations[i], + NLA_F_NESTED)) + goto nla_put_failure; + + nla_nest_end(msg, radio); + + return 0; + +nla_put_failure: + return -ENOBUFS; +} + +static int nl80211_put_radios(struct wiphy *wiphy, struct sk_buff *msg) +{ + struct nlattr *radios; + int i; + + if (!wiphy->n_radio) + return 0; + + radios = nla_nest_start(msg, NL80211_ATTR_WIPHY_RADIOS); + if (!radios) + return -ENOBUFS; + + for (i = 0; i < wiphy->n_radio; i++) + if (nl80211_put_radio(wiphy, msg, i)) + goto fail; + + nla_nest_end(msg, radios); + + if 
(nl80211_put_iface_combinations(wiphy, msg, + NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS, + -1, true, NLA_F_NESTED)) + return -ENOBUFS; + + return 0; + +fail: + nla_nest_cancel(msg, radios); + return -ENOBUFS; +} + struct nl80211_dump_wiphy_state { s64 filter_wiphy; long start; @@ -2696,7 +2786,9 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, goto nla_put_failure; if (nl80211_put_iface_combinations(&rdev->wiphy, msg, - state->split)) + NL80211_ATTR_INTERFACE_COMBINATIONS, + rdev->wiphy.n_radio ? 0 : -1, + state->split, 0)) goto nla_put_failure; state->split_start++; @@ -3010,6 +3102,12 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, rdev->wiphy.hw_timestamp_max_peers)) goto nla_put_failure; + state->split_start++; + break; + case 17: + if (nl80211_put_radios(&rdev->wiphy, msg)) + goto nla_put_failure; + /* done */ state->split_start = 0; break; @@ -4487,10 +4585,7 @@ static void get_key_callback(void *c, struct key_params *params) struct nlattr *key; struct get_key_cookie *cookie = c; - if ((params->key && - nla_put(cookie->msg, NL80211_ATTR_KEY_DATA, - params->key_len, params->key)) || - (params->seq && + if ((params->seq && nla_put(cookie->msg, NL80211_ATTR_KEY_SEQ, params->seq_len, params->seq)) || (params->cipher && @@ -4502,10 +4597,7 @@ static void get_key_callback(void *c, struct key_params *params) if (!key) goto nla_put_failure; - if ((params->key && - nla_put(cookie->msg, NL80211_KEY_DATA, - params->key_len, params->key)) || - (params->seq && + if ((params->seq && nla_put(cookie->msg, NL80211_KEY_SEQ, params->seq_len, params->seq)) || (params->cipher && diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index 8f15658002ee..ec3f4aa1c807 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -1532,4 +1532,16 @@ rdev_set_ttlm(struct cfg80211_registered_device *rdev, return ret; } + +static inline u32 +rdev_get_radio_mask(struct cfg80211_registered_device *rdev, + struct net_device *dev) +{ + struct wiphy *wiphy = &rdev->wiphy; + + if (!rdev->ops->get_radio_mask) + return 0; + + return rdev->ops->get_radio_mask(wiphy, dev); +} #endif /* __CFG80211_RDEV_OPS */ diff --git a/net/wireless/util.c b/net/wireless/util.c index af6ec719567f..9a7c3adc8a3b 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -2307,13 +2307,16 @@ static int cfg80211_wdev_bi(struct wireless_dev *wdev) static void cfg80211_calculate_bi_data(struct wiphy *wiphy, u32 new_beacon_int, u32 *beacon_int_gcd, - bool *beacon_int_different) + bool *beacon_int_different, + int radio_idx) { + struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; *beacon_int_gcd = 0; *beacon_int_different = false; + rdev = wiphy_to_rdev(wiphy); list_for_each_entry(wdev, &wiphy->wdev_list, list) { int wdev_bi; @@ -2321,6 +2324,11 @@ static void cfg80211_calculate_bi_data(struct wiphy *wiphy, u32 new_beacon_int, if (wdev->valid_links) continue; + /* skip wdevs not active on the given wiphy radio */ + if (radio_idx >= 0 && + !(rdev_get_radio_mask(rdev, wdev->netdev) & BIT(radio_idx))) + continue; + wdev_bi = cfg80211_wdev_bi(wdev); if (!wdev_bi) @@ -2368,14 +2376,19 @@ int cfg80211_iter_combinations(struct wiphy *wiphy, void *data), void *data) { + const struct wiphy_radio *radio = NULL; + const struct ieee80211_iface_combination *c, *cs; const struct ieee80211_regdomain *regdom; enum nl80211_dfs_regions region = 0; - int i, j, iftype; + int i, j, n, iftype; int num_interfaces = 0; u32 used_iftypes = 0; u32 beacon_int_gcd; bool 
beacon_int_different; + if (params->radio_idx >= 0) + radio = &wiphy->radio[params->radio_idx]; + /* * This is a bit strange, since the iteration used to rely only on * the data given by the driver, but here it now relies on context, @@ -2387,7 +2400,8 @@ int cfg80211_iter_combinations(struct wiphy *wiphy, * interfaces (while being brought up) and channel/radar data. */ cfg80211_calculate_bi_data(wiphy, params->new_beacon_int, - &beacon_int_gcd, &beacon_int_different); + &beacon_int_gcd, &beacon_int_different, + params->radio_idx); if (params->radar_detect) { rcu_read_lock(); @@ -2404,13 +2418,18 @@ int cfg80211_iter_combinations(struct wiphy *wiphy, used_iftypes |= BIT(iftype); } - for (i = 0; i < wiphy->n_iface_combinations; i++) { - const struct ieee80211_iface_combination *c; + if (radio) { + cs = radio->iface_combinations; + n = radio->n_iface_combinations; + } else { + cs = wiphy->iface_combinations; + n = wiphy->n_iface_combinations; + } + for (i = 0; i < n; i++) { struct ieee80211_iface_limit *limits; u32 all_iftypes = 0; - c = &wiphy->iface_combinations[i]; - + c = &cs[i]; if (num_interfaces > c->max_interfaces) continue; if (params->num_different_channels > c->num_different_channels) @@ -2867,3 +2886,38 @@ cfg80211_get_iftype_ext_capa(struct wiphy *wiphy, enum nl80211_iftype type) return NULL; } EXPORT_SYMBOL(cfg80211_get_iftype_ext_capa); + +static bool +ieee80211_radio_freq_range_valid(const struct wiphy_radio *radio, + u32 freq, u32 width) +{ + const struct wiphy_radio_freq_range *r; + int i; + + for (i = 0; i < radio->n_freq_range; i++) { + r = &radio->freq_range[i]; + if (freq - width / 2 >= r->start_freq && + freq + width / 2 <= r->end_freq) + return true; + } + + return false; +} + +bool cfg80211_radio_chandef_valid(const struct wiphy_radio *radio, + const struct cfg80211_chan_def *chandef) +{ + u32 freq, width; + + freq = ieee80211_chandef_to_khz(chandef); + width = nl80211_chan_width_to_mhz(chandef->width); + if (!ieee80211_radio_freq_range_valid(radio, freq, width)) + return false; + + freq = MHZ_TO_KHZ(chandef->center_freq2); + if (freq && !ieee80211_radio_freq_range_valid(radio, freq, width)) + return false; + + return true; +} +EXPORT_SYMBOL(cfg80211_radio_chandef_valid); diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c index abdd22007ed8..e4a79a9b2d58 100644 --- a/security/integrity/ima/ima_fs.c +++ b/security/integrity/ima/ima_fs.c @@ -427,8 +427,6 @@ static void __init remove_securityfs_measurement_lists(struct dentry **lists) kfree(lists); } - - securityfs_measurement_list_count = 0; } static int __init create_securityfs_measurement_lists(void) @@ -625,6 +623,7 @@ out: securityfs_remove(binary_runtime_measurements); remove_securityfs_measurement_lists(ascii_securityfs_measurement_lists); remove_securityfs_measurement_lists(binary_securityfs_measurement_lists); + securityfs_measurement_list_count = 0; securityfs_remove(ima_symlink); securityfs_remove(ima_dir); diff --git a/sound/pci/hda/cs35l41_hda_property.c b/sound/pci/hda/cs35l41_hda_property.c index 51998d1c72ff..80c816922f78 100644 --- a/sound/pci/hda/cs35l41_hda_property.c +++ b/sound/pci/hda/cs35l41_hda_property.c @@ -128,8 +128,8 @@ static const struct cs35l41_config cs35l41_config_table[] = { { "17AA38B5", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 }, { "17AA38B6", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 }, { "17AA38B7", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 }, - { "17AA38C7", 4, 
INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, CS35L41_LEFT, CS35L41_RIGHT }, 0, 2, -1, 1000, 4500, 24 }, - { "17AA38C8", 4, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, CS35L41_LEFT, CS35L41_RIGHT }, 0, 2, -1, 1000, 4500, 24 }, + { "17AA38C7", 4, INTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, CS35L41_RIGHT, CS35L41_LEFT }, 0, 2, -1, 1000, 4500, 24 }, + { "17AA38C8", 4, INTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, CS35L41_RIGHT, CS35L41_LEFT }, 0, 2, -1, 1000, 4500, 24 }, { "17AA38F9", 2, EXTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, 0, 0 }, 0, 2, -1, 0, 0, 0 }, { "17AA38FA", 2, EXTERNAL, { CS35L41_RIGHT, CS35L41_LEFT, 0, 0 }, 0, 2, -1, 0, 0, 0 }, {} diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 811e82474200..766f0b1d3e9d 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -10053,6 +10053,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), + SND_PCI_QUIRK(0x103c, 0x84a6, "HP 250 G7 Notebook PC", ALC269_FIXUP_HP_LINE1_MIC1_LED), SND_PCI_QUIRK(0x103c, 0x84ae, "HP 15-db0403ng", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN), SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), @@ -10383,6 +10384,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE), SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE), + SND_PCI_QUIRK(0x10ec, 0x11bc, "VAIO VJFE-IL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x10ec, 0x124c, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), @@ -10480,6 +10482,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL50NU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xa650, "Clevo NP[567]0SN[CD]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xa671, "Clevo NP70SN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0xa763, "Clevo V54x_6x_TU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xb022, "Clevo NH77D[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), @@ -10655,6 +10658,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC), + SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO), SND_PCI_QUIRK(0x2782, 0x1707, "Vaio VJFE-ADL", ALC298_FIXUP_SPK_VOLUME), SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC), diff --git a/sound/soc/codecs/rt711-sdw.c 
b/sound/soc/codecs/rt711-sdw.c index 8ca8bcd177ab..dfda6bb5c6f8 100644 --- a/sound/soc/codecs/rt711-sdw.c +++ b/sound/soc/codecs/rt711-sdw.c @@ -38,7 +38,9 @@ static bool rt711_readable_register(struct device *dev, unsigned int reg) case 0x8300 ... 0x83ff: case 0x9c00 ... 0x9cff: case 0xb900 ... 0xb9ff: + case 0x752008: case 0x752009: + case 0x75200b: case 0x752011: case 0x75201a: case 0x752045: diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c index c61d298ea6b3..1c823f9eea57 100644 --- a/sound/soc/sof/intel/hda-dai.c +++ b/sound/soc/sof/intel/hda-dai.c @@ -617,12 +617,6 @@ static int hda_dai_suspend(struct hdac_bus *bus) sdai = swidget->private; ops = sdai->platform_private; - ret = hda_link_dma_cleanup(hext_stream->link_substream, - hext_stream, - cpu_dai); - if (ret < 0) - return ret; - /* for consistency with TRIGGER_SUSPEND */ if (ops->post_trigger) { ret = ops->post_trigger(sdev, cpu_dai, @@ -631,6 +625,12 @@ static int hda_dai_suspend(struct hdac_bus *bus) if (ret < 0) return ret; } + + ret = hda_link_dma_cleanup(hext_stream->link_substream, + hext_stream, + cpu_dai); + if (ret < 0) + return ret; } } diff --git a/sound/soc/sof/intel/hda-pcm.c b/sound/soc/sof/intel/hda-pcm.c index 9fb8521b896b..f6e24edd7adb 100644 --- a/sound/soc/sof/intel/hda-pcm.c +++ b/sound/soc/sof/intel/hda-pcm.c @@ -258,6 +258,12 @@ int hda_dsp_pcm_open(struct snd_sof_dev *sdev, snd_pcm_hw_constraint_integer(substream->runtime, SNDRV_PCM_HW_PARAM_PERIODS); + /* Limit the maximum number of periods to not exceed the BDL entries count */ + if (runtime->hw.periods_max > HDA_DSP_MAX_BDL_ENTRIES) + snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS, + runtime->hw.periods_min, + HDA_DSP_MAX_BDL_ENTRIES); + /* Only S16 and S32 supported by HDA hardware when used without DSP */ if (sdev->dspless_mode_selected) snd_pcm_hw_constraint_mask64(substream->runtime, SNDRV_PCM_HW_PARAM_FORMAT, diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c index 51eaed76db97..5a4d3240689e 100644 --- a/tools/bpf/bpftool/gen.c +++ b/tools/bpf/bpftool/gen.c @@ -852,24 +852,41 @@ codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped, bool { struct bpf_map *map; char ident[256]; - size_t i; + size_t i, map_sz; if (!map_cnt) return; + /* for backward compatibility with old libbpf versions that don't + * handle new BPF skeleton with new struct bpf_map_skeleton definition + * that includes link field, avoid specifying new increased size, + * unless we absolutely have to (i.e., if there are struct_ops maps + * present) + */ + map_sz = offsetof(struct bpf_map_skeleton, link); + if (populate_links) { + bpf_object__for_each_map(map, obj) { + if (bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) { + map_sz = sizeof(struct bpf_map_skeleton); + break; + } + } + } + codegen("\ \n\ - \n\ + \n\ /* maps */ \n\ s->map_cnt = %zu; \n\ - s->map_skel_sz = sizeof(*s->maps); \n\ - s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\ + s->map_skel_sz = %zu; \n\ + s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt,\n\ + sizeof(*s->maps) > %zu ? 
sizeof(*s->maps) : %zu);\n\ if (!s->maps) { \n\ err = -ENOMEM; \n\ goto err; \n\ } \n\ ", - map_cnt + map_cnt, map_sz, map_sz, map_sz ); i = 0; bpf_object__for_each_map(map, obj) { @@ -878,23 +895,22 @@ codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped, bool codegen("\ \n\ - \n\ - s->maps[%zu].name = \"%s\"; \n\ - s->maps[%zu].map = &obj->maps.%s; \n\ + \n\ + map = (struct bpf_map_skeleton *)((char *)s->maps + %zu * s->map_skel_sz);\n\ + map->name = \"%s\"; \n\ + map->map = &obj->maps.%s; \n\ ", - i, bpf_map__name(map), i, ident); + i, bpf_map__name(map), ident); /* memory-mapped internal maps */ if (mmaped && is_mmapable_map(map, ident, sizeof(ident))) { - printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n", - i, ident); + printf("\tmap->mmaped = (void **)&obj->%s;\n", ident); } if (populate_links && bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) { codegen("\ \n\ - s->maps[%zu].link = &obj->links.%s;\n\ - ", - i, ident); + map->link = &obj->links.%s; \n\ + ", ident); } i++; } @@ -1463,6 +1479,7 @@ static int do_skeleton(int argc, char **argv) %1$s__create_skeleton(struct %1$s *obj) \n\ { \n\ struct bpf_object_skeleton *s; \n\ + struct bpf_map_skeleton *map __attribute__((unused));\n\ int err; \n\ \n\ s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\ @@ -1753,6 +1770,7 @@ static int do_subskeleton(int argc, char **argv) { \n\ struct %1$s *obj; \n\ struct bpf_object_subskeleton *s; \n\ + struct bpf_map_skeleton *map __attribute__((unused));\n\ int err; \n\ \n\ obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\ diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 30f121754d83..a3be6f8fac09 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -13712,14 +13712,15 @@ int libbpf_num_possible_cpus(void) static int populate_skeleton_maps(const struct bpf_object *obj, struct bpf_map_skeleton *maps, - size_t map_cnt) + size_t map_cnt, size_t map_skel_sz) { int i; for (i = 0; i < map_cnt; i++) { - struct bpf_map **map = maps[i].map; - const char *name = maps[i].name; - void **mmaped = maps[i].mmaped; + struct bpf_map_skeleton *map_skel = (void *)maps + i * map_skel_sz; + struct bpf_map **map = map_skel->map; + const char *name = map_skel->name; + void **mmaped = map_skel->mmaped; *map = bpf_object__find_map_by_name(obj, name); if (!*map) { @@ -13736,13 +13737,14 @@ static int populate_skeleton_maps(const struct bpf_object *obj, static int populate_skeleton_progs(const struct bpf_object *obj, struct bpf_prog_skeleton *progs, - size_t prog_cnt) + size_t prog_cnt, size_t prog_skel_sz) { int i; for (i = 0; i < prog_cnt; i++) { - struct bpf_program **prog = progs[i].prog; - const char *name = progs[i].name; + struct bpf_prog_skeleton *prog_skel = (void *)progs + i * prog_skel_sz; + struct bpf_program **prog = prog_skel->prog; + const char *name = prog_skel->name; *prog = bpf_object__find_program_by_name(obj, name); if (!*prog) { @@ -13783,13 +13785,13 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s, } *s->obj = obj; - err = populate_skeleton_maps(obj, s->maps, s->map_cnt); + err = populate_skeleton_maps(obj, s->maps, s->map_cnt, s->map_skel_sz); if (err) { pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err); return libbpf_err(err); } - err = populate_skeleton_progs(obj, s->progs, s->prog_cnt); + err = populate_skeleton_progs(obj, s->progs, s->prog_cnt, s->prog_skel_sz); if (err) { pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err); return libbpf_err(err); @@ -13819,20 +13821,20 @@ int 
bpf_object__open_subskeleton(struct bpf_object_subskeleton *s) return libbpf_err(-errno); } - err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt); + err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt, s->map_skel_sz); if (err) { pr_warn("failed to populate subskeleton maps: %d\n", err); return libbpf_err(err); } - err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt); + err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt, s->prog_skel_sz); if (err) { pr_warn("failed to populate subskeleton maps: %d\n", err); return libbpf_err(err); } for (var_idx = 0; var_idx < s->var_cnt; var_idx++) { - var_skel = &s->vars[var_idx]; + var_skel = (void *)s->vars + var_idx * s->var_skel_sz; map = *var_skel->map; map_type_id = bpf_map__btf_value_type_id(map); map_type = btf__type_by_id(btf, map_type_id); @@ -13879,10 +13881,11 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s) } for (i = 0; i < s->map_cnt; i++) { - struct bpf_map *map = *s->maps[i].map; + struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz; + struct bpf_map *map = *map_skel->map; size_t mmap_sz = bpf_map_mmap_sz(map); int prot, map_fd = map->fd; - void **mmaped = s->maps[i].mmaped; + void **mmaped = map_skel->mmaped; if (!mmaped) continue; @@ -13930,8 +13933,9 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s) int i, err; for (i = 0; i < s->prog_cnt; i++) { - struct bpf_program *prog = *s->progs[i].prog; - struct bpf_link **link = s->progs[i].link; + struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz; + struct bpf_program *prog = *prog_skel->prog; + struct bpf_link **link = prog_skel->link; if (!prog->autoload || !prog->autoattach) continue; @@ -13963,31 +13967,34 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s) */ } - /* Skeleton is created with earlier version of bpftool - * which does not support auto-attachment - */ - if (s->map_skel_sz < sizeof(struct bpf_map_skeleton)) - return 0; for (i = 0; i < s->map_cnt; i++) { - struct bpf_map *map = *s->maps[i].map; - struct bpf_link **link = s->maps[i].link; + struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz; + struct bpf_map *map = *map_skel->map; + struct bpf_link **link; if (!map->autocreate || !map->autoattach) continue; - if (*link) - continue; - /* only struct_ops maps can be attached */ if (!bpf_map__is_struct_ops(map)) continue; - *link = bpf_map__attach_struct_ops(map); + /* skeleton is created with earlier version of bpftool, notify user */ + if (s->map_skel_sz < offsetofend(struct bpf_map_skeleton, link)) { + pr_warn("map '%s': BPF skeleton version is old, skipping map auto-attachment...\n", + bpf_map__name(map)); + continue; + } + + link = map_skel->link; + if (*link) + continue; + + *link = bpf_map__attach_struct_ops(map); if (!*link) { err = -errno; - pr_warn("map '%s': failed to auto-attach: %d\n", - bpf_map__name(map), err); + pr_warn("map '%s': failed to auto-attach: %d\n", bpf_map__name(map), err); return libbpf_err(err); } } @@ -14000,7 +14007,8 @@ void bpf_object__detach_skeleton(struct bpf_object_skeleton *s) int i; for (i = 0; i < s->prog_cnt; i++) { - struct bpf_link **link = s->progs[i].link; + struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz; + struct bpf_link **link = prog_skel->link; bpf_link__destroy(*link); *link = NULL; @@ -14010,7 +14018,8 @@ void bpf_object__detach_skeleton(struct bpf_object_skeleton *s) return; for (i = 0; i < s->map_cnt; i++) { - struct bpf_link **link = s->maps[i].link; + 
struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz; + struct bpf_link **link = map_skel->link; if (link) { bpf_link__destroy(*link); diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c index 233f2b6edf52..49b79cf0c5cc 100644 --- a/tools/perf/util/comm.c +++ b/tools/perf/util/comm.c @@ -86,14 +86,6 @@ static struct comm_str *comm_str__new(const char *str) return result; } -static int comm_str__cmp(const void *_lhs, const void *_rhs) -{ - const struct comm_str *lhs = *(const struct comm_str * const *)_lhs; - const struct comm_str *rhs = *(const struct comm_str * const *)_rhs; - - return strcmp(comm_str__str(lhs), comm_str__str(rhs)); -} - static int comm_str__search(const void *_key, const void *_member) { const char *key = _key; @@ -169,9 +161,24 @@ static struct comm_str *comm_strs__findnew(const char *str) } result = comm_str__new(str); if (result) { - comm_strs->strs[comm_strs->num_strs++] = result; - qsort(comm_strs->strs, comm_strs->num_strs, sizeof(struct comm_str *), - comm_str__cmp); + int low = 0, high = comm_strs->num_strs - 1; + int insert = comm_strs->num_strs; /* Default to inserting at the end. */ + + while (low <= high) { + int mid = low + (high - low) / 2; + int cmp = strcmp(comm_str__str(comm_strs->strs[mid]), str); + + if (cmp < 0) { + low = mid + 1; + } else { + high = mid - 1; + insert = mid; + } + } + memmove(&comm_strs->strs[insert + 1], &comm_strs->strs[insert], + (comm_strs->num_strs - insert) * sizeof(struct comm_str *)); + comm_strs->num_strs++; + comm_strs->strs[insert] = result; } } up_write(&comm_strs->lock); diff --git a/tools/perf/util/dsos.c b/tools/perf/util/dsos.c index ab3d0c01dd63..a69a9c661200 100644 --- a/tools/perf/util/dsos.c +++ b/tools/perf/util/dsos.c @@ -203,11 +203,27 @@ int __dsos__add(struct dsos *dsos, struct dso *dso) dsos->dsos = temp; dsos->allocated = to_allocate; } - dsos->dsos[dsos->cnt++] = dso__get(dso); - if (dsos->cnt >= 2 && dsos->sorted) { - dsos->sorted = dsos__cmp_long_name_id_short_name(&dsos->dsos[dsos->cnt - 2], - &dsos->dsos[dsos->cnt - 1]) - <= 0; + if (!dsos->sorted) { + dsos->dsos[dsos->cnt++] = dso__get(dso); + } else { + int low = 0, high = dsos->cnt - 1; + int insert = dsos->cnt; /* Default to inserting at the end. */ + + while (low <= high) { + int mid = low + (high - low) / 2; + int cmp = dsos__cmp_long_name_id_short_name(&dsos->dsos[mid], &dso); + + if (cmp < 0) { + low = mid + 1; + } else { + high = mid - 1; + insert = mid; + } + } + memmove(&dsos->dsos[insert + 1], &dsos->dsos[insert], + (dsos->cnt - insert) * sizeof(struct dso *)); + dsos->cnt++; + dsos->dsos[insert] = dso__get(dso); } dso__set_dsos(dso, dsos); return 0; diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64 index 901349da680f..3c7c3e79aa93 100644 --- a/tools/testing/selftests/bpf/DENYLIST.aarch64 +++ b/tools/testing/selftests/bpf/DENYLIST.aarch64 @@ -1,5 +1,6 @@ bpf_cookie/multi_kprobe_attach_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3 bpf_cookie/multi_kprobe_link_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3 +fexit_sleep # The test never returns. The remaining tests cannot start. 
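The comm.c and dsos.c hunks above replace the old append-then-qsort() approach with an insertion that keeps the array sorted. Stripped of the perf-specific types, the pattern both hunks use looks like the sketch below (plain ints; the caller is assumed to have grown the array by one slot beforehand):

    #include <string.h>

    /* Insert val into the sorted array arr of *cnt elements: binary
     * search for the lowest index holding an element >= val, shift the
     * tail right by one slot, then store val there.
     */
    static void sorted_insert(int *arr, int *cnt, int val)
    {
            int low = 0, high = *cnt - 1;
            int insert = *cnt; /* default: insert at the end */

            while (low <= high) {
                    int mid = low + (high - low) / 2;

                    if (arr[mid] < val) {
                            low = mid + 1;
                    } else {
                            high = mid - 1;
                            insert = mid;
                    }
            }
            memmove(&arr[insert + 1], &arr[insert],
                    (*cnt - insert) * sizeof(*arr));
            arr[insert] = val;
            (*cnt)++;
    }

Each insertion drops from the O(n log n) re-sort to an O(log n) search plus an O(n) memmove(), and the array stays sorted for the binary searches both files already perform on lookup.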
kprobe_multi_bench_attach # needs CONFIG_FPROBE kprobe_multi_test # needs CONFIG_FPROBE module_attach # prog 'kprobe_multi': failed to auto-attach: -95 diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config index 5291e97df749..4ca84c8d9116 100644 --- a/tools/testing/selftests/bpf/config +++ b/tools/testing/selftests/bpf/config @@ -58,9 +58,12 @@ CONFIG_MPLS=y CONFIG_MPLS_IPTUNNEL=y CONFIG_MPLS_ROUTING=y CONFIG_MPTCP=y +CONFIG_NET_ACT_SKBMOD=y +CONFIG_NET_CLS=y CONFIG_NET_CLS_ACT=y CONFIG_NET_CLS_BPF=y CONFIG_NET_CLS_FLOWER=y +CONFIG_NET_CLS_MATCHALL=y CONFIG_NET_FOU=y CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_NET_IPGRE=y diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c index 44c2c8fa542a..e0cba4178e41 100644 --- a/tools/testing/selftests/bpf/network_helpers.c +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -106,7 +106,7 @@ static int __start_server(int type, const struct sockaddr *addr, socklen_t addrl } if (type == SOCK_STREAM) { - if (listen(fd, 1) < 0) { + if (listen(fd, opts->backlog ? MAX(opts->backlog, 0) : 1) < 0) { log_err("Failed to listed on socket"); goto error_close; } diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h index 9ea36524b9db..aac5b94d6379 100644 --- a/tools/testing/selftests/bpf/network_helpers.h +++ b/tools/testing/selftests/bpf/network_helpers.h @@ -25,6 +25,16 @@ struct network_helper_opts { int timeout_ms; bool must_fail; int proto; + /* +ve: Passed to listen() as-is. + * 0: Default when the test does not set + * a particular value during the struct init. + * It is changed to 1 before passing to listen(). + * Most tests only have one on-going connection. + * -ve: It is changed to 0 before passing to listen(). + * It is useful to force syncookie without + * changing the "tcp_syncookies" sysctl from 1 to 2. 
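To make the new backlog knob concrete, a minimal usage sketch follows; start_server_str() and struct network_helper_opts are the selftest helpers touched by this patch, while the address and values are illustrative:

    /* Sketch: force every incoming SYN onto the syncookie path.
     * backlog = -1 is rewritten to 0 before the listen() call, so the
     * kernel answers new SYNs with syncookies without flipping the
     * tcp_syncookies sysctl from 1 to 2; a positive value such as 128
     * would be passed to listen() unchanged.
     */
    static int demo_syncookie_server(void)
    {
            struct network_helper_opts opts = {
                    .backlog = -1,
            };

            return start_server_str(AF_INET, SOCK_STREAM,
                                    "127.0.0.1", 0, &opts);
    }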
+ */ + int backlog; int (*post_socket_cb)(int fd, void *opts); void *cb_opts; }; diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c index bceff5900016..63422f4f3896 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c @@ -411,7 +411,8 @@ static void test_update_ca(void) return; link = bpf_map__attach_struct_ops(skel->maps.ca_update_1); - ASSERT_OK_PTR(link, "attach_struct_ops"); + if (!ASSERT_OK_PTR(link, "attach_struct_ops")) + goto out; do_test(&opts); saved_ca1_cnt = skel->bss->ca1_cnt; @@ -425,6 +426,7 @@ static void test_update_ca(void) ASSERT_GT(skel->bss->ca2_cnt, 0, "ca2_ca2_cnt"); bpf_link__destroy(link); +out: tcp_ca_update__destroy(skel); } @@ -447,7 +449,8 @@ static void test_update_wrong(void) return; link = bpf_map__attach_struct_ops(skel->maps.ca_update_1); - ASSERT_OK_PTR(link, "attach_struct_ops"); + if (!ASSERT_OK_PTR(link, "attach_struct_ops")) + goto out; do_test(&opts); saved_ca1_cnt = skel->bss->ca1_cnt; @@ -460,6 +463,7 @@ static void test_update_wrong(void) ASSERT_GT(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt"); bpf_link__destroy(link); +out: tcp_ca_update__destroy(skel); } @@ -481,7 +485,8 @@ static void test_mixed_links(void) return; link_nl = bpf_map__attach_struct_ops(skel->maps.ca_no_link); - ASSERT_OK_PTR(link_nl, "attach_struct_ops_nl"); + if (!ASSERT_OK_PTR(link_nl, "attach_struct_ops_nl")) + goto out; link = bpf_map__attach_struct_ops(skel->maps.ca_update_1); ASSERT_OK_PTR(link, "attach_struct_ops"); @@ -494,6 +499,7 @@ static void test_mixed_links(void) bpf_link__destroy(link); bpf_link__destroy(link_nl); +out: tcp_ca_update__destroy(skel); } @@ -536,7 +542,8 @@ static void test_link_replace(void) bpf_link__destroy(link); link = bpf_map__attach_struct_ops(skel->maps.ca_update_2); - ASSERT_OK_PTR(link, "attach_struct_ops_2nd"); + if (!ASSERT_OK_PTR(link, "attach_struct_ops_2nd")) + goto out; /* BPF_F_REPLACE with a wrong old map Fd. It should fail! * @@ -559,6 +566,7 @@ static void test_link_replace(void) bpf_link__destroy(link); +out: tcp_ca_update__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c index 597d0467a926..ae87c00867ba 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c @@ -77,6 +77,12 @@ struct test { bool reuseport_has_conns; /* Add a connected socket to reuseport group */ }; +struct cb_opts { + int family; + int sotype; + bool reuseport; +}; + static __u32 duration; /* for CHECK macro */ static bool is_ipv6(const char *ip) @@ -142,19 +148,14 @@ static int make_socket(int sotype, const char *ip, int port, return fd; } -static int make_server(int sotype, const char *ip, int port, - struct bpf_program *reuseport_prog) +static int setsockopts(int fd, void *opts) { - struct sockaddr_storage addr = {0}; + struct cb_opts *co = (struct cb_opts *)opts; const int one = 1; - int err, fd = -1; - - fd = make_socket(sotype, ip, port, &addr); - if (fd < 0) - return -1; + int err = 0; /* Enabled for UDPv6 sockets for IPv4-mapped IPv6 to work. 
*/ - if (sotype == SOCK_DGRAM) { + if (co->sotype == SOCK_DGRAM) { err = setsockopt(fd, SOL_IP, IP_RECVORIGDSTADDR, &one, sizeof(one)); if (CHECK(err, "setsockopt(IP_RECVORIGDSTADDR)", "failed\n")) { @@ -163,7 +164,7 @@ static int make_server(int sotype, const char *ip, int port, } } - if (sotype == SOCK_DGRAM && addr.ss_family == AF_INET6) { + if (co->sotype == SOCK_DGRAM && co->family == AF_INET6) { err = setsockopt(fd, SOL_IPV6, IPV6_RECVORIGDSTADDR, &one, sizeof(one)); if (CHECK(err, "setsockopt(IPV6_RECVORIGDSTADDR)", "failed\n")) { @@ -172,7 +173,7 @@ static int make_server(int sotype, const char *ip, int port, } } - if (sotype == SOCK_STREAM) { + if (co->sotype == SOCK_STREAM) { err = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)); if (CHECK(err, "setsockopt(SO_REUSEADDR)", "failed\n")) { @@ -181,7 +182,7 @@ static int make_server(int sotype, const char *ip, int port, } } - if (reuseport_prog) { + if (co->reuseport) { err = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)); if (CHECK(err, "setsockopt(SO_REUSEPORT)", "failed\n")) { @@ -190,19 +191,28 @@ static int make_server(int sotype, const char *ip, int port, } } - err = bind(fd, (void *)&addr, inetaddr_len(&addr)); - if (CHECK(err, "bind", "failed\n")) { - log_err("failed to bind listen socket"); - goto fail; - } +fail: + return err; +} - if (sotype == SOCK_STREAM) { - err = listen(fd, SOMAXCONN); - if (CHECK(err, "make_server", "listen")) { - log_err("failed to listen on port %d", port); - goto fail; - } - } +static int make_server(int sotype, const char *ip, int port, + struct bpf_program *reuseport_prog) +{ + struct cb_opts cb_opts = { + .family = is_ipv6(ip) ? AF_INET6 : AF_INET, + .sotype = sotype, + .reuseport = reuseport_prog, + }; + struct network_helper_opts opts = { + .backlog = SOMAXCONN, + .post_socket_cb = setsockopts, + .cb_opts = &cb_opts, + }; + int err, fd; + + fd = start_server_str(cb_opts.family, sotype, ip, port, &opts); + if (!ASSERT_OK_FD(fd, "start_server_str")) + return -1; /* Late attach reuseport prog so we can have one init path */ if (reuseport_prog) { @@ -406,18 +416,12 @@ static int udp_recv_send(int server_fd) } /* Reply from original destination address. */ - fd = socket(dst_addr->ss_family, SOCK_DGRAM, 0); - if (CHECK(fd < 0, "socket", "failed\n")) { + fd = start_server_addr(SOCK_DGRAM, dst_addr, sizeof(*dst_addr), NULL); + if (!ASSERT_OK_FD(fd, "start_server_addr")) { log_err("failed to create tx socket"); return -1; } - ret = bind(fd, (struct sockaddr *)dst_addr, sizeof(*dst_addr)); - if (CHECK(ret, "bind", "failed\n")) { - log_err("failed to bind tx socket"); - goto out; - } - msg.msg_control = NULL; msg.msg_controllen = 0; n = sendmsg(fd, &msg, 0); @@ -629,9 +633,6 @@ static void run_lookup_prog(const struct test *t) * BPF socket lookup. 
*/ if (t->reuseport_has_conns) { - struct sockaddr_storage addr = {}; - socklen_t len = sizeof(addr); - /* Add an extra socket to reuseport group */ reuse_conn_fd = make_server(t->sotype, t->listen_at.ip, t->listen_at.port, @@ -639,12 +640,9 @@ static void run_lookup_prog(const struct test *t) if (reuse_conn_fd < 0) goto close; - /* Connect the extra socket to itself */ - err = getsockname(reuse_conn_fd, (void *)&addr, &len); - if (CHECK(err, "getsockname", "errno %d\n", errno)) - goto close; - err = connect(reuse_conn_fd, (void *)&addr, len); - if (CHECK(err, "connect", "errno %d\n", errno)) + /* Connect the extra socket to itself */ + err = connect_fd_to_fd(reuse_conn_fd, reuse_conn_fd, 0); + if (!ASSERT_OK(err, "connect_fd_to_fd")) goto close; } @@ -994,7 +992,7 @@ static void drop_on_reuseport(const struct test *t) err = update_lookup_map(t->sock_map, SERVER_A, server1); if (err) - goto detach; + goto close_srv1; /* second server on destination address we should never reach */ server2 = make_server(t->sotype, t->connect_to.ip, t->connect_to.port, diff --git a/tools/testing/selftests/bpf/prog_tests/tc_links.c b/tools/testing/selftests/bpf/prog_tests/tc_links.c index bc9841144685..1af9ec1149aa 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_links.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_links.c @@ -9,6 +9,8 @@ #define ping_cmd "ping -q -c1 -w1 127.0.0.1 > /dev/null" #include "test_tc_link.skel.h" + +#include "netlink_helpers.h" #include "tc_helpers.h" void serial_test_tc_links_basic(void) @@ -1787,6 +1789,65 @@ void serial_test_tc_links_ingress(void) test_tc_links_ingress(BPF_TCX_INGRESS, false, false); } +struct qdisc_req { + struct nlmsghdr n; + struct tcmsg t; + char buf[1024]; +}; + +static int qdisc_replace(int ifindex, const char *kind, bool block) +{ + struct rtnl_handle rth = { .fd = -1 }; + struct qdisc_req req; + int err; + + err = rtnl_open(&rth, 0); + if (!ASSERT_OK(err, "open_rtnetlink")) + return err; + + memset(&req, 0, sizeof(req)); + req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)); + req.n.nlmsg_flags = NLM_F_CREATE | NLM_F_REPLACE | NLM_F_REQUEST; + req.n.nlmsg_type = RTM_NEWQDISC; + req.t.tcm_family = AF_UNSPEC; + req.t.tcm_ifindex = ifindex; + req.t.tcm_parent = 0xfffffff1; + + addattr_l(&req.n, sizeof(req), TCA_KIND, kind, strlen(kind) + 1); + if (block) + addattr32(&req.n, sizeof(req), TCA_INGRESS_BLOCK, 1); + + err = rtnl_talk(&rth, &req.n, NULL); + ASSERT_OK(err, "talk_rtnetlink"); + rtnl_close(&rth); + return err; +} + +void serial_test_tc_links_dev_chain0(void) +{ + int err, ifindex; + + ASSERT_OK(system("ip link add dev foo type veth peer name bar"), "add veth"); + ifindex = if_nametoindex("foo"); + ASSERT_NEQ(ifindex, 0, "non_zero_ifindex"); + err = qdisc_replace(ifindex, "ingress", true); + if (!ASSERT_OK(err, "attaching ingress")) + goto cleanup; + ASSERT_OK(system("tc filter add block 1 matchall action skbmod swap mac"), "add block"); + err = qdisc_replace(ifindex, "clsact", false); + if (!ASSERT_OK(err, "attaching clsact")) + goto cleanup; + /* Heuristic: kern_sync_rcu() alone does not work; a wait-time of ~5s + * triggered the issue without the fix reliably 100% of the time. 
+ */ + sleep(5); + ASSERT_OK(system("tc filter add dev foo ingress matchall action skbmod swap mac"), "add filter"); +cleanup: + ASSERT_OK(system("ip link del dev foo"), "del veth"); + ASSERT_EQ(if_nametoindex("foo"), 0, "foo removed"); + ASSERT_EQ(if_nametoindex("bar"), 0, "bar removed"); +} + static void test_tc_links_dev_mixed(int target) { LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); diff --git a/tools/testing/selftests/bpf/prog_tests/timer_lockup.c b/tools/testing/selftests/bpf/prog_tests/timer_lockup.c new file mode 100644 index 000000000000..871d16cb95cf --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/timer_lockup.c @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define _GNU_SOURCE +#include <sched.h> +#include <test_progs.h> +#include <pthread.h> +#include <network_helpers.h> + +#include "timer_lockup.skel.h" + +static long cpu; +static int *timer1_err; +static int *timer2_err; +static bool skip; + +volatile int k = 0; + +static void *timer_lockup_thread(void *arg) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1000, + ); + int i, prog_fd = *(int *)arg; + cpu_set_t cpuset; + + CPU_ZERO(&cpuset); + CPU_SET(__sync_fetch_and_add(&cpu, 1), &cpuset); + ASSERT_OK(pthread_setaffinity_np(pthread_self(), sizeof(cpuset), + &cpuset), + "cpu affinity"); + + for (i = 0; !READ_ONCE(*timer1_err) && !READ_ONCE(*timer2_err); i++) { + bpf_prog_test_run_opts(prog_fd, &opts); + /* Skip the test if we can't reproduce the race in a reasonable + * amount of time. + */ + if (i > 50) { + WRITE_ONCE(skip, true); + break; + } + } + + return NULL; +} + +void test_timer_lockup(void) +{ + int timer1_prog, timer2_prog; + struct timer_lockup *skel; + pthread_t thrds[2]; + void *ret; + + skel = timer_lockup__open_and_load(); + if (!ASSERT_OK_PTR(skel, "timer_lockup__open_and_load")) + return; + + timer1_prog = bpf_program__fd(skel->progs.timer1_prog); + timer2_prog = bpf_program__fd(skel->progs.timer2_prog); + + timer1_err = &skel->bss->timer1_err; + timer2_err = &skel->bss->timer2_err; + + if (!ASSERT_OK(pthread_create(&thrds[0], NULL, timer_lockup_thread, + &timer1_prog), + "pthread_create thread1")) + goto out; + if (!ASSERT_OK(pthread_create(&thrds[1], NULL, timer_lockup_thread, + &timer2_prog), + "pthread_create thread2")) { + pthread_exit(&thrds[0]); + goto out; + } + + pthread_join(thrds[1], &ret); + pthread_join(thrds[0], &ret); + + if (skip) { + test__skip(); + goto out; + } + + if (*timer1_err != -EDEADLK && *timer1_err != 0) + ASSERT_FAIL("timer1_err bad value"); + if (*timer2_err != -EDEADLK && *timer2_err != 0) + ASSERT_FAIL("timer2_err bad value"); +out: + timer_lockup__destroy(skel); + return; +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c index f09505f8b038..53d6ad8c2257 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c @@ -222,7 +222,7 @@ static void test_xdp_adjust_frags_tail_grow(void) prog = bpf_object__next_program(obj, NULL); if (bpf_object__load(obj)) - return; + goto out; prog_fd = bpf_program__fd(prog); diff --git a/tools/testing/selftests/bpf/progs/nested_trust_failure.c b/tools/testing/selftests/bpf/progs/nested_trust_failure.c index ea39497f11ed..3568ec450100 100644 --- a/tools/testing/selftests/bpf/progs/nested_trust_failure.c +++ b/tools/testing/selftests/bpf/progs/nested_trust_failure.c @@ -31,14 +31,6 @@ int 
BPF_PROG(test_invalid_nested_user_cpus, struct task_struct *task, u64 clone_ return 0; } -SEC("tp_btf/task_newtask") -__failure __msg("R1 must have zero offset when passed to release func or trusted arg to kfunc") -int BPF_PROG(test_invalid_nested_offset, struct task_struct *task, u64 clone_flags) -{ - bpf_cpumask_first_zero(&task->cpus_mask); - return 0; -} - /* Although R2 is of type sk_buff but sock_common is expected, we will hit untrusted ptr first. */ SEC("tp_btf/tcp_probe") __failure __msg("R2 type=untrusted_ptr_ expected=ptr_, trusted_ptr_, rcu_ptr_") diff --git a/tools/testing/selftests/bpf/progs/nested_trust_success.c b/tools/testing/selftests/bpf/progs/nested_trust_success.c index 833840bffd3b..2b66953ca82e 100644 --- a/tools/testing/selftests/bpf/progs/nested_trust_success.c +++ b/tools/testing/selftests/bpf/progs/nested_trust_success.c @@ -32,3 +32,11 @@ int BPF_PROG(test_skb_field, struct sock *sk, struct sk_buff *skb) bpf_sk_storage_get(&sk_storage_map, skb->sk, 0, 0); return 0; } + +SEC("tp_btf/task_newtask") +__success +int BPF_PROG(test_nested_offset, struct task_struct *task, u64 clone_flags) +{ + bpf_cpumask_first_zero(&task->cpus_mask); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/timer_lockup.c b/tools/testing/selftests/bpf/progs/timer_lockup.c new file mode 100644 index 000000000000..3e520133281e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/timer_lockup.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <time.h> +#include <errno.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +struct elem { + struct bpf_timer t; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct elem); +} timer1_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct elem); +} timer2_map SEC(".maps"); + +int timer1_err; +int timer2_err; + +static int timer_cb1(void *map, int *k, struct elem *v) +{ + struct bpf_timer *timer; + int key = 0; + + timer = bpf_map_lookup_elem(&timer2_map, &key); + if (timer) + timer2_err = bpf_timer_cancel(timer); + + return 0; +} + +static int timer_cb2(void *map, int *k, struct elem *v) +{ + struct bpf_timer *timer; + int key = 0; + + timer = bpf_map_lookup_elem(&timer1_map, &key); + if (timer) + timer1_err = bpf_timer_cancel(timer); + + return 0; +} + +SEC("tc") +int timer1_prog(void *ctx) +{ + struct bpf_timer *timer; + int key = 0; + + timer = bpf_map_lookup_elem(&timer1_map, &key); + if (timer) { + bpf_timer_init(timer, &timer1_map, CLOCK_BOOTTIME); + bpf_timer_set_callback(timer, timer_cb1); + bpf_timer_start(timer, 1, BPF_F_TIMER_CPU_PIN); + } + + return 0; +} + +SEC("tc") +int timer2_prog(void *ctx) +{ + struct bpf_timer *timer; + int key = 0; + + timer = bpf_map_lookup_elem(&timer2_map, &key); + if (timer) { + bpf_timer_init(timer, &timer2_map, CLOCK_BOOTTIME); + bpf_timer_set_callback(timer, timer_cb2); + bpf_timer_start(timer, 1, BPF_F_TIMER_CPU_PIN); + } + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/xdp_flowtable.c b/tools/testing/selftests/bpf/progs/xdp_flowtable.c index 15209650f73b..7fdc7b23ee74 100644 --- a/tools/testing/selftests/bpf/progs/xdp_flowtable.c +++ b/tools/testing/selftests/bpf/progs/xdp_flowtable.c @@ -58,6 +58,10 @@ static bool xdp_flowtable_offload_check_tcp_state(void *ports, void *data_end, return true; } +struct 
flow_ports___local { + __be16 source, dest; +} __attribute__((preserve_access_index)); + SEC("xdp.frags") int xdp_flowtable_do_lookup(struct xdp_md *ctx) { @@ -69,7 +73,7 @@ int xdp_flowtable_do_lookup(struct xdp_md *ctx) }; void *data = (void *)(long)ctx->data; struct ethhdr *eth = data; - struct flow_ports *ports; + struct flow_ports___local *ports; __u32 *val, key = 0; if (eth + 1 > data_end) @@ -79,7 +83,7 @@ int xdp_flowtable_do_lookup(struct xdp_md *ctx) case bpf_htons(ETH_P_IP): { struct iphdr *iph = data + sizeof(*eth); - ports = (struct flow_ports *)(iph + 1); + ports = (struct flow_ports___local *)(iph + 1); if (ports + 1 > data_end) return XDP_PASS; @@ -106,7 +110,7 @@ int xdp_flowtable_do_lookup(struct xdp_md *ctx) struct in6_addr *dst = (struct in6_addr *)tuple.ipv6_dst; struct ipv6hdr *ip6h = data + sizeof(*eth); - ports = (struct flow_ports *)(ip6h + 1); + ports = (struct flow_ports___local *)(ip6h + 1); if (ports + 1 > data_end) return XDP_PASS; diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index 0ba5a20b19ba..51341d50213b 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -377,6 +377,15 @@ int test__join_cgroup(const char *path); ___ok; \ }) +#define ASSERT_OK_FD(fd, name) ({ \ + static int duration = 0; \ + int ___fd = (fd); \ + bool ___ok = ___fd >= 0; \ + CHECK(!___ok, (name), "unexpected fd: %d (errno %d)\n", \ + ___fd, errno); \ + ___ok; \ +}) + #define SYS(goto_label, fmt, ...) \ ({ \ char cmd[1024]; \ diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c index ab25a81fd3a1..d0cdd156cd55 100644 --- a/tools/testing/selftests/bpf/verifier/calls.c +++ b/tools/testing/selftests/bpf/verifier/calls.c @@ -76,7 +76,7 @@ }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .result = REJECT, - .errstr = "R1 must have zero offset when passed to release func or trusted arg to kfunc", + .errstr = "arg#0 expected pointer to ctx, but got PTR", .fixup_kfunc_btf_id = { { "bpf_kfunc_call_test_pass_ctx", 2 }, }, @@ -276,6 +276,19 @@ .result = ACCEPT, }, { + "calls: invalid kfunc call: must provide (attach_prog_fd, btf_id) pair when freplace", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_EXT, + .result = REJECT, + .errstr = "Tracing programs must provide btf_id", + .fixup_kfunc_btf_id = { + { "bpf_dynptr_from_skb", 0 }, + }, +}, +{ "calls: basic sanity", .insns = { BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh index 194c8fc2e55a..438280e68434 100644 --- a/tools/testing/selftests/net/mptcp/mptcp_lib.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh @@ -428,8 +428,8 @@ mptcp_lib_check_tools() { } mptcp_lib_ns_init() { - if ! setup_ns ${@}; then - mptcp_lib_pr_fail "Failed to setup namespace ${@}" + if ! 
setup_ns "${@}"; then + mptcp_lib_pr_fail "Failed to setup namespaces ${*}" exit ${KSFT_FAIL} fi diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh index bc71dbc18b21..cc0bfae2bafa 100755 --- a/tools/testing/selftests/net/openvswitch/openvswitch.sh +++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh @@ -11,6 +11,11 @@ ksft_skip=4 PAUSE_ON_FAIL=no VERBOSE=0 TRACING=0 +WAIT_TIMEOUT=5 + +if test "X$KSFT_MACHINE_SLOW" == "Xyes"; then + WAIT_TIMEOUT=10 +fi tests=" arp_ping eth-arp: Basic arp ping between two NS @@ -29,6 +34,30 @@ info() { [ $VERBOSE = 0 ] || echo $* } +ovs_wait() { + info "waiting $WAIT_TIMEOUT s for: $@" + + if "$@" ; then + info "wait succeeded immediately" + return 0 + fi + + # A quick re-check helps speed up small races in fast systems. + # However, fractional sleeps might not necessarily work. + local start=0 + sleep 0.1 || { sleep 1; start=1; } + + for (( i=start; i<WAIT_TIMEOUT; i++ )); do + if "$@" ; then + info "wait succeeded after $i seconds" + return 0 + fi + sleep 1 + done + info "wait failed after $i seconds" + return 1 +} + ovs_base=`pwd` sbxs= sbx_add () { @@ -278,20 +307,19 @@ test_psample() { # Record psample data. ovs_spawn_daemon "test_psample" python3 $ovs_base/ovs-dpctl.py psample-events + ovs_wait grep -q "listening for psample events" ${ovs_dir}/stdout # Send a single ping. - sleep 1 ovs_sbx "test_psample" ip netns exec client ping -I c1 172.31.110.20 -c 1 || return 1 - sleep 1 # We should have received one userspace action upcall and 2 psample packets. - grep -E "userspace action command" $ovs_dir/s0.out >/dev/null 2>&1 || return 1 + ovs_wait grep -q "userspace action command" $ovs_dir/s0.out || return 1 # client -> server samples should only contain the first 14 bytes of the packet. 
- grep -E "rate:4294967295,group:1,cookie:c0ffee data:[0-9a-f]{28}$" \ - $ovs_dir/stdout >/dev/null 2>&1 || return 1 - grep -E "rate:4294967295,group:2,cookie:eeff0c" \ - $ovs_dir/stdout >/dev/null 2>&1 || return 1 + ovs_wait grep -qE "rate:4294967295,group:1,cookie:c0ffee data:[0-9a-f]{28}$" \ + $ovs_dir/stdout || return 1 + + ovs_wait grep -q "rate:4294967295,group:2,cookie:eeff0c" $ovs_dir/stdout || return 1 return 0 } @@ -711,7 +739,8 @@ test_upcall_interfaces() { ovs_add_netns_and_veths "test_upcall_interfaces" ui0 upc left0 l0 \ 172.31.110.1/24 -u || return 1 - sleep 1 + ovs_wait grep -q "listening on upcall packet handler" ${ovs_dir}/left0.out + info "sending arping" ip netns exec upc arping -I l0 172.31.110.20 -c 1 \ >$ovs_dir/arping.stdout 2>$ovs_dir/arping.stderr diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py index 1e15b0818074..8a0396bfaf99 100644 --- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py +++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py @@ -2520,6 +2520,7 @@ class PsampleEvent(EventSocket): marshal_class = psample_msg def read_samples(self): + print("listening for psample events", flush=True) while True: try: for msg in self.get(): diff --git a/tools/testing/selftests/net/tcp_ao/self-connect.c b/tools/testing/selftests/net/tcp_ao/self-connect.c index e154d9e198a9..a5698b0a3718 100644 --- a/tools/testing/selftests/net/tcp_ao/self-connect.c +++ b/tools/testing/selftests/net/tcp_ao/self-connect.c @@ -30,8 +30,6 @@ static void setup_lo_intf(const char *lo_intf) static void tcp_self_connect(const char *tst, unsigned int port, bool different_keyids, bool check_restore) { - uint64_t before_challenge_ack, after_challenge_ack; - uint64_t before_syn_challenge, after_syn_challenge; struct tcp_ao_counters before_ao, after_ao; uint64_t before_aogood, after_aogood; struct netstat *ns_before, *ns_after; @@ -62,8 +60,6 @@ static void tcp_self_connect(const char *tst, unsigned int port, ns_before = netstat_read(); before_aogood = netstat_get(ns_before, "TCPAOGood", NULL); - before_challenge_ack = netstat_get(ns_before, "TCPChallengeACK", NULL); - before_syn_challenge = netstat_get(ns_before, "TCPSYNChallenge", NULL); if (test_get_tcp_ao_counters(sk, &before_ao)) test_error("test_get_tcp_ao_counters()"); @@ -82,8 +78,6 @@ static void tcp_self_connect(const char *tst, unsigned int port, ns_after = netstat_read(); after_aogood = netstat_get(ns_after, "TCPAOGood", NULL); - after_challenge_ack = netstat_get(ns_after, "TCPChallengeACK", NULL); - after_syn_challenge = netstat_get(ns_after, "TCPSYNChallenge", NULL); if (test_get_tcp_ao_counters(sk, &after_ao)) test_error("test_get_tcp_ao_counters()"); if (!check_restore) { @@ -98,18 +92,6 @@ static void tcp_self_connect(const char *tst, unsigned int port, close(sk); return; } - if (after_challenge_ack <= before_challenge_ack || - after_syn_challenge <= before_syn_challenge) { - /* - * It's also meant to test simultaneous open, so check - * these counters as well. 
- */ - test_fail("%s: Didn't challenge SYN or ACK: %zu <= %zu OR %zu <= %zu", - tst, after_challenge_ack, before_challenge_ack, - after_syn_challenge, before_syn_challenge); - close(sk); - return; - } if (test_tcp_ao_counters_cmp(tst, &before_ao, &after_ao, TEST_CNT_GOOD)) { close(sk); diff --git a/tools/testing/selftests/powerpc/flags.mk b/tools/testing/selftests/powerpc/flags.mk index b909bee3cb2a..abb9e58d95c4 100644 --- a/tools/testing/selftests/powerpc/flags.mk +++ b/tools/testing/selftests/powerpc/flags.mk @@ -5,8 +5,5 @@ GIT_VERSION := $(shell git describe --always --long --dirty || echo "unknown") export GIT_VERSION endif -ifeq ($(CFLAGS),) -CFLAGS := -std=gnu99 -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(selfdir)/powerpc/include $(CFLAGS) +CFLAGS := -std=gnu99 -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(selfdir)/powerpc/include $(USERCFLAGS) export CFLAGS -endif - diff --git a/tools/testing/selftests/riscv/sigreturn/sigreturn.c b/tools/testing/selftests/riscv/sigreturn/sigreturn.c index 62397d5934f1..ed351a1cb917 100644 --- a/tools/testing/selftests/riscv/sigreturn/sigreturn.c +++ b/tools/testing/selftests/riscv/sigreturn/sigreturn.c @@ -51,7 +51,7 @@ static int vector_sigreturn(int data, void (*handler)(int, siginfo_t *, void *)) asm(".option push \n\ .option arch, +v \n\ - vsetivli x0, 1, e32, ta, ma \n\ + vsetivli x0, 1, e32, m1, ta, ma \n\ vmv.s.x v0, %1 \n\ # Generate SIGSEGV \n\ lw a0, 0(x0) \n\ diff --git a/tools/testing/selftests/timens/exec.c b/tools/testing/selftests/timens/exec.c index e40dc5be2f66..d12ff955de0d 100644 --- a/tools/testing/selftests/timens/exec.c +++ b/tools/testing/selftests/timens/exec.c @@ -30,7 +30,7 @@ int main(int argc, char *argv[]) for (i = 0; i < 2; i++) { _gettime(CLOCK_MONOTONIC, &tst, i); - if (abs(tst.tv_sec - now.tv_sec) > 5) + if (labs(tst.tv_sec - now.tv_sec) > 5) return pr_fail("%ld %ld\n", now.tv_sec, tst.tv_sec); } return 0; @@ -50,7 +50,7 @@ int main(int argc, char *argv[]) for (i = 0; i < 2; i++) { _gettime(CLOCK_MONOTONIC, &tst, i); - if (abs(tst.tv_sec - now.tv_sec) > 5) + if (labs(tst.tv_sec - now.tv_sec) > 5) return pr_fail("%ld %ld\n", now.tv_sec, tst.tv_sec); } @@ -70,7 +70,7 @@ int main(int argc, char *argv[]) /* Check that a child process is in the new timens. 
*/ for (i = 0; i < 2; i++) { _gettime(CLOCK_MONOTONIC, &tst, i); - if (abs(tst.tv_sec - now.tv_sec - OFFSET) > 5) + if (labs(tst.tv_sec - now.tv_sec - OFFSET) > 5) return pr_fail("%ld %ld\n", now.tv_sec + OFFSET, tst.tv_sec); } diff --git a/tools/testing/selftests/timens/timer.c b/tools/testing/selftests/timens/timer.c index 5e7f0051bd7b..5b939f59dfa4 100644 --- a/tools/testing/selftests/timens/timer.c +++ b/tools/testing/selftests/timens/timer.c @@ -56,7 +56,7 @@ int run_test(int clockid, struct timespec now) return pr_perror("timerfd_gettime"); elapsed = new_value.it_value.tv_sec; - if (abs(elapsed - 3600) > 60) { + if (llabs(elapsed - 3600) > 60) { ksft_test_result_fail("clockid: %d elapsed: %lld\n", clockid, elapsed); return 1; diff --git a/tools/testing/selftests/timens/timerfd.c b/tools/testing/selftests/timens/timerfd.c index 9edd43d6b2c1..a4196bbd6e33 100644 --- a/tools/testing/selftests/timens/timerfd.c +++ b/tools/testing/selftests/timens/timerfd.c @@ -61,7 +61,7 @@ int run_test(int clockid, struct timespec now) return pr_perror("timerfd_gettime(%d)", clockid); elapsed = new_value.it_value.tv_sec; - if (abs(elapsed - 3600) > 60) { + if (llabs(elapsed - 3600) > 60) { ksft_test_result_fail("clockid: %d elapsed: %lld\n", clockid, elapsed); return 1; diff --git a/tools/testing/selftests/timens/vfork_exec.c b/tools/testing/selftests/timens/vfork_exec.c index beb7614941fb..5b8907bf451d 100644 --- a/tools/testing/selftests/timens/vfork_exec.c +++ b/tools/testing/selftests/timens/vfork_exec.c @@ -32,7 +32,7 @@ static void *tcheck(void *_args) for (i = 0; i < 2; i++) { _gettime(CLOCK_MONOTONIC, &tst, i); - if (abs(tst.tv_sec - now->tv_sec) > 5) { + if (labs(tst.tv_sec - now->tv_sec) > 5) { pr_fail("%s: in-thread: unexpected value: %ld (%ld)\n", args->tst_name, tst.tv_sec, now->tv_sec); return (void *)1UL; @@ -64,7 +64,7 @@ static int check(char *tst_name, struct timespec *now) for (i = 0; i < 2; i++) { _gettime(CLOCK_MONOTONIC, &tst, i); - if (abs(tst.tv_sec - now->tv_sec) > 5) + if (labs(tst.tv_sec - now->tv_sec) > 5) return pr_fail("%s: unexpected value: %ld (%ld)\n", tst_name, tst.tv_sec, now->tv_sec); } diff --git a/tools/testing/selftests/vDSO/Makefile b/tools/testing/selftests/vDSO/Makefile index d53a4d8008f9..98d8ba2afa00 100644 --- a/tools/testing/selftests/vDSO/Makefile +++ b/tools/testing/selftests/vDSO/Makefile @@ -1,35 +1,30 @@ # SPDX-License-Identifier: GPL-2.0 -include ../lib.mk - uname_M := $(shell uname -m 2>/dev/null || echo not) ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/) -TEST_GEN_PROGS := $(OUTPUT)/vdso_test_gettimeofday $(OUTPUT)/vdso_test_getcpu -TEST_GEN_PROGS += $(OUTPUT)/vdso_test_abi -TEST_GEN_PROGS += $(OUTPUT)/vdso_test_clock_getres +TEST_GEN_PROGS := vdso_test_gettimeofday +TEST_GEN_PROGS += vdso_test_getcpu +TEST_GEN_PROGS += vdso_test_abi +TEST_GEN_PROGS += vdso_test_clock_getres ifeq ($(ARCH),$(filter $(ARCH),x86 x86_64)) -TEST_GEN_PROGS += $(OUTPUT)/vdso_standalone_test_x86 +TEST_GEN_PROGS += vdso_standalone_test_x86 endif -TEST_GEN_PROGS += $(OUTPUT)/vdso_test_correctness +TEST_GEN_PROGS += vdso_test_correctness CFLAGS := -std=gnu99 -CFLAGS_vdso_standalone_test_x86 := -nostdlib -fno-asynchronous-unwind-tables -fno-stack-protector -LDFLAGS_vdso_test_correctness := -ldl + ifeq ($(CONFIG_X86_32),y) LDLIBS += -lgcc_s endif -all: $(TEST_GEN_PROGS) +include ../lib.mk $(OUTPUT)/vdso_test_gettimeofday: parse_vdso.c vdso_test_gettimeofday.c $(OUTPUT)/vdso_test_getcpu: parse_vdso.c vdso_test_getcpu.c $(OUTPUT)/vdso_test_abi: parse_vdso.c 
vdso_test_abi.c $(OUTPUT)/vdso_test_clock_getres: vdso_test_clock_getres.c + $(OUTPUT)/vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c - $(CC) $(CFLAGS) $(CFLAGS_vdso_standalone_test_x86) \ - vdso_standalone_test_x86.c parse_vdso.c \ - -o $@ +$(OUTPUT)/vdso_standalone_test_x86: CFLAGS +=-nostdlib -fno-asynchronous-unwind-tables -fno-stack-protector + $(OUTPUT)/vdso_test_correctness: vdso_test_correctness.c - $(CC) $(CFLAGS) \ - vdso_test_correctness.c \ - -o $@ \ - $(LDFLAGS_vdso_test_correctness) +$(OUTPUT)/vdso_test_correctness: LDFLAGS += -ldl diff --git a/tools/testing/selftests/vDSO/parse_vdso.c b/tools/testing/selftests/vDSO/parse_vdso.c index 413f75620a35..4ae417372e9e 100644 --- a/tools/testing/selftests/vDSO/parse_vdso.c +++ b/tools/testing/selftests/vDSO/parse_vdso.c @@ -55,14 +55,20 @@ static struct vdso_info ELF(Verdef) *verdef; } vdso_info; -/* Straight from the ELF specification. */ -static unsigned long elf_hash(const unsigned char *name) +/* + * Straight from the ELF specification...and then tweaked slightly, in order to + * avoid a few clang warnings. + */ +static unsigned long elf_hash(const char *name) { unsigned long h = 0, g; - while (*name) + const unsigned char *uch_name = (const unsigned char *)name; + + while (*uch_name) { - h = (h << 4) + *name++; - if (g = h & 0xf0000000) + h = (h << 4) + *uch_name++; + g = h & 0xf0000000; + if (g) h ^= g >> 24; h &= ~g; } diff --git a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c index 8a44ff973ee1..27f6fdf11969 100644 --- a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c +++ b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c @@ -18,7 +18,7 @@ #include "parse_vdso.h" -/* We need a libc functions... */ +/* We need some libc functions... */ int strcmp(const char *a, const char *b) { /* This implementation is buggy: it never returns -1. */ @@ -34,6 +34,20 @@ int strcmp(const char *a, const char *b) return 0; } +/* + * The clang build needs this, although gcc does not. + * Stolen from lib/string.c. + */ +void *memcpy(void *dest, const void *src, size_t count) +{ + char *tmp = dest; + const char *s = src; + + while (count--) + *tmp++ = *s++; + return dest; +} + /* ...and two syscalls. This is x86-specific. 
*/ static inline long x86_syscall3(long nr, long a0, long a1, long a2) { @@ -70,7 +84,7 @@ void to_base10(char *lastdig, time_t n) } } -__attribute__((externally_visible)) void c_main(void **stack) +void c_main(void **stack) { /* Parse the stack */ long argc = (long)*stack; diff --git a/tools/testing/selftests/wireguard/qemu/Makefile b/tools/testing/selftests/wireguard/qemu/Makefile index e95bd56b332f..35856b11c143 100644 --- a/tools/testing/selftests/wireguard/qemu/Makefile +++ b/tools/testing/selftests/wireguard/qemu/Makefile @@ -109,9 +109,9 @@ KERNEL_ARCH := x86_64 KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage QEMU_VPORT_RESULT := virtio-serial-device ifeq ($(HOST_ARCH),$(ARCH)) -QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off -no-acpi +QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off,acpi=off else -QEMU_MACHINE := -cpu max -machine microvm -no-acpi +QEMU_MACHINE := -cpu max -machine microvm,acpi=off endif else ifeq ($(ARCH),i686) CHOST := i686-linux-musl @@ -120,9 +120,9 @@ KERNEL_ARCH := x86 KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage QEMU_VPORT_RESULT := virtio-serial-device ifeq ($(subst x86_64,i686,$(HOST_ARCH)),$(ARCH)) -QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off -no-acpi +QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off,acpi=off else -QEMU_MACHINE := -cpu coreduo -machine microvm -no-acpi +QEMU_MACHINE := -cpu coreduo -machine microvm,acpi=off endif else ifeq ($(ARCH),mips64) CHOST := mips64-linux-musl diff --git a/tools/testing/vsock/Makefile b/tools/testing/vsock/Makefile index a7f56a09ca9f..6e0b4e95e230 100644 --- a/tools/testing/vsock/Makefile +++ b/tools/testing/vsock/Makefile @@ -13,3 +13,16 @@ CFLAGS += -g -O2 -Werror -Wall -I. -I../../include -I../../../usr/include -Wno-p clean: ${RM} *.o *.d vsock_test vsock_diag_test vsock_perf vsock_uring_test -include *.d + +VSOCK_INSTALL_PATH ?= + +install: all +ifdef VSOCK_INSTALL_PATH + mkdir -p $(VSOCK_INSTALL_PATH) + install -m 744 vsock_test $(VSOCK_INSTALL_PATH) + install -m 744 vsock_perf $(VSOCK_INSTALL_PATH) + install -m 744 vsock_diag_test $(VSOCK_INSTALL_PATH) + install -m 744 vsock_uring_test $(VSOCK_INSTALL_PATH) +else + $(error Error: set VSOCK_INSTALL_PATH to use install) +endif
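A note on the network_helper_opts backlog field added earlier: __start_server() now passes `opts->backlog ? MAX(opts->backlog, 0) : 1` to listen(), which encodes the three cases the header comment describes. A sketch that decodes that ternary, using an illustrative helper name that is not present in the tree::

    #include <stdio.h>

    /* Illustrative only: effective_backlog() is not a helper in the
     * tree; it just spells out the listen() backlog mapping above. */
    static int effective_backlog(int backlog)
    {
            if (backlog > 0)
                    return backlog; /* +ve: passed to listen() as-is */
            if (backlog < 0)
                    return 0;       /* -ve: zero backlog, forcing syncookies */
            return 1;               /* 0: default; one pending connection */
    }

    int main(void)
    {
            printf("%d %d %d\n", effective_backlog(128),
                   effective_backlog(0), effective_backlog(-1)); /* 128 1 0 */
            return 0;
    }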
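The openvswitch.sh changes above swap fixed sleeps for ovs_wait, which tests the condition immediately and then re-polls roughly once per second until WAIT_TIMEOUT expires. A minimal C sketch of that bounded-wait pattern, on the assumption that a predicate-and-retry loop is all the shell helper fundamentally does; every name here is illustrative::

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Poll pred() about once per second until it succeeds or roughly
     * timeout_s seconds have elapsed, like ovs_wait does for a
     * command line. */
    static bool wait_for(bool (*pred)(void), int timeout_s)
    {
            for (int i = 0; i <= timeout_s; i++) {
                    if (pred())
                            return true;
                    sleep(1);
            }
            return false;
    }

    static int polls;

    /* Illustrative predicate: becomes true on the third poll. */
    static bool becomes_ready(void)
    {
            return ++polls >= 3;
    }

    int main(void)
    {
            printf("ready: %s\n", wait_for(becomes_ready, 5) ? "yes" : "no");
            return 0;
    }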
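Lastly, the timens hunks above replace abs() with labs() or llabs() because the differences under test are long or long long: abs() takes an int, so a wider argument is truncated before the sign is stripped, and the bound check can pass on garbage. A small illustration of the failure mode, assuming an LP64 target where long is 64 bits::

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            long diff = -5000000000L; /* Does not fit in a 32-bit int. */

            /* The cast makes explicit what calling abs() on a long
             * does implicitly: truncate first, strip the sign after. */
            printf("abs():  %d\n", abs((int)diff));  /* 705032704 (wrong) */
            printf("labs(): %ld\n", labs(diff));     /* 5000000000 (right) */
            return 0;
    }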