454 files changed, 11707 insertions, 5898 deletions
diff --git a/Documentation/devicetree/bindings/net/intel,ixp46x-ptp-timer.yaml b/Documentation/devicetree/bindings/net/intel,ixp46x-ptp-timer.yaml new file mode 100644 index 000000000000..8b9b3f915d92 --- /dev/null +++ b/Documentation/devicetree/bindings/net/intel,ixp46x-ptp-timer.yaml @@ -0,0 +1,54 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +# Copyright 2018 Linaro Ltd. +%YAML 1.2 +--- +$id: "http://devicetree.org/schemas/net/intel,ixp46x-ptp-timer.yaml#" +$schema: "http://devicetree.org/meta-schemas/core.yaml#" + +title: Intel IXP46x PTP Timer (TSYNC) + +maintainers: + - Linus Walleij <[email protected]> + +description: | + The Intel IXP46x PTP timer is known in the manual as IEEE1588 Hardware + Assist and Time Synchronization Hardware Assist (TSYNC); it provides a + PTP timer. It exists in the Intel IXP45x and IXP46x XScale SoCs. + +properties: + compatible: + const: intel,ixp46x-ptp-timer + + reg: + maxItems: 1 + + interrupts: + items: + - description: Interrupt to trigger master mode snapshot from the + PTP timer, usually a GPIO interrupt. + - description: Interrupt to trigger slave mode snapshot from the + PTP timer, usually a GPIO interrupt. + + interrupt-names: + items: + - const: master + - const: slave + +required: + - compatible + - reg + - interrupts + - interrupt-names + +additionalProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/irq.h> + ptp-timer@c8010000 { + compatible = "intel,ixp46x-ptp-timer"; + reg = <0xc8010000 0x1000>; + interrupt-parent = <&gpio0>; + interrupts = <8 IRQ_TYPE_EDGE_FALLING>, <7 IRQ_TYPE_EDGE_FALLING>; + interrupt-names = "master", "slave"; + }; diff --git a/Documentation/devicetree/bindings/net/litex,liteeth.yaml b/Documentation/devicetree/bindings/net/litex,liteeth.yaml new file mode 100644 index 000000000000..76c164a8199a --- /dev/null +++ b/Documentation/devicetree/bindings/net/litex,liteeth.yaml @@ -0,0 +1,98 @@ +# SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/litex,liteeth.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: LiteX LiteETH ethernet device + +maintainers: + - Joel Stanley <[email protected]> + +description: | + LiteETH is a small footprint and configurable Ethernet core for FPGA-based + systems on chip. + + The hardware source is Open Source and can be found at + https://github.com/enjoy-digital/liteeth/.
+ +allOf: + - $ref: ethernet-controller.yaml# + +properties: + compatible: + const: litex,liteeth + + reg: + items: + - description: MAC registers + - description: MDIO registers + - description: Packet buffer + + reg-names: + items: + - const: mac + - const: mdio + - const: buffer + + interrupts: + maxItems: 1 + + litex,rx-slots: + description: Number of slots in the receive buffer + $ref: /schemas/types.yaml#/definitions/uint32 + minimum: 1 + default: 2 + + litex,tx-slots: + description: Number of slots in the transmit buffer + $ref: /schemas/types.yaml#/definitions/uint32 + minimum: 1 + default: 2 + + litex,slot-size: + description: Size in bytes of a slot in the tx/rx buffer + $ref: /schemas/types.yaml#/definitions/uint32 + minimum: 0x800 + default: 0x800 + + mac-address: true + local-mac-address: true + phy-handle: true + + mdio: + $ref: mdio.yaml# + +required: + - compatible + - reg + - interrupts + +additionalProperties: false + +examples: + - | + mac: ethernet@8020000 { + compatible = "litex,liteeth"; + reg = <0x8021000 0x100>, + <0x8020800 0x100>, + <0x8030000 0x2000>; + reg-names = "mac", "mdio", "buffer"; + litex,rx-slots = <2>; + litex,tx-slots = <2>; + litex,slot-size = <0x800>; + interrupts = <0x11 0x1>; + phy-handle = <&eth_phy>; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + eth_phy: ethernet-phy@0 { + reg = <0>; + }; + }; + }; +... + +# vim: set ts=2 sw=2 sts=2 tw=80 et cc=80 ft=yaml : diff --git a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml index 1d38ff76d18f..2b1f91603897 100644 --- a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml +++ b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml @@ -24,10 +24,10 @@ allOf: select: properties: compatible: - items: - - enum: - - sifive,fu540-c000-ccache - - sifive,fu740-c000-ccache + contains: + enum: + - sifive,fu540-c000-ccache + - sifive,fu740-c000-ccache required: - compatible diff --git a/Documentation/networking/pktgen.rst b/Documentation/networking/pktgen.rst index 7afa1c9f1183..1225f0f63ff0 100644 --- a/Documentation/networking/pktgen.rst +++ b/Documentation/networking/pktgen.rst @@ -248,26 +248,24 @@ Usage:: -i : ($DEV) output interface/device (required) -s : ($PKT_SIZE) packet size - -d : ($DEST_IP) destination IP + -d : ($DEST_IP) destination IP. CIDR (e.g. 198.18.0.0/15) is also allowed -m : ($DST_MAC) destination MAC-addr + -p : ($DST_PORT) destination PORT range (e.g. 433-444) is also allowed -t : ($THREADS) threads to start + -f : ($F_THREAD) index of first thread (zero indexed CPU number) -c : ($SKB_CLONE) SKB clones send before alloc new SKB + -n : ($COUNT) num messages to send per thread, 0 means indefinitely -b : ($BURST) HW level bursting of SKBs -v : ($VERBOSE) verbose -x : ($DEBUG) debug + -6 : ($IP6) IPv6 + -w : ($DELAY) Tx Delay value (ns) + -a : ($APPEND) Script will not reset generator's state, but will append its config The global variables being set are also listed. E.g. the required interface/device parameter "-i" sets variable $DEV. Copy the pktgen_sampleXX scripts and modify them to fit your own needs. -The old scripts:: - - pktgen.conf-1-2 # 1 CPU 2 dev - pktgen.conf-1-1-rdos # 1 CPU 1 dev w. route DoS - pktgen.conf-1-1-ip6 # 1 CPU 1 dev ipv6 - pktgen.conf-1-1-ip6-rdos # 1 CPU 1 dev ipv6 w. route DoS - pktgen.conf-1-1-flows # 1 CPU 1 dev multiple flows.
- Interrupt affinity =================== @@ -398,7 +396,7 @@ Current commands and configuration options References: - ftp://robur.slu.se/pub/Linux/net-development/pktgen-testing/ -- tp://robur.slu.se/pub/Linux/net-development/pktgen-testing/examples/ +- ftp://robur.slu.se/pub/Linux/net-development/pktgen-testing/examples/ Paper from Linux-Kongress in Erlangen 2004. - ftp://robur.slu.se/pub/Linux/net-development/pktgen-testing/pktgen_paper.pdf diff --git a/MAINTAINERS b/MAINTAINERS index 06e39d3eba93..6abfd3e36c31 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3866,6 +3866,16 @@ L: [email protected] S: Maintained F: drivers/mtd/nand/raw/brcmnand/ +BROADCOM STB PCIE DRIVER +M: Jim Quinlan <[email protected]> +M: Nicolas Saenz Julienne <[email protected]> +M: Florian Fainelli <[email protected]> +S: Maintained +F: Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml +F: drivers/pci/controller/pcie-brcmstb.c + BROADCOM SYSTEMPORT ETHERNET DRIVER M: Florian Fainelli <[email protected]> @@ -4498,7 +4508,7 @@ L: [email protected] S: Supported W: https://clangbuiltlinux.github.io/ B: https://github.com/ClangBuiltLinux/linux/issues -C: irc://chat.freenode.net/clangbuiltlinux +C: irc://irc.libera.chat/clangbuiltlinux F: Documentation/kbuild/llvm.rst F: include/linux/compiler-clang.h F: scripts/clang-tools/ @@ -6952,7 +6962,7 @@ F: include/uapi/linux/mdio.h F: include/uapi/linux/mii.h EXFAT FILE SYSTEM -M: Namjae Jeon <[email protected]> +M: Namjae Jeon <[email protected]> M: Sungjong Seo <[email protected]> S: Maintained @@ -14457,6 +14467,13 @@ S: Maintained F: Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt F: drivers/pci/controller/dwc/pcie-histb.c +PCIE DRIVER FOR INTEL LGM GW SOC +M: Rahul Tanwar <[email protected]> +S: Maintained +F: Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml +F: drivers/pci/controller/dwc/pcie-intel-gw.c + PCIE DRIVER FOR MEDIATEK M: Ryder Lee <[email protected]> M: Jianjun Wang <[email protected]> @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 14 SUBLEVEL = 0 -EXTRAVERSION = -rc6 +EXTRAVERSION = -rc7 NAME = Opossums on Parade # *DOCUMENTATION* diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig index 3f35761dc9ff..23595fc5a29a 100644 --- a/arch/arm/configs/nhk8815_defconfig +++ b/arch/arm/configs/nhk8815_defconfig @@ -15,8 +15,6 @@ CONFIG_SLAB=y CONFIG_ARCH_NOMADIK=y CONFIG_MACH_NOMADIK_8815NHK=y CONFIG_AEABI=y -CONFIG_ZBOOT_ROM_TEXT=0x0 -CONFIG_ZBOOT_ROM_BSS=0x0 CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set @@ -52,9 +50,9 @@ CONFIG_MTD_BLOCK=y CONFIG_MTD_ONENAND=y CONFIG_MTD_ONENAND_VERIFY_WRITE=y CONFIG_MTD_ONENAND_GENERIC=y -CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y CONFIG_MTD_RAW_NAND=y CONFIG_MTD_NAND_FSMC=y +CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_CRYPTOLOOP=y CONFIG_BLK_DEV_RAM=y @@ -97,6 +95,7 @@ CONFIG_REGULATOR=y CONFIG_DRM=y CONFIG_DRM_PANEL_TPO_TPG110=y CONFIG_DRM_PL111=y +CONFIG_FB=y CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_BACKLIGHT_PWM=y CONFIG_FRAMEBUFFER_CONSOLE=y @@ -136,9 +135,8 @@ CONFIG_NLS_ISO8859_15=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_SHA1=y CONFIG_CRYPTO_DES=y +# CONFIG_DEBUG_BUGVERBOSE is not set CONFIG_DEBUG_INFO=y -# CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y # CONFIG_SCHED_DEBUG is not set # CONFIG_DEBUG_PREEMPT is not set -# CONFIG_DEBUG_BUGVERBOSE is not set diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index b5eadd70d903..cdc720f54daa 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ 
b/arch/arm/mach-ixp4xx/common.c @@ -268,9 +268,23 @@ static struct platform_device ixp46x_i2c_controller = { .resource = ixp46x_i2c_resources }; +static struct resource ixp46x_ptp_resources[] = { + DEFINE_RES_MEM(IXP4XX_TIMESYNC_BASE_PHYS, SZ_4K), + DEFINE_RES_IRQ_NAMED(IRQ_IXP4XX_GPIO8, "master"), + DEFINE_RES_IRQ_NAMED(IRQ_IXP4XX_GPIO7, "slave"), +}; + +static struct platform_device ixp46x_ptp = { + .name = "ptp-ixp46x", + .id = -1, + .resource = ixp46x_ptp_resources, + .num_resources = ARRAY_SIZE(ixp46x_ptp_resources), +}; + static struct platform_device *ixp46x_devices[] __initdata = { &ixp46x_hwrandom_device, &ixp46x_i2c_controller, + &ixp46x_ptp, }; unsigned long ixp4xx_exp_bus_size; diff --git a/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h b/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h index abb07f105515..74e63d4531aa 100644 --- a/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h +++ b/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h @@ -218,30 +218,30 @@ /* * PCI Control/Status Registers */ -#define IXP4XX_PCI_CSR(x) ((volatile u32 *)(IXP4XX_PCI_CFG_BASE_VIRT+(x))) - -#define PCI_NP_AD IXP4XX_PCI_CSR(PCI_NP_AD_OFFSET) -#define PCI_NP_CBE IXP4XX_PCI_CSR(PCI_NP_CBE_OFFSET) -#define PCI_NP_WDATA IXP4XX_PCI_CSR(PCI_NP_WDATA_OFFSET) -#define PCI_NP_RDATA IXP4XX_PCI_CSR(PCI_NP_RDATA_OFFSET) -#define PCI_CRP_AD_CBE IXP4XX_PCI_CSR(PCI_CRP_AD_CBE_OFFSET) -#define PCI_CRP_WDATA IXP4XX_PCI_CSR(PCI_CRP_WDATA_OFFSET) -#define PCI_CRP_RDATA IXP4XX_PCI_CSR(PCI_CRP_RDATA_OFFSET) -#define PCI_CSR IXP4XX_PCI_CSR(PCI_CSR_OFFSET) -#define PCI_ISR IXP4XX_PCI_CSR(PCI_ISR_OFFSET) -#define PCI_INTEN IXP4XX_PCI_CSR(PCI_INTEN_OFFSET) -#define PCI_DMACTRL IXP4XX_PCI_CSR(PCI_DMACTRL_OFFSET) -#define PCI_AHBMEMBASE IXP4XX_PCI_CSR(PCI_AHBMEMBASE_OFFSET) -#define PCI_AHBIOBASE IXP4XX_PCI_CSR(PCI_AHBIOBASE_OFFSET) -#define PCI_PCIMEMBASE IXP4XX_PCI_CSR(PCI_PCIMEMBASE_OFFSET) -#define PCI_AHBDOORBELL IXP4XX_PCI_CSR(PCI_AHBDOORBELL_OFFSET) -#define PCI_PCIDOORBELL IXP4XX_PCI_CSR(PCI_PCIDOORBELL_OFFSET) -#define PCI_ATPDMA0_AHBADDR IXP4XX_PCI_CSR(PCI_ATPDMA0_AHBADDR_OFFSET) -#define PCI_ATPDMA0_PCIADDR IXP4XX_PCI_CSR(PCI_ATPDMA0_PCIADDR_OFFSET) -#define PCI_ATPDMA0_LENADDR IXP4XX_PCI_CSR(PCI_ATPDMA0_LENADDR_OFFSET) -#define PCI_ATPDMA1_AHBADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_AHBADDR_OFFSET) -#define PCI_ATPDMA1_PCIADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_PCIADDR_OFFSET) -#define PCI_ATPDMA1_LENADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_LENADDR_OFFSET) +#define _IXP4XX_PCI_CSR(x) ((volatile u32 *)(IXP4XX_PCI_CFG_BASE_VIRT+(x))) + +#define PCI_NP_AD _IXP4XX_PCI_CSR(PCI_NP_AD_OFFSET) +#define PCI_NP_CBE _IXP4XX_PCI_CSR(PCI_NP_CBE_OFFSET) +#define PCI_NP_WDATA _IXP4XX_PCI_CSR(PCI_NP_WDATA_OFFSET) +#define PCI_NP_RDATA _IXP4XX_PCI_CSR(PCI_NP_RDATA_OFFSET) +#define PCI_CRP_AD_CBE _IXP4XX_PCI_CSR(PCI_CRP_AD_CBE_OFFSET) +#define PCI_CRP_WDATA _IXP4XX_PCI_CSR(PCI_CRP_WDATA_OFFSET) +#define PCI_CRP_RDATA _IXP4XX_PCI_CSR(PCI_CRP_RDATA_OFFSET) +#define PCI_CSR _IXP4XX_PCI_CSR(PCI_CSR_OFFSET) +#define PCI_ISR _IXP4XX_PCI_CSR(PCI_ISR_OFFSET) +#define PCI_INTEN _IXP4XX_PCI_CSR(PCI_INTEN_OFFSET) +#define PCI_DMACTRL _IXP4XX_PCI_CSR(PCI_DMACTRL_OFFSET) +#define PCI_AHBMEMBASE _IXP4XX_PCI_CSR(PCI_AHBMEMBASE_OFFSET) +#define PCI_AHBIOBASE _IXP4XX_PCI_CSR(PCI_AHBIOBASE_OFFSET) +#define PCI_PCIMEMBASE _IXP4XX_PCI_CSR(PCI_PCIMEMBASE_OFFSET) +#define PCI_AHBDOORBELL _IXP4XX_PCI_CSR(PCI_AHBDOORBELL_OFFSET) +#define PCI_PCIDOORBELL _IXP4XX_PCI_CSR(PCI_PCIDOORBELL_OFFSET) +#define PCI_ATPDMA0_AHBADDR _IXP4XX_PCI_CSR(PCI_ATPDMA0_AHBADDR_OFFSET) +#define 
PCI_ATPDMA0_PCIADDR _IXP4XX_PCI_CSR(PCI_ATPDMA0_PCIADDR_OFFSET) +#define PCI_ATPDMA0_LENADDR _IXP4XX_PCI_CSR(PCI_ATPDMA0_LENADDR_OFFSET) +#define PCI_ATPDMA1_AHBADDR _IXP4XX_PCI_CSR(PCI_ATPDMA1_AHBADDR_OFFSET) +#define PCI_ATPDMA1_PCIADDR _IXP4XX_PCI_CSR(PCI_ATPDMA1_PCIADDR_OFFSET) +#define PCI_ATPDMA1_LENADDR _IXP4XX_PCI_CSR(PCI_ATPDMA1_LENADDR_OFFSET) /* * PCI register values and bit definitions diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index fdcd54d39c1e..62c3c1d2190f 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -156,6 +156,7 @@ config ARM64 select HAVE_ARCH_KGDB select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT + select HAVE_ARCH_PFN_VALID select HAVE_ARCH_PREL32_RELOCATIONS select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET select HAVE_ARCH_SECCOMP_FILTER diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 7b668db43261..1110d386f3b4 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -183,6 +183,8 @@ endif # We use MRPROPER_FILES and CLEAN_FILES now archclean: $(Q)$(MAKE) $(clean)=$(boot) + $(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso + $(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso32 ifeq ($(KBUILD_EXTMOD),) # We need to generate vdso-offsets.h before compiling certain files in kernel/. diff --git a/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts b/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts index 23cdcc9f7c72..1ccca83292ac 100644 --- a/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts +++ b/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2015, LGE Inc. All rights reserved. * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2021, Petr Vorel <[email protected]> */ /dts-v1/; @@ -9,6 +10,9 @@ #include "pm8994.dtsi" #include "pmi8994.dtsi" +/* cont_splash_mem has different memory mapping */ +/delete-node/ &cont_splash_mem; + / { model = "LG Nexus 5X"; compatible = "lg,bullhead", "qcom,msm8992"; @@ -17,6 +21,9 @@ qcom,board-id = <0xb64 0>; qcom,pmic-id = <0x10009 0x1000A 0x0 0x0>; + /* Bullhead firmware doesn't support PSCI */ + /delete-node/ psci; + aliases { serial0 = &blsp1_uart2; }; @@ -38,6 +45,11 @@ ftrace-size = <0x10000>; pmsg-size = <0x20000>; }; + + cont_splash_mem: memory@3400000 { + reg = <0 0x03400000 0 0x1200000>; + no-map; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts b/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts index ffe1a9bd8f70..c096b7758aa0 100644 --- a/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts +++ b/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts @@ -1,12 +1,16 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2015, Huawei Inc. All rights reserved. * Copyright (c) 2016, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2021, Petr Vorel <[email protected]> */ /dts-v1/; #include "msm8994.dtsi" +/* Angler's firmware does not report where the memory is allocated */ +/delete-node/ &cont_splash_mem; + / { model = "Huawei Nexus 6P"; compatible = "huawei,angler", "qcom,msm8994"; diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi index a8c274ad74c4..188c5768a55a 100644 --- a/arch/arm64/boot/dts/qcom/sc7280.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi @@ -200,7 +200,7 @@ &BIG_CPU_SLEEP_1 &CLUSTER_SLEEP_0>; next-level-cache = <&L2_700>; - qcom,freq-domain = <&cpufreq_hw 1>; + qcom,freq-domain = <&cpufreq_hw 2>; #cooling-cells = <2>; L2_700: l2-cache { compatible = "cache"; diff --git a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi index 4d052e39b348..eb6b1d15293d 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi @@ -69,7 +69,7 @@ }; rmtfs_upper_guard: memory@f5d01000 { no-map; - reg = <0 0xf5d01000 0 0x2000>; + reg = <0 0xf5d01000 0 0x1000>; }; /* @@ -78,7 +78,7 @@ */ removed_region: memory@88f00000 { no-map; - reg = <0 0x88f00000 0 0x200000>; + reg = <0 0x88f00000 0 0x1c00000>; }; ramoops: ramoops@ac300000 { diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts index c2a709a384e9..d7591a4621a2 100644 --- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts +++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts @@ -700,7 +700,7 @@ left_spkr: wsa8810-left{ compatible = "sdw10217211000"; reg = <0 3>; - powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>; + powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>; #thermal-sensor-cells = <0>; sound-name-prefix = "SpkrLeft"; #sound-dai-cells = <0>; @@ -708,7 +708,7 @@ right_spkr: wsa8810-right{ compatible = "sdw10217211000"; - powerdown-gpios = <&wcdgpio 3 GPIO_ACTIVE_HIGH>; + powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>; reg = <0 4>; #thermal-sensor-cells = <0>; sound-name-prefix = "SpkrRight"; diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 21fa330f498d..b83fb24954b7 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -33,8 +33,7 @@ * EL2. */ .macro __init_el2_timers - mrs x0, cnthctl_el2 - orr x0, x0, #3 // Enable EL1 physical timers + mov x0, #3 // Enable EL1 physical timers msr cnthctl_el2, x0 msr cntvoff_el2, xzr // Clear virtual offset .endm diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 993a27ea6f54..f98c91bbd7c1 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -41,6 +41,7 @@ void tag_clear_highpage(struct page *to); typedef struct page *pgtable_t; +int pfn_valid(unsigned long pfn); int pfn_is_map_memory(unsigned long pfn); #include <asm/memory.h> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 8490ed2917ff..1fdb7bb7c198 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -219,6 +219,43 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) free_area_init(max_zone_pfns); } +int pfn_valid(unsigned long pfn) +{ + phys_addr_t addr = PFN_PHYS(pfn); + struct mem_section *ms; + + /* + * Ensure the upper PAGE_SHIFT bits are clear in the + * pfn. Else it might lead to false positives when + * some of the upper bits are set, but the lower bits + * match a valid pfn. 
+ */ + if (PHYS_PFN(addr) != pfn) + return 0; + + if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) + return 0; + + ms = __pfn_to_section(pfn); + if (!valid_section(ms)) + return 0; + + /* + * ZONE_DEVICE memory does not have the memblock entries. + * memblock_is_map_memory() check for ZONE_DEVICE based + * addresses will always fail. Even the normal hotplugged + * memory will never have MEMBLOCK_NOMAP flag set in their + * memblock entries. Skip memblock search for all non early + * memory sections covering all of hotplug memory including + * both normal and ZONE_DEVICE based. + */ + if (!early_section(ms)) + return pfn_section_valid(ms, pfn); + + return memblock_is_memory(addr); +} +EXPORT_SYMBOL(pfn_valid); + int pfn_is_map_memory(unsigned long pfn) { phys_addr_t addr = PFN_PHYS(pfn); diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h index 64201125a287..d4b145b279f6 100644 --- a/arch/powerpc/include/asm/book3s/32/kup.h +++ b/arch/powerpc/include/asm/book3s/32/kup.h @@ -4,6 +4,8 @@ #include <asm/bug.h> #include <asm/book3s/32/mmu-hash.h> +#include <asm/mmu.h> +#include <asm/synch.h> #ifndef __ASSEMBLY__ @@ -28,6 +30,15 @@ static inline void kuep_lock(void) return; update_user_segments(mfsr(0) | SR_NX); + /* + * This isync() shouldn't be necessary as the kernel is not expected to + * run any instruction in userspace soon after the update of segments, + * but hash based cores (at least G3) seem to exhibit a random + * behaviour when the 'isync' is not there. 603 cores don't have this + * behaviour so don't do the 'isync' as it saves several CPU cycles. + */ + if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) + isync(); /* Context sync required after mtsr() */ } static inline void kuep_unlock(void) @@ -36,6 +47,15 @@ static inline void kuep_unlock(void) return; update_user_segments(mfsr(0) & ~SR_NX); + /* + * This isync() shouldn't be necessary as a 'rfi' will soon be executed + * to return to userspace, but hash based cores (at least G3) seem to + * exhibit a random behaviour when the 'isync' is not there. 603 cores + * don't have this behaviour so don't do the 'isync' as it saves several + * CPU cycles. + */ + if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) + isync(); /* Context sync required after mtsr() */ } #ifdef CONFIG_PPC_KUAP diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c index 0876216ceee6..edea388e9d3f 100644 --- a/arch/powerpc/mm/pageattr.c +++ b/arch/powerpc/mm/pageattr.c @@ -18,16 +18,12 @@ /* * Updates the attributes of a page in three steps: * - * 1. invalidate the page table entry - * 2. flush the TLB - * 3. install the new entry with the updated attributes - * - * Invalidating the pte means there are situations where this will not work - * when in theory it should. - * For example: - * - removing write from page whilst it is being executed - * - setting a page read-only whilst it is being read by another CPU + * 1. take the page_table_lock + * 2. install the new entry with the updated attributes + * 3. flush the TLB + * + * This sequence is safe against concurrent updates, and also allows updating the + * attributes of a page currently being executed or accessed.
*/ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data) { @@ -36,9 +32,7 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data) spin_lock(&init_mm.page_table_lock); - /* invalidate the PTE so it's safe to modify */ - pte = ptep_get_and_clear(&init_mm, addr, ptep); - flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + pte = ptep_get(ptep); /* modify the PTE bits as desired, then apply */ switch (action) { @@ -59,11 +53,14 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data) break; } - set_pte_at(&init_mm, addr, ptep, pte); + pte_update(&init_mm, addr, ptep, ~0UL, pte_val(pte), 0); /* See ptesync comment in radix__set_pte_at() */ if (radix_enabled()) asm volatile("ptesync": : :"memory"); + + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + spin_unlock(&init_mm.page_table_lock); return 0; diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index 943fd30095af..8183ca343675 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -1170,7 +1170,7 @@ out: return ret; } -static int __init xive_request_ipi(unsigned int cpu) +static int xive_request_ipi(unsigned int cpu) { struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)]; int ret; diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 18bd0e4bc36c..120b2f6f71bc 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -229,8 +229,8 @@ static void __init init_resources(void) } /* Clean-up any unused pre-allocated resources */ - mem_res_sz = (num_resources - res_idx + 1) * sizeof(*mem_res); - memblock_free(__pa(mem_res), mem_res_sz); + if (res_idx >= 0) + memblock_free(__pa(mem_res), (res_idx + 1) * sizeof(*mem_res)); return; error: diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index b0993e05affe..8fcb7ecb7225 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -560,9 +560,12 @@ static void zpci_cleanup_bus_resources(struct zpci_dev *zdev) int pcibios_add_device(struct pci_dev *pdev) { + struct zpci_dev *zdev = to_zpci(pdev); struct resource *res; int i; + /* The pdev has a reference to the zdev via its bus */ + zpci_zdev_get(zdev); if (pdev->is_physfn) pdev->no_vf_scan = 1; @@ -582,7 +585,10 @@ int pcibios_add_device(struct pci_dev *pdev) void pcibios_release_device(struct pci_dev *pdev) { + struct zpci_dev *zdev = to_zpci(pdev); + zpci_unmap_resources(pdev); + zpci_zdev_put(zdev); } int pcibios_enable_device(struct pci_dev *pdev, int mask) diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h index b877a97e6745..e359d2686178 100644 --- a/arch/s390/pci/pci_bus.h +++ b/arch/s390/pci/pci_bus.h @@ -22,6 +22,11 @@ static inline void zpci_zdev_put(struct zpci_dev *zdev) kref_put(&zdev->kref, zpci_release_device); } +static inline void zpci_zdev_get(struct zpci_dev *zdev) +{ + kref_get(&zdev->kref); +} + int zpci_alloc_domain(int domain); void zpci_free_domain(int domain); int zpci_setup_bus_resources(struct zpci_dev *zdev, diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c index d27a2a9faf3e..cde6db184c26 100644 --- a/arch/um/drivers/vector_kern.c +++ b/arch/um/drivers/vector_kern.c @@ -1488,7 +1488,9 @@ static void vector_get_ethtool_stats(struct net_device *dev, } static int vector_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct vector_private *vp = netdev_priv(netdev); @@ -1497,7 +1499,9 
@@ static int vector_get_coalesce(struct net_device *netdev, } static int vector_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct vector_private *vp = netdev_priv(netdev); diff --git a/block/blk-core.c b/block/blk-core.c index 04477697ee4b..4f8449b29b21 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -122,7 +122,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq) rq->internal_tag = BLK_MQ_NO_TAG; rq->start_time_ns = ktime_get_ns(); rq->part = NULL; - refcount_set(&rq->ref, 1); blk_crypto_rq_set_defaults(rq); } EXPORT_SYMBOL(blk_rq_init); diff --git a/block/blk-flush.c b/block/blk-flush.c index 1002f6c58181..4201728bf3a5 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -262,6 +262,11 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error) spin_unlock_irqrestore(&fq->mq_flush_lock, flags); } +bool is_flush_rq(struct request *rq) +{ + return rq->end_io == flush_end_io; +} + /** * blk_kick_flush - consider issuing flush request * @q: request_queue being kicked @@ -329,6 +334,14 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq, flush_rq->rq_flags |= RQF_FLUSH_SEQ; flush_rq->rq_disk = first_rq->rq_disk; flush_rq->end_io = flush_end_io; + /* + * Order WRITE ->end_io and WRITE rq->ref, and its pair is the one + * implied in refcount_inc_not_zero() called from + * blk_mq_find_and_get_req(), which orders WRITE/READ flush_rq->ref + * and READ flush_rq->end_io + */ + smp_wmb(); + refcount_set(&flush_rq->ref, 1); blk_flush_queue_rq(flush_rq, false); } diff --git a/block/blk-mq.c b/block/blk-mq.c index 2fe396385a4a..9d4fdc2be88a 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -911,7 +911,7 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next) void blk_mq_put_rq_ref(struct request *rq) { - if (is_flush_rq(rq, rq->mq_hctx)) + if (is_flush_rq(rq)) rq->end_io(rq, 0); else if (refcount_dec_and_test(&rq->ref)) __blk_mq_free_request(rq); @@ -923,34 +923,14 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, unsigned long *next = priv; /* - * Just do a quick check if it is expired before locking the request in - * so we're not unnecessarilly synchronizing across CPUs. - */ - if (!blk_mq_req_expired(rq, next)) - return true; - - /* - * We have reason to believe the request may be expired. Take a - * reference on the request to lock this request lifetime into its - * currently allocated context to prevent it from being reallocated in - * the event the completion by-passes this timeout handler. - * - * If the reference was already released, then the driver beat the - * timeout handler to posting a natural completion. - */ - if (!refcount_inc_not_zero(&rq->ref)) - return true; - - /* - * The request is now locked and cannot be reallocated underneath the - * timeout handler's processing. Re-verify this exact request is truly - * expired; if it is not expired, then the request was completed and - * reallocated as a new request. + * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot + * be reallocated underneath the timeout handler's processing, then + * the expire check is reliable. If the request is not expired, then + * it was completed and reallocated as a new request after returning + * from blk_mq_check_expired(). 
*/ if (blk_mq_req_expired(rq, next)) blk_mq_rq_timed_out(rq, reserved); - - blk_mq_put_rq_ref(rq); return true; } diff --git a/block/blk.h b/block/blk.h index 4b885c0f6708..cb01429c162c 100644 --- a/block/blk.h +++ b/block/blk.h @@ -44,11 +44,7 @@ static inline void __blk_get_queue(struct request_queue *q) kobject_get(&q->kobj); } -static inline bool -is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx) -{ - return hctx->fq->flush_rq == req; -} +bool is_flush_rq(struct request *req); struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size, gfp_t flags); diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c index 31cf9aee5edd..1f6007abcf18 100644 --- a/drivers/acpi/prmt.c +++ b/drivers/acpi/prmt.c @@ -292,6 +292,12 @@ void __init init_prmt(void) int mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) + sizeof (struct acpi_table_prmt_header), 0, acpi_parse_prmt, 0); + /* + * Return immediately if PRMT table is not present or no PRM module found. + */ + if (mc <= 0) + return; + pr_info("PRM: found %u modules\n", mc); status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT, diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c index fbdbef0ab552..3a308461246a 100644 --- a/drivers/acpi/x86/s2idle.c +++ b/drivers/acpi/x86/s2idle.c @@ -452,7 +452,7 @@ int acpi_s2idle_prepare_late(void) if (lps0_dsm_func_mask_microsoft > 0) { acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); - acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT, + acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); @@ -479,7 +479,7 @@ void acpi_s2idle_restore_early(void) if (lps0_dsm_func_mask_microsoft > 0) { acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); - acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY, + acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON, lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft); diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 6535614a7dc1..1df2b5801c3b 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -236,6 +236,7 @@ EXPORT_SYMBOL(bcma_core_irq); void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core) { + device_initialize(&core->dev); core->dev.release = bcma_release_core_dev; core->dev.bus = &bcma_bus_type; dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index); @@ -277,11 +278,10 @@ static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core) { int err; - err = device_register(&core->dev); + err = device_add(&core->dev); if (err) { bcma_err(bus, "Could not register dev for core 0x%03X\n", core->id.id); - put_device(&core->dev); return; } core->dev_registered = true; @@ -372,7 +372,7 @@ void bcma_unregister_cores(struct bcma_bus *bus) /* Now no one uses internally-handled cores, we can free them */ list_for_each_entry_safe(core, tmp, &bus->cores, list) { list_del(&core->list); - kfree(core); + put_device(&core->dev); } } diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c index d49e7c0de2b6..26d12a7e6ca0 100644 --- a/drivers/bcma/scan.c +++ b/drivers/bcma/scan.c @@ -141,8 +141,7 @@ static const char *bcma_device_name(const struct bcma_device_id *id) return "UNKNOWN"; } -static u32 bcma_scan_read32(struct bcma_bus *bus, u8 current_coreidx, -
u16 offset) +static u32 bcma_scan_read32(struct bcma_bus *bus, u16 offset) { return readl(bus->mmio + offset); } @@ -443,7 +442,7 @@ void bcma_detect_chip(struct bcma_bus *bus) bcma_scan_switch_core(bus, BCMA_ADDR_BASE); - tmp = bcma_scan_read32(bus, 0, BCMA_CC_ID); + tmp = bcma_scan_read32(bus, BCMA_CC_ID); chipinfo->id = (tmp & BCMA_CC_ID_ID) >> BCMA_CC_ID_ID_SHIFT; chipinfo->rev = (tmp & BCMA_CC_ID_REV) >> BCMA_CC_ID_REV_SHIFT; chipinfo->pkg = (tmp & BCMA_CC_ID_PKG) >> BCMA_CC_ID_PKG_SHIFT; @@ -465,7 +464,7 @@ int bcma_bus_scan(struct bcma_bus *bus) if (bus->nr_cores) return 0; - erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM); + erombase = bcma_scan_read32(bus, BCMA_CC_EROM); if (bus->hosttype == BCMA_HOSTTYPE_SOC) { eromptr = ioremap(erombase, BCMA_CORE_SIZE); if (!eromptr) diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h index bc239a11aa69..5b9ea66b92dc 100644 --- a/drivers/bus/mhi/core/internal.h +++ b/drivers/bus/mhi/core/internal.h @@ -682,7 +682,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, struct image_info *img_info); void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl); int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan, unsigned int flags); + struct mhi_chan *mhi_chan); int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan); void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index 84448233f64c..fc9196f11cb7 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/core/main.c @@ -1430,7 +1430,7 @@ exit_unprepare_channel: } int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan, unsigned int flags) + struct mhi_chan *mhi_chan) { int ret = 0; struct device *dev = &mhi_chan->mhi_dev->dev; @@ -1455,9 +1455,6 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, if (ret) goto error_pm_state; - if (mhi_chan->dir == DMA_FROM_DEVICE) - mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS); - /* Pre-allocate buffer for xfer ring */ if (mhi_chan->pre_alloc) { int nr_el = get_nr_avail_ring_elements(mhi_cntrl, @@ -1613,7 +1610,7 @@ void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan) } /* Move channel to start state */ -int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags) +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev) { int ret, dir; struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; @@ -1624,7 +1621,7 @@ int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags) if (!mhi_chan) continue; - ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags); + ret = mhi_prepare_channel(mhi_cntrl, mhi_chan); if (ret) goto error_open_chan; } diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 0ef98e3ba341..148a4dd8cb9a 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -3097,8 +3097,10 @@ static int sysc_probe(struct platform_device *pdev) return error; error = sysc_check_active_timer(ddata); - if (error == -EBUSY) + if (error == -ENXIO) ddata->reserved = true; + else if (error) + return error; error = sysc_get_clocks(ddata); if (error) diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c index 496900de0b0b..de36f58d551c 100644 --- a/drivers/clk/imx/clk-imx6q.c +++ b/drivers/clk/imx/clk-imx6q.c @@ -974,6 +974,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) hws[IMX6QDL_CLK_PLL3_USB_OTG]->clk); } - 
imx_register_uart_clocks(1); + imx_register_uart_clocks(2); } CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init); diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c index 51ed640e527b..4ece326ea233 100644 --- a/drivers/clk/qcom/gdsc.c +++ b/drivers/clk/qcom/gdsc.c @@ -357,27 +357,43 @@ static int gdsc_init(struct gdsc *sc) if (on < 0) return on; - /* - * Votable GDSCs can be ON due to Vote from other masters. - * If a Votable GDSC is ON, make sure we have a Vote. - */ - if ((sc->flags & VOTABLE) && on) - gdsc_enable(&sc->pd); + if (on) { + /* The regulator must be on, sync the kernel state */ + if (sc->rsupply) { + ret = regulator_enable(sc->rsupply); + if (ret < 0) + return ret; + } - /* - * Make sure the retain bit is set if the GDSC is already on, otherwise - * we end up turning off the GDSC and destroying all the register - * contents that we thought we were saving. - */ - if ((sc->flags & RETAIN_FF_ENABLE) && on) - gdsc_retain_ff_on(sc); + /* + * Votable GDSCs can be ON due to Vote from other masters. + * If a Votable GDSC is ON, make sure we have a Vote. + */ + if (sc->flags & VOTABLE) { + ret = regmap_update_bits(sc->regmap, sc->gdscr, + SW_COLLAPSE_MASK, val); + if (ret) + return ret; + } + + /* Turn on HW trigger mode if supported */ + if (sc->flags & HW_CTRL) { + ret = gdsc_hwctrl(sc, true); + if (ret < 0) + return ret; + } - /* If ALWAYS_ON GDSCs are not ON, turn them ON */ - if (sc->flags & ALWAYS_ON) { - if (!on) - gdsc_enable(&sc->pd); + /* + * Make sure the retain bit is set if the GDSC is already on, + * otherwise we end up turning off the GDSC and destroying all + * the register contents that we thought we were saving. + */ + if (sc->flags & RETAIN_FF_ENABLE) + gdsc_retain_ff_on(sc); + } else if (sc->flags & ALWAYS_ON) { + /* If ALWAYS_ON GDSCs are not ON, turn them ON */ + gdsc_enable(&sc->pd); on = true; - sc->pd.flags |= GENPD_FLAG_ALWAYS_ON; } if (on || (sc->pwrsts & PWRSTS_RET)) @@ -385,6 +401,8 @@ static int gdsc_init(struct gdsc *sc) else gdsc_clear_mem_on(sc); + if (sc->flags & ALWAYS_ON) + sc->pd.flags |= GENPD_FLAG_ALWAYS_ON; if (!sc->pd.power_off) sc->pd.power_off = gdsc_disable; if (!sc->pd.power_on) diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c index 3fc98a3ffd91..c10fc33b29b1 100644 --- a/drivers/cpufreq/armada-37xx-cpufreq.c +++ b/drivers/cpufreq/armada-37xx-cpufreq.c @@ -104,7 +104,11 @@ struct armada_37xx_dvfs { }; static struct armada_37xx_dvfs armada_37xx_dvfs[] = { - {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, + /* + * The cpufreq scaling for 1.2 GHz variant of the SOC is currently + * unstable because we do not know how to configure it properly. 
+ */ + /* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */ {.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} }, {.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} }, {.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} }, diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index bef7528aecd3..231e585f6ba2 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -139,7 +139,9 @@ static const struct of_device_id blocklist[] __initconst = { { .compatible = "qcom,qcs404", }, { .compatible = "qcom,sc7180", }, { .compatible = "qcom,sc7280", }, + { .compatible = "qcom,sc8180x", }, { .compatible = "qcom,sdm845", }, + { .compatible = "qcom,sm8150", }, { .compatible = "st,stih407", }, { .compatible = "st,stih410", }, diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index ec9a87ca2dbb..75f818d04b48 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -134,7 +134,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) } if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL)) - ret = -ENOMEM; + return -ENOMEM; /* Obtain CPUs that share SCMI performance controls */ ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index c7b364e4a287..e883731c3f8f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -3026,6 +3026,14 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size, pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start, start + size - 1, nattr); + /* Flush pending deferred work to avoid racing with deferred actions from + * previous memory map changes (e.g. munmap). Concurrent memory map changes + * can still race with get_attr because we don't hold the mmap lock. But that + * would be a race condition in the application anyway, and undefined + * behaviour is acceptable in that case. + */ + flush_work(&p->svms.deferred_list_work); + mmap_read_lock(mm); if (!svm_range_is_valid(mm, start, size)) { pr_debug("invalid range\n"); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 605e297b7a59..a30283fa5173 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1530,6 +1530,12 @@ void dc_z10_restore(struct dc *dc) if (dc->hwss.z10_restore) dc->hwss.z10_restore(dc); } + +void dc_z10_save_init(struct dc *dc) +{ + if (dc->hwss.z10_save_init) + dc->hwss.z10_save_init(dc); +} #endif /* * Applies given context to HW and copy it into current context. 
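The dc_z10_save_init() helper added above follows the hardware-sequencer convention used throughout this DC code: the hook in the hwss function table is only populated on hardware that supports the feature (the dcn31 init table wires it up further down in this diff), and callers guard the indirect call with a NULL check. A minimal standalone sketch of that optional-hook dispatch pattern, with hypothetical names (hwseq_ops, dcn31_save_init) invented for illustration:

#include <stdio.h>

/* Hypothetical hardware-sequencer ops table: optional hooks are
 * simply left NULL on generations that lack the feature. */
struct hwseq_ops {
	void (*z10_save_init)(void);	/* optional, generation-specific */
};

static void dcn31_save_init(void)
{
	puts("saving hardware init state for Z10");
}

/* Callers never test the hardware generation directly; they only
 * test whether the hook was wired up. */
static void z10_save_init(const struct hwseq_ops *hwss)
{
	if (hwss->z10_save_init)
		hwss->z10_save_init();
}

int main(void)
{
	struct hwseq_ops dcn30 = { 0 };	/* no Z10 support, hook stays NULL */
	struct hwseq_ops dcn31 = { .z10_save_init = dcn31_save_init };

	z10_save_init(&dcn30);	/* silently skipped */
	z10_save_init(&dcn31);	/* dispatches to the dcn31 implementation */
	return 0;
}

The same shape explains why the dcn31_init.c hunk later in this diff only needs to add a single line to the dcn31_funcs table to enable the feature.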
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c index f2b39ec35c89..cde8ed2560b3 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c @@ -47,6 +47,9 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c */ memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config)); dc->vm_pa_config.valid = true; +#if defined(CONFIG_DRM_AMD_DC_DCN) + dc_z10_save_init(dc); +#endif } return num_vmids; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index af7b60108e9d..21d78289b048 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -1338,6 +1338,7 @@ void dc_hardware_release(struct dc *dc); bool dc_set_psr_allow_active(struct dc *dc, bool enable); #if defined(CONFIG_DRM_AMD_DC_DCN) void dc_z10_restore(struct dc *dc); +void dc_z10_save_init(struct dc *dc); #endif bool dc_enable_dmub_notifications(struct dc *dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 9776d1737818..912285fdce18 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -1622,106 +1622,12 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30); } -static void calculate_wm_set_for_vlevel( - int vlevel, - struct wm_range_table_entry *table_entry, - struct dcn_watermarks *wm_set, - struct display_mode_lib *dml, - display_e2e_pipe_params_st *pipes, - int pipe_cnt) -{ - double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us; - - ASSERT(vlevel < dml->soc.num_states); - /* only pipe 0 is read for voltage and dcf/soc clocks */ - pipes[0].clks_cfg.voltage = vlevel; - pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz; - pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz; - - dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us; - dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us; - dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us; - - wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000; - wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000; - wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000; - wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000; - wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000; - wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; - wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; - wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; - dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; - -} - -static void dcn301_calculate_wm_and_dlg( - struct dc *dc, struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int pipe_cnt, - int vlevel_req) -{ - int i, pipe_idx; - int vlevel, vlevel_max; - struct wm_range_table_entry *table_entry; - struct clk_bw_params *bw_params = dc->clk_mgr->bw_params; - - ASSERT(bw_params); - - vlevel_max = bw_params->clk_table.num_entries - 1; - - /* WM Set D 
*/ - table_entry = &bw_params->wm_table.entries[WM_D]; - if (table_entry->wm_type == WM_TYPE_RETRAINING) - vlevel = 0; - else - vlevel = vlevel_max; - calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d, - &context->bw_ctx.dml, pipes, pipe_cnt); - /* WM Set C */ - table_entry = &bw_params->wm_table.entries[WM_C]; - vlevel = min(max(vlevel_req, 2), vlevel_max); - calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c, - &context->bw_ctx.dml, pipes, pipe_cnt); - /* WM Set B */ - table_entry = &bw_params->wm_table.entries[WM_B]; - vlevel = min(max(vlevel_req, 1), vlevel_max); - calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b, - &context->bw_ctx.dml, pipes, pipe_cnt); - - /* WM Set A */ - table_entry = &bw_params->wm_table.entries[WM_A]; - vlevel = min(vlevel_req, vlevel_max); - calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a, - &context->bw_ctx.dml, pipes, pipe_cnt); - - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { - if (!context->res_ctx.pipe_ctx[i].stream) - continue; - - pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); - pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); - - if (dc->config.forced_clocks) { - pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; - pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; - } - if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000) - pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; - if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) - pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; - - pipe_idx++; - } - - dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); -} - static struct resource_funcs dcn301_res_pool_funcs = { .destroy = dcn301_destroy_resource_pool, .link_enc_create = dcn301_link_encoder_create, .panel_cntl_create = dcn301_panel_cntl_create, .validate_bandwidth = dcn30_validate_bandwidth, - .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg, + .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 6ac6faf0c533..8a2119d8ca0d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -404,6 +404,18 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx) &pipe_ctx->stream_res.encoder_info_frame); } } +void dcn31_z10_save_init(struct dc *dc) +{ + union dmub_rb_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT; + cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT; + + dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); + dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); + dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); +} void dcn31_z10_restore(struct dc *dc) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h index 40dfebe78fdd..140435e4f7ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h +++ 
b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h @@ -44,6 +44,7 @@ void dcn31_enable_power_gating_plane( void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx); void dcn31_z10_restore(struct dc *dc); +void dcn31_z10_save_init(struct dc *dc); void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on); int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index aaf2dbd095fe..b30d923471cb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -97,6 +97,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, .z10_restore = dcn31_z10_restore, + .z10_save_init = dcn31_z10_save_init, .is_abm_supported = dcn31_is_abm_supported, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .update_visual_confirm_color = dcn20_update_visual_confirm_color, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 5ab008e62b82..ad5f2adcc40d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -237,6 +237,7 @@ struct hw_sequencer_funcs { int width, int height, int offset); void (*z10_restore)(struct dc *dc); + void (*z10_save_init)(struct dc *dc); void (*update_visual_confirm_color)(struct dc *dc, struct pipe_ctx *pipe_ctx, diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 7c4734f905d9..7fafb8d6c1da 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -856,6 +856,11 @@ enum dmub_cmd_idle_opt_type { * DCN hardware restore. */ DMUB_CMD__IDLE_OPT_DCN_RESTORE = 0, + + /** + * DCN hardware save. + */ + DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT = 1 }; /** diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index 25979106fd25..02e8c6e5448d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -5127,6 +5127,13 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) return size; } +static bool vega10_get_power_profile_mode_quirks(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + + return (adev->pdev->device == 0x6860); +} + static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size) { struct vega10_hwmgr *data = hwmgr->backend; @@ -5163,9 +5170,15 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui } out: - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask, + if (vega10_get_power_profile_mode_quirks(hwmgr)) + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask, + 1 << power_profile_mode, + NULL); + else + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask, (!power_profile_mode) ? 
0 : 1 << (power_profile_mode - 1), NULL); + hwmgr->power_profile_mode = power_profile_mode; return 0; diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index d29907955ff7..5d82891c3222 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -855,8 +855,6 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, req.request.sequence = req32.request.sequence; req.request.signal = req32.request.signal; err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED); - if (err) - return err; req32.reply.type = req.reply.type; req32.reply.sequence = req.reply.sequence; @@ -865,7 +863,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, if (copy_to_user(argp, &req32, sizeof(req32))) return -EFAULT; - return 0; + return err; } #if defined(CONFIG_X86) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index be716b56e8e0..00dade49665b 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2463,6 +2463,15 @@ static void intel_ddi_power_up_lanes(struct intel_encoder *encoder, } } +/* Splitter enable for eDP MSO is limited to certain pipes. */ +static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915) +{ + if (IS_ALDERLAKE_P(i915)) + return BIT(PIPE_A) | BIT(PIPE_B); + else + return BIT(PIPE_A); +} + static void intel_ddi_mso_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { @@ -2480,8 +2489,7 @@ static void intel_ddi_mso_get_config(struct intel_encoder *encoder, if (!pipe_config->splitter.enable) return; - /* Splitter enable is supported for pipe A only. */ - if (drm_WARN_ON(&i915->drm, pipe != PIPE_A)) { + if (drm_WARN_ON(&i915->drm, !(intel_ddi_splitter_pipe_mask(i915) & BIT(pipe)))) { pipe_config->splitter.enable = false; return; } @@ -2513,10 +2521,6 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state) return; if (crtc_state->splitter.enable) { - /* Splitter enable is supported for pipe A only. */ - if (drm_WARN_ON(&i915->drm, pipe != PIPE_A)) - return; - dss1 |= SPLITTER_ENABLE; dss1 |= OVERLAP_PIXELS(crtc_state->splitter.pixel_overlap); if (crtc_state->splitter.link_count == 2) @@ -4743,12 +4747,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) dig_port->hpd_pulse = intel_dp_hpd_pulse; - /* Splitter enable for eDP MSO is limited to certain pipes. 
*/ - if (dig_port->dp.mso_link_count) { - encoder->pipe_mask = BIT(PIPE_A); - if (IS_ALDERLAKE_P(dev_priv)) - encoder->pipe_mask |= BIT(PIPE_B); - } + if (dig_port->dp.mso_link_count) + encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv); } /* In theory we don't need the encoder->type check, but leave it just in diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 4298ae684d7d..86b7ac7b65ec 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -6387,13 +6387,13 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915) if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { bxt_enable_dc9(i915); - /* Tweaked Wa_14010685332:icp,jsp,mcc */ - if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC) - intel_de_rmw(i915, SOUTH_CHICKEN1, - SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS); } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_enable_pc8(i915); } + + /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */ + if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1) + intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS); } void intel_display_power_resume_early(struct drm_i915_private *i915) @@ -6402,13 +6402,13 @@ void intel_display_power_resume_early(struct drm_i915_private *i915) IS_BROXTON(i915)) { gen9_sanitize_dc_state(i915); bxt_disable_dc9(i915); - /* Tweaked Wa_14010685332:icp,jsp,mcc */ - if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC) - intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0); - } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_disable_pc8(i915); } + + /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */ + if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1) + intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0); } void intel_display_power_suspend(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 08bceae40aa8..053a3c2f7267 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -206,7 +206,6 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp) return lttpr_count; } -EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps); static u8 dp_voltage_max(u8 preemph) { diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index c03943198089..c3816f5c6900 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3064,24 +3064,6 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); } -static void cnp_display_clock_wa(struct drm_i915_private *dev_priv) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - - /* - * Wa_14010685332:cnp/cmp,tgp,adp - * TODO: Clarify which platforms this applies to - * TODO: Figure out if this workaround can be applied in the s0ix suspend/resume handlers as - * on earlier platforms and whether the workaround is also needed for runtime suspend/resume - */ - if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP || - (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) { - intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, - SBCLK_RUN_REFCLK_DIS); - intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0); - } -} - static void 
gen8_display_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; @@ -3115,7 +3097,6 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv) if (HAS_PCH_SPLIT(dev_priv)) ibx_irq_reset(dev_priv); - cnp_display_clock_wa(dev_priv); } static void gen11_display_irq_reset(struct drm_i915_private *dev_priv) @@ -3159,8 +3140,6 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv) if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) GEN3_IRQ_RESET(uncore, SDE); - - cnp_display_clock_wa(dev_priv); } static void gen11_irq_reset(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c index 6f4c80bbc0eb..473f5bb5cbad 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_color.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c @@ -133,6 +133,8 @@ static int mtk_disp_color_probe(struct platform_device *pdev) static int mtk_disp_color_remove(struct platform_device *pdev) { + component_del(&pdev->dev, &mtk_disp_color_component_ops); + return 0; } diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index fa9d79963cd3..5326989d5206 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -423,6 +423,8 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev) static int mtk_disp_ovl_remove(struct platform_device *pdev) { + component_del(&pdev->dev, &mtk_disp_ovl_component_ops); + return 0; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index 75bc00e17fc4..50d20562e612 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -34,6 +34,7 @@ #define DISP_AAL_EN 0x0000 #define DISP_AAL_SIZE 0x0030 +#define DISP_AAL_OUTPUT_SIZE 0x04d8 #define DISP_DITHER_EN 0x0000 #define DITHER_EN BIT(0) @@ -197,6 +198,7 @@ static void mtk_aal_config(struct device *dev, unsigned int w, struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_SIZE); + mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_AAL_OUTPUT_SIZE); } static void mtk_aal_gamma_set(struct device *dev, struct drm_crtc_state *state) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index f949767698fc..bcb0310a41b6 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -2237,6 +2237,33 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) interlock[NV50_DISP_INTERLOCK_CORE] = 0; } + /* Finish updating head(s)... + * + * NVD is rather picky about both where window assignments can change, + * *and* about certain core and window channel states matching. + * + * The EFI GOP driver on newer GPUs configures window channels with a + * different output format to what we do, and the core channel update + * in the assign_windows case above would result in a state mismatch. + * + * Delay some of the head update until after that point to work around + * the issue. This only affects the initial modeset.
+ * + * TODO: handle this better when adding flexible window mapping + */ + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state); + struct nv50_head *head = nv50_head(crtc); + + NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name, + asyh->set.mask, asyh->clr.mask); + + if (asyh->set.mask) { + nv50_head_flush_set_wndw(head, asyh); + interlock[NV50_DISP_INTERLOCK_CORE] = 1; + } + } + /* Update plane(s). */ for_each_new_plane_in_state(state, plane, new_plane_state, i) { struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state); diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c index ec361d17e900..d66f97280282 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c @@ -50,11 +50,8 @@ nv50_head_flush_clr(struct nv50_head *head, } void -nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh) +nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh) { - if (asyh->set.view ) head->func->view (head, asyh); - if (asyh->set.mode ) head->func->mode (head, asyh); - if (asyh->set.core ) head->func->core_set(head, asyh); if (asyh->set.olut ) { asyh->olut.offset = nv50_lut_load(&head->olut, asyh->olut.buffer, @@ -62,6 +59,14 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh) asyh->olut.load); head->func->olut_set(head, asyh); } +} + +void +nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + if (asyh->set.view ) head->func->view (head, asyh); + if (asyh->set.mode ) head->func->mode (head, asyh); + if (asyh->set.core ) head->func->core_set(head, asyh); if (asyh->set.curs ) head->func->curs_set(head, asyh); if (asyh->set.base ) head->func->base (head, asyh); if (asyh->set.ovly ) head->func->ovly (head, asyh); diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h index dae841dc05fd..0bac6be9ba34 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.h +++ b/drivers/gpu/drm/nouveau/dispnv50/head.h @@ -21,6 +21,7 @@ struct nv50_head { struct nv50_head *nv50_head_create(struct drm_device *, int index); void nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh); +void nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh); void nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool flush); diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h index 0b86c44878e0..59759c4fb62e 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h +++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h @@ -4,7 +4,8 @@ struct nv_device_v0 { __u8 version; - __u8 pad01[7]; + __u8 priv; + __u8 pad02[6]; __u64 device; /* device identifier, ~0 for client default */ }; diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h index ba2c28ea43d2..c68cc957248e 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/class.h +++ b/drivers/gpu/drm/nouveau/include/nvif/class.h @@ -61,8 +61,6 @@ #define NV10_CHANNEL_DMA /* cl506b.h */ 0x0000006e #define NV17_CHANNEL_DMA /* cl506b.h */ 0x0000176e #define NV40_CHANNEL_DMA /* cl506b.h */ 0x0000406e -#define NV50_CHANNEL_DMA /* cl506e.h */ 0x0000506e -#define G82_CHANNEL_DMA /* cl826e.h */ 0x0000826e #define NV50_CHANNEL_GPFIFO /* cl506f.h */ 0x0000506f #define G82_CHANNEL_GPFIFO /* cl826f.h */ 0x0000826f diff --git 
a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h index 347d2c020bd1..5d9395e651b6 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/client.h +++ b/drivers/gpu/drm/nouveau/include/nvif/client.h @@ -9,7 +9,6 @@ struct nvif_client { const struct nvif_driver *driver; u64 version; u8 route; - bool super; }; int nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device, diff --git a/drivers/gpu/drm/nouveau/include/nvif/driver.h b/drivers/gpu/drm/nouveau/include/nvif/driver.h index 8e85b936eaa0..7a3af05f7f98 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/driver.h +++ b/drivers/gpu/drm/nouveau/include/nvif/driver.h @@ -11,7 +11,7 @@ struct nvif_driver { void (*fini)(void *priv); int (*suspend)(void *priv); int (*resume)(void *priv); - int (*ioctl)(void *priv, bool super, void *data, u32 size, void **hack); + int (*ioctl)(void *priv, void *data, u32 size, void **hack); void __iomem *(*map)(void *priv, u64 handle, u32 size); void (*unmap)(void *priv, void __iomem *ptr, u32 size); bool keep; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h index 5d7017fe5039..2f86606e708c 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h @@ -13,7 +13,6 @@ struct nvkm_client { struct nvkm_client_notify *notify[32]; struct rb_root objroot; - bool super; void *data; int (*ntfy)(const void *, u32, const void *, u32); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h index 71ed147ad077..f52918a43246 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h @@ -4,5 +4,5 @@ #include <core/os.h> struct nvkm_client; -int nvkm_ioctl(struct nvkm_client *, bool, void *, u32, void **); +int nvkm_ioctl(struct nvkm_client *, void *, u32, void **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h index 0911e73f7424..70e7887ef4b4 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h @@ -15,7 +15,6 @@ struct nvkm_vma { u8 refd:3; /* Current page type (index, or NONE for unreferenced). */ bool used:1; /* Region allocated. */ bool part:1; /* Region was split from an allocated region by map(). */ - bool user:1; /* Region user-allocated. */ bool busy:1; /* Region busy (for temporarily preventing user access). */ bool mapped:1; /* Region contains valid pages. */ struct nvkm_memory *memory; /* Memory currently mapped into VMA. 
*/ diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index b45ec3086285..4107b7006539 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -570,11 +570,9 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) } client->route = NVDRM_OBJECT_ABI16; - client->super = true; ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle, NV_DMA_IN_MEMORY, &args, sizeof(args), &ntfy->object); - client->super = false; client->route = NVDRM_OBJECT_NVIF; if (ret) goto done; diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index 40362600eed2..80099ef75702 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -86,12 +86,6 @@ nouveau_channel_del(struct nouveau_channel **pchan) struct nouveau_channel *chan = *pchan; if (chan) { struct nouveau_cli *cli = (void *)chan->user.client; - bool super; - - if (cli) { - super = cli->base.super; - cli->base.super = true; - } if (chan->fence) nouveau_fence(chan->drm)->context_del(chan); @@ -111,9 +105,6 @@ nouveau_channel_del(struct nouveau_channel **pchan) nouveau_bo_unpin(chan->push.buffer); nouveau_bo_ref(NULL, &chan->push.buffer); kfree(chan); - - if (cli) - cli->base.super = super; } *pchan = NULL; } @@ -512,20 +503,16 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device, struct nouveau_channel **pchan) { struct nouveau_cli *cli = (void *)device->object.client; - bool super; int ret; /* hack until fencenv50 is fixed, and agp access relaxed */ - super = cli->base.super; - cli->base.super = true; - ret = nouveau_channel_ind(drm, device, arg0, priv, pchan); if (ret) { NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret); ret = nouveau_channel_dma(drm, device, pchan); if (ret) { NV_PRINTK(dbg, cli, "dma channel create, %d\n", ret); - goto done; + return ret; } } @@ -533,15 +520,13 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device, if (ret) { NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret); nouveau_channel_del(pchan); - goto done; + return ret; } ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst); if (ret) nouveau_channel_del(pchan); -done: - cli->base.super = super; return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index a616cf4573b8..ba4cd5f83725 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -244,6 +244,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname, ret = nvif_device_ctor(&cli->base.object, "drmDevice", 0, NV_DEVICE, &(struct nv_device_v0) { .device = ~0, + .priv = true, }, sizeof(struct nv_device_v0), &cli->device); if (ret) { @@ -1086,8 +1087,6 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) if (ret) goto done; - cli->base.super = false; - fpriv->driver_priv = cli; mutex_lock(&drm->client.mutex); diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 0de6549fb875..2ca3207c13fc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -41,8 +41,6 @@ nouveau_mem_map(struct nouveau_mem *mem, struct gf100_vmm_map_v0 gf100; } args; u32 argc = 0; - bool super; - int ret; switch (vmm->object.oclass) { case NVIF_CLASS_VMM_NV04: @@ -73,12 +71,7 @@ nouveau_mem_map(struct nouveau_mem *mem, return -ENOSYS; } - super = vmm->object.client->super; - vmm->object.client->super = true; - ret = 
nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc, - &mem->mem, 0); - vmm->object.client->super = super; - return ret; + return nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc, &mem->mem, 0); } void @@ -99,7 +92,6 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt) struct nouveau_drm *drm = cli->drm; struct nvif_mmu *mmu = &cli->mmu; struct nvif_mem_ram_v0 args = {}; - bool super = cli->base.super; u8 type; int ret; @@ -122,11 +114,9 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt) args.dma = tt->dma_address; mutex_lock(&drm->master.lock); - cli->base.super = true; ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT, reg->num_pages << PAGE_SHIFT, &args, sizeof(args), &mem->mem); - cli->base.super = super; mutex_unlock(&drm->master.lock); return ret; } @@ -138,12 +128,10 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page) struct nouveau_cli *cli = mem->cli; struct nouveau_drm *drm = cli->drm; struct nvif_mmu *mmu = &cli->mmu; - bool super = cli->base.super; u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page); int ret; mutex_lock(&drm->master.lock); - cli->base.super = true; switch (cli->mem->oclass) { case NVIF_CLASS_MEM_GF100: ret = nvif_mem_ctor_type(mmu, "ttmVram", cli->mem->oclass, @@ -167,7 +155,6 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page) WARN_ON(1); break; } - cli->base.super = super; mutex_unlock(&drm->master.lock); reg->start = mem->mem.addr >> PAGE_SHIFT; diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c index b3f29b1ce9ea..52f5793b7274 100644 --- a/drivers/gpu/drm/nouveau/nouveau_nvif.c +++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c @@ -52,9 +52,9 @@ nvkm_client_map(void *priv, u64 handle, u32 size) } static int -nvkm_client_ioctl(void *priv, bool super, void *data, u32 size, void **hack) +nvkm_client_ioctl(void *priv, void *data, u32 size, void **hack) { - return nvkm_ioctl(priv, super, data, size, hack); + return nvkm_ioctl(priv, data, size, hack); } static int diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 82b583f5fca8..b0c3422cb01f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -237,14 +237,11 @@ void nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit) { if (limit > start) { - bool super = svmm->vmm->vmm.object.client->super; - svmm->vmm->vmm.object.client->super = true; nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR, &(struct nvif_vmm_pfnclr_v0) { .addr = start, .size = limit - start, }, sizeof(struct nvif_vmm_pfnclr_v0)); - svmm->vmm->vmm.object.client->super = super; } } @@ -634,9 +631,7 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm, NVIF_VMM_PFNMAP_V0_A | NVIF_VMM_PFNMAP_V0_HOST; - svmm->vmm->vmm.object.client->super = true; ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL); - svmm->vmm->vmm.object.client->super = false; mutex_unlock(&svmm->mutex); unlock_page(page); @@ -702,9 +697,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm, nouveau_hmm_convert_pfn(drm, &range, args); - svmm->vmm->vmm.object.client->super = true; ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL); - svmm->vmm->vmm.object.client->super = false; mutex_unlock(&svmm->mutex); out: @@ -928,10 +921,8 @@ nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm, mutex_lock(&svmm->mutex); - svmm->vmm->vmm.object.client->super = true; ret = 
nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) + npages * sizeof(args->p.phys[0]), NULL); - svmm->vmm->vmm.object.client->super = false; mutex_unlock(&svmm->mutex); } diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c index 9dc10b17ad34..5da1f4d223d7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_usif.c +++ b/drivers/gpu/drm/nouveau/nouveau_usif.c @@ -32,6 +32,9 @@ #include <nvif/event.h> #include <nvif/ioctl.h> +#include <nvif/class.h> +#include <nvif/cl0080.h> + struct usif_notify_p { struct drm_pending_event base; struct { @@ -261,7 +264,7 @@ usif_object_dtor(struct usif_object *object) } static int -usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc) +usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc, bool parent_abi16) { struct nouveau_cli *cli = nouveau_cli(f); struct nvif_client *client = &cli->base; @@ -271,23 +274,48 @@ usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc) struct usif_object *object; int ret = -ENOSYS; + if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) + return ret; + + switch (args->v0.oclass) { + case NV_DMA_FROM_MEMORY: + case NV_DMA_TO_MEMORY: + case NV_DMA_IN_MEMORY: + return -EINVAL; + case NV_DEVICE: { + union { + struct nv_device_v0 v0; + } *args = data; + + if ((ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) + return ret; + + args->v0.priv = false; + break; + } + default: + if (!parent_abi16) + return -EINVAL; + break; + } + if (!(object = kmalloc(sizeof(*object), GFP_KERNEL))) return -ENOMEM; list_add(&object->head, &cli->objects); - if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) { - object->route = args->v0.route; - object->token = args->v0.token; - args->v0.route = NVDRM_OBJECT_USIF; - args->v0.token = (unsigned long)(void *)object; - ret = nvif_client_ioctl(client, argv, argc); - args->v0.token = object->token; - args->v0.route = object->route; + object->route = args->v0.route; + object->token = args->v0.token; + args->v0.route = NVDRM_OBJECT_USIF; + args->v0.token = (unsigned long)(void *)object; + ret = nvif_client_ioctl(client, argv, argc); + if (ret) { + usif_object_dtor(object); + return ret; } - if (ret) - usif_object_dtor(object); - return ret; + args->v0.token = object->token; + args->v0.route = object->route; + return 0; } int @@ -301,6 +329,7 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc) struct nvif_ioctl_v0 v0; } *argv = data; struct usif_object *object; + bool abi16 = false; u8 owner; int ret; @@ -331,11 +360,13 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc) mutex_unlock(&cli->mutex); goto done; } + + abi16 = true; } switch (argv->v0.type) { case NVIF_IOCTL_V0_NEW: - ret = usif_object_new(filp, data, size, argv, argc); + ret = usif_object_new(filp, data, size, argv, argc, abi16); break; case NVIF_IOCTL_V0_NTFY_NEW: ret = usif_notify_new(filp, data, size, argv, argc); diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c index 12644f811b3e..a3264a0e933a 100644 --- a/drivers/gpu/drm/nouveau/nvif/client.c +++ b/drivers/gpu/drm/nouveau/nvif/client.c @@ -32,7 +32,7 @@ int nvif_client_ioctl(struct nvif_client *client, void *data, u32 size) { - return client->driver->ioctl(client->object.priv, client->super, data, size, NULL); + return client->driver->ioctl(client->object.priv, data, size, NULL); } int @@ -80,7 +80,6 @@ nvif_client_ctor(struct nvif_client *parent, const char *name, u64 
device, client->object.client = client; client->object.handle = ~0; client->route = NVIF_IOCTL_V0_ROUTE_NVIF; - client->super = true; client->driver = parent->driver; if (ret == 0) { diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c index 671a5c0199e0..dce1ecee2af5 100644 --- a/drivers/gpu/drm/nouveau/nvif/object.c +++ b/drivers/gpu/drm/nouveau/nvif/object.c @@ -44,8 +44,7 @@ nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack) } else return -ENOSYS; - return client->driver->ioctl(client->object.priv, client->super, - data, size, hack); + return client->driver->ioctl(client->object.priv, data, size, hack); } void diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c index d777df5a64e6..735cb6816f10 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c @@ -426,8 +426,7 @@ nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type, } int -nvkm_ioctl(struct nvkm_client *client, bool supervisor, - void *data, u32 size, void **hack) +nvkm_ioctl(struct nvkm_client *client, void *data, u32 size, void **hack) { struct nvkm_object *object = &client->object; union { @@ -435,7 +434,6 @@ nvkm_ioctl(struct nvkm_client *client, bool supervisor, } *args = data; int ret = -ENOSYS; - client->super = supervisor; nvif_ioctl(object, "size %d\n", size); if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index b930f539feec..93ddf63d1114 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2624,6 +2624,26 @@ nv174_chipset = { .dma = { 0x00000001, gv100_dma_new }, }; +static const struct nvkm_device_chip +nv177_chipset = { + .name = "GA107", + .bar = { 0x00000001, tu102_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .devinit = { 0x00000001, ga100_devinit_new }, + .fb = { 0x00000001, ga102_fb_new }, + .gpio = { 0x00000001, ga102_gpio_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, ga100_mc_new }, + .mmu = { 0x00000001, tu102_mmu_new }, + .pci = { 0x00000001, gp100_pci_new }, + .privring = { 0x00000001, gm200_privring_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, ga100_top_new }, + .disp = { 0x00000001, ga102_disp_new }, + .dma = { 0x00000001, gv100_dma_new }, +}; + static int nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size, struct nvkm_notify *notify) @@ -3049,6 +3069,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x168: device->chip = &nv168_chipset; break; case 0x172: device->chip = &nv172_chipset; break; case 0x174: device->chip = &nv174_chipset; break; + case 0x177: device->chip = &nv177_chipset; break; default: if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) { switch (device->chipset) { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c index fea9d8f2b10c..f28894fdede9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c @@ -397,7 +397,7 @@ nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size, return ret; /* give priviledged clients register access */ - if (client->super) + if (args->v0.priv) func = &nvkm_udevice_super; else func = 
&nvkm_udevice; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c index 55fbfe28c6dc..9669472a2749 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c @@ -440,7 +440,7 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps) return ret; } -static void +void nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior) { struct nvkm_dp *dp = nvkm_dp(outp); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h index 428b3f488f03..e484d0c3b0d4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h @@ -32,6 +32,7 @@ struct nvkm_dp { int nvkm_dp_new(struct nvkm_disp *, int index, struct dcb_output *, struct nvkm_outp **); +void nvkm_dp_disable(struct nvkm_outp *, struct nvkm_ior *); /* DPCD Receiver Capabilities */ #define DPCD_RC00_DPCD_REV 0x00000 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c index dffcac249211..129982fef7ef 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c @@ -22,6 +22,7 @@ * Authors: Ben Skeggs */ #include "outp.h" +#include "dp.h" #include "ior.h" #include <subdev/bios.h> @@ -257,6 +258,14 @@ nvkm_outp_init_route(struct nvkm_outp *outp) if (!ior->arm.head || ior->arm.proto != proto) { OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head, ior->arm.proto, proto); + + /* The EFI GOP driver on Ampere can leave unused DP links routed, + * which we don't expect. The DisableLT IED script *should* get + * us back to where we need to be. + */ + if (ior->func->route.get && !ior->arm.head && outp->info.type == DCB_OUTPUT_DP) + nvkm_dp_disable(outp, ior); + return; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c index d20cc0681a88..797131ed7d67 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.c @@ -26,7 +26,6 @@ #include <core/client.h> #include <core/gpuobj.h> #include <subdev/fb.h> -#include <subdev/instmem.h> #include <nvif/cl0002.h> #include <nvif/unpack.h> @@ -72,11 +71,7 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma, union { struct nv_dma_v0 v0; } *args = *pdata; - struct nvkm_device *device = dma->engine.subdev.device; - struct nvkm_client *client = oclass->client; struct nvkm_object *parent = oclass->parent; - struct nvkm_instmem *instmem = device->imem; - struct nvkm_fb *fb = device->fb; void *data = *pdata; u32 size = *psize; int ret = -ENOSYS; @@ -109,23 +104,13 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma, dmaobj->target = NV_MEM_TARGET_VM; break; case NV_DMA_V0_TARGET_VRAM: - if (!client->super) { - if (dmaobj->limit >= fb->ram->size - instmem->reserved) - return -EACCES; - if (device->card_type >= NV_50) - return -EACCES; - } dmaobj->target = NV_MEM_TARGET_VRAM; break; case NV_DMA_V0_TARGET_PCI: - if (!client->super) - return -EACCES; dmaobj->target = NV_MEM_TARGET_PCI; break; case NV_DMA_V0_TARGET_PCI_US: case NV_DMA_V0_TARGET_AGP: - if (!client->super) - return -EACCES; dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP; break; default: diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild index 90e9a0972a44..3209eb7af65f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild +++ 
b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild @@ -27,8 +27,6 @@ nvkm-y += nvkm/engine/fifo/dmanv04.o nvkm-y += nvkm/engine/fifo/dmanv10.o nvkm-y += nvkm/engine/fifo/dmanv17.o nvkm-y += nvkm/engine/fifo/dmanv40.o -nvkm-y += nvkm/engine/fifo/dmanv50.o -nvkm-y += nvkm/engine/fifo/dmag84.o nvkm-y += nvkm/engine/fifo/gpfifonv50.o nvkm-y += nvkm/engine/fifo/gpfifog84.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h index af8bdf275552..3a95730d7ff5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h @@ -48,8 +48,6 @@ void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int); int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push, const struct nvkm_oclass *, struct nv50_fifo_chan *); -extern const struct nvkm_fifo_chan_oclass nv50_fifo_dma_oclass; extern const struct nvkm_fifo_chan_oclass nv50_fifo_gpfifo_oclass; -extern const struct nvkm_fifo_chan_oclass g84_fifo_dma_oclass; extern const struct nvkm_fifo_chan_oclass g84_fifo_gpfifo_oclass; #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c deleted file mode 100644 index fc34cddcd2f5..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Ben Skeggs - */ -#include "channv50.h" - -#include <core/client.h> -#include <core/ramht.h> - -#include <nvif/class.h> -#include <nvif/cl826e.h> -#include <nvif/unpack.h> - -static int -g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, - void *data, u32 size, struct nvkm_object **pobject) -{ - struct nvkm_object *parent = oclass->parent; - union { - struct g82_channel_dma_v0 v0; - } *args = data; - struct nv50_fifo *fifo = nv50_fifo(base); - struct nv50_fifo_chan *chan; - int ret = -ENOSYS; - - nvif_ioctl(parent, "create channel dma size %d\n", size); - if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { - nvif_ioctl(parent, "create channel dma vers %d vmm %llx " - "pushbuf %llx offset %016llx\n", - args->v0.version, args->v0.vmm, args->v0.pushbuf, - args->v0.offset); - if (!args->v0.pushbuf) - return -EINVAL; - } else - return ret; - - if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) - return -ENOMEM; - *pobject = &chan->base.object; - - ret = g84_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf, - oclass, chan); - if (ret) - return ret; - - args->v0.chid = chan->base.chid; - - nvkm_kmap(chan->ramfc); - nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078); - nvkm_wo32(chan->ramfc, 0x44, 0x01003fff); - nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4); - nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff); - nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff); - nvkm_wo32(chan->ramfc, 0x78, 0x00000000); - nvkm_wo32(chan->ramfc, 0x7c, 0x30000001); - nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | - (4 << 24) /* SEARCH_FULL */ | - (chan->ramht->gpuobj->node->offset >> 4)); - nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10); - nvkm_wo32(chan->ramfc, 0x98, chan->base.inst->addr >> 12); - nvkm_done(chan->ramfc); - return 0; -} - -const struct nvkm_fifo_chan_oclass -g84_fifo_dma_oclass = { - .base.oclass = G82_CHANNEL_DMA, - .base.minver = 0, - .base.maxver = 0, - .ctor = g84_fifo_dma_new, -}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c deleted file mode 100644 index 8043718ad150..000000000000 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ -#include "channv50.h" - -#include <core/client.h> -#include <core/ramht.h> - -#include <nvif/class.h> -#include <nvif/cl506e.h> -#include <nvif/unpack.h> - -static int -nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass, - void *data, u32 size, struct nvkm_object **pobject) -{ - struct nvkm_object *parent = oclass->parent; - union { - struct nv50_channel_dma_v0 v0; - } *args = data; - struct nv50_fifo *fifo = nv50_fifo(base); - struct nv50_fifo_chan *chan; - int ret = -ENOSYS; - - nvif_ioctl(parent, "create channel dma size %d\n", size); - if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { - nvif_ioctl(parent, "create channel dma vers %d vmm %llx " - "pushbuf %llx offset %016llx\n", - args->v0.version, args->v0.vmm, args->v0.pushbuf, - args->v0.offset); - if (!args->v0.pushbuf) - return -EINVAL; - } else - return ret; - - if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL))) - return -ENOMEM; - *pobject = &chan->base.object; - - ret = nv50_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf, - oclass, chan); - if (ret) - return ret; - - args->v0.chid = chan->base.chid; - - nvkm_kmap(chan->ramfc); - nvkm_wo32(chan->ramfc, 0x08, lower_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x0c, upper_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x10, lower_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x14, upper_32_bits(args->v0.offset)); - nvkm_wo32(chan->ramfc, 0x3c, 0x003f6078); - nvkm_wo32(chan->ramfc, 0x44, 0x01003fff); - nvkm_wo32(chan->ramfc, 0x48, chan->base.push->node->offset >> 4); - nvkm_wo32(chan->ramfc, 0x4c, 0xffffffff); - nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff); - nvkm_wo32(chan->ramfc, 0x78, 0x00000000); - nvkm_wo32(chan->ramfc, 0x7c, 0x30000001); - nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) | - (4 << 24) /* SEARCH_FULL */ | - (chan->ramht->gpuobj->node->offset >> 4)); - nvkm_done(chan->ramfc); - return 0; -} - -const struct nvkm_fifo_chan_oclass -nv50_fifo_dma_oclass = { - .base.oclass = NV50_CHANNEL_DMA, - .base.minver = 0, - .base.maxver = 0, - .ctor = nv50_fifo_dma_new, -}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c index c0a7d0f21dac..3885c3830b94 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c @@ -119,7 +119,6 @@ g84_fifo = { .uevent_init = g84_fifo_uevent_init, .uevent_fini = g84_fifo_uevent_fini, .chan = { - &g84_fifo_dma_oclass, &g84_fifo_gpfifo_oclass, NULL }, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c index b6900a52bcce..ae6c4d846eb5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c @@ -341,8 +341,6 @@ gk104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass, "runlist %016llx priv %d\n", args->v0.version, args->v0.vmm, args->v0.ioffset, args->v0.ilength, args->v0.runlist, args->v0.priv); - if (args->v0.priv && !oclass->client->super) - return -EINVAL; return gk104_fifo_gpfifo_new_(fifo, &args->v0.runlist, &args->v0.chid, diff --git 
a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c index ee4967b706a7..743791c514fe 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c @@ -226,8 +226,6 @@ gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass, "runlist %016llx priv %d\n", args->v0.version, args->v0.vmm, args->v0.ioffset, args->v0.ilength, args->v0.runlist, args->v0.priv); - if (args->v0.priv && !oclass->client->super) - return -EINVAL; return gv100_fifo_gpfifo_new_(&gv100_fifo_gpfifo, fifo, &args->v0.runlist, &args->v0.chid, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c index abef7fb6e2d3..99aafa103a31 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu102.c @@ -65,8 +65,6 @@ tu102_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass, "runlist %016llx priv %d\n", args->v0.version, args->v0.vmm, args->v0.ioffset, args->v0.ilength, args->v0.runlist, args->v0.priv); - if (args->v0.priv && !oclass->client->super) - return -EINVAL; return gv100_fifo_gpfifo_new_(&tu102_fifo_gpfifo, fifo, &args->v0.runlist, &args->v0.chid, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c index be94156ea248..a08742cf425a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c @@ -136,7 +136,6 @@ nv50_fifo = { .pause = nv04_fifo_pause, .start = nv04_fifo_start, .chan = { - &nv50_fifo_dma_oclass, &nv50_fifo_gpfifo_oclass, NULL }, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c index fac2f9a45ea6..e530bb8b3b17 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c @@ -41,7 +41,7 @@ nvkm_umem_search(struct nvkm_client *client, u64 handle) object = nvkm_object_search(client, handle, &nvkm_umem); if (IS_ERR(object)) { - if (client->super && client != master) { + if (client != master) { spin_lock(&master->lock); list_for_each_entry(umem, &master->umem, head) { if (umem->object.object == handle) { @@ -53,8 +53,7 @@ nvkm_umem_search(struct nvkm_client *client, u64 handle) } } else { umem = nvkm_umem(object); - if (!umem->priv || client->super) - memory = nvkm_memory_ref(umem->memory); + memory = nvkm_memory_ref(umem->memory); } return memory ? 
memory : ERR_PTR(-ENOENT); @@ -167,7 +166,6 @@ nvkm_umem_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, nvkm_object_ctor(&nvkm_umem, oclass, &umem->object); umem->mmu = mmu; umem->type = mmu->type[type].type; - umem->priv = oclass->client->super; INIT_LIST_HEAD(&umem->head); *pobject = &umem->object; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h index 85cf692d620a..d56a594016cc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h @@ -8,7 +8,6 @@ struct nvkm_umem { struct nvkm_object object; struct nvkm_mmu *mmu; u8 type:8; - bool priv:1; bool mappable:1; bool io:1; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c index 0e4b8941da37..6870fda4b188 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c @@ -34,7 +34,7 @@ nvkm_ummu_sclass(struct nvkm_object *object, int index, { struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu; - if (mmu->func->mem.user.oclass && oclass->client->super) { + if (mmu->func->mem.user.oclass) { if (index-- == 0) { oclass->base = mmu->func->mem.user; oclass->ctor = nvkm_umem_new; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c index c43b8248c682..d6a1f8d04c09 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c @@ -45,7 +45,6 @@ nvkm_uvmm_search(struct nvkm_client *client, u64 handle) static int nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc) { - struct nvkm_client *client = uvmm->object.client; union { struct nvif_vmm_pfnclr_v0 v0; } *args = argv; @@ -59,9 +58,6 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc) } else return ret; - if (!client->super) - return -ENOENT; - if (size) { mutex_lock(&vmm->mutex); ret = nvkm_vmm_pfn_unmap(vmm, addr, size); @@ -74,7 +70,6 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc) static int nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc) { - struct nvkm_client *client = uvmm->object.client; union { struct nvif_vmm_pfnmap_v0 v0; } *args = argv; @@ -93,9 +88,6 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc) } else return ret; - if (!client->super) - return -ENOENT; - if (size) { mutex_lock(&vmm->mutex); ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys); @@ -108,7 +100,6 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc) static int nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc) { - struct nvkm_client *client = uvmm->object.client; union { struct nvif_vmm_unmap_v0 v0; } *args = argv; @@ -130,9 +121,8 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc) goto done; } - if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) { - VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr, - vma->user, !client->super, vma->busy); + if (ret = -ENOENT, vma->busy) { + VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy); goto done; } @@ -181,9 +171,8 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc) goto fail; } - if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) { - VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr, - vma->user, !client->super, vma->busy); + if (ret = -ENOENT, vma->busy) { + VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy); goto fail; } @@ -230,7 +219,6 @@ fail: static 
int nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc) { - struct nvkm_client *client = uvmm->object.client; union { struct nvif_vmm_put_v0 v0; } *args = argv; @@ -252,9 +240,8 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc) goto done; } - if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) { - VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr, - vma->user, !client->super, vma->busy); + if (ret = -ENOENT, vma->busy) { + VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy); goto done; } @@ -268,7 +255,6 @@ done: static int nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc) { - struct nvkm_client *client = uvmm->object.client; union { struct nvif_vmm_get_v0 v0; } *args = argv; @@ -297,7 +283,6 @@ nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc) return ret; args->v0.addr = vma->addr; - vma->user = !client->super; return ret; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c index 710f3f8dc7c9..8bf00b396ec1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c @@ -774,7 +774,6 @@ nvkm_vma_tail(struct nvkm_vma *vma, u64 tail) new->refd = vma->refd; new->used = vma->used; new->part = vma->part; - new->user = vma->user; new->busy = vma->busy; new->mapped = vma->mapped; list_add(&new->head, &vma->head); @@ -951,7 +950,7 @@ nvkm_vmm_node_split(struct nvkm_vmm *vmm, static void nvkm_vma_dump(struct nvkm_vma *vma) { - printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c%c %p\n", + printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c %p\n", vma->addr, (u64)vma->size, vma->used ? '-' : 'F', vma->mapref ? 'R' : '-', @@ -959,7 +958,6 @@ nvkm_vma_dump(struct nvkm_vma *vma) vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-', vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-', vma->part ? 'P' : '-', - vma->user ? 'U' : '-', vma->busy ? 'B' : '-', vma->mapped ? 
'M' : '-', vma->memory); @@ -1024,7 +1022,6 @@ nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size) vma->mapref = true; vma->sparse = false; vma->used = true; - vma->user = true; nvkm_vmm_node_insert(vmm, vma); list_add_tail(&vma->head, &vmm->list); return 0; @@ -1615,7 +1612,6 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma) vma->page = NVKM_VMA_PAGE_NONE; vma->refd = NVKM_VMA_PAGE_NONE; vma->used = false; - vma->user = false; nvkm_vmm_put_region(vmm, vma); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c index f02abd9cb4dd..b5e733783b5b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c @@ -534,15 +534,13 @@ int gp100_vmm_mthd(struct nvkm_vmm *vmm, struct nvkm_client *client, u32 mthd, void *argv, u32 argc) { - if (client->super) { - switch (mthd) { - case GP100_VMM_VN_FAULT_REPLAY: - return gp100_vmm_fault_replay(vmm, argv, argc); - case GP100_VMM_VN_FAULT_CANCEL: - return gp100_vmm_fault_cancel(vmm, argv, argc); - default: - break; - } + switch (mthd) { + case GP100_VMM_VN_FAULT_REPLAY: + return gp100_vmm_fault_replay(vmm, argv, argc); + case GP100_VMM_VN_FAULT_CANCEL: + return gp100_vmm_fault_cancel(vmm, argv, argc); + default: + break; } return -EINVAL; } diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c index 74e3b460132b..2df59b3c2ea1 100644 --- a/drivers/gpu/drm/ttm/ttm_device.c +++ b/drivers/gpu/drm/ttm/ttm_device.c @@ -78,9 +78,7 @@ static int ttm_global_init(void) ttm_debugfs_root = debugfs_create_dir("ttm", NULL); if (IS_ERR(ttm_debugfs_root)) { - ret = PTR_ERR(ttm_debugfs_root); ttm_debugfs_root = NULL; - goto out; } /* Limit the number of pages in the pool to about 50% of the total diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c index f782d5e1aa25..03e1db5d1e8c 100644 --- a/drivers/infiniband/core/uverbs_std_types_mr.c +++ b/drivers/infiniband/core/uverbs_std_types_mr.c @@ -249,6 +249,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_REG_DMABUF_MR)( mr->uobject = uobj; atomic_inc(&pd->usecnt); + rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); + rdma_restrack_set_name(&mr->res, NULL); + rdma_restrack_add(&mr->res); uobj->object = mr; uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_DMABUF_MR_HANDLE); diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 283b6b81563c..ea0054c60fbc 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -1681,6 +1681,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq, if (nq) nq->budget++; atomic_inc(&rdev->srq_count); + spin_lock_init(&srq->lock); return 0; diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index a8688a92c760..4678bd6ec7d6 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -1397,7 +1397,6 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode) memset(&rattr, 0, sizeof(rattr)); rc = bnxt_re_register_netdev(rdev); if (rc) { - rtnl_unlock(); ibdev_err(&rdev->ibdev, "Failed to register with netdev: %#x\n", rc); return -EINVAL; diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c index 203e6ddcacbc..be4a07bd268a 100644 --- a/drivers/infiniband/hw/efa/efa_main.c +++ b/drivers/infiniband/hw/efa/efa_main.c @@ -357,6 +357,7 @@ static int efa_enable_msix(struct
efa_dev *dev) } if (irq_num != msix_vecs) { + efa_disable_msix(dev); dev_err(&dev->pdev->dev, "Allocated %d MSI-X (out of %d requested)\n", irq_num, msix_vecs); diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index eb15c310d63d..e83dc562629e 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -3055,6 +3055,7 @@ static void __sdma_process_event(struct sdma_engine *sde, static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) { int i; + struct sdma_desc *descp; /* Handle last descriptor */ if (unlikely((tx->num_desc == (MAX_DESC - 1)))) { @@ -3075,12 +3076,10 @@ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) if (unlikely(tx->num_desc == MAX_DESC)) goto enomem; - tx->descp = kmalloc_array( - MAX_DESC, - sizeof(struct sdma_desc), - GFP_ATOMIC); - if (!tx->descp) + descp = kmalloc_array(MAX_DESC, sizeof(struct sdma_desc), GFP_ATOMIC); + if (!descp) goto enomem; + tx->descp = descp; /* reserve last descriptor for coalescing */ tx->desc_limit = MAX_DESC - 1; diff --git a/drivers/infiniband/hw/irdma/Kconfig b/drivers/infiniband/hw/irdma/Kconfig index dab88286d549..b6f9c41bca51 100644 --- a/drivers/infiniband/hw/irdma/Kconfig +++ b/drivers/infiniband/hw/irdma/Kconfig @@ -6,7 +6,7 @@ config INFINIBAND_IRDMA depends on PCI depends on ICE && I40E select GENERIC_ALLOCATOR - select CONFIG_AUXILIARY_BUS + select AUXILIARY_BUS help This is an Intel(R) Ethernet Protocol Driver for RDMA that supports E810 (iWARP/RoCE) and X722 (iWARP) network devices. diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index ae05e143401c..466f0a521940 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -4462,7 +4462,8 @@ static void mlx5r_mp_remove(struct auxiliary_device *adev) mutex_lock(&mlx5_ib_multiport_mutex); if (mpi->ibdev) mlx5_ib_unbind_slave_port(mpi->ibdev, mpi); - list_del(&mpi->list); + else + list_del(&mpi->list); mutex_unlock(&mlx5_ib_multiport_mutex); kfree(mpi); } diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c index 0ea9a5aa4ec0..1c1d1b53312d 100644 --- a/drivers/infiniband/sw/rxe/rxe_mcast.c +++ b/drivers/infiniband/sw/rxe/rxe_mcast.c @@ -85,7 +85,7 @@ int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp, goto out; } - elem = rxe_alloc(&rxe->mc_elem_pool); + elem = rxe_alloc_locked(&rxe->mc_elem_pool); if (!elem) { err = -ENOMEM; goto out; diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c index 85b812586ed4..72d95398e604 100644 --- a/drivers/infiniband/sw/rxe/rxe_queue.c +++ b/drivers/infiniband/sw/rxe/rxe_queue.c @@ -63,7 +63,7 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem, if (*num_elem < 0) goto err1; - q = kmalloc(sizeof(*q), GFP_KERNEL); + q = kzalloc(sizeof(*q), GFP_KERNEL); if (!q) goto err1; diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c index 27cc5f03611c..f6fae64861ce 100644 --- a/drivers/interconnect/qcom/icc-rpmh.c +++ b/drivers/interconnect/qcom/icc-rpmh.c @@ -20,18 +20,13 @@ void qcom_icc_pre_aggregate(struct icc_node *node) { size_t i; struct qcom_icc_node *qn; - struct qcom_icc_provider *qp; qn = node->data; - qp = to_qcom_provider(node->provider); for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) { qn->sum_avg[i] = 0; qn->max_peak[i] = 0; } - - for (i = 0; i < qn->num_bcms; i++) - qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]); }
EXPORT_SYMBOL_GPL(qcom_icc_pre_aggregate); @@ -49,8 +44,10 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw, { size_t i; struct qcom_icc_node *qn; + struct qcom_icc_provider *qp; qn = node->data; + qp = to_qcom_provider(node->provider); if (!tag) tag = QCOM_ICC_TAG_ALWAYS; @@ -70,6 +67,9 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw, *agg_avg += avg_bw; *agg_peak = max_t(u32, *agg_peak, peak_bw); + for (i = 0; i < qn->num_bcms; i++) + qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]); + return 0; } EXPORT_SYMBOL_GPL(qcom_icc_aggregate); diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 98ba927aee1a..6f0df629353f 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -768,6 +768,7 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size, __iommu_dma_unmap(dev, sgt->sgl->dma_address, size); __iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT); sg_free_table(&sh->sgt); + kfree(sh); } #endif /* CONFIG_DMA_REMAP */ diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index c6cf44a6c923..9ec374e17469 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -511,7 +511,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev, u32 pasid, bool fault_ignore) { struct pasid_entry *pte; - u16 did; + u16 did, pgtt; pte = intel_pasid_get_entry(dev, pasid); if (WARN_ON(!pte)) @@ -521,13 +521,19 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev, return; did = pasid_get_domain_id(pte); + pgtt = pasid_pte_get_pgtt(pte); + intel_pasid_clear_entry(dev, pasid, fault_ignore); if (!ecap_coherent(iommu->ecap)) clflush_cache_range(pte, sizeof(*pte)); pasid_cache_invalidation_with_pasid(iommu, did, pasid); - qi_flush_piotlb(iommu, did, pasid, 0, -1, 0); + + if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY) + qi_flush_piotlb(iommu, did, pasid, 0, -1, 0); + else + iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); /* Device IOTLB doesn't need to be flushed in caching mode. */ if (!cap_caching_mode(iommu->cap)) diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h index 5ff61c3d401f..c11bc8b833b8 100644 --- a/drivers/iommu/intel/pasid.h +++ b/drivers/iommu/intel/pasid.h @@ -99,6 +99,12 @@ static inline bool pasid_pte_is_present(struct pasid_entry *pte) return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT; } +/* Get PGTT field of a PASID table entry */ +static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte) +{ + return (u16)((READ_ONCE(pte->val[0]) >> 6) & 0x7); +} + extern unsigned int intel_pasid_max_id; int intel_pasid_alloc_table(struct device *dev); void intel_pasid_free_table(struct device *dev); diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 9b0f22bc0514..4b9b3f35ba0e 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -675,7 +675,6 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid) kfree_rcu(sdev, rcu); if (list_empty(&svm->devs)) { - intel_svm_free_pasid(mm); if (svm->notifier.ops) { mmu_notifier_unregister(&svm->notifier, mm); /* Clear mm's pasid. */ @@ -690,6 +689,8 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid) kfree(svm); } } + /* Drop a PASID reference and free it if no reference. 
*/ + intel_svm_free_pasid(mm); } out: return ret; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 5419c4b9f27a..63f0af10c403 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -924,6 +924,9 @@ void iommu_group_remove_device(struct device *dev) struct iommu_group *group = dev->iommu_group; struct group_device *tmp_device, *device = NULL; + if (!group) + return; + dev_info(dev, "Removing from iommu group %d\n", group->id); /* Pre-notify listeners that a device is being removed. */ diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c index 3461b0a7dc62..cbfdadecb23b 100644 --- a/drivers/ipack/carriers/tpci200.c +++ b/drivers/ipack/carriers/tpci200.c @@ -89,16 +89,13 @@ static void tpci200_unregister(struct tpci200_board *tpci200) free_irq(tpci200->info->pdev->irq, (void *) tpci200); pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs); - pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs); pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR); pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR); pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR); pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR); - pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR); pci_disable_device(tpci200->info->pdev); - pci_dev_put(tpci200->info->pdev); } static void tpci200_enable_irq(struct tpci200_board *tpci200, @@ -257,7 +254,7 @@ static int tpci200_register(struct tpci200_board *tpci200) "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 2 !", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); - goto out_disable_pci; + goto err_disable_device; } /* Request IO ID INT space (Bar 3) */ @@ -269,7 +266,7 @@ static int tpci200_register(struct tpci200_board *tpci200) "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 3 !", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); - goto out_release_ip_space; + goto err_ip_interface_bar; } /* Request MEM8 space (Bar 5) */ @@ -280,7 +277,7 @@ static int tpci200_register(struct tpci200_board *tpci200) "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 5!", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); - goto out_release_ioid_int_space; + goto err_io_id_int_spaces_bar; } /* Request MEM16 space (Bar 4) */ @@ -291,7 +288,7 @@ static int tpci200_register(struct tpci200_board *tpci200) "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 4!", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); - goto out_release_mem8_space; + goto err_mem8_space_bar; } /* Map internal tpci200 driver user space */ @@ -305,7 +302,7 @@ static int tpci200_register(struct tpci200_board *tpci200) tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); res = -ENOMEM; - goto out_release_mem8_space; + goto err_mem16_space_bar; } /* Initialize lock that protects interface_regs */ @@ -344,18 +341,22 @@ static int tpci200_register(struct tpci200_board *tpci200) "(bn 0x%X, sn 0x%X) unable to register IRQ !", tpci200->info->pdev->bus->number, tpci200->info->pdev->devfn); - goto out_release_ioid_int_space; + goto err_interface_regs; } return 0; -out_release_mem8_space: +err_interface_regs: + pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs); +err_mem16_space_bar: + pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR); +err_mem8_space_bar: pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR); -out_release_ioid_int_space: +err_io_id_int_spaces_bar: 
pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR); -out_release_ip_space: +err_ip_interface_bar: pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR); -out_disable_pci: +err_disable_device: pci_disable_device(tpci200->info->pdev); return res; } @@ -527,7 +528,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, tpci200->info = kzalloc(sizeof(struct tpci200_infos), GFP_KERNEL); if (!tpci200->info) { ret = -ENOMEM; - goto out_err_info; + goto err_tpci200; } pci_dev_get(pdev); @@ -538,7 +539,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, if (ret) { dev_err(&pdev->dev, "Failed to allocate PCI Configuration Memory"); ret = -EBUSY; - goto out_err_pci_request; + goto err_tpci200_info; } tpci200->info->cfg_regs = ioremap( pci_resource_start(pdev, TPCI200_CFG_MEM_BAR), @@ -546,7 +547,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, if (!tpci200->info->cfg_regs) { dev_err(&pdev->dev, "Failed to map PCI Configuration Memory"); ret = -EFAULT; - goto out_err_ioremap; + goto err_request_region; } /* Disable byte swapping for 16 bit IP module access. This will ensure @@ -569,7 +570,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, if (ret) { dev_err(&pdev->dev, "error during tpci200 install\n"); ret = -ENODEV; - goto out_err_install; + goto err_cfg_regs; } /* Register the carrier in the industry pack bus driver */ @@ -581,7 +582,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "error registering the carrier on ipack driver\n"); ret = -EFAULT; - goto out_err_bus_register; + goto err_tpci200_install; } /* save the bus number given by ipack to logging purpose */ @@ -592,19 +593,16 @@ static int tpci200_pci_probe(struct pci_dev *pdev, tpci200_create_device(tpci200, i); return 0; -out_err_bus_register: +err_tpci200_install: tpci200_uninstall(tpci200); - /* tpci200->info->cfg_regs is unmapped in tpci200_uninstall */ - tpci200->info->cfg_regs = NULL; -out_err_install: - if (tpci200->info->cfg_regs) - iounmap(tpci200->info->cfg_regs); -out_err_ioremap: +err_cfg_regs: + pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs); +err_request_region: pci_release_region(pdev, TPCI200_CFG_MEM_BAR); -out_err_pci_request: - pci_dev_put(pdev); +err_tpci200_info: kfree(tpci200->info); -out_err_info: + pci_dev_put(pdev); +err_tpci200: kfree(tpci200); return ret; } @@ -614,6 +612,12 @@ static void __tpci200_pci_remove(struct tpci200_board *tpci200) ipack_bus_unregister(tpci200->info->ipack_bus); tpci200_uninstall(tpci200); + pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs); + + pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR); + + pci_dev_put(tpci200->info->pdev); + kfree(tpci200->info); kfree(tpci200); } diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index d333130d1531..c3229d8c7041 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -2018,8 +2018,8 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t) continue; } - dw_mci_stop_dma(host); send_stop_abort(host, data); + dw_mci_stop_dma(host); state = STATE_SENDING_STOP; break; } @@ -2043,10 +2043,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t) */ if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) { - dw_mci_stop_dma(host); if (!(host->data_status & (SDMMC_INT_DRTO | SDMMC_INT_EBE))) send_stop_abort(host, data); + dw_mci_stop_dma(host); state = STATE_DATA_ERROR; break; } @@ -2079,10 +2079,10 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t) */ if 
(test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) { - dw_mci_stop_dma(host); if (!(host->data_status & (SDMMC_INT_DRTO | SDMMC_INT_EBE))) send_stop_abort(host, data); + dw_mci_stop_dma(host); state = STATE_DATA_ERROR; break; } diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c index 51db30acf4dc..fdaa11f92fe6 100644 --- a/drivers/mmc/host/mmci_stm32_sdmmc.c +++ b/drivers/mmc/host/mmci_stm32_sdmmc.c @@ -479,8 +479,9 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host, u32 status; int ret = 0; - if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) { - spin_lock_irqsave(&host->lock, flags); + spin_lock_irqsave(&host->lock, flags); + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180 && + host->pwr_reg & MCI_STM32_VSWITCHEN) { mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH); spin_unlock_irqrestore(&host->lock, flags); @@ -492,9 +493,11 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host, writel_relaxed(MCI_STM32_VSWENDC | MCI_STM32_CKSTOPC, host->base + MMCICLEAR); + spin_lock_irqsave(&host->lock, flags); mmci_write_pwrreg(host, host->pwr_reg & ~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH)); } + spin_unlock_irqrestore(&host->lock, flags); return ret; } diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c index cce390fe9cf3..e7565c671998 100644 --- a/drivers/mmc/host/sdhci-iproc.c +++ b/drivers/mmc/host/sdhci-iproc.c @@ -173,6 +173,23 @@ static unsigned int sdhci_iproc_get_max_clock(struct sdhci_host *host) return pltfm_host->clock; } +/* + * There is a known bug on BCM2711's SDHCI core integration where the + * controller will hang when the difference between the core clock and the bus + * clock is too great. Specifically this can be reproduced under the following + * conditions: + * + * - No SD card plugged in, polling thread is running, probing cards at + * 100 kHz. + * - BCM2711's core clock configured at 500MHz or more + * + * So we set 200kHz as the minimum clock frequency available for that SoC. + */ +static unsigned int sdhci_iproc_bcm2711_get_min_clock(struct sdhci_host *host) +{ + return 200000; +} + static const struct sdhci_ops sdhci_iproc_ops = { .set_clock = sdhci_set_clock, .get_max_clock = sdhci_iproc_get_max_clock, @@ -271,13 +288,15 @@ static const struct sdhci_ops sdhci_iproc_bcm2711_ops = { .set_clock = sdhci_set_clock, .set_power = sdhci_set_power_and_bus_voltage, .get_max_clock = sdhci_iproc_get_max_clock, + .get_min_clock = sdhci_iproc_bcm2711_get_min_clock, .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, }; static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = { - .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, .ops = &sdhci_iproc_bcm2711_ops, }; diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index e44b7a66b73c..290a14cdc1cf 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -2089,6 +2089,23 @@ static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery) sdhci_cqe_disable(mmc, recovery); } +static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) +{ + u32 count, start = 15; + + __sdhci_set_timeout(host, cmd); + count = sdhci_readb(host, SDHCI_TIMEOUT_CONTROL); + /* + * Update software timeout value if its value is less than hardware data + * timeout value. 
Qcom SoC hardware data timeout value was calculated + * using 4 * MCLK * 2^(count + 13), where MCLK = 1 / host->clock. + */ + if (cmd && cmd->data && host->clock > 400000 && + host->clock <= 50000000 && + ((1 << (count + start)) > (10 * host->clock))) + host->data_timeout = 22LL * NSEC_PER_SEC; +} + static const struct cqhci_host_ops sdhci_msm_cqhci_ops = { .enable = sdhci_msm_cqe_enable, .disable = sdhci_msm_cqe_disable, @@ -2438,6 +2455,7 @@ static const struct sdhci_ops sdhci_msm_ops = { .irq = sdhci_msm_cqe_irq, .dump_vendor_regs = sdhci_msm_dump_vendor_regs, .set_power = sdhci_set_power_noreg, + .set_timeout = sdhci_msm_set_timeout, }; static const struct sdhci_pltfm_data sdhci_msm_pdata = { diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 7370981e9b34..c6068a251fbe 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -224,8 +224,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, if (id == ESD_EV_CAN_ERROR_EXT) { u8 state = msg->msg.rx.data[0]; u8 ecc = msg->msg.rx.data[1]; - u8 txerr = msg->msg.rx.data[2]; - u8 rxerr = msg->msg.rx.data[3]; + u8 rxerr = msg->msg.rx.data[2]; + u8 txerr = msg->msg.rx.data[3]; skb = alloc_can_err_skb(priv->netdev, &cf); if (skb == NULL) { diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c index 3faff95fd49f..542cfc4ccb08 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c @@ -1473,9 +1473,6 @@ static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port, u16 data; u8 gates; - cur++; - next++; - if (i == schedule->num_entries) gates = initial->gate_mask ^ cur->gate_mask; @@ -1504,6 +1501,9 @@ static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port, (initial->gate_mask << TR_GCLCMD_INIT_GATE_STATES_SHIFT); hellcreek_write(hellcreek, data, TR_GCLCMD); + + cur++; + next++; } } @@ -1551,7 +1551,7 @@ static bool hellcreek_schedule_startable(struct hellcreek *hellcreek, int port) /* Calculate difference to admin base time */ base_time_ns = ktime_to_ns(hellcreek_port->current_schedule->base_time); - return base_time_ns - current_ns < (s64)8 * NSEC_PER_SEC; + return base_time_ns - current_ns < (s64)4 * NSEC_PER_SEC; } static void hellcreek_start_schedule(struct hellcreek *hellcreek, int port) diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index b1d46dd8eaab..6ea003678798 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -1277,15 +1277,16 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane) int err; /* mv88e6393x family errata 4.6: - * Cannot clear PwrDn bit on SERDES on port 0 if device is configured - * CPU_MGD mode or P0_mode is configured for [x]MII. - * Workaround: Set Port0 SERDES register 4.F002 bit 5=0 and bit 15=1. + * Cannot clear PwrDn bit on SERDES if device is configured CPU_MGD + * mode or P0_mode is configured for [x]MII. + * Workaround: Set SERDES register 4.F002 bit 5=0 and bit 15=1. * * It seems that after this workaround the SERDES is automatically * powered up (the bit is cleared), so power it down. 
*/ - if (lane == MV88E6393X_PORT0_LANE) { - err = mv88e6390_serdes_read(chip, MV88E6393X_PORT0_LANE, + if (lane == MV88E6393X_PORT0_LANE || lane == MV88E6393X_PORT9_LANE || + lane == MV88E6393X_PORT10_LANE) { + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, MV88E6393X_SERDES_POC, ®); if (err) diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 1cdff1dca790..d796684ec9ca 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -118,6 +118,7 @@ config LANTIQ_XRX200 Support for the PMAC of the Gigabit switch (GSWIP) inside the Lantiq / Intel VRX200 VDSL SoC +source "drivers/net/ethernet/litex/Kconfig" source "drivers/net/ethernet/marvell/Kconfig" source "drivers/net/ethernet/mediatek/Kconfig" source "drivers/net/ethernet/mellanox/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index cb3f9084a21b..aaa5078cd7d1 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -51,6 +51,7 @@ obj-$(CONFIG_JME) += jme.o obj-$(CONFIG_KORINA) += korina.o obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o obj-$(CONFIG_LANTIQ_XRX200) += lantiq_xrx200.o +obj-$(CONFIG_NET_VENDOR_LITEX) += litex/ obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/ obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ diff --git a/drivers/net/ethernet/actions/Kconfig b/drivers/net/ethernet/actions/Kconfig index ccad6a3f4d6f..f630cac2ab6c 100644 --- a/drivers/net/ethernet/actions/Kconfig +++ b/drivers/net/ethernet/actions/Kconfig @@ -2,8 +2,8 @@ config NET_VENDOR_ACTIONS bool "Actions Semi devices" - default y - depends on ARCH_ACTIONS + depends on ARCH_ACTIONS || COMPILE_TEST + default ARCH_ACTIONS help If you have a network (Ethernet) card belonging to this class, say Y. 
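The serdes hunk above widens the errata 4.6 workaround from the port 0 lane to the port 9 and port 10 lanes as well. A rough sketch of the read-modify-write that the workaround text describes (clear bit 5, set bit 15 of register 4.F002); mv88e6390_serdes_write() and the raw BIT() masks are assumptions for illustration, not the patch's exact code:

#include <linux/bits.h>
#include <linux/mdio.h>

static int mv88e6393x_errata_4_6_one_lane(struct mv88e6xxx_chip *chip, int lane)
{
	u16 reg;
	int err;

	/* Register 4.F002 is the SERDES power/operation control (POC) */
	err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
				    MV88E6393X_SERDES_POC, &reg);
	if (err)
		return err;

	reg &= ~BIT(5);		/* bit 5 = 0, per the errata workaround */
	reg |= BIT(15);		/* bit 15 = 1 */

	return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
				      MV88E6393X_SERDES_POC, reg);
}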
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c index 860c18fb7aae..80399c8980bd 100644 --- a/drivers/net/ethernet/apm/xgene-v2/main.c +++ b/drivers/net/ethernet/apm/xgene-v2/main.c @@ -677,11 +677,13 @@ static int xge_probe(struct platform_device *pdev) ret = register_netdev(ndev); if (ret) { netdev_err(ndev, "Failed to register netdev\n"); - goto err; + goto err_mdio_remove; } return 0; +err_mdio_remove: + xge_mdio_remove(ndev); err: free_netdev(ndev); diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile index 2b8ae687b3c1..c6ef7ec2c115 100644 --- a/drivers/net/ethernet/broadcom/bnxt/Makefile +++ b/drivers/net/ethernet/broadcom/bnxt/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_BNXT) += bnxt_en.o -bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o +bnxt_en-y := bnxt.o bnxt_hwrm.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index ee66d410c82c..627f85ee3922 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -60,6 +60,7 @@ #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_ulp.h" #include "bnxt_sriov.h" #include "bnxt_ethtool.h" @@ -276,6 +277,7 @@ static const u16 bnxt_async_events_arr[] = { ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY, ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY, ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION, + ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE, ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG, ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST, ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP, @@ -1651,6 +1653,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); if (!skb) { bnxt_abort_tpa(cpr, idx, agg_bufs); + cpr->sw_stats.rx.rx_oom_discards += 1; return NULL; } } else { @@ -1660,6 +1663,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC); if (!new_data) { bnxt_abort_tpa(cpr, idx, agg_bufs); + cpr->sw_stats.rx.rx_oom_discards += 1; return NULL; } @@ -1675,6 +1679,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, if (!skb) { kfree(data); bnxt_abort_tpa(cpr, idx, agg_bufs); + cpr->sw_stats.rx.rx_oom_discards += 1; return NULL; } skb_reserve(skb, bp->rx_offset); @@ -1685,6 +1690,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true); if (!skb) { /* Page reuse already handled by bnxt_rx_pages(). 
*/ + cpr->sw_stats.rx.rx_oom_discards += 1; return NULL; } } @@ -1888,6 +1894,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (agg_bufs) bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, false); + cpr->sw_stats.rx.rx_oom_discards += 1; rc = -ENOMEM; goto next_rx; } @@ -1901,6 +1908,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, payload | len); if (!skb) { + cpr->sw_stats.rx.rx_oom_discards += 1; rc = -ENOMEM; goto next_rx; } @@ -1909,6 +1917,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (agg_bufs) { skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false); if (!skb) { + cpr->sw_stats.rx.rx_oom_discards += 1; rc = -ENOMEM; goto next_rx; } @@ -2003,6 +2012,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp, struct rx_cmp *rxcmp; u16 cp_cons; u8 cmp_type; + int rc; cp_cons = RING_CMP(tmp_raw_cons); rxcmp = (struct rx_cmp *) @@ -2031,7 +2041,10 @@ static int bnxt_force_rx_discard(struct bnxt *bp, tpa_end1->rx_tpa_end_cmp_errors_v2 |= cpu_to_le32(RX_TPA_END_CMP_ERRORS); } - return bnxt_rx_pkt(bp, cpr, raw_cons, event); + rc = bnxt_rx_pkt(bp, cpr, raw_cons, event); + if (rc && rc != -EBUSY) + cpr->sw_stats.rx.rx_netpoll_discards += 1; + return rc; } u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) @@ -2257,6 +2270,12 @@ static int bnxt_async_event_process(struct bnxt *bp, bnxt_event_error_report(bp, data1, data2); goto async_event_process_exit; } + case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: { + u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff; + + hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED); + goto async_event_process_exit; + } default: goto async_event_process_exit; } @@ -2276,10 +2295,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) switch (cmpl_type) { case CMPL_BASE_TYPE_HWRM_DONE: seq_id = le16_to_cpu(h_cmpl->sequence_id); - if (seq_id == bp->hwrm_intr_seq_id) - bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id; - else - netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); + hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE); break; case CMPL_BASE_TYPE_HWRM_FWD_REQ: @@ -3944,77 +3960,26 @@ out: static void bnxt_free_hwrm_resources(struct bnxt *bp) { - struct pci_dev *pdev = bp->pdev; + struct bnxt_hwrm_wait_token *token; - if (bp->hwrm_cmd_resp_addr) { - dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, - bp->hwrm_cmd_resp_dma_addr); - bp->hwrm_cmd_resp_addr = NULL; - } + dma_pool_destroy(bp->hwrm_dma_pool); + bp->hwrm_dma_pool = NULL; - if (bp->hwrm_cmd_kong_resp_addr) { - dma_free_coherent(&pdev->dev, PAGE_SIZE, - bp->hwrm_cmd_kong_resp_addr, - bp->hwrm_cmd_kong_resp_dma_addr); - bp->hwrm_cmd_kong_resp_addr = NULL; - } -} - -static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp) -{ - struct pci_dev *pdev = bp->pdev; - - if (bp->hwrm_cmd_kong_resp_addr) - return 0; - - bp->hwrm_cmd_kong_resp_addr = - dma_alloc_coherent(&pdev->dev, PAGE_SIZE, - &bp->hwrm_cmd_kong_resp_dma_addr, - GFP_KERNEL); - if (!bp->hwrm_cmd_kong_resp_addr) - return -ENOMEM; - - return 0; + rcu_read_lock(); + hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) + WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED); + rcu_read_unlock(); } static int bnxt_alloc_hwrm_resources(struct bnxt *bp) { - struct pci_dev *pdev = bp->pdev; - - bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, - &bp->hwrm_cmd_resp_dma_addr, - GFP_KERNEL); - if (!bp->hwrm_cmd_resp_addr) + 
bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev, + BNXT_HWRM_DMA_SIZE, + BNXT_HWRM_DMA_ALIGN, 0); + if (!bp->hwrm_dma_pool) return -ENOMEM; - return 0; -} - -static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp) -{ - if (bp->hwrm_short_cmd_req_addr) { - struct pci_dev *pdev = bp->pdev; - - dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, - bp->hwrm_short_cmd_req_addr, - bp->hwrm_short_cmd_req_dma_addr); - bp->hwrm_short_cmd_req_addr = NULL; - } -} - -static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp) -{ - struct pci_dev *pdev = bp->pdev; - - if (bp->hwrm_short_cmd_req_addr) - return 0; - - bp->hwrm_short_cmd_req_addr = - dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, - &bp->hwrm_short_cmd_req_dma_addr, - GFP_KERNEL); - if (!bp->hwrm_short_cmd_req_addr) - return -ENOMEM; + INIT_HLIST_HEAD(&bp->hwrm_pending_list); return 0; } @@ -4075,8 +4040,8 @@ static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count) static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, struct bnxt_stats_mem *stats) { - struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_qstats_ext_input req = {0}; + struct hwrm_func_qstats_ext_output *resp; + struct hwrm_func_qstats_ext_input *req; __le64 *hw_masks; int rc; @@ -4084,19 +4049,20 @@ static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, !(bp->flags & BNXT_FLAG_CHIP_P5)) return -EOPNOTSUPP; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1); - req.fid = cpu_to_le16(0xffff); - req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT); if (rc) - goto qstat_exit; + return rc; - hw_masks = &resp->rx_ucast_pkts; - bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); + req->fid = cpu_to_le16(0xffff); + req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; -qstat_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + hw_masks = &resp->rx_ucast_pkts; + bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); + } + hwrm_req_drop(bp, req); return rc; } @@ -4562,313 +4528,38 @@ static void bnxt_enable_int(struct bnxt *bp) } } -void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, - u16 cmpl_ring, u16 target_id) -{ - struct input *req = request; - - req->req_type = cpu_to_le16(req_type); - req->cmpl_ring = cpu_to_le16(cmpl_ring); - req->target_id = cpu_to_le16(target_id); - if (bnxt_kong_hwrm_message(bp, req)) - req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); - else - req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); -} - -static int bnxt_hwrm_to_stderr(u32 hwrm_err) -{ - switch (hwrm_err) { - case HWRM_ERR_CODE_SUCCESS: - return 0; - case HWRM_ERR_CODE_RESOURCE_LOCKED: - return -EROFS; - case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED: - return -EACCES; - case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR: - return -ENOSPC; - case HWRM_ERR_CODE_INVALID_PARAMS: - case HWRM_ERR_CODE_INVALID_FLAGS: - case HWRM_ERR_CODE_INVALID_ENABLES: - case HWRM_ERR_CODE_UNSUPPORTED_TLV: - case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR: - return -EINVAL; - case HWRM_ERR_CODE_NO_BUFFER: - return -ENOMEM; - case HWRM_ERR_CODE_HOT_RESET_PROGRESS: - case HWRM_ERR_CODE_BUSY: - return -EAGAIN; - case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: - return -EOPNOTSUPP; - default: - return -EIO; - } -} - -static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 
msg_len, - int timeout, bool silent) -{ - int i, intr_process, rc, tmo_count; - struct input *req = msg; - u32 *data = msg; - u8 *valid; - u16 cp_ring_id, len = 0; - struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; - u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; - struct hwrm_short_input short_input = {0}; - u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; - u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; - u16 dst = BNXT_HWRM_CHNL_CHIMP; - - if (BNXT_NO_FW_ACCESS(bp) && - le16_to_cpu(req->req_type) != HWRM_FUNC_RESET) - return -EBUSY; - - if (msg_len > BNXT_HWRM_MAX_REQ_LEN) { - if (msg_len > bp->hwrm_max_ext_req_len || - !bp->hwrm_short_cmd_req_addr) - return -EINVAL; - } - - if (bnxt_hwrm_kong_chnl(bp, req)) { - dst = BNXT_HWRM_CHNL_KONG; - bar_offset = BNXT_GRCPF_REG_KONG_COMM; - doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; - resp = bp->hwrm_cmd_kong_resp_addr; - } - - memset(resp, 0, PAGE_SIZE); - cp_ring_id = le16_to_cpu(req->cmpl_ring); - intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; - - req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst)); - /* currently supports only one outstanding message */ - if (intr_process) - bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); - - if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || - msg_len > BNXT_HWRM_MAX_REQ_LEN) { - void *short_cmd_req = bp->hwrm_short_cmd_req_addr; - u16 max_msg_len; - - /* Set boundary for maximum extended request length for short - * cmd format. If passed up from device use the max supported - * internal req length. - */ - max_msg_len = bp->hwrm_max_ext_req_len; - - memcpy(short_cmd_req, req, msg_len); - if (msg_len < max_msg_len) - memset(short_cmd_req + msg_len, 0, - max_msg_len - msg_len); - - short_input.req_type = req->req_type; - short_input.signature = - cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD); - short_input.size = cpu_to_le16(msg_len); - short_input.req_addr = - cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr); - - data = (u32 *)&short_input; - msg_len = sizeof(short_input); - - /* Sync memory write before updating doorbell */ - wmb(); - - max_req_len = BNXT_HWRM_SHORT_REQ_LEN; - } - - /* Write request msg to hwrm channel */ - __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4); - - for (i = msg_len; i < max_req_len; i += 4) - writel(0, bp->bar0 + bar_offset + i); - - /* Ring channel doorbell */ - writel(1, bp->bar0 + doorbell_offset); - - if (!pci_is_enabled(bp->pdev)) - return -ENODEV; - - if (!timeout) - timeout = DFLT_HWRM_CMD_TIMEOUT; - /* Limit timeout to an upper limit */ - timeout = min(timeout, HWRM_CMD_MAX_TIMEOUT); - /* convert timeout to usec */ - timeout *= 1000; - - i = 0; - /* Short timeout for the first few iterations: - * number of loops = number of loops for short timeout + - * number of loops for standard timeout. - */ - tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; - timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; - tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); - - if (intr_process) { - u16 seq_id = bp->hwrm_intr_seq_id; - - /* Wait until hwrm response cmpl interrupt is processed */ - while (bp->hwrm_intr_seq_id != (u16)~seq_id && - i++ < tmo_count) { - /* Abort the wait for completion if the FW health - * check has failed. 
- */ - if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) - return -EBUSY; - /* on first few passes, just barely sleep */ - if (i < HWRM_SHORT_TIMEOUT_COUNTER) { - usleep_range(HWRM_SHORT_MIN_TIMEOUT, - HWRM_SHORT_MAX_TIMEOUT); - } else { - if (HWRM_WAIT_MUST_ABORT(bp, req)) - break; - usleep_range(HWRM_MIN_TIMEOUT, - HWRM_MAX_TIMEOUT); - } - } - - if (bp->hwrm_intr_seq_id != (u16)~seq_id) { - if (!silent) - netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", - le16_to_cpu(req->req_type)); - return -EBUSY; - } - len = le16_to_cpu(resp->resp_len); - valid = ((u8 *)resp) + len - 1; - } else { - int j; - - /* Check if response len is updated */ - for (i = 0; i < tmo_count; i++) { - /* Abort the wait for completion if the FW health - * check has failed. - */ - if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) - return -EBUSY; - len = le16_to_cpu(resp->resp_len); - if (len) - break; - /* on first few passes, just barely sleep */ - if (i < HWRM_SHORT_TIMEOUT_COUNTER) { - usleep_range(HWRM_SHORT_MIN_TIMEOUT, - HWRM_SHORT_MAX_TIMEOUT); - } else { - if (HWRM_WAIT_MUST_ABORT(bp, req)) - goto timeout_abort; - usleep_range(HWRM_MIN_TIMEOUT, - HWRM_MAX_TIMEOUT); - } - } - - if (i >= tmo_count) { -timeout_abort: - if (!silent) - netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", - HWRM_TOTAL_TIMEOUT(i), - le16_to_cpu(req->req_type), - le16_to_cpu(req->seq_id), len); - return -EBUSY; - } - - /* Last byte of resp contains valid bit */ - valid = ((u8 *)resp) + len - 1; - for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { - /* make sure we read from updated DMA memory */ - dma_rmb(); - if (*valid) - break; - usleep_range(1, 5); - } - - if (j >= HWRM_VALID_BIT_DELAY_USEC) { - if (!silent) - netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", - HWRM_TOTAL_TIMEOUT(i), - le16_to_cpu(req->req_type), - le16_to_cpu(req->seq_id), len, - *valid); - return -EBUSY; - } - } - - /* Zero valid bit for compatibility. Valid bit in an older spec - * may become a new field in a newer spec. We must make sure that - * a new field not implemented by old spec will read zero. 
- */ - *valid = 0; - rc = le16_to_cpu(resp->error_code); - if (rc && !silent) - netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", - le16_to_cpu(resp->req_type), - le16_to_cpu(resp->seq_id), rc); - return bnxt_hwrm_to_stderr(rc); -} - -int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) -{ - return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); -} - -int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, - int timeout) -{ - return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); -} - -int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) -{ - int rc; - - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, msg, msg_len, timeout); - mutex_unlock(&bp->hwrm_cmd_lock); - return rc; -} - -int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, - int timeout) -{ - int rc; - - mutex_lock(&bp->hwrm_cmd_lock); - rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); - mutex_unlock(&bp->hwrm_cmd_lock); - return rc; -} - int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, bool async_only) { - struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_drv_rgtr_input req = {0}; DECLARE_BITMAP(async_events_bmap, 256); u32 *events = (u32 *)async_events_bmap; + struct hwrm_func_drv_rgtr_output *resp; + struct hwrm_func_drv_rgtr_input *req; u32 flags; int rc, i; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR); + if (rc) + return rc; - req.enables = - cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | - FUNC_DRV_RGTR_REQ_ENABLES_VER | - FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); + req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | + FUNC_DRV_RGTR_REQ_ENABLES_VER | + FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); - req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); + req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; - req.flags = cpu_to_le32(flags); - req.ver_maj_8b = DRV_VER_MAJ; - req.ver_min_8b = DRV_VER_MIN; - req.ver_upd_8b = DRV_VER_UPD; - req.ver_maj = cpu_to_le16(DRV_VER_MAJ); - req.ver_min = cpu_to_le16(DRV_VER_MIN); - req.ver_upd = cpu_to_le16(DRV_VER_UPD); + req->flags = cpu_to_le32(flags); + req->ver_maj_8b = DRV_VER_MAJ; + req->ver_min_8b = DRV_VER_MIN; + req->ver_upd_8b = DRV_VER_UPD; + req->ver_maj = cpu_to_le16(DRV_VER_MAJ); + req->ver_min = cpu_to_le16(DRV_VER_MIN); + req->ver_upd = cpu_to_le16(DRV_VER_UPD); if (BNXT_PF(bp)) { u32 data[8]; @@ -4885,14 +4576,14 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, } for (i = 0; i < 8; i++) - req.vf_req_fwd[i] = cpu_to_le32(data[i]); + req->vf_req_fwd[i] = cpu_to_le32(data[i]); - req.enables |= + req->enables |= cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); } if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) - req.flags |= cpu_to_le32( + req->flags |= cpu_to_le32( FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); memset(async_events_bmap, 0, sizeof(async_events_bmap)); @@ -4911,57 +4602,63 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, } } for (i = 0; i < 8; i++) - req.async_event_fwd[i] |= cpu_to_le32(events[i]); + 
req->async_event_fwd[i] |= cpu_to_le32(events[i]); if (async_only) - req.enables = + req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); if (resp->flags & cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) { - struct hwrm_func_drv_unrgtr_input req = {0}; + struct hwrm_func_drv_unrgtr_input *req; + int rc; if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR); + if (rc) + return rc; + return hwrm_req_send(bp, req); } static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) { - u32 rc = 0; - struct hwrm_tunnel_dst_port_free_input req = {0}; + struct hwrm_tunnel_dst_port_free_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1); - req.tunnel_type = tunnel_type; + rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE); + if (rc) + return rc; + + req->tunnel_type = tunnel_type; switch (tunnel_type) { case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: - req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); + req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; break; case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: - req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); + req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; break; default: break; } - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc) netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n", rc); @@ -4971,17 +4668,19 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, u8 tunnel_type) { - u32 rc = 0; - struct hwrm_tunnel_dst_port_alloc_input req = {0}; - struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_tunnel_dst_port_alloc_output *resp; + struct hwrm_tunnel_dst_port_alloc_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC); + if (rc) + return rc; - req.tunnel_type = tunnel_type; - req.tunnel_dst_port_val = port; + req->tunnel_type = tunnel_type; + req->tunnel_dst_port_val = port; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. 
rc:%d\n", rc); @@ -5001,33 +4700,40 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, } err_out: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) { - struct hwrm_cfa_l2_set_rx_mask_input req = {0}; + struct hwrm_cfa_l2_set_rx_mask_input *req; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1); - req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); + rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK); + if (rc) + return rc; - req.num_mc_entries = cpu_to_le32(vnic->mc_list_count); - req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); - req.mask = cpu_to_le32(vnic->rx_mask); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); + req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); + req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); + req->mask = cpu_to_le32(vnic->rx_mask); + return hwrm_req_send_silent(bp, req); } #ifdef CONFIG_RFS_ACCEL static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) { - struct hwrm_cfa_ntuple_filter_free_input req = {0}; + struct hwrm_cfa_ntuple_filter_free_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1); - req.ntuple_filter_id = fltr->filter_id; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->ntuple_filter_id = fltr->filter_id; + return hwrm_req_send(bp, req); } #define BNXT_NTP_FLTR_FLAGS \ @@ -5052,69 +4758,70 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) { - struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; struct hwrm_cfa_ntuple_filter_alloc_output *resp; + struct hwrm_cfa_ntuple_filter_alloc_input *req; struct flow_keys *keys = &fltr->fkeys; struct bnxt_vnic_info *vnic; u32 flags = 0; - int rc = 0; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); - req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; + req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; - req.dst_id = cpu_to_le16(fltr->rxq); + req->dst_id = cpu_to_le16(fltr->rxq); } else { vnic = &bp->vnic_info[fltr->rxq + 1]; - req.dst_id = cpu_to_le16(vnic->fw_vnic_id); + req->dst_id = cpu_to_le16(vnic->fw_vnic_id); } - req.flags = cpu_to_le32(flags); - req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); + req->flags = cpu_to_le32(flags); + req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); - req.ethertype = htons(ETH_P_IP); - memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); - req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; - req.ip_protocol = keys->basic.ip_proto; + req->ethertype = htons(ETH_P_IP); + memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN); + req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; + req->ip_protocol = keys->basic.ip_proto; if (keys->basic.n_proto == htons(ETH_P_IPV6)) { int i; - req.ethertype = htons(ETH_P_IPV6); - req.ip_addr_type = + req->ethertype = htons(ETH_P_IPV6); + req->ip_addr_type = 
CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; - *(struct in6_addr *)&req.src_ipaddr[0] = + *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src; - *(struct in6_addr *)&req.dst_ipaddr[0] = + *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst; for (i = 0; i < 4; i++) { - req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); - req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); + req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); + req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); } } else { - req.src_ipaddr[0] = keys->addrs.v4addrs.src; - req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); - req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; - req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + req->src_ipaddr[0] = keys->addrs.v4addrs.src; + req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + req->dst_ipaddr[0] = keys->addrs.v4addrs.dst; + req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); } if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { - req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); - req.tunnel_type = + req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); + req->tunnel_type = CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; } - req.src_port = keys->ports.src; - req.src_port_mask = cpu_to_be16(0xffff); - req.dst_port = keys->ports.dst; - req.dst_port_mask = cpu_to_be16(0xffff); + req->src_port = keys->ports.src; + req->src_port_mask = cpu_to_be16(0xffff); + req->dst_port = keys->ports.dst; + req->dst_port_mask = cpu_to_be16(0xffff); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) { - resp = bnxt_get_hwrm_resp_addr(bp, &req); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) fltr->filter_id = resp->ntuple_filter_id; - } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } #endif @@ -5122,62 +4829,62 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, u8 *mac_addr) { - u32 rc = 0; - struct hwrm_cfa_l2_filter_alloc_input req = {0}; - struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_l2_filter_alloc_output *resp; + struct hwrm_cfa_l2_filter_alloc_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); - req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); + req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) - req.flags |= + req->flags |= cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); - req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); - req.enables = + req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); + req->enables = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); - memcpy(req.l2_addr, mac_addr, ETH_ALEN); - req.l2_addr_mask[0] = 0xff; - req.l2_addr_mask[1] = 0xff; - req.l2_addr_mask[2] = 0xff; - req.l2_addr_mask[3] = 0xff; - req.l2_addr_mask[4] = 0xff; - req.l2_addr_mask[5] = 0xff; - - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + memcpy(req->l2_addr, mac_addr, ETH_ALEN); + req->l2_addr_mask[0] = 0xff; + req->l2_addr_mask[1] = 0xff; + req->l2_addr_mask[2] = 0xff; + req->l2_addr_mask[3] = 0xff; + req->l2_addr_mask[4] = 0xff; + req->l2_addr_mask[5] = 0xff; + + resp = 
hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = resp->l2_filter_id; - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) { + struct hwrm_cfa_l2_filter_free_input *req; u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ - int rc = 0; + int rc; /* Any associated ntuple filters will also be cleared by firmware. */ - mutex_lock(&bp->hwrm_cmd_lock); + rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); + if (rc) + return rc; + hwrm_req_hold(bp, req); for (i = 0; i < num_of_vnics; i++) { struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; for (j = 0; j < vnic->uc_filter_count; j++) { - struct hwrm_cfa_l2_filter_free_input req = {0}; + req->l2_filter_id = vnic->fw_l2_filter_id[j]; - bnxt_hwrm_cmd_hdr_init(bp, &req, - HWRM_CFA_L2_FILTER_FREE, -1, -1); - - req.l2_filter_id = vnic->fw_l2_filter_id[j]; - - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); } vnic->uc_filter_count = 0; } - mutex_unlock(&bp->hwrm_cmd_lock); - + hwrm_req_drop(bp, req); return rc; } @@ -5185,12 +4892,15 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; - struct hwrm_vnic_tpa_cfg_input req = {0}; + struct hwrm_vnic_tpa_cfg_input *req; + int rc; if (vnic->fw_vnic_id == INVALID_HW_RING_ID) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG); + if (rc) + return rc; if (tpa_flags) { u16 mss = bp->dev->mtu - 40; @@ -5204,9 +4914,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) if (tpa_flags & BNXT_FLAG_GRO) flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; - req.flags = cpu_to_le32(flags); + req->flags = cpu_to_le32(flags); - req.enables = + req->enables = cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); @@ -5230,14 +4940,14 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) } else { segs = ilog2(nsegs); } - req.max_agg_segs = cpu_to_le16(segs); - req.max_aggs = cpu_to_le16(max_aggs); + req->max_agg_segs = cpu_to_le16(segs); + req->max_aggs = cpu_to_le16(max_aggs); - req.min_agg_len = cpu_to_le32(512); + req->min_agg_len = cpu_to_le32(512); } - req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); + req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) @@ -5381,86 +5091,102 @@ static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; - struct hwrm_vnic_rss_cfg_input req = {0}; + struct hwrm_vnic_rss_cfg_input *req; + int rc; if ((bp->flags & BNXT_FLAG_CHIP_P5) || vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); + if (rc) + return rc; + if (set_rss) { bnxt_fill_hw_rss_tbl(bp, vnic); - req.hash_type = cpu_to_le32(bp->rss_hash_cfg); - req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; - req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); - 
req.hash_key_tbl_addr = + req->hash_type = cpu_to_le32(bp->rss_hash_cfg); + req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; + req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); + req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); } - req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; - struct hwrm_vnic_rss_cfg_input req = {0}; + struct hwrm_vnic_rss_cfg_input *req; dma_addr_t ring_tbl_map; u32 i, nr_ctxs; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); + if (rc) + return rc; + + req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); + if (!set_rss) + return hwrm_req_send(bp, req); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); - req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); - if (!set_rss) { - hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return 0; - } bnxt_fill_hw_rss_tbl(bp, vnic); - req.hash_type = cpu_to_le32(bp->rss_hash_cfg); - req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; - req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); + req->hash_type = cpu_to_le32(bp->rss_hash_cfg); + req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; + req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); ring_tbl_map = vnic->rss_table_dma_addr; nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); - for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { - int rc; - req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); - req.ring_table_pair_index = i; - req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + hwrm_req_hold(bp, req); + for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { + req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); + req->ring_table_pair_index = i; + req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); + rc = hwrm_req_send(bp, req); if (rc) - return rc; + goto exit; } - return 0; + +exit: + hwrm_req_drop(bp, req); + return rc; } static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; - struct hwrm_vnic_plcmodes_cfg_input req = {0}; + struct hwrm_vnic_plcmodes_cfg_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1); - req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | - VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | - VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); - req.enables = + rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG); + if (rc) + return rc; + + req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | + VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | + VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); + req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID | VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); /* thresholds not implemented in firmware yet */ - req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); - req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh); - req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); + req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh); + 
req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); + return hwrm_req_send(bp, req); } static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) { - struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0}; + struct hwrm_vnic_rss_cos_lb_ctx_free_input *req; + + if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE)) + return; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1); - req.rss_cos_lb_ctx_id = + req->rss_cos_lb_ctx_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); - hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + hwrm_req_send(bp, req); bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; } @@ -5481,20 +5207,20 @@ static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) { + struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp; + struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req; int rc; - struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0}; - struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = - bp->hwrm_cmd_resp_addr; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1, - -1); + rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = le16_to_cpu(resp->rss_cos_lb_ctx_id); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -5508,47 +5234,50 @@ static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) { - unsigned int ring = 0, grp_idx; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; - struct hwrm_vnic_cfg_input req = {0}; + struct hwrm_vnic_cfg_input *req; + unsigned int ring = 0, grp_idx; u16 def_vlan = 0; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG); + if (rc) + return rc; if (bp->flags & BNXT_FLAG_CHIP_P5) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; - req.default_rx_ring_id = + req->default_rx_ring_id = cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); - req.default_cmpl_ring_id = + req->default_cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); - req.enables = + req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); goto vnic_mru; } - req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); + req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); /* Only RSS support for now TBD: COS & LB */ if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { - req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); - req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | + req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); + req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | VNIC_CFG_REQ_ENABLES_MRU); } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { - req.rss_rule = + req->rss_rule = cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); - req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | + req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | VNIC_CFG_REQ_ENABLES_MRU); - req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); + req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); } else { - req.rss_rule = cpu_to_le16(0xffff); + req->rss_rule = cpu_to_le16(0xffff); } if 
(BNXT_CHIP_TYPE_NITRO_A0(bp) && (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { - req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); - req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); + req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); + req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); } else { - req.cos_rule = cpu_to_le16(0xffff); + req->cos_rule = cpu_to_le16(0xffff); } if (vnic->flags & BNXT_VNIC_RSS_FLAG) @@ -5559,34 +5288,36 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) ring = bp->rx_nr_rings - 1; grp_idx = bp->rx_ring[ring].bnapi->index; - req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); - req.lb_rule = cpu_to_le16(0xffff); + req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); + req->lb_rule = cpu_to_le16(0xffff); vnic_mru: - req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); + req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); - req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); + req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); #ifdef CONFIG_BNXT_SRIOV if (BNXT_VF(bp)) def_vlan = bp->vf.vlan; #endif if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) - req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); + req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) - req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); + req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) { if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { - struct hwrm_vnic_free_input req = {0}; + struct hwrm_vnic_free_input *req; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1); - req.vnic_id = + if (hwrm_req_init(bp, req, HWRM_VNIC_FREE)) + return; + + req->vnic_id = cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); - hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + hwrm_req_send(bp, req); bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; } } @@ -5603,11 +5334,15 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, unsigned int start_rx_ring_idx, unsigned int nr_rings) { - int rc = 0; unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; - struct hwrm_vnic_alloc_input req = {0}; - struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + struct hwrm_vnic_alloc_output *resp; + struct hwrm_vnic_alloc_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC); + if (rc) + return rc; if (bp->flags & BNXT_FLAG_CHIP_P5) goto vnic_no_ring_grps; @@ -5627,22 +5362,20 @@ vnic_no_ring_grps: for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; if (vnic_id == 0) - req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); + req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1); - - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) { - struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_vnic_qcaps_input req = {0}; + struct hwrm_vnic_qcaps_output 
*resp; + struct hwrm_vnic_qcaps_input *req; int rc; bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); @@ -5650,9 +5383,12 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) if (bp->hwrm_spec_code < 0x10600) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { u32 flags = le32_to_cpu(resp->flags); @@ -5678,92 +5414,96 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2; } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) { + struct hwrm_ring_grp_alloc_output *resp; + struct hwrm_ring_grp_alloc_input *req; + int rc; u16 i; - u32 rc = 0; if (bp->flags & BNXT_FLAG_CHIP_P5) return 0; - mutex_lock(&bp->hwrm_cmd_lock); + rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); for (i = 0; i < bp->rx_nr_rings; i++) { - struct hwrm_ring_grp_alloc_input req = {0}; - struct hwrm_ring_grp_alloc_output *resp = - bp->hwrm_cmd_resp_addr; unsigned int grp_idx = bp->rx_ring[i].bnapi->index; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); + req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); + req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); + req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); + req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); - req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); - req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); - req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); - req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); + rc = hwrm_req_send(bp, req); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); if (rc) break; bp->grp_info[grp_idx].fw_grp_id = le32_to_cpu(resp->ring_group_id); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) { + struct hwrm_ring_grp_free_input *req; u16 i; - struct hwrm_ring_grp_free_input req = {0}; if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5)) return; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); + if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE)) + return; - mutex_lock(&bp->hwrm_cmd_lock); + hwrm_req_hold(bp, req); for (i = 0; i < bp->cp_nr_rings; i++) { if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) continue; - req.ring_group_id = + req->ring_group_id = cpu_to_le32(bp->grp_info[i].fw_grp_id); - _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + hwrm_req_send(bp, req); bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); } static int hwrm_ring_alloc_send_msg(struct bnxt *bp, struct bnxt_ring_struct *ring, u32 ring_type, u32 map_index) { - int rc = 0, err = 0; - struct hwrm_ring_alloc_input req = {0}; - struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_ring_alloc_output *resp; + struct hwrm_ring_alloc_input *req; struct bnxt_ring_mem_info *rmem = &ring->ring_mem; struct bnxt_ring_grp_info *grp_info; + int rc, err = 0; u16 ring_id; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC); + if (rc) + goto exit; - req.enables = 0; + req->enables = 
0; if (rmem->nr_pages > 1) { - req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); + req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); /* Page size is in log2 units */ - req.page_size = BNXT_PAGE_SHIFT; - req.page_tbl_depth = 1; + req->page_size = BNXT_PAGE_SHIFT; + req->page_tbl_depth = 1; } else { - req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); + req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); } - req.fbo = 0; + req->fbo = 0; /* Association of ring index with doorbell index and MSIX number */ - req.logical_id = cpu_to_le16(map_index); + req->logical_id = cpu_to_le16(map_index); switch (ring_type) { case HWRM_RING_ALLOC_TX: { @@ -5771,67 +5511,67 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, txr = container_of(ring, struct bnxt_tx_ring_info, tx_ring_struct); - req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; + req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX; /* Association of transmit ring with completion ring */ grp_info = &bp->grp_info[ring->grp_idx]; - req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); - req.length = cpu_to_le32(bp->tx_ring_mask + 1); - req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); - req.queue_id = cpu_to_le16(ring->queue_id); + req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); + req->length = cpu_to_le32(bp->tx_ring_mask + 1); + req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req->queue_id = cpu_to_le16(ring->queue_id); break; } case HWRM_RING_ALLOC_RX: - req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; - req.length = cpu_to_le32(bp->rx_ring_mask + 1); + req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; + req->length = cpu_to_le32(bp->rx_ring_mask + 1); if (bp->flags & BNXT_FLAG_CHIP_P5) { u16 flags = 0; /* Association of rx ring with stats context */ grp_info = &bp->grp_info[ring->grp_idx]; - req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); - req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); - req.enables |= cpu_to_le32( + req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); + req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req->enables |= cpu_to_le32( RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); if (NET_IP_ALIGN == 2) flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; - req.flags = cpu_to_le16(flags); + req->flags = cpu_to_le16(flags); } break; case HWRM_RING_ALLOC_AGG: if (bp->flags & BNXT_FLAG_CHIP_P5) { - req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; + req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; /* Association of agg ring with rx ring */ grp_info = &bp->grp_info[ring->grp_idx]; - req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); - req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); - req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); - req.enables |= cpu_to_le32( + req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); + req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); + req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req->enables |= cpu_to_le32( RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); } else { - req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; + req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; } - req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); + req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1); break; case HWRM_RING_ALLOC_CMPL: - req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; - req.length = cpu_to_le32(bp->cp_ring_mask + 1); + req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; + req->length = cpu_to_le32(bp->cp_ring_mask + 1); if (bp->flags & BNXT_FLAG_CHIP_P5) { /* Association of cp ring with nq */ grp_info = 
&bp->grp_info[map_index]; - req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); - req.cq_handle = cpu_to_le64(ring->handle); - req.enables |= cpu_to_le32( + req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); + req->cq_handle = cpu_to_le64(ring->handle); + req->enables |= cpu_to_le32( RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); } else if (bp->flags & BNXT_FLAG_USING_MSIX) { - req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; + req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; } break; case HWRM_RING_ALLOC_NQ: - req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; - req.length = cpu_to_le32(bp->cp_ring_mask + 1); + req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; + req->length = cpu_to_le32(bp->cp_ring_mask + 1); if (bp->flags & BNXT_FLAG_USING_MSIX) - req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; + req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; break; default: netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", @@ -5839,12 +5579,13 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, return -1; } - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); err = le16_to_cpu(resp->error_code); ring_id = le16_to_cpu(resp->ring_id); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); +exit: if (rc || err) { netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n", ring_type, rc, err); @@ -5859,23 +5600,28 @@ static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) int rc; if (BNXT_PF(bp)) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(0xffff); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); - req.async_event_cr = cpu_to_le16(idx); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->fid = cpu_to_le16(0xffff); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); + req->async_event_cr = cpu_to_le16(idx); + return hwrm_req_send(bp, req); } else { - struct hwrm_func_vf_cfg_input req = {0}; + struct hwrm_func_vf_cfg_input *req; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); - req.enables = + rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG); + if (rc) + return rc; + + req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); - req.async_event_cr = cpu_to_le16(idx); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->async_event_cr = cpu_to_le16(idx); + return hwrm_req_send(bp, req); } - return rc; } static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, @@ -6046,23 +5792,27 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp, struct bnxt_ring_struct *ring, u32 ring_type, int cmpl_ring_id) { + struct hwrm_ring_free_output *resp; + struct hwrm_ring_free_input *req; + u16 error_code = 0; int rc; - struct hwrm_ring_free_input req = {0}; - struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; - u16 error_code; if (BNXT_NO_FW_ACCESS(bp)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); - req.ring_type = ring_type; - req.ring_id = cpu_to_le16(ring->fw_ring_id); + rc = hwrm_req_init(bp, req, HWRM_RING_FREE); + if (rc) + goto exit; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - error_code = le16_to_cpu(resp->error_code); - mutex_unlock(&bp->hwrm_cmd_lock); + req->cmpl_ring = 
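When no response payload is needed, the hold/drop pair is skipped entirely, as in bnxt_hwrm_set_async_event_cr() above: a successful hwrm_req_send() also releases the request. A minimal send-only sketch using the same PF-branch field values (the function name is illustrative):

static int example_send_only(struct bnxt *bp, int idx)
{
	struct hwrm_func_cfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);
	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
	req->async_event_cr = cpu_to_le16(idx);
	/* not held, so the send consumes the request; no hwrm_req_drop() */
	return hwrm_req_send(bp, req);
}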
cpu_to_le16(cmpl_ring_id); + req->ring_type = ring_type; + req->ring_id = cpu_to_le16(ring->fw_ring_id); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + error_code = le16_to_cpu(resp->error_code); + hwrm_req_drop(bp, req); +exit: if (rc || error_code) { netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n", ring_type, rc, error_code); @@ -6177,20 +5927,23 @@ static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, static int bnxt_hwrm_get_rings(struct bnxt *bp) { - struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; int rc; if (bp->hwrm_spec_code < 0x10601) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); - req.fid = cpu_to_le16(0xffff); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -6224,39 +5977,45 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp) hw_resc->resv_cp_rings = cp; hw_resc->resv_stat_ctxs = stats; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return 0; } -/* Caller must hold bp->hwrm_cmd_lock */ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) { - struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; int rc; if (bp->hwrm_spec_code < 0x10601) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); - req.fid = cpu_to_le16(fid); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(fid); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) *tx_rings = le16_to_cpu(resp->alloc_tx_rings); + hwrm_req_drop(bp, req); return rc; } static bool bnxt_rfs_supported(struct bnxt *bp); -static void -__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, - int tx_rings, int rx_rings, int ring_grps, - int cp_rings, int stats, int vnics) +static struct hwrm_func_cfg_input * +__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, + int ring_grps, int cp_rings, int stats, int vnics) { + struct hwrm_func_cfg_input *req; u32 enables = 0; - bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1); + if (hwrm_req_init(bp, req, HWRM_FUNC_CFG)) + return NULL; + req->fid = cpu_to_le16(0xffff); enables |= tx_rings ? 
FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; req->num_tx_rings = cpu_to_le16(tx_rings); @@ -6297,17 +6056,19 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, req->num_vnics = cpu_to_le16(vnics); } req->enables = cpu_to_le32(enables); + return req; } -static void -__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, - struct hwrm_func_vf_cfg_input *req, int tx_rings, - int rx_rings, int ring_grps, int cp_rings, - int stats, int vnics) +static struct hwrm_func_vf_cfg_input * +__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, + int ring_grps, int cp_rings, int stats, int vnics) { + struct hwrm_func_vf_cfg_input *req; u32 enables = 0; - bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1); + if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG)) + return NULL; + enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; @@ -6339,21 +6100,27 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, req->num_vnics = cpu_to_le16(vnics); req->enables = cpu_to_le32(enables); + return req; } static int bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, int ring_grps, int cp_rings, int stats, int vnics) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; int rc; - __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); - if (!req.enables) + req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps, + cp_rings, stats, vnics); + if (!req) + return -ENOMEM; + + if (!req->enables) { + hwrm_req_drop(bp, req); return 0; + } - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc) return rc; @@ -6367,7 +6134,7 @@ static int bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, int ring_grps, int cp_rings, int stats, int vnics) { - struct hwrm_func_vf_cfg_input req = {0}; + struct hwrm_func_vf_cfg_input *req; int rc; if (!BNXT_NEW_RM(bp)) { @@ -6375,9 +6142,12 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, return 0; } - __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps, + cp_rings, stats, vnics); + if (!req) + return -ENOMEM; + + rc = hwrm_req_send(bp, req); if (rc) return rc; @@ -6578,14 +6348,14 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, int ring_grps, int cp_rings, int stats, int vnics) { - struct hwrm_func_vf_cfg_input req = {0}; + struct hwrm_func_vf_cfg_input *req; u32 flags; if (!BNXT_NEW_RM(bp)) return 0; - __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); + req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps, + cp_rings, stats, vnics); flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | @@ -6595,20 +6365,19 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, if (!(bp->flags & BNXT_FLAG_CHIP_P5)) flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; - req.flags = cpu_to_le32(flags); - return hwrm_send_message_silent(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->flags = cpu_to_le32(flags); + return hwrm_req_send_silent(bp, req); } static int bnxt_hwrm_check_pf_rings(struct bnxt 
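Note the shape change in the two helpers above: __bnxt_hwrm_reserve_pf_rings() and __bnxt_hwrm_reserve_vf_rings() now allocate and return the request (NULL when hwrm_req_init() fails) instead of filling a caller-owned struct, so ownership passes to the caller, and a request that is never sent must be dropped explicitly. A sketch of the caller-side contract (function name illustrative, ring-count arguments abbreviated):

static int example_reserve(struct bnxt *bp, int tx, int rx, int grps,
			   int cp, int stats, int vnics)
{
	struct hwrm_func_cfg_input *req;

	req = __bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grps, cp, stats, vnics);
	if (!req)
		return -ENOMEM;			/* helper's hwrm_req_init() failed */

	if (!req->enables) {
		hwrm_req_drop(bp, req);		/* unsent requests must be dropped */
		return 0;
	}
	return hwrm_req_send(bp, req);		/* the send releases the request */
}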
*bp, int tx_rings, int rx_rings, int ring_grps, int cp_rings, int stats, int vnics) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; u32 flags; - __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); + req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps, + cp_rings, stats, vnics); flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; if (BNXT_NEW_RM(bp)) { flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | @@ -6622,9 +6391,8 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; } - req.flags = cpu_to_le32(flags); - return hwrm_send_message_silent(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->flags = cpu_to_le32(flags); + return hwrm_req_send_silent(bp, req); } static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, @@ -6645,9 +6413,9 @@ static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) { - struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_coal_cap *coal_cap = &bp->coal_cap; - struct hwrm_ring_aggint_qcaps_input req = {0}; + struct hwrm_ring_aggint_qcaps_output *resp; + struct hwrm_ring_aggint_qcaps_input *req; int rc; coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; @@ -6663,9 +6431,11 @@ static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) if (bp->hwrm_spec_code < 0x10902) return; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS)) + return; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (!rc) { coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); coal_cap->nq_params = le32_to_cpu(resp->nq_params); @@ -6685,7 +6455,7 @@ static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) le16_to_cpu(resp->num_cmpl_aggr_int_max); coal_cap->timer_units = le16_to_cpu(resp->timer_units); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); } static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) @@ -6753,37 +6523,40 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp, req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES); } -/* Caller holds bp->hwrm_cmd_lock */ static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi, struct bnxt_coal *hw_coal) { - struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0}; + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_coal_cap *coal_cap = &bp->coal_cap; u32 nq_params = coal_cap->nq_params; u16 tmr; + int rc; if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, - -1, -1); - req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); - req.flags = + rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); + if (rc) + return rc; + + req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); + req->flags = cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ); tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); - req.int_lat_tmr_min = cpu_to_le16(tmr); - req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); - return _hwrm_send_message(bp, &req, sizeof(req), 
HWRM_CMD_TIMEOUT); + req->int_lat_tmr_min = cpu_to_le16(tmr); + req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); + return hwrm_req_send(bp, req); } int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) { - struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}; + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_coal coal; + int rc; /* Tick values in micro seconds. * 1 coal_buf x bufs_per_record = 1 completion record. @@ -6796,48 +6569,53 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) if (!bnapi->rx_ring) return -ENODEV; - bnxt_hwrm_cmd_hdr_init(bp, &req_rx, - HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); + rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); + if (rc) + return rc; - bnxt_hwrm_set_coal_params(bp, &coal, &req_rx); + bnxt_hwrm_set_coal_params(bp, &coal, req_rx); - req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); + req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); - return hwrm_send_message(bp, &req_rx, sizeof(req_rx), - HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req_rx); } int bnxt_hwrm_set_coal(struct bnxt *bp) { - int i, rc = 0; - struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}, - req_tx = {0}, *req; + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx, + *req; + int i, rc; + + rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req_rx, - HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); - bnxt_hwrm_cmd_hdr_init(bp, &req_tx, - HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); + rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); + if (rc) { + hwrm_req_drop(bp, req_rx); + return rc; + } - bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx); - bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx); + bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx); + bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx); - mutex_lock(&bp->hwrm_cmd_lock); + hwrm_req_hold(bp, req_rx); + hwrm_req_hold(bp, req_tx); for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_coal *hw_coal; u16 ring_id; - req = &req_rx; + req = req_rx; if (!bnapi->rx_ring) { ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); - req = &req_tx; + req = req_tx; } else { ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); } req->ring_id = cpu_to_le16(ring_id); - rc = _hwrm_send_message(bp, req, sizeof(*req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc) break; @@ -6845,11 +6623,10 @@ int bnxt_hwrm_set_coal(struct bnxt *bp) continue; if (bnapi->rx_ring && bnapi->tx_ring) { - req = &req_tx; + req = req_tx; ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); req->ring_id = cpu_to_le16(ring_id); - rc = _hwrm_send_message(bp, req, sizeof(*req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc) break; } @@ -6859,14 +6636,15 @@ int bnxt_hwrm_set_coal(struct bnxt *bp) hw_coal = &bp->tx_coal; __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req_rx); + hwrm_req_drop(bp, req_tx); return rc; } static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) { - struct hwrm_stat_ctx_clr_stats_input req0 = {0}; - struct hwrm_stat_ctx_free_input req = {0}; + struct hwrm_stat_ctx_clr_stats_input *req0 = NULL; + struct hwrm_stat_ctx_free_input *req; int i; if (!bp->bnapi) @@ -6875,53 +6653,60 @@ static void 
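bnxt_hwrm_set_coal() above also shows why hold/drop is more than a mutex replacement: two requests can be live at once, and a held request survives hwrm_req_send(), so it can be updated and re-sent once per ring. A condensed sketch of that loop (pick_ring_id() is a hypothetical stand-in for the bnxt_cp_ring_for_rx()/_tx() selection above):

static u16 pick_ring_id(struct bnxt *bp, int i);	/* hypothetical helper */

static int example_send_per_ring(struct bnxt *bp)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx, *req;
	int i, rc;

	rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
	if (rc)
		return rc;
	rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
	if (rc) {
		hwrm_req_drop(bp, req_rx);	/* we still own the first request */
		return rc;
	}

	hwrm_req_hold(bp, req_rx);		/* keep both alive across sends */
	hwrm_req_hold(bp, req_tx);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		req = bp->bnapi[i]->rx_ring ? req_rx : req_tx;
		req->ring_id = cpu_to_le16(pick_ring_id(bp, i));
		rc = hwrm_req_send(bp, req);	/* held, so reusable next pass */
		if (rc)
			break;
	}
	hwrm_req_drop(bp, req_rx);
	hwrm_req_drop(bp, req_tx);
	return rc;
}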
bnxt_hwrm_stat_ctx_free(struct bnxt *bp) if (BNXT_CHIP_TYPE_NITRO_A0(bp)) return; - bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); - - mutex_lock(&bp->hwrm_cmd_lock); + if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE)) + return; + if (BNXT_FW_MAJ(bp) <= 20) { + if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) { + hwrm_req_drop(bp, req); + return; + } + hwrm_req_hold(bp, req0); + } + hwrm_req_hold(bp, req); for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { - req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); - if (BNXT_FW_MAJ(bp) <= 20) { - req0.stat_ctx_id = req.stat_ctx_id; - _hwrm_send_message(bp, &req0, sizeof(req0), - HWRM_CMD_TIMEOUT); + req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); + if (req0) { + req0->stat_ctx_id = req->stat_ctx_id; + hwrm_req_send(bp, req0); } - _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + hwrm_req_send(bp, req); cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); + if (req0) + hwrm_req_drop(bp, req0); } static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) { - int rc = 0, i; - struct hwrm_stat_ctx_alloc_input req = {0}; - struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_stat_ctx_alloc_output *resp; + struct hwrm_stat_ctx_alloc_input *req; + int rc, i; if (BNXT_CHIP_TYPE_NITRO_A0(bp)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC); + if (rc) + return rc; - req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); - req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); + req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); + req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); - mutex_lock(&bp->hwrm_cmd_lock); + resp = hwrm_req_hold(bp, req); for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); + req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc) break; @@ -6929,22 +6714,25 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_func_qcfg(struct bnxt *bp) { - struct hwrm_func_qcfg_input req = {0}; - struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; u32 min_db_offset = 0; u16 flags; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); - req.fid = cpu_to_le16(0xffff); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto func_qcfg_exit; @@ -7004,7 +6792,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) bp->db_size = pci_resource_len(bp->pdev, 2); func_qcfg_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -7043,17 +6831,19 @@ static void 
bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx, static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) { - struct hwrm_func_backing_store_qcaps_input req = {0}; - struct hwrm_func_backing_store_qcaps_output *resp = - bp->hwrm_cmd_resp_addr; + struct hwrm_func_backing_store_qcaps_output *resp; + struct hwrm_func_backing_store_qcaps_input *req; int rc; if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (!rc) { struct bnxt_ctx_pg_info *ctx_pg; struct bnxt_ctx_mem_info *ctx; @@ -7118,7 +6908,7 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) rc = 0; } ctx_err: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -7149,15 +6939,17 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) { - struct hwrm_func_backing_store_cfg_input req = {0}; + struct hwrm_func_backing_store_cfg_input *req; struct bnxt_ctx_mem_info *ctx = bp->ctx; struct bnxt_ctx_pg_info *ctx_pg; - u32 req_len = sizeof(req); + void **__req = (void **)&req; + u32 req_len = sizeof(*req); __le32 *num_entries; __le64 *pg_dir; u32 flags = 0; u8 *pg_attr; u32 ena; + int rc; int i; if (!ctx) @@ -7165,90 +6957,93 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) if (req_len > bp->hwrm_max_ext_req_len) req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1); - req.enables = cpu_to_le32(enables); + rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len); + if (rc) + return rc; + req->enables = cpu_to_le32(enables); if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { ctx_pg = &ctx->qp_mem; - req.qp_num_entries = cpu_to_le32(ctx_pg->entries); - req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries); - req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries); - req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size); + req->qp_num_entries = cpu_to_le32(ctx_pg->entries); + req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries); + req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries); + req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.qpc_pg_size_qpc_lvl, - &req.qpc_page_dir); + &req->qpc_pg_size_qpc_lvl, + &req->qpc_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { ctx_pg = &ctx->srq_mem; - req.srq_num_entries = cpu_to_le32(ctx_pg->entries); - req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries); - req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size); + req->srq_num_entries = cpu_to_le32(ctx_pg->entries); + req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries); + req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.srq_pg_size_srq_lvl, - &req.srq_page_dir); + &req->srq_pg_size_srq_lvl, + &req->srq_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { ctx_pg = &ctx->cq_mem; - req.cq_num_entries = cpu_to_le32(ctx_pg->entries); - req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries); - req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size); - 
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl, - &req.cq_page_dir); + req->cq_num_entries = cpu_to_le32(ctx_pg->entries); + req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries); + req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req->cq_pg_size_cq_lvl, + &req->cq_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { ctx_pg = &ctx->vnic_mem; - req.vnic_num_vnic_entries = + req->vnic_num_vnic_entries = cpu_to_le16(ctx->vnic_max_vnic_entries); - req.vnic_num_ring_table_entries = + req->vnic_num_ring_table_entries = cpu_to_le16(ctx->vnic_max_ring_table_entries); - req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size); + req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.vnic_pg_size_vnic_lvl, - &req.vnic_page_dir); + &req->vnic_pg_size_vnic_lvl, + &req->vnic_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { ctx_pg = &ctx->stat_mem; - req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries); - req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size); + req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries); + req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.stat_pg_size_stat_lvl, - &req.stat_page_dir); + &req->stat_pg_size_stat_lvl, + &req->stat_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { ctx_pg = &ctx->mrav_mem; - req.mrav_num_entries = cpu_to_le32(ctx_pg->entries); + req->mrav_num_entries = cpu_to_le32(ctx_pg->entries); if (ctx->mrav_num_entries_units) flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; - req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); + req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.mrav_pg_size_mrav_lvl, - &req.mrav_page_dir); + &req->mrav_pg_size_mrav_lvl, + &req->mrav_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { ctx_pg = &ctx->tim_mem; - req.tim_num_entries = cpu_to_le32(ctx_pg->entries); - req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size); + req->tim_num_entries = cpu_to_le32(ctx_pg->entries); + req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.tim_pg_size_tim_lvl, - &req.tim_page_dir); + &req->tim_pg_size_tim_lvl, + &req->tim_page_dir); } - for (i = 0, num_entries = &req.tqm_sp_num_entries, - pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl, - pg_dir = &req.tqm_sp_page_dir, + for (i = 0, num_entries = &req->tqm_sp_num_entries, + pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl, + pg_dir = &req->tqm_sp_page_dir, ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP; i < BNXT_MAX_TQM_RINGS; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { if (!(enables & ena)) continue; - req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size); + req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size); ctx_pg = ctx->tqm_mem[i]; *num_entries = cpu_to_le32(ctx_pg->entries); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); } - req.flags = cpu_to_le32(flags); - return hwrm_send_message(bp, &req, req_len, HWRM_CMD_TIMEOUT); + req->flags = cpu_to_le32(flags); + return hwrm_req_send(bp, req); } static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, @@ -7528,17 +7323,18 @@ skip_rdma: int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) { - struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_resource_qcaps_input req = {0}; + struct 
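bnxt_hwrm_func_backing_store_cfg() above is the one conversion that cannot use the hwrm_req_init() macro as-is: when the firmware cannot accept the full-sized request, the request must be allocated at the shorter legacy length through __hwrm_req_init(), which the macro normally wraps. A sketch of just that step (field population elided):

static int example_variable_len_init(struct bnxt *bp)
{
	struct hwrm_func_backing_store_cfg_input *req;
	u32 req_len = sizeof(*req);
	int rc;

	if (req_len > bp->hwrm_max_ext_req_len)
		req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;	/* truncated request */

	rc = __hwrm_req_init(bp, (void **)&req,
			     HWRM_FUNC_BACKING_STORE_CFG, req_len);
	if (rc)
		return rc;
	/* ... fill only the fields that fit within req_len, as above ... */
	return hwrm_req_send(bp, req);
}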
hwrm_func_resource_qcaps_output *resp; + struct hwrm_func_resource_qcaps_input *req; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1); - req.fid = cpu_to_le16(0xffff); + rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (rc) goto hwrm_func_resc_qcaps_exit; @@ -7579,15 +7375,14 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; } hwrm_func_resc_qcaps_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } -/* bp->hwrm_cmd_lock already held. */ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) { - struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_port_mac_ptp_qcfg_input req = {0}; + struct hwrm_port_mac_ptp_qcfg_output *resp; + struct hwrm_port_mac_ptp_qcfg_input *req; struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; u8 flags; int rc; @@ -7597,21 +7392,27 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) goto no_ptp; } - req.port_id = cpu_to_le16(bp->pf.port_id); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_PTP_QCFG, -1, -1); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG); if (rc) goto no_ptp; + req->port_id = cpu_to_le16(bp->pf.port_id); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto exit; + flags = resp->flags; if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) { rc = -ENODEV; - goto no_ptp; + goto exit; } if (!ptp) { ptp = kzalloc(sizeof(*ptp), GFP_KERNEL); - if (!ptp) - return -ENOMEM; + if (!ptp) { + rc = -ENOMEM; + goto exit; + } ptp->bp = bp; bp->ptp_cfg = ptp; } @@ -7623,14 +7424,16 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER; } else { rc = -ENODEV; - goto no_ptp; + goto exit; } rc = bnxt_ptp_init(bp); + if (rc) + netdev_warn(bp->dev, "PTP initialization failed.\n"); +exit: + hwrm_req_drop(bp, req); if (!rc) return 0; - netdev_warn(bp->dev, "PTP initialization failed.\n"); - no_ptp: bnxt_ptp_clear(bp); kfree(ptp); @@ -7640,17 +7443,19 @@ no_ptp: static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) { - int rc = 0; - struct hwrm_func_qcaps_input req = {0}; - struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_qcaps_output *resp; + struct hwrm_func_qcaps_input *req; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; u32 flags, flags_ext; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); - req.fid = cpu_to_le16(0xffff); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto hwrm_func_qcaps_exit; @@ -7728,7 +7533,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) } hwrm_func_qcaps_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -7759,19 +7564,20 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) { - struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0}; struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; - int rc = 0; + 
struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req; u32 flags; + int rc; if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) return 0; - resp = bp->hwrm_cmd_resp_addr; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto hwrm_cfa_adv_qcaps_exit; @@ -7781,7 +7587,7 @@ static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; hwrm_cfa_adv_qcaps_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -7924,17 +7730,20 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp) static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) { - struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_fw_health *fw_health = bp->fw_health; - struct hwrm_error_recovery_qcfg_input req = {0}; + struct hwrm_error_recovery_qcfg_output *resp; + struct hwrm_error_recovery_qcfg_input *req; int rc, i; if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto err_recovery_out; fw_health->flags = le32_to_cpu(resp->flags); @@ -7976,7 +7785,7 @@ static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) resp->delay_after_reset[i]; } err_recovery_out: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); if (!rc) rc = bnxt_map_fw_health_regs(bp); if (rc) @@ -7986,12 +7795,16 @@ err_recovery_out: static int bnxt_hwrm_func_reset(struct bnxt *bp) { - struct hwrm_func_reset_input req = {0}; + struct hwrm_func_reset_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1); - req.enables = 0; + rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET); + if (rc) + return rc; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); + req->enables = 0; + hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT); + return hwrm_req_send(bp, req); } static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) @@ -8006,16 +7819,18 @@ static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) { - int rc = 0; - struct hwrm_queue_qportcfg_input req = {0}; - struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_queue_qportcfg_output *resp; + struct hwrm_queue_qportcfg_input *req; u8 i, j, *qptr; bool no_rdma; + int rc = 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto qportcfg_exit; @@ -8049,35 +7864,48 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) bp->max_lltc = bp->max_tc; qportcfg_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } -static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent) +static int bnxt_hwrm_poll(struct bnxt *bp) { - struct hwrm_ver_get_input req = {0}; + struct hwrm_ver_get_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, 
&req, HWRM_VER_GET, -1, -1); - req.hwrm_intf_maj = HWRM_VERSION_MAJOR; - req.hwrm_intf_min = HWRM_VERSION_MINOR; - req.hwrm_intf_upd = HWRM_VERSION_UPDATE; + rc = hwrm_req_init(bp, req, HWRM_VER_GET); + if (rc) + return rc; + + req->hwrm_intf_maj = HWRM_VERSION_MAJOR; + req->hwrm_intf_min = HWRM_VERSION_MINOR; + req->hwrm_intf_upd = HWRM_VERSION_UPDATE; - rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT, - silent); + hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT); + rc = hwrm_req_send(bp, req); return rc; } static int bnxt_hwrm_ver_get(struct bnxt *bp) { - struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_ver_get_output *resp; + struct hwrm_ver_get_input *req; u16 fw_maj, fw_min, fw_bld, fw_rsv; u32 dev_caps_cfg, hwrm_ver; int rc, len; + rc = hwrm_req_init(bp, req, HWRM_VER_GET); + if (rc) + return rc; + + hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; - mutex_lock(&bp->hwrm_cmd_lock); - rc = __bnxt_hwrm_ver_get(bp, false); + req->hwrm_intf_maj = HWRM_VERSION_MAJOR; + req->hwrm_intf_min = HWRM_VERSION_MINOR; + req->hwrm_intf_upd = HWRM_VERSION_UPDATE; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto hwrm_ver_get_exit; @@ -8169,29 +7997,33 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; hwrm_ver_get_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } int bnxt_hwrm_fw_set_time(struct bnxt *bp) { - struct hwrm_fw_set_time_input req = {0}; + struct hwrm_fw_set_time_input *req; struct tm tm; time64_t now = ktime_get_real_seconds(); + int rc; if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || bp->hwrm_spec_code < 0x10400) return -EOPNOTSUPP; time64_to_tm(now, 0, &tm); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1); - req.year = cpu_to_le16(1900 + tm.tm_year); - req.month = 1 + tm.tm_mon; - req.day = tm.tm_mday; - req.hour = tm.tm_hour; - req.minute = tm.tm_min; - req.second = tm.tm_sec; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME); + if (rc) + return rc; + + req->year = cpu_to_le16(1900 + tm.tm_year); + req->month = 1 + tm.tm_mon; + req->day = tm.tm_mday; + req->hour = tm.tm_hour; + req->minute = tm.tm_min; + req->second = tm.tm_sec; + return hwrm_req_send(bp, req); } static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) @@ -8279,8 +8111,9 @@ static void bnxt_accumulate_all_stats(struct bnxt *bp) static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) { + struct hwrm_port_qstats_input *req; struct bnxt_pf_info *pf = &bp->pf; - struct hwrm_port_qstats_input req = {0}; + int rc; if (!(bp->flags & BNXT_FLAG_PORT_STATS)) return 0; @@ -8288,20 +8121,24 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) return -EOPNOTSUPP; - req.flags = flags; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); - req.port_id = cpu_to_le16(pf->port_id); - req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + + rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS); + if (rc) + return rc; + + req->flags = flags; + req->port_id = cpu_to_le16(pf->port_id); + req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + BNXT_TX_PORT_STATS_BYTE_OFFSET); - req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->rx_stat_host_addr = 
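Two per-request knobs replace arguments of the old send helpers, both visible just above: hwrm_req_timeout() stands in for passing HWRM_RESET_TIMEOUT to hwrm_send_message() (see bnxt_hwrm_func_reset()), and hwrm_req_flags() takes over the silent and full-wait send variants (see bnxt_hwrm_poll()). Combined into one illustrative sketch:

static int example_timeout_and_flags(struct bnxt *bp)
{
	struct hwrm_func_reset_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
	if (rc)
		return rc;

	hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);	/* non-default timeout */
	hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT |	/* don't log failure */
				BNXT_HWRM_FULL_WAIT);	/* wait the full timeout */
	return hwrm_req_send(bp, req);
}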
cpu_to_le64(bp->port_stats.hw_stats_map); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) { - struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_queue_pri2cos_qcfg_input req2 = {0}; - struct hwrm_port_qstats_ext_input req = {0}; + struct hwrm_queue_pri2cos_qcfg_output *resp_qc; + struct hwrm_queue_pri2cos_qcfg_input *req_qc; + struct hwrm_port_qstats_ext_output *resp_qs; + struct hwrm_port_qstats_ext_input *req_qs; struct bnxt_pf_info *pf = &bp->pf; u32 tx_stat_size; int rc; @@ -8312,46 +8149,53 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) return -EOPNOTSUPP; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1); - req.flags = flags; - req.port_id = cpu_to_le16(pf->port_id); - req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); - req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); + rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT); + if (rc) + return rc; + + req_qs->flags = flags; + req_qs->port_id = cpu_to_le16(pf->port_id); + req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); + req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); tx_stat_size = bp->tx_port_stats_ext.hw_stats ? sizeof(struct tx_port_stats_ext) : 0; - req.tx_stat_size = cpu_to_le16(tx_stat_size); - req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req_qs->tx_stat_size = cpu_to_le16(tx_stat_size); + req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); + resp_qs = hwrm_req_hold(bp, req_qs); + rc = hwrm_req_send(bp, req_qs); if (!rc) { - bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8; + bp->fw_rx_stats_ext_size = + le16_to_cpu(resp_qs->rx_stat_size) / 8; bp->fw_tx_stats_ext_size = tx_stat_size ? 
- le16_to_cpu(resp->tx_stat_size) / 8 : 0; + le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0; } else { bp->fw_rx_stats_ext_size = 0; bp->fw_tx_stats_ext_size = 0; } + hwrm_req_drop(bp, req_qs); + if (flags) - goto qstats_done; + return rc; if (bp->fw_tx_stats_ext_size <= offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { - mutex_unlock(&bp->hwrm_cmd_lock); bp->pri2cos_valid = 0; return rc; } - bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); - req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); + rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG); + if (rc) + return rc; + + req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); - rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT); + resp_qc = hwrm_req_hold(bp, req_qc); + rc = hwrm_req_send(bp, req_qc); if (!rc) { - struct hwrm_queue_pri2cos_qcfg_output *resp2; u8 *pri2cos; int i, j; - resp2 = bp->hwrm_cmd_resp_addr; - pri2cos = &resp2->pri0_cos_queue_id; + pri2cos = &resp_qc->pri0_cos_queue_id; for (i = 0; i < 8; i++) { u8 queue_id = pri2cos[i]; u8 queue_idx; @@ -8360,17 +8204,18 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) queue_idx = queue_id % 10; if (queue_idx > BNXT_MAX_QUEUE) { bp->pri2cos_valid = false; - goto qstats_done; + hwrm_req_drop(bp, req_qc); + return rc; } for (j = 0; j < bp->max_q; j++) { if (bp->q_ids[j] == queue_id) bp->pri2cos_idx[i] = queue_idx; } } - bp->pri2cos_valid = 1; + bp->pri2cos_valid = true; } -qstats_done: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req_qc); + return rc; } @@ -8445,35 +8290,46 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; + u8 evb_mode; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(0xffff); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); if (br_mode == BRIDGE_MODE_VEB) - req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; + evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; else if (br_mode == BRIDGE_MODE_VEPA) - req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; + evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; else return -EINVAL; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); + req->evb_mode = evb_mode; + return hwrm_req_send(bp, req); } static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; + int rc; if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(0xffff); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); - req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); + req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; if (size == 128) - req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; + req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) @@ -9421,18 +9277,20 @@ 
static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp) static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) { - int rc = 0; - struct hwrm_port_phy_qcaps_input req = {0}; - struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_link_info *link_info = &bp->link_info; + struct hwrm_port_phy_qcaps_output *resp; + struct hwrm_port_phy_qcaps_input *req; + int rc = 0; if (bp->hwrm_spec_code < 0x10201) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto hwrm_phy_qcaps_exit; @@ -9470,7 +9328,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) bp->port_count = resp->port_cnt; hwrm_phy_qcaps_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -9483,19 +9341,21 @@ static bool bnxt_support_dropped(u16 advertising, u16 supported) int bnxt_update_link(struct bnxt *bp, bool chng_link_state) { - int rc = 0; struct bnxt_link_info *link_info = &bp->link_info; - struct hwrm_port_phy_qcfg_input req = {0}; - struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_port_phy_qcfg_output *resp; + struct hwrm_port_phy_qcfg_input *req; u8 link_up = link_info->link_up; bool support_changed = false; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -9590,7 +9450,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state) /* always link down if not required to update link state */ link_info->link_up = 0; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); if (!BNXT_PHY_CFG_ABLE(bp)) return 0; @@ -9700,18 +9560,20 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_ int bnxt_hwrm_set_pause(struct bnxt *bp) { - struct hwrm_port_phy_cfg_input req = {0}; + struct hwrm_port_phy_cfg_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); - bnxt_hwrm_set_pause_common(bp, &req); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return rc; + + bnxt_hwrm_set_pause_common(bp, req); if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || bp->link_info.force_link_chng) - bnxt_hwrm_set_link_common(bp, &req); + bnxt_hwrm_set_link_common(bp, req); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { /* since changing of pause setting doesn't trigger any link * change event, the driver needs to update the current pause @@ -9724,7 +9586,6 @@ int bnxt_hwrm_set_pause(struct bnxt *bp) bnxt_report_link(bp); } bp->link_info.force_link_chng = false; - mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -9753,22 +9614,27 @@ static void bnxt_hwrm_set_eee(struct bnxt *bp, int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) { - struct hwrm_port_phy_cfg_input req = {0}; + struct hwrm_port_phy_cfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return
rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); if (set_pause) - bnxt_hwrm_set_pause_common(bp, &req); + bnxt_hwrm_set_pause_common(bp, req); - bnxt_hwrm_set_link_common(bp, &req); + bnxt_hwrm_set_link_common(bp, req); if (set_eee) - bnxt_hwrm_set_eee(bp, &req); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + bnxt_hwrm_set_eee(bp, req); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_shutdown_link(struct bnxt *bp) { - struct hwrm_port_phy_cfg_input req = {0}; + struct hwrm_port_phy_cfg_input *req; + int rc; if (!BNXT_SINGLE_PF(bp)) return 0; @@ -9777,9 +9643,12 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp) !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); - req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return rc; + + req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); + return hwrm_req_send(bp, req); } static int bnxt_fw_init_one(struct bnxt *bp); @@ -9805,16 +9674,14 @@ static int bnxt_try_recover_fw(struct bnxt *bp) int retry = 0, rc; u32 sts; - mutex_lock(&bp->hwrm_cmd_lock); do { sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); - rc = __bnxt_hwrm_ver_get(bp, true); + rc = bnxt_hwrm_poll(bp); if (!BNXT_FW_IS_BOOTING(sts) && !BNXT_FW_IS_RECOVERING(sts)) break; retry++; } while (rc == -EBUSY && retry < BNXT_FW_RETRY); - mutex_unlock(&bp->hwrm_cmd_lock); if (!BNXT_FW_IS_HEALTHY(sts)) { netdev_err(bp->dev, @@ -9834,8 +9701,8 @@ static int bnxt_try_recover_fw(struct bnxt *bp) static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) { - struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_drv_if_change_input req = {0}; + struct hwrm_func_drv_if_change_output *resp; + struct hwrm_func_drv_if_change_input *req; bool fw_reset = !bp->irq_tbl; bool resc_reinit = false; int rc, retry = 0; @@ -9844,29 +9711,34 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE); + if (rc) + return rc; + if (up) - req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); - mutex_lock(&bp->hwrm_cmd_lock); + req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); + resp = hwrm_req_hold(bp, req); + + hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); while (retry < BNXT_FW_IF_RETRY) { - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc != -EAGAIN) break; msleep(50); retry++; } - if (!rc) - flags = le32_to_cpu(resp->flags); - mutex_unlock(&bp->hwrm_cmd_lock); - if (rc == -EAGAIN) + if (rc == -EAGAIN) { + hwrm_req_drop(bp, req); return rc; - if (rc && up) { + } else if (!rc) { + flags = le32_to_cpu(resp->flags); + } else if (up) { rc = bnxt_try_recover_fw(bp); fw_reset = true; } + hwrm_req_drop(bp, req); if (rc) return rc; @@ -9935,8 +9807,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) { - struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_port_led_qcaps_input req = {0}; + struct hwrm_port_led_qcaps_output *resp; + struct hwrm_port_led_qcaps_input *req; struct bnxt_pf_info *pf = &bp->pf; int rc; @@ -9944,12 +9816,15 @@ static int bnxt_hwrm_port_led_qcaps(struct bnxt 
*bp) if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1); - req.port_id = cpu_to_le16(pf->port_id); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS); + if (rc) + return rc; + + req->port_id = cpu_to_le16(pf->port_id); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { @@ -9969,52 +9844,64 @@ static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) } } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return 0; } int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) { - struct hwrm_wol_filter_alloc_input req = {0}; - struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_wol_filter_alloc_output *resp; + struct hwrm_wol_filter_alloc_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1); - req.port_id = cpu_to_le16(bp->pf.port_id); - req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; - req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); - memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; + req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); + memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) bp->wol_filter_id = resp->wol_filter_id; - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) { - struct hwrm_wol_filter_free_input req = {0}; + struct hwrm_wol_filter_free_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1); - req.port_id = cpu_to_le16(bp->pf.port_id); - req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); - req.wol_filter_id = bp->wol_filter_id; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->port_id = cpu_to_le16(bp->pf.port_id); + req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); + req->wol_filter_id = bp->wol_filter_id; + + return hwrm_req_send(bp, req); } static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) { - struct hwrm_wol_filter_qcfg_input req = {0}; - struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_wol_filter_qcfg_output *resp; + struct hwrm_wol_filter_qcfg_input *req; u16 next_handle = 0; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1); - req.port_id = cpu_to_le16(bp->pf.port_id); - req.handle = cpu_to_le16(handle); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + req->handle = cpu_to_le16(handle); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { next_handle = le16_to_cpu(resp->next_handle); if (next_handle != 0) { @@ -10025,7 +9912,7 @@ static u16 bnxt_hwrm_get_wol_fltrs(struct 
bnxt *bp, u16 handle) } } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return next_handle; } @@ -10046,19 +9933,20 @@ static void bnxt_get_wol_settings(struct bnxt *bp) static ssize_t bnxt_show_temp(struct device *dev, struct device_attribute *devattr, char *buf) { - struct hwrm_temp_monitor_query_input req = {0}; struct hwrm_temp_monitor_query_output *resp; + struct hwrm_temp_monitor_query_input *req; struct bnxt *bp = dev_get_drvdata(dev); u32 len = 0; int rc; - resp = bp->hwrm_cmd_resp_addr; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY); + if (rc) + return rc; + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */ - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); if (rc) return rc; return len; @@ -10081,12 +9969,13 @@ static void bnxt_hwmon_close(struct bnxt *bp) static void bnxt_hwmon_open(struct bnxt *bp) { - struct hwrm_temp_monitor_query_input req = {0}; + struct hwrm_temp_monitor_query_input *req; struct pci_dev *pdev = bp->pdev; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); - rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY); + if (!rc) + rc = hwrm_req_send_silent(bp, req); if (rc == -EACCES || rc == -EOPNOTSUPP) { bnxt_hwmon_close(bp); return; @@ -10311,7 +10200,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) bnxt_tx_enable(bp); mod_timer(&bp->timer, jiffies + bp->current_interval); /* Poll link status and check for SFP+ module status */ + mutex_lock(&bp->link_lock); bnxt_get_port_module_status(bp); + mutex_unlock(&bp->link_lock); /* VF-reps may need to be re-opened after the PF is re-opened */ if (BNXT_PF(bp)) @@ -10521,53 +10412,60 @@ static int bnxt_close(struct net_device *dev) static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, u16 *val) { - struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_port_phy_mdio_read_input req = {0}; + struct hwrm_port_phy_mdio_read_output *resp; + struct hwrm_port_phy_mdio_read_input *req; int rc; if (bp->hwrm_spec_code < 0x10a00) return -EOPNOTSUPP; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1); - req.port_id = cpu_to_le16(bp->pf.port_id); - req.phy_addr = phy_addr; - req.reg_addr = cpu_to_le16(reg & 0x1f); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + req->phy_addr = phy_addr; + req->reg_addr = cpu_to_le16(reg & 0x1f); if (mdio_phy_id_is_c45(phy_addr)) { - req.cl45_mdio = 1; - req.phy_addr = mdio_phy_id_prtad(phy_addr); - req.dev_addr = mdio_phy_id_devad(phy_addr); - req.reg_addr = cpu_to_le16(reg); + req->cl45_mdio = 1; + req->phy_addr = mdio_phy_id_prtad(phy_addr); + req->dev_addr = mdio_phy_id_devad(phy_addr); + req->reg_addr = cpu_to_le16(reg); } - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) *val = le16_to_cpu(resp->reg_data); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, u16 val) { - struct 
hwrm_port_phy_mdio_write_input req = {0}; + struct hwrm_port_phy_mdio_write_input *req; + int rc; if (bp->hwrm_spec_code < 0x10a00) return -EOPNOTSUPP; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1); - req.port_id = cpu_to_le16(bp->pf.port_id); - req.phy_addr = phy_addr; - req.reg_addr = cpu_to_le16(reg & 0x1f); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + req->phy_addr = phy_addr; + req->reg_addr = cpu_to_le16(reg & 0x1f); if (mdio_phy_id_is_c45(phy_addr)) { - req.cl45_mdio = 1; - req.phy_addr = mdio_phy_id_prtad(phy_addr); - req.dev_addr = mdio_phy_id_devad(phy_addr); - req.reg_addr = cpu_to_le16(reg); + req->cl45_mdio = 1; + req->phy_addr = mdio_phy_id_prtad(phy_addr); + req->dev_addr = mdio_phy_id_devad(phy_addr); + req->reg_addr = cpu_to_le16(reg); } - req.reg_data = cpu_to_le16(val); + req->reg_data = cpu_to_le16(val); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } /* rtnl_lock held */ @@ -10646,6 +10544,10 @@ static void bnxt_get_ring_stats(struct bnxt *bp, stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); + + stats->rx_dropped += + cpr->sw_stats.rx.rx_netpoll_discards + + cpr->sw_stats.rx.rx_oom_discards; } } @@ -10660,6 +10562,7 @@ static void bnxt_add_prev_stats(struct bnxt *bp, stats->tx_bytes += prev_stats->tx_bytes; stats->rx_missed_errors += prev_stats->rx_missed_errors; stats->multicast += prev_stats->multicast; + stats->rx_dropped += prev_stats->rx_dropped; stats->tx_dropped += prev_stats->tx_dropped; } @@ -10804,6 +10707,7 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp) { struct net_device *dev = bp->dev; struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct hwrm_cfa_l2_filter_free_input *req; struct netdev_hw_addr *ha; int i, off = 0, rc; bool uc_update; @@ -10815,19 +10719,16 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp) if (!uc_update) goto skip_uc; - mutex_lock(&bp->hwrm_cmd_lock); + rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); + if (rc) + return rc; + hwrm_req_hold(bp, req); for (i = 1; i < vnic->uc_filter_count; i++) { - struct hwrm_cfa_l2_filter_free_input req = {0}; - - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1, - -1); + req->l2_filter_id = vnic->fw_l2_filter_id[i]; - req.l2_filter_id = vnic->fw_l2_filter_id[i]; - - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); vnic->uc_filter_count = 1; @@ -11179,22 +11080,30 @@ static netdev_features_t bnxt_features_check(struct sk_buff *skb, int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, u32 *reg_buf) { - struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_dbg_read_direct_input req = {0}; + struct hwrm_dbg_read_direct_output *resp; + struct hwrm_dbg_read_direct_input *req; __le32 *dbg_reg_buf; dma_addr_t mapping; int rc, i; - dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4, - &mapping, GFP_KERNEL); - if (!dbg_reg_buf) - return -ENOMEM; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1); - req.host_dest_addr = cpu_to_le64(mapping); - req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); - req.read_len32 = cpu_to_le32(num_words); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, 
HWRM_DBG_READ_DIRECT); + if (rc) + return rc; + + dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4, + &mapping); + if (!dbg_reg_buf) { + rc = -ENOMEM; + goto dbg_rd_reg_exit; + } + + req->host_dest_addr = cpu_to_le64(mapping); + + resp = hwrm_req_hold(bp, req); + req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); + req->read_len32 = cpu_to_le32(num_words); + + rc = hwrm_req_send(bp, req); if (rc || resp->error_code) { rc = -EIO; goto dbg_rd_reg_exit; @@ -11203,28 +11112,30 @@ int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]); dbg_rd_reg_exit: - mutex_unlock(&bp->hwrm_cmd_lock); - dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping); + hwrm_req_drop(bp, req); return rc; } static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, u32 ring_id, u32 *prod, u32 *cons) { - struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_dbg_ring_info_get_input req = {0}; + struct hwrm_dbg_ring_info_get_output *resp; + struct hwrm_dbg_ring_info_get_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1); - req.ring_type = ring_type; - req.fw_ring_id = cpu_to_le32(ring_id); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET); + if (rc) + return rc; + + req->ring_type = ring_type; + req->fw_ring_id = cpu_to_le32(ring_id); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { *prod = le32_to_cpu(resp->producer_index); *cons = le32_to_cpu(resp->consumer_index); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -11282,18 +11193,22 @@ static void bnxt_dbg_dump_states(struct bnxt *bp) static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; - struct hwrm_ring_reset_input req = {0}; + struct hwrm_ring_reset_input *req; struct bnxt_napi *bnapi = rxr->bnapi; struct bnxt_cp_ring_info *cpr; u16 cp_ring_id; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_RING_RESET); + if (rc) + return rc; cpr = &bnapi->cp_ring; cp_ring_id = cpr->cp_ring_struct.fw_ring_id; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1); - req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; - req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); - return hwrm_send_message_silent(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->cmpl_ring = cpu_to_le16(cp_ring_id); + req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; + req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); + return hwrm_req_send_silent(bp, req); } static void bnxt_reset_task(struct bnxt *bp, bool silent) @@ -11722,12 +11637,15 @@ static void bnxt_init_ethtool_link_settings(struct bnxt *bp) static void bnxt_fw_echo_reply(struct bnxt *bp) { struct bnxt_fw_health *fw_health = bp->fw_health; - struct hwrm_func_echo_response_input req = {0}; + struct hwrm_func_echo_response_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_ECHO_RESPONSE, -1, -1); - req.event_data1 = cpu_to_le32(fw_health->echo_req_data1); - req.event_data2 = cpu_to_le32(fw_health->echo_req_data2); - hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE); + if (rc) + return; + req->event_data1 = cpu_to_le32(fw_health->echo_req_data1); + req->event_data2 = cpu_to_le32(fw_health->echo_req_data2); + hwrm_req_send(bp, req); } 
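Every function converted above follows the same shape: the old fixed-buffer scheme (bnxt_hwrm_cmd_hdr_init() on a stack struct, hwrm_send_message() or _hwrm_send_message() under bp->hwrm_cmd_lock) becomes the managed-request API. A minimal sketch of the new pattern, modelled on the converted bnxt_show_temp() above — the temp-monitor request/response structs are taken from the diff, but this wrapper function itself is hypothetical:

	static int bnxt_hwrm_query_temp(struct bnxt *bp, u8 *temp)
	{
		struct hwrm_temp_monitor_query_output *resp;
		struct hwrm_temp_monitor_query_input *req;
		int rc;

		rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
		if (rc)
			return rc;		/* no request was allocated */

		resp = hwrm_req_hold(bp, req);	/* keep resp valid past send */
		rc = hwrm_req_send(bp, req);	/* send consumes req unless held */
		if (!rc)
			*temp = resp->temp;
		hwrm_req_drop(bp, req);		/* release the hold */
		return rc;
	}

The hold/drop pair replaces the old hwrm_cmd_lock critical section around response access; callers that never read the response skip both and let hwrm_req_send() release the request, which is why the send-only conversions above need no drop on the success path.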
static void bnxt_sp_task(struct work_struct *work) @@ -11932,18 +11850,6 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp) return rc; } - if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) { - rc = bnxt_alloc_kong_hwrm_resources(bp); - if (rc) - bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL; - } - - if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || - bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { - rc = bnxt_alloc_hwrm_short_cmd_req(bp); - if (rc) - return rc; - } bnxt_nvm_cfg_ver_get(bp); rc = bnxt_hwrm_func_reset(bp); @@ -12118,14 +12024,16 @@ static void bnxt_reset_all(struct bnxt *bp) for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) bnxt_fw_reset_writel(bp, i); } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { - struct hwrm_fw_reset_input req = {0}; - - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); - req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); - req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; - req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; - req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + struct hwrm_fw_reset_input *req; + + rc = hwrm_req_init(bp, req, HWRM_FW_RESET); + if (!rc) { + req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG); + req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; + req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; + req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; + rc = hwrm_req_send(bp, req); + } if (rc != -ENODEV) netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); } @@ -12252,7 +12160,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) fallthrough; case BNXT_FW_RESET_STATE_POLL_FW: bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; - rc = __bnxt_hwrm_ver_get(bp, true); + rc = bnxt_hwrm_poll(bp); if (rc) { if (bnxt_fw_reset_timeout(bp)) { netdev_err(bp->dev, "Firmware reset aborted\n"); @@ -12872,7 +12780,6 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_clear_int_mode(bp); bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_resources(bp); - bnxt_free_hwrm_short_cmd_req(bp); bnxt_ethtool_free(bp); bnxt_dcb_free(bp); kfree(bp->edev); @@ -12910,8 +12817,10 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) if (!fw_dflt) return 0; + mutex_lock(&bp->link_lock); rc = bnxt_update_link(bp, false); if (rc) { + mutex_unlock(&bp->link_lock); netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", rc); return rc; @@ -12924,6 +12833,7 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) link_info->support_auto_speeds = link_info->support_speeds; bnxt_init_ethtool_link_settings(bp); + mutex_unlock(&bp->link_lock); return 0; } @@ -13472,7 +13382,6 @@ init_err_cleanup: init_err_pci_clean: bnxt_hwrm_func_drv_unrgtr(bp); - bnxt_free_hwrm_short_cmd_req(bp); bnxt_free_hwrm_resources(bp); bnxt_ethtool_free(bp); bnxt_ptp_clear(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 7b989b6e4f6e..a8212dcdad5f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -669,37 +669,7 @@ struct nqe_cn { #define RING_CMP(idx) ((idx) & bp->cp_ring_mask) #define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1)) -#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len) -#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input) #define DFLT_HWRM_CMD_TIMEOUT 500 -#define HWRM_CMD_MAX_TIMEOUT 40000 -#define SHORT_HWRM_CMD_TIMEOUT 20 -#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) -#define 
HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) -#define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12) -#define BNXT_HWRM_REQ_MAX_SIZE 128 -#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ - BNXT_HWRM_REQ_MAX_SIZE) -#define HWRM_SHORT_MIN_TIMEOUT 3 -#define HWRM_SHORT_MAX_TIMEOUT 10 -#define HWRM_SHORT_TIMEOUT_COUNTER 5 - -#define HWRM_MIN_TIMEOUT 25 -#define HWRM_MAX_TIMEOUT 40 - -#define HWRM_WAIT_MUST_ABORT(bp, req) \ - (le16_to_cpu((req)->req_type) != HWRM_VER_GET && \ - !bnxt_is_fw_healthy(bp)) - -#define HWRM_TOTAL_TIMEOUT(n) (((n) <= HWRM_SHORT_TIMEOUT_COUNTER) ? \ - ((n) * HWRM_SHORT_MIN_TIMEOUT) : \ - (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \ - ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT)) - -#define HWRM_VALID_BIT_DELAY_USEC 150 - -#define BNXT_HWRM_CHNL_CHIMP 0 -#define BNXT_HWRM_CHNL_KONG 1 #define BNXT_RX_EVENT 1 #define BNXT_AGG_EVENT 2 @@ -939,6 +909,8 @@ struct bnxt_rx_sw_stats { u64 rx_l4_csum_errors; u64 rx_resets; u64 rx_buf_errors; + u64 rx_oom_discards; + u64 rx_netpoll_discards; }; struct bnxt_cmn_sw_stats { @@ -1908,13 +1880,8 @@ struct bnxt { u32 hwrm_spec_code; u16 hwrm_cmd_seq; u16 hwrm_cmd_kong_seq; - u16 hwrm_intr_seq_id; - void *hwrm_short_cmd_req_addr; - dma_addr_t hwrm_short_cmd_req_dma_addr; - void *hwrm_cmd_resp_addr; - dma_addr_t hwrm_cmd_resp_dma_addr; - void *hwrm_cmd_kong_resp_addr; - dma_addr_t hwrm_cmd_kong_resp_dma_addr; + struct dma_pool *hwrm_dma_pool; + struct hlist_head hwrm_pending_list; struct rtnl_link_stats64 net_stats_prev; struct bnxt_stats_mem port_stats; @@ -2014,7 +1981,7 @@ struct bnxt { struct mutex sriov_lock; #endif -#if BITS_PER_LONG == 32 +#ifndef writeq /* ensure atomic 64-bit doorbell writes on 32-bit systems. */ spinlock_t db_lock; #endif @@ -2143,7 +2110,7 @@ static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr) ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask); } -#if BITS_PER_LONG == 32 +#ifndef writeq #define writeq(val64, db) \ do { \ spin_lock(&bp->db_lock); \ @@ -2185,63 +2152,6 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db, } } -static inline bool bnxt_cfa_hwrm_message(u16 req_type) -{ - switch (req_type) { - case HWRM_CFA_ENCAP_RECORD_ALLOC: - case HWRM_CFA_ENCAP_RECORD_FREE: - case HWRM_CFA_DECAP_FILTER_ALLOC: - case HWRM_CFA_DECAP_FILTER_FREE: - case HWRM_CFA_EM_FLOW_ALLOC: - case HWRM_CFA_EM_FLOW_FREE: - case HWRM_CFA_EM_FLOW_CFG: - case HWRM_CFA_FLOW_ALLOC: - case HWRM_CFA_FLOW_FREE: - case HWRM_CFA_FLOW_INFO: - case HWRM_CFA_FLOW_FLUSH: - case HWRM_CFA_FLOW_STATS: - case HWRM_CFA_METER_PROFILE_ALLOC: - case HWRM_CFA_METER_PROFILE_FREE: - case HWRM_CFA_METER_PROFILE_CFG: - case HWRM_CFA_METER_INSTANCE_ALLOC: - case HWRM_CFA_METER_INSTANCE_FREE: - return true; - default: - return false; - } -} - -static inline bool bnxt_kong_hwrm_message(struct bnxt *bp, struct input *req) -{ - return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL && - bnxt_cfa_hwrm_message(le16_to_cpu(req->req_type))); -} - -static inline bool bnxt_hwrm_kong_chnl(struct bnxt *bp, struct input *req) -{ - return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL && - req->resp_addr == cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr)); -} - -static inline void *bnxt_get_hwrm_resp_addr(struct bnxt *bp, void *req) -{ - if (bnxt_hwrm_kong_chnl(bp, (struct input *)req)) - return bp->hwrm_cmd_kong_resp_addr; - else - return bp->hwrm_cmd_resp_addr; -} - -static inline u16 bnxt_get_hwrm_seq_id(struct bnxt *bp, u16 dst) -{ - u16 seq_id; - - if (dst == BNXT_HWRM_CHNL_CHIMP) - seq_id = 
bp->hwrm_cmd_seq++; - else - seq_id = bp->hwrm_cmd_kong_seq++; - return seq_id; -} - extern const u16 bnxt_lhint_arr[]; int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, @@ -2251,11 +2161,6 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx); void bnxt_set_tpa_flags(struct bnxt *bp); void bnxt_set_ring_params(struct bnxt *); int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); -void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); -int _hwrm_send_message(struct bnxt *, void *, u32, int); -int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout); -int hwrm_send_message(struct bnxt *, void *, u32, int); -int hwrm_send_message_silent(struct bnxt *, void *, u32, int); int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, bool async_only); int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 8a68df4d9e59..228a5db7e143 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -18,6 +18,7 @@ #include <rdma/ib_verbs.h> #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_dcb.h" #ifdef CONFIG_BNXT_DCB @@ -38,38 +39,43 @@ static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id) static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets) { - struct hwrm_queue_pri2cos_cfg_input req = {0}; + struct hwrm_queue_pri2cos_cfg_input *req; u8 *pri2cos; - int i; + int rc, i; + + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_CFG); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1); - req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR | - QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN); + req->flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR | + QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN); - pri2cos = &req.pri0_cos_queue_id; + pri2cos = &req->pri0_cos_queue_id; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { u8 qidx; - req.enables |= cpu_to_le32( + req->enables |= cpu_to_le32( QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i); qidx = bp->tc_to_qidx[ets->prio_tc[i]]; pri2cos[i] = bp->q_info[qidx].queue_id; } - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets) { - struct hwrm_queue_pri2cos_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_queue_pri2cos_qcfg_input req = {0}; - int rc = 0; + struct hwrm_queue_pri2cos_qcfg_output *resp; + struct hwrm_queue_pri2cos_qcfg_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); - req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_QCFG); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { u8 *pri2cos = &resp->pri0_cos_queue_id; int i; @@ -83,23 +89,26 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets) ets->prio_tc[i] = tc; } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, u8 max_tc) { - struct hwrm_queue_cos2bw_cfg_input req = {0}; + struct 
hwrm_queue_cos2bw_cfg_input *req; struct bnxt_cos2bw_cfg cos2bw; void *data; - int i; + int rc, i; + + rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_CFG); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1); for (i = 0; i < max_tc; i++) { u8 qidx = bp->tc_to_qidx[i]; - req.enables |= cpu_to_le32( + req->enables |= cpu_to_le32( QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << qidx); @@ -120,30 +129,32 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, cpu_to_le32((ets->tc_tx_bw[i] * 100) | BW_VALUE_UNIT_PERCENT1_100); } - data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4); + data = &req->unused_0 + qidx * (sizeof(cos2bw) - 4); memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4); if (qidx == 0) { - req.queue_id0 = cos2bw.queue_id; - req.unused_0 = 0; + req->queue_id0 = cos2bw.queue_id; + req->unused_0 = 0; } } - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) { - struct hwrm_queue_cos2bw_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_queue_cos2bw_qcfg_input req = {0}; + struct hwrm_queue_cos2bw_qcfg_output *resp; + struct hwrm_queue_cos2bw_qcfg_input *req; struct bnxt_cos2bw_cfg cos2bw; void *data; int rc, i; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_QCFG); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -167,7 +178,7 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) ets->tc_tx_bw[tc] = cos2bw.bw_weight; } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return 0; } @@ -229,11 +240,12 @@ static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask) static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc) { - struct hwrm_queue_pfcenable_cfg_input req = {0}; + struct hwrm_queue_pfcenable_cfg_input *req; struct ieee_ets *my_ets = bp->ieee_ets; unsigned int tc_mask = 0, pri_mask = 0; u8 i, pri, lltc_count = 0; bool need_q_remap = false; + int rc; if (!my_ets) return -EINVAL; @@ -266,38 +278,43 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc) if (need_q_remap) bnxt_queue_remap(bp, tc_mask); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1); - req.flags = cpu_to_le32(pri_mask); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_CFG); + if (rc) + return rc; + + req->flags = cpu_to_le32(pri_mask); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc) { - struct hwrm_queue_pfcenable_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_queue_pfcenable_qcfg_input req = {0}; + struct hwrm_queue_pfcenable_qcfg_output *resp; + struct hwrm_queue_pfcenable_qcfg_input *req; u8 pri_mask; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_QCFG); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + 
hwrm_req_drop(bp, req); return rc; } pri_mask = le32_to_cpu(resp->flags); pfc->pfc_en = pri_mask; - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return 0; } static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app, bool add) { - struct hwrm_fw_set_structured_data_input set = {0}; - struct hwrm_fw_get_structured_data_input get = {0}; + struct hwrm_fw_set_structured_data_input *set; + struct hwrm_fw_get_structured_data_input *get; struct hwrm_struct_data_dcbx_app *fw_app; struct hwrm_struct_hdr *data; dma_addr_t mapping; @@ -307,19 +324,26 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app, if (bp->hwrm_spec_code < 0x10601) return 0; + rc = hwrm_req_init(bp, get, HWRM_FW_GET_STRUCTURED_DATA); + if (rc) + return rc; + + hwrm_req_hold(bp, get); + hwrm_req_alloc_flags(bp, get, GFP_KERNEL | __GFP_ZERO); + n = IEEE_8021QAZ_MAX_TCS; data_len = sizeof(*data) + sizeof(*fw_app) * n; - data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping, - GFP_KERNEL); - if (!data) - return -ENOMEM; + data = hwrm_req_dma_slice(bp, get, data_len, &mapping); + if (!data) { + rc = -ENOMEM; + goto set_app_exit; + } - bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1); - get.dest_data_addr = cpu_to_le64(mapping); - get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP); - get.subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL); - get.count = 0; - rc = hwrm_send_message(bp, &get, sizeof(get), HWRM_CMD_TIMEOUT); + get->dest_data_addr = cpu_to_le64(mapping); + get->structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP); + get->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL); + get->count = 0; + rc = hwrm_req_send(bp, get); if (rc) goto set_app_exit; @@ -365,44 +389,49 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app, data->len = cpu_to_le16(sizeof(*fw_app) * n); data->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL); - bnxt_hwrm_cmd_hdr_init(bp, &set, HWRM_FW_SET_STRUCTURED_DATA, -1, -1); - set.src_data_addr = cpu_to_le64(mapping); - set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n); - set.hdr_cnt = 1; - rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, set, HWRM_FW_SET_STRUCTURED_DATA); + if (rc) + goto set_app_exit; + + set->src_data_addr = cpu_to_le64(mapping); + set->data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n); + set->hdr_cnt = 1; + rc = hwrm_req_send(bp, set); set_app_exit: - dma_free_coherent(&bp->pdev->dev, data_len, data, mapping); + hwrm_req_drop(bp, get); /* dropping get request and associated slice */ return rc; } static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp) { - struct hwrm_queue_dscp_qcaps_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_queue_dscp_qcaps_input req = {0}; + struct hwrm_queue_dscp_qcaps_output *resp; + struct hwrm_queue_dscp_qcaps_input *req; int rc; bp->max_dscp_value = 0; if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (!rc) { bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1; if (bp->max_dscp_value < 0x3f) bp->max_dscp_value = 0; } - - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int 
bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app, bool add) { - struct hwrm_queue_dscp2pri_cfg_input req = {0}; + struct hwrm_queue_dscp2pri_cfg_input *req; struct bnxt_dscp2pri_entry *dscp2pri; dma_addr_t mapping; int rc; @@ -410,23 +439,25 @@ static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app, if (bp->hwrm_spec_code < 0x10800) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP2PRI_CFG, -1, -1); - dscp2pri = dma_alloc_coherent(&bp->pdev->dev, sizeof(*dscp2pri), - &mapping, GFP_KERNEL); - if (!dscp2pri) + rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP2PRI_CFG); + if (rc) + return rc; + + dscp2pri = hwrm_req_dma_slice(bp, req, sizeof(*dscp2pri), &mapping); + if (!dscp2pri) { + hwrm_req_drop(bp, req); return -ENOMEM; + } - req.src_data_addr = cpu_to_le64(mapping); + req->src_data_addr = cpu_to_le64(mapping); dscp2pri->dscp = app->protocol; if (add) dscp2pri->mask = 0x3f; else dscp2pri->mask = 0; dscp2pri->pri = app->priority; - req.entry_cnt = cpu_to_le16(1); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri, - mapping); + req->entry_cnt = cpu_to_le16(1); + rc = hwrm_req_send(bp, req); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 2cd8bb37e641..1423cc617d93 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -12,6 +12,7 @@ #include <net/devlink.h> #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_vfr.h" #include "bnxt_devlink.h" #include "bnxt_ethtool.h" @@ -354,28 +355,34 @@ static void bnxt_copy_from_nvm_data(union devlink_param_value *dst, static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp, union devlink_param_value *nvm_cfg_ver) { - struct hwrm_nvm_get_variable_input req = {0}; + struct hwrm_nvm_get_variable_input *req; union bnxt_nvm_data *data; dma_addr_t data_dma_addr; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1); - data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data), - &data_dma_addr, GFP_KERNEL); - if (!data) - return -ENOMEM; + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE); + if (rc) + return rc; + + data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr); + if (!data) { + rc = -ENOMEM; + goto exit; + } - req.dest_data_addr = cpu_to_le64(data_dma_addr); - req.data_len = cpu_to_le16(BNXT_NVM_CFG_VER_BITS); - req.option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER); + hwrm_req_hold(bp, req); + req->dest_data_addr = cpu_to_le64(data_dma_addr); + req->data_len = cpu_to_le16(BNXT_NVM_CFG_VER_BITS); + req->option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER); - rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send_silent(bp, req); if (!rc) bnxt_copy_from_nvm_data(nvm_cfg_ver, data, BNXT_NVM_CFG_VER_BITS, BNXT_NVM_CFG_VER_BYTES); - dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr); +exit: + hwrm_req_drop(bp, req); return rc; } @@ -562,17 +569,20 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req, } static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, - int msg_len, union devlink_param_value *val) + union devlink_param_value *val) { struct hwrm_nvm_get_variable_input *req = msg; struct bnxt_dl_nvm_param nvm_param; + struct hwrm_err_output *resp; union bnxt_nvm_data *data; dma_addr_t data_dma_addr; int idx = 0, rc, i; /* Get/Set NVM CFG 
parameter is supported only on PFs */ - if (BNXT_VF(bp)) + if (BNXT_VF(bp)) { + hwrm_req_drop(bp, req); return -EPERM; + } for (i = 0; i < ARRAY_SIZE(nvm_params); i++) { if (nvm_params[i].id == param_id) { @@ -581,18 +591,22 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, } } - if (i == ARRAY_SIZE(nvm_params)) + if (i == ARRAY_SIZE(nvm_params)) { + hwrm_req_drop(bp, req); return -EOPNOTSUPP; + } if (nvm_param.dir_type == BNXT_NVM_PORT_CFG) idx = bp->pf.port_id; else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG) idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID; - data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data), - &data_dma_addr, GFP_KERNEL); - if (!data) + data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr); + + if (!data) { + hwrm_req_drop(bp, req); return -ENOMEM; + } req->dest_data_addr = cpu_to_le64(data_dma_addr); req->data_len = cpu_to_le16(nvm_param.nvm_num_bits); @@ -601,26 +615,24 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, if (idx) req->dimensions = cpu_to_le16(1); + resp = hwrm_req_hold(bp, req); if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) { bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits, nvm_param.dl_num_bytes); - rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, msg); } else { - rc = hwrm_send_message_silent(bp, msg, msg_len, - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send_silent(bp, msg); if (!rc) { bnxt_copy_from_nvm_data(val, data, nvm_param.nvm_num_bits, nvm_param.dl_num_bytes); } else { - struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; - if (resp->cmd_err == NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST) rc = -EOPNOTSUPP; } } - dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr); + hwrm_req_drop(bp, req); if (rc == -EACCES) netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n"); return rc; @@ -629,15 +641,17 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id, struct devlink_param_gset_ctx *ctx) { - struct hwrm_nvm_get_variable_input req = {0}; struct bnxt *bp = bnxt_get_bp_from_dl(dl); + struct hwrm_nvm_get_variable_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1); - rc = bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); - if (!rc) - if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK) - ctx->val.vbool = !ctx->val.vbool; + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE); + if (rc) + return rc; + + rc = bnxt_hwrm_nvm_req(bp, id, req, &ctx->val); + if (!rc && id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK) + ctx->val.vbool = !ctx->val.vbool; return rc; } @@ -645,15 +659,18 @@ static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id, static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id, struct devlink_param_gset_ctx *ctx) { - struct hwrm_nvm_set_variable_input req = {0}; struct bnxt *bp = bnxt_get_bp_from_dl(dl); + struct hwrm_nvm_set_variable_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_NVM_SET_VARIABLE); + if (rc) + return rc; if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK) ctx->val.vbool = !ctx->val.vbool; - return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); + return bnxt_hwrm_nvm_req(bp, id, req, &ctx->val); } static int bnxt_dl_msix_validate(struct devlink *dl, u32 id, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 
9f8c72d95228..b056e3c29bbd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -24,6 +24,7 @@ #include <linux/timecounter.h> #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_xdp.h" #include "bnxt_ptp.h" #include "bnxt_ethtool.h" @@ -307,6 +308,7 @@ static const char * const bnxt_cmn_sw_stats_str[] = { enum { RX_TOTAL_DISCARDS, TX_TOTAL_DISCARDS, + RX_NETPOLL_DISCARDS, }; static struct { @@ -315,6 +317,7 @@ static struct { } bnxt_sw_func_stats[] = { {0, "rx_total_discard_pkts"}, {0, "tx_total_discard_pkts"}, + {0, "rx_total_netpoll_discards"}, }; #define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str) @@ -603,6 +606,8 @@ skip_tpa_ring_stats: BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts); bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter += BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts); + bnxt_sw_func_stats[RX_NETPOLL_DISCARDS].counter += + cpr->sw_stats.rx.rx_netpoll_discards; } for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++) @@ -1361,7 +1366,7 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) { struct pcie_ctx_hw_stats *hw_pcie_stats; - struct hwrm_pcie_qstats_input req = {0}; + struct hwrm_pcie_qstats_input *req; struct bnxt *bp = netdev_priv(dev); dma_addr_t hw_pcie_stats_addr; int rc; @@ -1372,18 +1377,21 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) return; - hw_pcie_stats = dma_alloc_coherent(&bp->pdev->dev, - sizeof(*hw_pcie_stats), - &hw_pcie_stats_addr, GFP_KERNEL); - if (!hw_pcie_stats) + if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS)) return; + hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats), + &hw_pcie_stats_addr); + if (!hw_pcie_stats) { + hwrm_req_drop(bp, req); + return; + } + regs->version = 1; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1); - req.pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats)); - req.pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + hwrm_req_hold(bp, req); /* hold on to slice */ + req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats)); + req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr); + rc = hwrm_req_send(bp, req); if (!rc) { __le64 *src = (__le64 *)hw_pcie_stats; u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN); @@ -1392,9 +1400,7 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++) dst[i] = le64_to_cpu(src[i]); } - mutex_unlock(&bp->hwrm_cmd_lock); - dma_free_coherent(&bp->pdev->dev, sizeof(*hw_pcie_stats), hw_pcie_stats, - hw_pcie_stats_addr); + hwrm_req_drop(bp, req); } static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) @@ -1974,7 +1980,7 @@ static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info, static int bnxt_set_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam) { - struct hwrm_port_phy_cfg_input req = {0}; + struct hwrm_port_phy_cfg_input *req; struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info; u32 new_cfg, fec = fecparam->fec; @@ -2006,9 +2012,11 @@ static int bnxt_set_fecparam(struct net_device *dev, } apply_fec: - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); - req.flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); - rc = hwrm_send_message(bp, &req, sizeof(req), 
HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return rc; + req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); + rc = hwrm_req_send(bp, req); /* update current settings */ if (!rc) { mutex_lock(&bp->link_lock); @@ -2102,19 +2110,22 @@ static u32 bnxt_get_link(struct net_device *dev) int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp, struct hwrm_nvm_get_dev_info_output *nvm_dev_info) { - struct hwrm_nvm_get_dev_info_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_nvm_get_dev_info_input req = {0}; + struct hwrm_nvm_get_dev_info_output *resp; + struct hwrm_nvm_get_dev_info_input *req; int rc; if (BNXT_VF(bp)) return -EOPNOTSUPP; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DEV_INFO, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) memcpy(nvm_dev_info, resp, sizeof(*resp)); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -2127,77 +2138,67 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, u16 ext, u16 *index, u32 *item_length, u32 *data_length); -static int __bnxt_flash_nvram(struct net_device *dev, u16 dir_type, - u16 dir_ordinal, u16 dir_ext, u16 dir_attr, - u32 dir_item_len, const u8 *data, - size_t data_len) +static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type, + u16 dir_ordinal, u16 dir_ext, u16 dir_attr, + u32 dir_item_len, const u8 *data, + size_t data_len) { struct bnxt *bp = netdev_priv(dev); + struct hwrm_nvm_write_input *req; int rc; - struct hwrm_nvm_write_input req = {0}; - dma_addr_t dma_handle; - u8 *kmem = NULL; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE); + if (rc) + return rc; - req.dir_type = cpu_to_le16(dir_type); - req.dir_ordinal = cpu_to_le16(dir_ordinal); - req.dir_ext = cpu_to_le16(dir_ext); - req.dir_attr = cpu_to_le16(dir_attr); - req.dir_item_length = cpu_to_le32(dir_item_len); if (data_len && data) { - req.dir_data_length = cpu_to_le32(data_len); + dma_addr_t dma_handle; + u8 *kmem; - kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle, - GFP_KERNEL); - if (!kmem) + kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle); + if (!kmem) { + hwrm_req_drop(bp, req); return -ENOMEM; + } + + req->dir_data_length = cpu_to_le32(data_len); memcpy(kmem, data, data_len); - req.host_src_addr = cpu_to_le64(dma_handle); + req->host_src_addr = cpu_to_le64(dma_handle); } - rc = _hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT); - if (kmem) - dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle); + hwrm_req_timeout(bp, req, FLASH_NVRAM_TIMEOUT); + req->dir_type = cpu_to_le16(dir_type); + req->dir_ordinal = cpu_to_le16(dir_ordinal); + req->dir_ext = cpu_to_le16(dir_ext); + req->dir_attr = cpu_to_le16(dir_attr); + req->dir_item_length = cpu_to_le32(dir_item_len); + rc = hwrm_req_send(bp, req); if (rc == -EACCES) bnxt_print_admin_err(bp); return rc; } -static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type, - u16 dir_ordinal, u16 dir_ext, u16 dir_attr, - const u8 *data, size_t data_len) -{ - struct bnxt *bp = netdev_priv(dev); - int rc; - - mutex_lock(&bp->hwrm_cmd_lock); - rc = __bnxt_flash_nvram(dev, dir_type, dir_ordinal, dir_ext, dir_attr, - 0, data, data_len); - mutex_unlock(&bp->hwrm_cmd_lock); - return rc; -} - static int 
bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type, u8 self_reset, u8 flags) { - struct hwrm_fw_reset_input req = {0}; struct bnxt *bp = netdev_priv(dev); + struct hwrm_fw_reset_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FW_RESET); + if (rc) + return rc; - req.embedded_proc_type = proc_type; - req.selfrst_status = self_reset; - req.flags = flags; + req->embedded_proc_type = proc_type; + req->selfrst_status = self_reset; + req->flags = flags; if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) { - rc = hwrm_send_message_silent(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send_silent(bp, req); } else { - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc == -EACCES) bnxt_print_admin_err(bp); } @@ -2335,7 +2336,7 @@ static int bnxt_flash_firmware(struct net_device *dev, return -EINVAL; } rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, - 0, 0, fw_data, fw_size); + 0, 0, 0, fw_data, fw_size); if (rc == 0) /* Firmware update successful */ rc = bnxt_firmware_reset(dev, dir_type); @@ -2388,7 +2389,7 @@ static int bnxt_flash_microcode(struct net_device *dev, return -EINVAL; } rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, - 0, 0, fw_data, fw_size); + 0, 0, 0, fw_data, fw_size); return rc; } @@ -2454,7 +2455,7 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev, rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size); else rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, - 0, 0, fw->data, fw->size); + 0, 0, 0, fw->data, fw->size); release_firmware(fw); return rc; } @@ -2466,21 +2467,23 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev, int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw, u32 install_type) { - struct hwrm_nvm_install_update_input install = {0}; - struct hwrm_nvm_install_update_output resp = {0}; - struct hwrm_nvm_modify_input modify = {0}; + struct hwrm_nvm_install_update_input *install; + struct hwrm_nvm_install_update_output *resp; + struct hwrm_nvm_modify_input *modify; struct bnxt *bp = netdev_priv(dev); bool defrag_attempted = false; dma_addr_t dma_handle; u8 *kmem = NULL; u32 modify_len; u32 item_len; - int rc = 0; u16 index; + int rc; bnxt_hwrm_fw_set_time(bp); - bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1); + rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY); + if (rc) + return rc; /* Try allocating a large DMA buffer first. Older fw will * cause excessive NVRAM erases when using small blocks. 
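The flashing paths above also replace bare dma_alloc_coherent()/dma_free_coherent() pairs with hwrm_req_dma_slice(), which carves the host buffer out of storage owned by the request itself, so a single hwrm_req_drop() — or an unheld hwrm_req_send(), as the converted bnxt_flash_nvram() suggests — releases the slice together with the request. A minimal sketch under those assumptions, with the NVM_WRITE structs, fields, and FLASH_NVRAM_TIMEOUT taken from the diff and the wrapper name hypothetical:

	static int bnxt_hwrm_nvm_write_buf(struct bnxt *bp, const u8 *data,
					   size_t data_len)
	{
		struct hwrm_nvm_write_input *req;
		dma_addr_t dma_handle;
		u8 *kmem;
		int rc;

		rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE);
		if (rc)
			return rc;

		kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle);
		if (!kmem) {
			hwrm_req_drop(bp, req);	/* slice failed, free request */
			return -ENOMEM;
		}
		memcpy(kmem, data, data_len);
		req->host_src_addr = cpu_to_le64(dma_handle);
		req->dir_data_length = cpu_to_le32(data_len);

		hwrm_req_timeout(bp, req, FLASH_NVRAM_TIMEOUT);
		return hwrm_req_send(bp, req);	/* releases req and slice */
	}

Where a slice or response must stay live across several sends — as in the package-install loop that follows — the code holds the request first and drops it once at the end, exactly as bnxt_get_regs() does with its "hold on to slice" comment.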
@@ -2488,22 +2491,33 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware modify_len = roundup_pow_of_two(fw->size); modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE); while (1) { - kmem = dma_alloc_coherent(&bp->pdev->dev, modify_len, - &dma_handle, GFP_KERNEL); + kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle); if (!kmem && modify_len > PAGE_SIZE) modify_len /= 2; else break; } - if (!kmem) + if (!kmem) { + hwrm_req_drop(bp, modify); return -ENOMEM; + } - modify.host_src_addr = cpu_to_le64(dma_handle); + rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE); + if (rc) { + hwrm_req_drop(bp, modify); + return rc; + } + + hwrm_req_timeout(bp, modify, FLASH_PACKAGE_TIMEOUT); + hwrm_req_timeout(bp, install, INSTALL_PACKAGE_TIMEOUT); - bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1); + hwrm_req_hold(bp, modify); + modify->host_src_addr = cpu_to_le64(dma_handle); + + resp = hwrm_req_hold(bp, install); if ((install_type & 0xffff) == 0) install_type >>= 16; - install.install_type = cpu_to_le32(install_type); + install->install_type = cpu_to_le32(install_type); do { u32 copied = 0, len = modify_len; @@ -2523,76 +2537,69 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware break; } - modify.dir_idx = cpu_to_le16(index); + modify->dir_idx = cpu_to_le16(index); if (fw->size > modify_len) - modify.flags = BNXT_NVM_MORE_FLAG; + modify->flags = BNXT_NVM_MORE_FLAG; while (copied < fw->size) { u32 balance = fw->size - copied; if (balance <= modify_len) { len = balance; if (copied) - modify.flags |= BNXT_NVM_LAST_FLAG; + modify->flags |= BNXT_NVM_LAST_FLAG; } memcpy(kmem, fw->data + copied, len); - modify.len = cpu_to_le32(len); - modify.offset = cpu_to_le32(copied); - rc = hwrm_send_message(bp, &modify, sizeof(modify), - FLASH_PACKAGE_TIMEOUT); + modify->len = cpu_to_le32(len); + modify->offset = cpu_to_le32(copied); + rc = hwrm_req_send(bp, modify); if (rc) goto pkg_abort; copied += len; } - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &install, sizeof(install), - INSTALL_PACKAGE_TIMEOUT); - memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp)); + + rc = hwrm_req_send_silent(bp, install); if (defrag_attempted) { /* We have tried to defragment already in the previous * iteration. 
Return with the result for INSTALL_UPDATE */ - mutex_unlock(&bp->hwrm_cmd_lock); break; } - if (rc && ((struct hwrm_err_output *)&resp)->cmd_err == + if (rc && ((struct hwrm_err_output *)resp)->cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { - install.flags = + install->flags = cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); - rc = _hwrm_send_message_silent(bp, &install, - sizeof(install), - INSTALL_PACKAGE_TIMEOUT); - memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp)); + rc = hwrm_req_send_silent(bp, install); - if (rc && ((struct hwrm_err_output *)&resp)->cmd_err == + if (rc && ((struct hwrm_err_output *)resp)->cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) { /* FW has cleared NVM area, driver will create * UPDATE directory and try the flash again */ defrag_attempted = true; - install.flags = 0; - rc = __bnxt_flash_nvram(bp->dev, - BNX_DIR_TYPE_UPDATE, - BNX_DIR_ORDINAL_FIRST, - 0, 0, item_len, NULL, - 0); + install->flags = 0; + rc = bnxt_flash_nvram(bp->dev, + BNX_DIR_TYPE_UPDATE, + BNX_DIR_ORDINAL_FIRST, + 0, 0, item_len, NULL, 0); } else if (rc) { netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc); } } else if (rc) { netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc); } - mutex_unlock(&bp->hwrm_cmd_lock); } while (defrag_attempted && !rc); pkg_abort: - dma_free_coherent(&bp->pdev->dev, modify_len, kmem, dma_handle); - if (resp.result) { + hwrm_req_drop(bp, modify); + hwrm_req_drop(bp, install); + + if (resp->result) { netdev_err(dev, "PKG install error = %d, problem_item = %d\n", - (s8)resp.result, (int)resp.problem_item); + (s8)resp->result, (int)resp->problem_item); rc = -ENOPKG; } if (rc == -EACCES) @@ -2638,20 +2645,22 @@ static int bnxt_flash_device(struct net_device *dev, static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length) { + struct hwrm_nvm_get_dir_info_output *output; + struct hwrm_nvm_get_dir_info_input *req; struct bnxt *bp = netdev_priv(dev); int rc; - struct hwrm_nvm_get_dir_info_input req = {0}; - struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + output = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { *entries = le32_to_cpu(output->entries); *length = le32_to_cpu(output->entry_length); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -2677,7 +2686,7 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data) u8 *buf; size_t buflen; dma_addr_t dma_handle; - struct hwrm_nvm_get_dir_entries_input req = {0}; + struct hwrm_nvm_get_dir_entries_input *req; rc = nvm_get_dir_info(dev, &dir_entries, &entry_length); if (rc != 0) @@ -2695,20 +2704,23 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data) len -= 2; memset(data, 0xff, len); + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES); + if (rc) + return rc; + buflen = dir_entries * entry_length; - buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle, - GFP_KERNEL); + buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle); if (!buf) { - netdev_err(dev, "dma_alloc_coherent failure, length = %u\n", - (unsigned)buflen); + hwrm_req_drop(bp, req); return -ENOMEM; } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1); - req.host_dest_addr = cpu_to_le64(dma_handle); 
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->host_dest_addr = cpu_to_le64(dma_handle); + + hwrm_req_hold(bp, req); /* hold the slice */ + rc = hwrm_req_send(bp, req); if (rc == 0) memcpy(data, buf, len > buflen ? buflen : len); - dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle); + hwrm_req_drop(bp, req); return rc; } @@ -2719,28 +2731,31 @@ static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset, int rc; u8 *buf; dma_addr_t dma_handle; - struct hwrm_nvm_read_input req = {0}; + struct hwrm_nvm_read_input *req; if (!length) return -EINVAL; - buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle, - GFP_KERNEL); + rc = hwrm_req_init(bp, req, HWRM_NVM_READ); + if (rc) + return rc; + + buf = hwrm_req_dma_slice(bp, req, length, &dma_handle); if (!buf) { - netdev_err(dev, "dma_alloc_coherent failure, length = %u\n", - (unsigned)length); + hwrm_req_drop(bp, req); return -ENOMEM; } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1); - req.host_dest_addr = cpu_to_le64(dma_handle); - req.dir_idx = cpu_to_le16(index); - req.offset = cpu_to_le32(offset); - req.len = cpu_to_le32(length); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->host_dest_addr = cpu_to_le64(dma_handle); + req->dir_idx = cpu_to_le16(index); + req->offset = cpu_to_le32(offset); + req->len = cpu_to_le32(length); + + hwrm_req_hold(bp, req); /* hold the slice */ + rc = hwrm_req_send(bp, req); if (rc == 0) memcpy(data, buf, length); - dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle); + hwrm_req_drop(bp, req); return rc; } @@ -2748,20 +2763,23 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, u16 ext, u16 *index, u32 *item_length, u32 *data_length) { + struct hwrm_nvm_find_dir_entry_output *output; + struct hwrm_nvm_find_dir_entry_input *req; struct bnxt *bp = netdev_priv(dev); int rc; - struct hwrm_nvm_find_dir_entry_input req = {0}; - struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr; - - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1); - req.enables = 0; - req.dir_idx = 0; - req.dir_type = cpu_to_le16(type); - req.dir_ordinal = cpu_to_le16(ordinal); - req.dir_ext = cpu_to_le16(ext); - req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY); + if (rc) + return rc; + + req->enables = 0; + req->dir_idx = 0; + req->dir_type = cpu_to_le16(type); + req->dir_ordinal = cpu_to_le16(ordinal); + req->dir_ext = cpu_to_le16(ext); + req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; + output = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (rc == 0) { if (index) *index = le16_to_cpu(output->dir_idx); @@ -2770,7 +2788,7 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, if (data_length) *data_length = le32_to_cpu(output->dir_data_length); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -2865,12 +2883,16 @@ static int bnxt_get_eeprom(struct net_device *dev, static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index) { + struct hwrm_nvm_erase_dir_entry_input *req; struct bnxt *bp = netdev_priv(dev); - struct hwrm_nvm_erase_dir_entry_input req = {0}; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1); - req.dir_idx = cpu_to_le16(index); - return hwrm_send_message(bp, &req, sizeof(req), 
HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY); + if (rc) + return rc; + + req->dir_idx = cpu_to_le16(index); + return hwrm_req_send(bp, req); } static int bnxt_set_eeprom(struct net_device *dev, @@ -2910,7 +2932,7 @@ static int bnxt_set_eeprom(struct net_device *dev, ordinal = eeprom->offset >> 16; attr = eeprom->offset & 0xffff; - return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data, + return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data, eeprom->len); } @@ -2998,31 +3020,33 @@ static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr, u16 page_number, u16 start_addr, u16 data_length, u8 *buf) { - struct hwrm_port_phy_i2c_read_input req = {0}; - struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr; + struct hwrm_port_phy_i2c_read_output *output; + struct hwrm_port_phy_i2c_read_input *req; int rc, byte_offset = 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1); - req.i2c_slave_addr = i2c_addr; - req.page_number = cpu_to_le16(page_number); - req.port_id = cpu_to_le16(bp->pf.port_id); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ); + if (rc) + return rc; + + output = hwrm_req_hold(bp, req); + req->i2c_slave_addr = i2c_addr; + req->page_number = cpu_to_le16(page_number); + req->port_id = cpu_to_le16(bp->pf.port_id); do { u16 xfer_size; xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE); data_length -= xfer_size; - req.page_offset = cpu_to_le16(start_addr + byte_offset); - req.data_length = xfer_size; - req.enables = cpu_to_le32(start_addr + byte_offset ? + req->page_offset = cpu_to_le16(start_addr + byte_offset); + req->data_length = xfer_size; + req->enables = cpu_to_le32(start_addr + byte_offset ? PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (!rc) memcpy(buf + byte_offset, output->data, xfer_size); - mutex_unlock(&bp->hwrm_cmd_lock); byte_offset += xfer_size; } while (!rc && data_length > 0); + hwrm_req_drop(bp, req); return rc; } @@ -3131,13 +3155,13 @@ static int bnxt_nway_reset(struct net_device *dev) static int bnxt_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) { - struct hwrm_port_led_cfg_input req = {0}; + struct hwrm_port_led_cfg_input *req; struct bnxt *bp = netdev_priv(dev); struct bnxt_pf_info *pf = &bp->pf; struct bnxt_led_cfg *led_cfg; u8 led_state; __le16 duration; - int i; + int rc, i; if (!bp->num_leds || BNXT_VF(bp)) return -EOPNOTSUPP; @@ -3151,27 +3175,35 @@ static int bnxt_set_phys_id(struct net_device *dev, } else { return -EINVAL; } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1); - req.port_id = cpu_to_le16(pf->port_id); - req.num_leds = bp->num_leds; - led_cfg = (struct bnxt_led_cfg *)&req.led0_id; + rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG); + if (rc) + return rc; + + req->port_id = cpu_to_le16(pf->port_id); + req->num_leds = bp->num_leds; + led_cfg = (struct bnxt_led_cfg *)&req->led0_id; for (i = 0; i < bp->num_leds; i++, led_cfg++) { - req.enables |= BNXT_LED_DFLT_ENABLES(i); + req->enables |= BNXT_LED_DFLT_ENABLES(i); led_cfg->led_id = bp->leds[i].led_id; led_cfg->led_state = led_state; led_cfg->led_blink_on = duration; led_cfg->led_blink_off = duration; led_cfg->led_group_id = bp->leds[i].led_group_id; } - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 
cmpl_ring) { - struct hwrm_selftest_irq_input req = {0}; + struct hwrm_selftest_irq_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ); + if (rc) + return rc; + + req->cmpl_ring = cpu_to_le16(cmpl_ring); + return hwrm_req_send(bp, req); } static int bnxt_test_irq(struct bnxt *bp) @@ -3191,31 +3223,37 @@ static int bnxt_test_irq(struct bnxt *bp) static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable) { - struct hwrm_port_mac_cfg_input req = {0}; + struct hwrm_port_mac_cfg_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); + if (rc) + return rc; - req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK); + req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK); if (enable) - req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL; + req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL; else - req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE; + return hwrm_req_send(bp, req); } static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds) { - struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_port_phy_qcaps_input req = {0}; + struct hwrm_port_phy_qcaps_output *resp; + struct hwrm_port_phy_qcaps_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -3250,7 +3288,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp, req->force_link_speed = cpu_to_le16(fw_speed); req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); - rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); req->flags = 0; req->force_link_speed = cpu_to_le16(0); return rc; @@ -3258,21 +3296,29 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp, static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext) { - struct hwrm_port_phy_cfg_input req = {0}; + struct hwrm_port_phy_cfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); + /* prevent bnxt_disable_an_for_lpbk() from consuming the request */ + hwrm_req_hold(bp, req); if (enable) { - bnxt_disable_an_for_lpbk(bp, &req); + bnxt_disable_an_for_lpbk(bp, req); if (ext) - req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL; + req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL; else - req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL; + req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL; } else { - req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE; + req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE; } - req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK); + rc = hwrm_req_send(bp, req); + hwrm_req_drop(bp, req); + return rc; } static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, @@ -3390,17 +3436,21 @@ static int 
bnxt_run_loopback(struct bnxt *bp) static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results) { - struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_selftest_exec_input req = {0}; + struct hwrm_selftest_exec_output *resp; + struct hwrm_selftest_exec_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - resp->test_success = 0; - req.flags = test_mask; - rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout); + rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC); + if (rc) + return rc; + + hwrm_req_timeout(bp, req, bp->test_info->timeout); + req->flags = test_mask; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); *test_results = resp->test_success; - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -3559,32 +3609,34 @@ static int bnxt_reset(struct net_device *dev, u32 *flags) return 0; } -static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len, +static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, struct bnxt_hwrm_dbg_dma_info *info) { - struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr; struct hwrm_dbg_cmn_input *cmn_req = msg; __le16 *seq_ptr = msg + info->seq_off; + struct hwrm_dbg_cmn_output *cmn_resp; u16 seq = 0, len, segs_off; - void *resp = cmn_resp; dma_addr_t dma_handle; + void *dma_buf, *resp; int rc, off = 0; - void *dma_buf; - dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle, - GFP_KERNEL); - if (!dma_buf) + dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle); + if (!dma_buf) { + hwrm_req_drop(bp, msg); return -ENOMEM; + } + + hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT); + cmn_resp = hwrm_req_hold(bp, msg); + resp = cmn_resp; segs_off = offsetof(struct hwrm_dbg_coredump_list_output, total_segments); cmn_req->host_dest_addr = cpu_to_le64(dma_handle); cmn_req->host_buf_len = cpu_to_le32(info->dma_len); - mutex_lock(&bp->hwrm_cmd_lock); while (1) { *seq_ptr = cpu_to_le16(seq); - rc = _hwrm_send_message(bp, msg, msg_len, - HWRM_COREDUMP_TIMEOUT); + rc = hwrm_req_send(bp, msg); if (rc) break; @@ -3628,26 +3680,27 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len, seq++; off += len; } - mutex_unlock(&bp->hwrm_cmd_lock); - dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle); + hwrm_req_drop(bp, msg); return rc; } static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp, struct bnxt_coredump *coredump) { - struct hwrm_dbg_coredump_list_input req = {0}; struct bnxt_hwrm_dbg_dma_info info = {NULL}; + struct hwrm_dbg_coredump_list_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST); + if (rc) + return rc; info.dma_len = COREDUMP_LIST_BUF_LEN; info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no); info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output, data_len); - rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info); + rc = bnxt_hwrm_dbg_dma_data(bp, req, &info); if (!rc) { coredump->data = info.dest_buf; coredump->data_size = info.dest_buf_size; @@ -3659,26 +3712,34 @@ static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp, static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id, u16 segment_id) { - struct hwrm_dbg_coredump_initiate_input req = {0}; + struct hwrm_dbg_coredump_initiate_input *req; + int rc; + + rc = hwrm_req_init(bp, req, 
HWRM_DBG_COREDUMP_INITIATE); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1); - req.component_id = cpu_to_le16(component_id); - req.segment_id = cpu_to_le16(segment_id); + hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT); + req->component_id = cpu_to_le16(component_id); + req->segment_id = cpu_to_le16(segment_id); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_COREDUMP_TIMEOUT); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, u16 segment_id, u32 *seg_len, void *buf, u32 buf_len, u32 offset) { - struct hwrm_dbg_coredump_retrieve_input req = {0}; + struct hwrm_dbg_coredump_retrieve_input *req; struct bnxt_hwrm_dbg_dma_info info = {NULL}; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1); - req.component_id = cpu_to_le16(component_id); - req.segment_id = cpu_to_le16(segment_id); + rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE); + if (rc) + return rc; + + req->component_id = cpu_to_le16(component_id); + req->segment_id = cpu_to_le16(segment_id); info.dma_len = COREDUMP_RETRIEVE_BUF_LEN; info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input, @@ -3691,7 +3752,7 @@ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, info.seg_start = offset; } - rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info); + rc = bnxt_hwrm_dbg_dma_data(bp, req, &info); if (!rc) *seg_len = info.dest_buf_size; @@ -3970,8 +4031,8 @@ static int bnxt_get_ts_info(struct net_device *dev, void bnxt_ethtool_init(struct bnxt *bp) { - struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_selftest_qlist_input req = {0}; + struct hwrm_selftest_qlist_output *resp; + struct hwrm_selftest_qlist_input *req; struct bnxt_test_info *test_info; struct net_device *dev = bp->dev; int i, rc; @@ -3983,19 +4044,22 @@ void bnxt_ethtool_init(struct bnxt *bp) if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp)) return; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - goto ethtool_init_exit; - test_info = bp->test_info; - if (!test_info) + if (!test_info) { test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL); - if (!test_info) + if (!test_info) + return; + bp->test_info = test_info; + } + + if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST)) + return; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (rc) goto ethtool_init_exit; - bp->test_info = test_info; bp->num_tests = resp->num_tests + BNXT_DRV_TESTS; if (bp->num_tests > BNXT_MAX_TEST) bp->num_tests = BNXT_MAX_TEST; @@ -4029,7 +4093,7 @@ void bnxt_ethtool_init(struct bnxt *bp) } ethtool_init_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); } static void bnxt_get_eth_phy_stats(struct net_device *dev, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c new file mode 100644 index 000000000000..acef61abe35d --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c @@ -0,0 +1,763 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2020 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include <asm/byteorder.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/errno.h> +#include <linux/ethtool.h> +#include <linux/if_ether.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/skbuff.h> + +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" + +static u64 hwrm_calc_sentinel(struct bnxt_hwrm_ctx *ctx, u16 req_type) +{ + return (((uintptr_t)ctx) + req_type) ^ BNXT_HWRM_SENTINEL; +} + +/** + * __hwrm_req_init() - Initialize an HWRM request. + * @bp: The driver context. + * @req: A pointer to the request pointer to initialize. + * @req_type: The request type. This will be converted to little endian + * before being written to the req_type field of the returned request. + * @req_len: The length of the request to be allocated. + * + * Allocate DMA resources and initialize a new HWRM request object of the + * given type. The response address field in the request is configured with + * the DMA bus address that has been mapped for the response and the passed + * request is pointed to kernel virtual memory mapped for the request (such + * that short_input indirection can be accomplished without copying). The + * request's target and completion ring are initialized to default values and + * can be overridden by writing to the returned request object directly. + * + * The initialized request can be further customized by writing to its fields + * directly, taking care to convert such fields to little endian. The request + * object will be consumed (and all its associated resources released) upon + * passing it to hwrm_req_send() unless ownership of the request has been + * claimed by the caller via a call to hwrm_req_hold(). If the request is not + * consumed, either because it is never sent or because ownership has been + * claimed, then it must be released by a call to hwrm_req_drop(). + * + * Return: zero on success, negative error code otherwise: + * E2BIG: the type of request pointer is too large to fit. + * ENOMEM: an allocation failure occurred.
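+ * + * A minimal usage sketch (not a caller from this patch; HWRM_FUNC_QCFG and + * its fid field, both used elsewhere in this series, merely illustrate the + * pattern): + * + * struct hwrm_func_qcfg_input *req; + * int rc; + * + * rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + * if (rc) + * return rc; + * req->fid = cpu_to_le16(0xffff); + * rc = hwrm_req_send(bp, req); + * + * The send consumes the request and its resources unless it was previously + * held via hwrm_req_hold().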
+ */ +int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len) +{ + struct bnxt_hwrm_ctx *ctx; + dma_addr_t dma_handle; + u8 *req_addr; + + if (req_len > BNXT_HWRM_CTX_OFFSET) + return -E2BIG; + + req_addr = dma_pool_alloc(bp->hwrm_dma_pool, GFP_KERNEL | __GFP_ZERO, + &dma_handle); + if (!req_addr) + return -ENOMEM; + + ctx = (struct bnxt_hwrm_ctx *)(req_addr + BNXT_HWRM_CTX_OFFSET); + /* safety first, sentinel used to check for invalid requests */ + ctx->sentinel = hwrm_calc_sentinel(ctx, req_type); + ctx->req_len = req_len; + ctx->req = (struct input *)req_addr; + ctx->resp = (struct output *)(req_addr + BNXT_HWRM_RESP_OFFSET); + ctx->dma_handle = dma_handle; + ctx->flags = 0; /* __GFP_ZERO, but be explicit regarding ownership */ + ctx->timeout = bp->hwrm_cmd_timeout ?: DFLT_HWRM_CMD_TIMEOUT; + ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET; + ctx->gfp = GFP_KERNEL; + ctx->slice_addr = NULL; + + /* initialize common request fields */ + ctx->req->req_type = cpu_to_le16(req_type); + ctx->req->resp_addr = cpu_to_le64(dma_handle + BNXT_HWRM_RESP_OFFSET); + ctx->req->cmpl_ring = cpu_to_le16(BNXT_HWRM_NO_CMPL_RING); + ctx->req->target_id = cpu_to_le16(BNXT_HWRM_TARGET); + *req = ctx->req; + + return 0; +} + +static struct bnxt_hwrm_ctx *__hwrm_ctx(struct bnxt *bp, u8 *req_addr) +{ + void *ctx_addr = req_addr + BNXT_HWRM_CTX_OFFSET; + struct input *req = (struct input *)req_addr; + struct bnxt_hwrm_ctx *ctx = ctx_addr; + u64 sentinel; + + if (!req) { + /* can only be due to software bug, be loud */ + netdev_err(bp->dev, "null HWRM request\n"); + dump_stack(); + return NULL; + } + + /* HWRM API has no type safety, verify sentinel to validate address */ + sentinel = hwrm_calc_sentinel(ctx, le16_to_cpu(req->req_type)); + if (ctx->sentinel != sentinel) { + /* can only be due to software bug, be loud */ + netdev_err(bp->dev, "HWRM sentinel mismatch, req_type = %u\n", + (u32)le16_to_cpu(req->req_type)); + dump_stack(); + return NULL; + } + + return ctx; +} + +/** + * hwrm_req_timeout() - Set the completion timeout for the request. + * @bp: The driver context. + * @req: The request for which to set the timeout. + * @timeout: The timeout in milliseconds. + * + * Set the timeout associated with the request for subsequent calls to + * hwrm_req_send(). Some requests are long running and require a different + * timeout than the default. + */ +void hwrm_req_timeout(struct bnxt *bp, void *req, unsigned int timeout) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (ctx) + ctx->timeout = timeout; +} + +/** + * hwrm_req_alloc_flags() - Sets GFP allocation flags for slices. + * @bp: The driver context. + * @req: The request for which calls to hwrm_req_dma_slice() will have altered + * allocation flags. + * @gfp: A bitmask of GFP flags. These flags are passed to + * dma_alloc_coherent() whenever it is used to allocate backing memory + * for slices. Note that calls to hwrm_req_dma_slice() will not always + * result in new allocations; however, memory suballocated from the + * request buffer is already __GFP_ZERO. + * + * Sets the GFP allocation flags associated with the request for subsequent + * calls to hwrm_req_dma_slice(). This can be useful for specifying __GFP_ZERO + * for slice allocations. + */ +void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t gfp) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (ctx) + ctx->gfp = gfp; +} + +/** + * hwrm_req_replace() - Replace request data. + * @bp: The driver context. + * @req: The request to modify.
A call to hwrm_req_replace() is conceptually + * an assignment of new_req to req. Subsequent calls to HWRM API functions, + * such as hwrm_req_send(), should thus use req and not new_req (in fact, + * calls to HWRM API functions will fail if non-managed request objects + * are passed). + * @len: The length of new_req. + * @new_req: The pre-built request to copy or reference. + * + * Replaces the request data in req with that of new_req. This is useful in + * scenarios where a request object has already been constructed by a third + * party prior to creating a resource managed request using hwrm_req_init(). + * Depending on the length, hwrm_req_replace() will either copy the new + * request data into the DMA memory allocated for req, or it will simply + * reference the new request and use it in lieu of req during subsequent + * calls to hwrm_req_send(). The resource management is associated with + * req and is independent of and does not apply to new_req. The caller must + * ensure that the lifetime of new_req is at least as long as that of req. + * Any slices that may have been associated with the original request are + * released. + * + * Return: zero on success, negative error code otherwise: + * E2BIG: Request is too large. + * EINVAL: Invalid request to modify. + */ +int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + struct input *internal_req = req; + u16 req_type; + + if (!ctx) + return -EINVAL; + + if (len > BNXT_HWRM_CTX_OFFSET) + return -E2BIG; + + /* free any existing slices */ + ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET; + if (ctx->slice_addr) { + dma_free_coherent(&bp->pdev->dev, ctx->slice_size, + ctx->slice_addr, ctx->slice_handle); + ctx->slice_addr = NULL; + } + ctx->gfp = GFP_KERNEL; + + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || len > BNXT_HWRM_MAX_REQ_LEN) { + memcpy(internal_req, new_req, len); + } else { + internal_req->req_type = ((struct input *)new_req)->req_type; + ctx->req = new_req; + } + + ctx->req_len = len; + ctx->req->resp_addr = cpu_to_le64(ctx->dma_handle + + BNXT_HWRM_RESP_OFFSET); + + /* update sentinel for potentially new request type */ + req_type = le16_to_cpu(internal_req->req_type); + ctx->sentinel = hwrm_calc_sentinel(ctx, req_type); + + return 0; +} + +/** + * hwrm_req_flags() - Set non-internal flags of the ctx + * @bp: The driver context. + * @req: The request containing the HWRM command + * @flags: ctx flags that don't have BNXT_HWRM_INTERNAL_FLAG set + * + * ctx flags can be used by the callers to instruct how the subsequent + * hwrm_req_send() should behave. Example: callers can use hwrm_req_flags() + * with BNXT_HWRM_CTX_SILENT to omit kernel prints of errors of hwrm_req_send() + * or with BNXT_HWRM_FULL_WAIT to force hwrm_req_send() to wait for the full + * timeout even if the FW is not responding. + * This generic function can be used to set any flag that is not an internal + * flag of the HWRM module. + */ +void hwrm_req_flags(struct bnxt *bp, void *req, enum bnxt_hwrm_ctx_flags flags) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (ctx) + ctx->flags |= (flags & HWRM_API_FLAGS); +} + +/** + * hwrm_req_hold() - Claim ownership of the request's resources. + * @bp: The driver context. + * @req: A pointer to the request to own. The request will no longer be + * consumed by calls to hwrm_req_send(). + * + * Take ownership of the request.
Ownership places responsibility on the + * caller to free the resources associated with the request via a call to + * hwrm_req_drop(). The caller taking ownership implies that a subsequent + * call to hwrm_req_send() will not consume the request (ie. sending will + * not free the associated resources if the request is owned by the caller). + * Taking ownership returns a reference to the response. Retaining and + * accessing the response data is the most common reason to take ownership + * of the request. Ownership can also be acquired in order to reuse the same + * request object across multiple invocations of hwrm_req_send(). + * + * Return: A pointer to the response object. + * + * The resources associated with the response will remain available to the + * caller until ownership of the request is relinquished via a call to + * hwrm_req_drop(). It is not possible for hwrm_req_hold() to return NULL if + * a valid request is provided. A returned NULL value would imply a driver + * bug and the implementation will complain loudly in the logs to aid in + * detection. It should not be necessary to check the result for NULL. + */ +void *hwrm_req_hold(struct bnxt *bp, void *req) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + struct input *input = (struct input *)req; + + if (!ctx) + return NULL; + + if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED) { + /* can only be due to software bug, be loud */ + netdev_err(bp->dev, "HWRM context already owned, req_type = %u\n", + (u32)le16_to_cpu(input->req_type)); + dump_stack(); + return NULL; + } + + ctx->flags |= BNXT_HWRM_INTERNAL_CTX_OWNED; + return ((u8 *)req) + BNXT_HWRM_RESP_OFFSET; +} + +static void __hwrm_ctx_drop(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) +{ + void *addr = ((u8 *)ctx) - BNXT_HWRM_CTX_OFFSET; + dma_addr_t dma_handle = ctx->dma_handle; /* save before invalidate */ + + /* unmap any auxiliary DMA slice */ + if (ctx->slice_addr) + dma_free_coherent(&bp->pdev->dev, ctx->slice_size, + ctx->slice_addr, ctx->slice_handle); + + /* invalidate, ensure ownership, sentinel and dma_handle are cleared */ + memset(ctx, 0, sizeof(struct bnxt_hwrm_ctx)); + + /* return the buffer to the DMA pool */ + if (dma_handle) + dma_pool_free(bp->hwrm_dma_pool, addr, dma_handle); +} + +/** + * hwrm_req_drop() - Release all resources associated with the request. + * @bp: The driver context. + * @req: The request to consume, releasing the associated resources. The + * request object, any slices, and its associated response are no + * longer valid. + * + * It is legal to call hwrm_req_drop() on an unowned request, provided it + * has not already been consumed by hwrm_req_send() (for example, to release + * an aborted request). A given request should not be dropped more than once, + * nor should it be dropped after having been consumed by hwrm_req_send(). To + * do so is an error (the context will not be found and a stack trace will be + * rendered in the kernel log). 
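+ * + * For illustration, the hold/send/drop sequence used by converted callers + * such as bnxt_query_force_speeds() looks roughly like this (the response + * field name below is a stand-in): + * + * resp = hwrm_req_hold(bp, req); + * rc = hwrm_req_send(bp, req); + * if (!rc) + * val = le16_to_cpu(resp->some_le16_field); + * hwrm_req_drop(bp, req);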
+ */ +void hwrm_req_drop(struct bnxt *bp, void *req) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (ctx) + __hwrm_ctx_drop(bp, ctx); +} + +static int __hwrm_to_stderr(u32 hwrm_err) +{ + switch (hwrm_err) { + case HWRM_ERR_CODE_SUCCESS: + return 0; + case HWRM_ERR_CODE_RESOURCE_LOCKED: + return -EROFS; + case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED: + return -EACCES; + case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR: + return -ENOSPC; + case HWRM_ERR_CODE_INVALID_PARAMS: + case HWRM_ERR_CODE_INVALID_FLAGS: + case HWRM_ERR_CODE_INVALID_ENABLES: + case HWRM_ERR_CODE_UNSUPPORTED_TLV: + case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR: + return -EINVAL; + case HWRM_ERR_CODE_NO_BUFFER: + return -ENOMEM; + case HWRM_ERR_CODE_HOT_RESET_PROGRESS: + case HWRM_ERR_CODE_BUSY: + return -EAGAIN; + case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: + return -EOPNOTSUPP; + default: + return -EIO; + } +} + +static struct bnxt_hwrm_wait_token * +__hwrm_acquire_token(struct bnxt *bp, enum bnxt_hwrm_chnl dst) +{ + struct bnxt_hwrm_wait_token *token; + + token = kzalloc(sizeof(*token), GFP_KERNEL); + if (!token) + return NULL; + + mutex_lock(&bp->hwrm_cmd_lock); + + token->dst = dst; + token->state = BNXT_HWRM_PENDING; + if (dst == BNXT_HWRM_CHNL_CHIMP) { + token->seq_id = bp->hwrm_cmd_seq++; + hlist_add_head_rcu(&token->node, &bp->hwrm_pending_list); + } else { + token->seq_id = bp->hwrm_cmd_kong_seq++; + } + + return token; +} + +static void +__hwrm_release_token(struct bnxt *bp, struct bnxt_hwrm_wait_token *token) +{ + if (token->dst == BNXT_HWRM_CHNL_CHIMP) { + hlist_del_rcu(&token->node); + kfree_rcu(token, rcu); + } else { + kfree(token); + } + mutex_unlock(&bp->hwrm_cmd_lock); +} + +void +hwrm_update_token(struct bnxt *bp, u16 seq_id, enum bnxt_hwrm_wait_state state) +{ + struct bnxt_hwrm_wait_token *token; + + rcu_read_lock(); + hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) { + if (token->seq_id == seq_id) { + WRITE_ONCE(token->state, state); + rcu_read_unlock(); + return; + } + } + rcu_read_unlock(); + netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); +} + +static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) +{ + u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; + enum bnxt_hwrm_chnl dst = BNXT_HWRM_CHNL_CHIMP; + u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; + struct bnxt_hwrm_wait_token *token = NULL; + struct hwrm_short_input short_input = {0}; + u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; + unsigned int i, timeout, tmo_count; + u32 *data = (u32 *)ctx->req; + u32 msg_len = ctx->req_len; + int rc = -EBUSY; + u32 req_type; + u16 len = 0; + u8 *valid; + + if (ctx->flags & BNXT_HWRM_INTERNAL_RESP_DIRTY) + memset(ctx->resp, 0, PAGE_SIZE); + + req_type = le16_to_cpu(ctx->req->req_type); + if (BNXT_NO_FW_ACCESS(bp) && req_type != HWRM_FUNC_RESET) + goto exit; + + if (msg_len > BNXT_HWRM_MAX_REQ_LEN && + msg_len > bp->hwrm_max_ext_req_len) { + rc = -E2BIG; + goto exit; + } + + if (bnxt_kong_hwrm_message(bp, ctx->req)) { + dst = BNXT_HWRM_CHNL_KONG; + bar_offset = BNXT_GRCPF_REG_KONG_COMM; + doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; + if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) { + netdev_err(bp->dev, "Ring completions not supported for KONG commands, req_type = %d\n", + req_type); + rc = -EINVAL; + goto exit; + } + } + + token = __hwrm_acquire_token(bp, dst); + if (!token) { + rc = -ENOMEM; + goto exit; + } + ctx->req->seq_id = cpu_to_le16(token->seq_id); + + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || + msg_len > BNXT_HWRM_MAX_REQ_LEN) { + 
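/* Short command format: only the hwrm_short_input descriptor is + * written through the communication BAR; its req_addr field points + * the firmware at the full DMA-mapped request buffer. + */ +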
short_input.req_type = ctx->req->req_type; + short_input.signature = + cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD); + short_input.size = cpu_to_le16(msg_len); + short_input.req_addr = cpu_to_le64(ctx->dma_handle); + + data = (u32 *)&short_input; + msg_len = sizeof(short_input); + + max_req_len = BNXT_HWRM_SHORT_REQ_LEN; + } + + /* Ensure any associated DMA buffers are written before doorbell */ + wmb(); + + /* Write request msg to hwrm channel */ + __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4); + + for (i = msg_len; i < max_req_len; i += 4) + writel(0, bp->bar0 + bar_offset + i); + + /* Ring channel doorbell */ + writel(1, bp->bar0 + doorbell_offset); + + if (!pci_is_enabled(bp->pdev)) { + rc = -ENODEV; + goto exit; + } + + /* Limit timeout to an upper limit */ + timeout = min_t(uint, ctx->timeout, HWRM_CMD_MAX_TIMEOUT); + /* convert timeout to usec */ + timeout *= 1000; + + i = 0; + /* Short timeout for the first few iterations: + * number of loops = number of loops for short timeout + + * number of loops for standard timeout. + */ + tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; + timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; + tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); + + if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) { + /* Wait until hwrm response cmpl interrupt is processed */ + while (READ_ONCE(token->state) < BNXT_HWRM_COMPLETE && + i++ < tmo_count) { + /* Abort the wait for completion if the FW health + * check has failed. + */ + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + goto exit; + /* on first few passes, just barely sleep */ + if (i < HWRM_SHORT_TIMEOUT_COUNTER) { + usleep_range(HWRM_SHORT_MIN_TIMEOUT, + HWRM_SHORT_MAX_TIMEOUT); + } else { + if (HWRM_WAIT_MUST_ABORT(bp, ctx)) + break; + usleep_range(HWRM_MIN_TIMEOUT, + HWRM_MAX_TIMEOUT); + } + } + + if (READ_ONCE(token->state) != BNXT_HWRM_COMPLETE) { + if (!(ctx->flags & BNXT_HWRM_CTX_SILENT)) + netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", + le16_to_cpu(ctx->req->req_type)); + goto exit; + } + len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len)); + valid = ((u8 *)ctx->resp) + len - 1; + } else { + __le16 seen_out_of_seq = ctx->req->seq_id; /* will never see */ + int j; + + /* Check if response len is updated */ + for (i = 0; i < tmo_count; i++) { + /* Abort the wait for completion if the FW health + * check has failed. 
+ */ + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + goto exit; + + if (token && + READ_ONCE(token->state) == BNXT_HWRM_DEFERRED) { + __hwrm_release_token(bp, token); + token = NULL; + } + + len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len)); + if (len) { + __le16 resp_seq = READ_ONCE(ctx->resp->seq_id); + + if (resp_seq == ctx->req->seq_id) + break; + if (resp_seq != seen_out_of_seq) { + netdev_warn(bp->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n", + le16_to_cpu(resp_seq), + le16_to_cpu(ctx->req->req_type), + le16_to_cpu(ctx->req->seq_id)); + seen_out_of_seq = resp_seq; + } + } + + /* on first few passes, just barely sleep */ + if (i < HWRM_SHORT_TIMEOUT_COUNTER) { + usleep_range(HWRM_SHORT_MIN_TIMEOUT, + HWRM_SHORT_MAX_TIMEOUT); + } else { + if (HWRM_WAIT_MUST_ABORT(bp, ctx)) + goto timeout_abort; + usleep_range(HWRM_MIN_TIMEOUT, + HWRM_MAX_TIMEOUT); + } + } + + if (i >= tmo_count) { +timeout_abort: + if (!(ctx->flags & BNXT_HWRM_CTX_SILENT)) + netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n", + hwrm_total_timeout(i), + le16_to_cpu(ctx->req->req_type), + le16_to_cpu(ctx->req->seq_id), len); + goto exit; + } + + /* Last byte of resp contains valid bit */ + valid = ((u8 *)ctx->resp) + len - 1; + for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { + /* make sure we read from updated DMA memory */ + dma_rmb(); + if (*valid) + break; + usleep_range(1, 5); + } + + if (j >= HWRM_VALID_BIT_DELAY_USEC) { + if (!(ctx->flags & BNXT_HWRM_CTX_SILENT)) + netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n", + hwrm_total_timeout(i), + le16_to_cpu(ctx->req->req_type), + le16_to_cpu(ctx->req->seq_id), len, + *valid); + goto exit; + } + } + + /* Zero valid bit for compatibility. Valid bit in an older spec + * may become a new field in a newer spec. We must make sure that + * a new field not implemented by old spec will read zero. + */ + *valid = 0; + rc = le16_to_cpu(ctx->resp->error_code); + if (rc && !(ctx->flags & BNXT_HWRM_CTX_SILENT)) { + netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", + le16_to_cpu(ctx->resp->req_type), + le16_to_cpu(ctx->resp->seq_id), rc); + } + rc = __hwrm_to_stderr(rc); +exit: + if (token) + __hwrm_release_token(bp, token); + if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED) + ctx->flags |= BNXT_HWRM_INTERNAL_RESP_DIRTY; + else + __hwrm_ctx_drop(bp, ctx); + return rc; +} + +/** + * hwrm_req_send() - Execute an HWRM command. + * @bp: The driver context. + * @req: A pointer to the request to send. The DMA resources associated with + * the request will be released (ie. the request will be consumed) unless + * ownership of the request has been assumed by the caller via a call to + * hwrm_req_hold(). + * + * Send an HWRM request to the device and wait for a response. The request is + * consumed if it is not owned by the caller. This function will block until + * the request has either completed or timed out due to an error. + * + * Return: A result code. + * + * The result is zero on success, otherwise the negative error code indicates + * one of the following errors: + * E2BIG: The request was too large. + * EBUSY: The firmware is in a fatal state or the request timed out. + * EACCES: HWRM access denied. + * ENOSPC: HWRM resource allocation error. + * EINVAL: Request parameters are invalid. + * ENOMEM: HWRM has no buffers. + * EAGAIN: HWRM busy or reset in progress. + * EOPNOTSUPP: Invalid request type. + * EIO: Any other error. + * Error handling is orthogonal to request ownership.
An unowned request will + * still be consumed on error. If the caller owns the request, then the + * caller is responsible for releasing the resources. Otherwise, + * hwrm_req_send() will always consume the request. + */ +int hwrm_req_send(struct bnxt *bp, void *req) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (!ctx) + return -EINVAL; + + return __hwrm_send(bp, ctx); +} + +/** + * hwrm_req_send_silent() - A silent version of hwrm_req_send(). + * @bp: The driver context. + * @req: The request to send without logging. + * + * The same as hwrm_req_send(), except that the request is silenced by + * setting BNXT_HWRM_CTX_SILENT via hwrm_req_flags() prior to the call. This + * version of the function is provided solely to preserve the legacy API's + * flavor for this functionality. + * + * Return: A result code, see hwrm_req_send(). + */ +int hwrm_req_send_silent(struct bnxt *bp, void *req) +{ + hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT); + return hwrm_req_send(bp, req); +} + +/** + * hwrm_req_dma_slice() - Allocate a slice of DMA mapped memory. + * @bp: The driver context. + * @req: The request for which indirect data will be associated. + * @size: The size of the allocation. + * @dma_handle: The bus address associated with the allocation. The HWRM API + * has no knowledge about the type of the request and so cannot infer how + * the caller intends to use the indirect data. Thus, the caller is + * responsible for configuring the request object appropriately to + * point to the associated indirect memory. Note that the DMA handle has + * the same definition as in dma_alloc_coherent(); the caller is + * responsible for endian conversions via cpu_to_le64() before assigning + * this address. + * + * Allocates DMA mapped memory for indirect data related to a request. The + * lifetime of the DMA resources will be bound to that of the request (ie. + * they will be automatically released when the request is either consumed by + * hwrm_req_send() or dropped by hwrm_req_drop()). Small allocations are + * efficiently suballocated out of the request buffer space, hence the name + * slice, while larger requests are satisfied via an underlying call to + * dma_alloc_coherent(). Multiple suballocations are supported; however, only + * one externally mapped region is. + * + * Return: The kernel virtual address of the DMA mapping.
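+ * + * A sketch of the intended use, mirroring the HWRM_NVM_READ conversion + * earlier in this patch (host_dest_addr is specific to that command; other + * commands name their indirect buffer field differently): + * + * buf = hwrm_req_dma_slice(bp, req, length, &dma_handle); + * if (!buf) { + * hwrm_req_drop(bp, req); + * return -ENOMEM; + * } + * req->host_dest_addr = cpu_to_le64(dma_handle); + * + * Holding the request across the send (hwrm_req_hold()) keeps the slice + * valid while the returned data is copied out.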
+ */ +void * +hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma_handle) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + u8 *end = ((u8 *)req) + BNXT_HWRM_DMA_SIZE; + struct input *input = req; + u8 *addr, *req_addr = req; + u32 max_offset, offset; + + if (!ctx) + return NULL; + + max_offset = BNXT_HWRM_DMA_SIZE - ctx->allocated; + offset = max_offset - size; + offset = ALIGN_DOWN(offset, BNXT_HWRM_DMA_ALIGN); + addr = req_addr + offset; + + if (addr < req_addr + max_offset && req_addr + ctx->req_len <= addr) { + ctx->allocated = end - addr; + *dma_handle = ctx->dma_handle + offset; + return addr; + } + + /* could not suballocate from ctx buffer, try create a new mapping */ + if (ctx->slice_addr) { + /* if one exists, can only be due to software bug, be loud */ + netdev_err(bp->dev, "HWRM refusing to reallocate DMA slice, req_type = %u\n", + (u32)le16_to_cpu(input->req_type)); + dump_stack(); + return NULL; + } + + addr = dma_alloc_coherent(&bp->pdev->dev, size, dma_handle, ctx->gfp); + + if (!addr) + return NULL; + + ctx->slice_addr = addr; + ctx->slice_size = size; + ctx->slice_handle = *dma_handle; + + return addr; +} diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h new file mode 100644 index 000000000000..4d17f0d5363b --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h @@ -0,0 +1,145 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2020 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_HWRM_H +#define BNXT_HWRM_H + +#include "bnxt_hsi.h" + +enum bnxt_hwrm_ctx_flags { + /* Update the HWRM_API_FLAGS right below for any new non-internal bit added here */ + BNXT_HWRM_INTERNAL_CTX_OWNED = BIT(0), /* caller owns the context */ + BNXT_HWRM_INTERNAL_RESP_DIRTY = BIT(1), /* response contains data */ + BNXT_HWRM_CTX_SILENT = BIT(2), /* squelch firmware errors */ + BNXT_HWRM_FULL_WAIT = BIT(3), /* wait for full timeout of HWRM command */ +}; + +#define HWRM_API_FLAGS (BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT) + +struct bnxt_hwrm_ctx { + u64 sentinel; + dma_addr_t dma_handle; + struct output *resp; + struct input *req; + dma_addr_t slice_handle; + void *slice_addr; + u32 slice_size; + u32 req_len; + enum bnxt_hwrm_ctx_flags flags; + unsigned int timeout; + u32 allocated; + gfp_t gfp; +}; + +enum bnxt_hwrm_wait_state { + BNXT_HWRM_PENDING, + BNXT_HWRM_DEFERRED, + BNXT_HWRM_COMPLETE, + BNXT_HWRM_CANCELLED, +}; + +enum bnxt_hwrm_chnl { BNXT_HWRM_CHNL_CHIMP, BNXT_HWRM_CHNL_KONG }; + +struct bnxt_hwrm_wait_token { + struct rcu_head rcu; + struct hlist_node node; + enum bnxt_hwrm_wait_state state; + enum bnxt_hwrm_chnl dst; + u16 seq_id; +}; + +void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s); + +#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len) +#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input) +#define HWRM_CMD_MAX_TIMEOUT 40000 +#define SHORT_HWRM_CMD_TIMEOUT 20 +#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) +#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) +#define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12) +#define BNXT_HWRM_TARGET 0xffff +#define BNXT_HWRM_NO_CMPL_RING -1 +#define BNXT_HWRM_REQ_MAX_SIZE 128 +#define BNXT_HWRM_DMA_SIZE (2 * PAGE_SIZE) /* space for req+resp */ +#define BNXT_HWRM_RESP_RESERVED PAGE_SIZE +#define BNXT_HWRM_RESP_OFFSET 
(BNXT_HWRM_DMA_SIZE - \ + BNXT_HWRM_RESP_RESERVED) +#define BNXT_HWRM_CTX_OFFSET (BNXT_HWRM_RESP_OFFSET - \ + sizeof(struct bnxt_hwrm_ctx)) +#define BNXT_HWRM_DMA_ALIGN 16 +#define BNXT_HWRM_SENTINEL 0xb6e1f68a12e9a7eb /* arbitrary value */ +#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ + BNXT_HWRM_REQ_MAX_SIZE) +#define HWRM_SHORT_MIN_TIMEOUT 3 +#define HWRM_SHORT_MAX_TIMEOUT 10 +#define HWRM_SHORT_TIMEOUT_COUNTER 5 + +#define HWRM_MIN_TIMEOUT 25 +#define HWRM_MAX_TIMEOUT 40 + +#define HWRM_WAIT_MUST_ABORT(bp, ctx) \ + (le16_to_cpu((ctx)->req->req_type) != HWRM_VER_GET && \ + !bnxt_is_fw_healthy(bp)) + +static inline unsigned int hwrm_total_timeout(unsigned int n) +{ + return n <= HWRM_SHORT_TIMEOUT_COUNTER ? n * HWRM_SHORT_MIN_TIMEOUT : + HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + + (n - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT; +} + + +#define HWRM_VALID_BIT_DELAY_USEC 150 + +static inline bool bnxt_cfa_hwrm_message(u16 req_type) +{ + switch (req_type) { + case HWRM_CFA_ENCAP_RECORD_ALLOC: + case HWRM_CFA_ENCAP_RECORD_FREE: + case HWRM_CFA_DECAP_FILTER_ALLOC: + case HWRM_CFA_DECAP_FILTER_FREE: + case HWRM_CFA_EM_FLOW_ALLOC: + case HWRM_CFA_EM_FLOW_FREE: + case HWRM_CFA_EM_FLOW_CFG: + case HWRM_CFA_FLOW_ALLOC: + case HWRM_CFA_FLOW_FREE: + case HWRM_CFA_FLOW_INFO: + case HWRM_CFA_FLOW_FLUSH: + case HWRM_CFA_FLOW_STATS: + case HWRM_CFA_METER_PROFILE_ALLOC: + case HWRM_CFA_METER_PROFILE_FREE: + case HWRM_CFA_METER_PROFILE_CFG: + case HWRM_CFA_METER_INSTANCE_ALLOC: + case HWRM_CFA_METER_INSTANCE_FREE: + return true; + default: + return false; + } +} + +static inline bool bnxt_kong_hwrm_message(struct bnxt *bp, struct input *req) +{ + return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL && + (bnxt_cfa_hwrm_message(le16_to_cpu(req->req_type)) || + le16_to_cpu(req->target_id) == HWRM_TARGET_ID_KONG)); +} + +int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len); +#define hwrm_req_init(bp, req, req_type) \ + __hwrm_req_init((bp), (void **)&(req), (req_type), sizeof(*(req))) +void *hwrm_req_hold(struct bnxt *bp, void *req); +void hwrm_req_drop(struct bnxt *bp, void *req); +void hwrm_req_flags(struct bnxt *bp, void *req, enum bnxt_hwrm_ctx_flags flags); +void hwrm_req_timeout(struct bnxt *bp, void *req, unsigned int timeout); +int hwrm_req_send(struct bnxt *bp, void *req); +int hwrm_req_send_silent(struct bnxt *bp, void *req); +int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len); +void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t flags); +void *hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma); +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index 2fe3c9081f8d..f0aa480799ca 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -18,6 +18,7 @@ #include <linux/ptp_classify.h> #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_ptp.h" int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off) @@ -85,24 +86,28 @@ static void bnxt_ptp_get_current_time(struct bnxt *bp) static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts) { - struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_port_ts_query_input req = {0}; + struct hwrm_port_ts_query_output *resp; + struct hwrm_port_ts_query_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_TS_QUERY, -1, -1); - req.flags = cpu_to_le32(flags); + rc = 
hwrm_req_init(bp, req, HWRM_PORT_TS_QUERY); + if (rc) + return rc; + + req->flags = cpu_to_le32(flags); if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) == PORT_TS_QUERY_REQ_FLAGS_PATH_TX) { - req.enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES); - req.ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid); - req.ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off); - req.ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT); + req->enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES); + req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid); + req->ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off); + req->ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT); } - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + + rc = hwrm_req_send(bp, req); if (!rc) *ts = le64_to_cpu(resp->ptp_msg_ts); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -143,14 +148,17 @@ static int bnxt_ptp_adjfreq(struct ptp_clock_info *ptp_info, s32 ppb) { struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, ptp_info); - struct hwrm_port_mac_cfg_input req = {0}; + struct hwrm_port_mac_cfg_input *req; struct bnxt *bp = ptp->bp; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1); - req.ptp_freq_adj_ppb = cpu_to_le32(ppb); - req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); + if (rc) + return rc; + + req->ptp_freq_adj_ppb = cpu_to_le32(ppb); + req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB); + rc = hwrm_req_send(ptp->bp, req); if (rc) netdev_err(ptp->bp->dev, "ptp adjfreq failed. rc = %d\n", rc); @@ -186,7 +194,7 @@ void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2) static int bnxt_ptp_cfg_pin(struct bnxt *bp, u8 pin, u8 usage) { - struct hwrm_func_ptp_pin_cfg_input req = {0}; + struct hwrm_func_ptp_pin_cfg_input *req; struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; u8 state = usage != BNXT_PPS_PIN_NONE; u8 *pin_state, *pin_usg; @@ -198,18 +206,21 @@ static int bnxt_ptp_cfg_pin(struct bnxt *bp, u8 pin, u8 usage) return -EOPNOTSUPP; } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_PIN_CFG, -1, -1); + rc = hwrm_req_init(ptp->bp, req, HWRM_FUNC_PTP_PIN_CFG); + if (rc) + return rc; + enables = (FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE | FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE) << (pin * 2); - req.enables = cpu_to_le32(enables); + req->enables = cpu_to_le32(enables); - pin_state = &req.pin0_state; - pin_usg = &req.pin0_usage; + pin_state = &req->pin0_state; + pin_usg = &req->pin0_usage; *(pin_state + (pin * 2)) = state; *(pin_usg + (pin * 2)) = usage; - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(ptp->bp, req); if (rc) return rc; @@ -221,12 +232,16 @@ static int bnxt_ptp_cfg_pin(struct bnxt *bp, u8 pin, u8 usage) static int bnxt_ptp_cfg_event(struct bnxt *bp, u8 event) { - struct hwrm_func_ptp_cfg_input req = {0}; + struct hwrm_func_ptp_cfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_CFG, -1, -1); - req.enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT); - req.ptp_pps_event = event; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT); + req->ptp_pps_event = event; + return hwrm_req_send(bp, req); } void 
bnxt_ptp_reapply_pps(struct bnxt *bp) @@ -277,7 +292,7 @@ static int bnxt_get_target_cycles(struct bnxt_ptp_cfg *ptp, u64 target_ns, static int bnxt_ptp_perout_cfg(struct bnxt_ptp_cfg *ptp, struct ptp_clock_request *rq) { - struct hwrm_func_ptp_cfg_input req = {0}; + struct hwrm_func_ptp_cfg_input *req; struct bnxt *bp = ptp->bp; struct timespec64 ts; u64 target_ns, delta; @@ -292,20 +307,22 @@ static int bnxt_ptp_perout_cfg(struct bnxt_ptp_cfg *ptp, if (rc) return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG); + if (rc) + return rc; enables = FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD | FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP | FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE; - req.enables = cpu_to_le16(enables); - req.ptp_pps_event = 0; - req.ptp_freq_adj_dll_source = 0; - req.ptp_freq_adj_dll_phase = 0; - req.ptp_freq_adj_ext_period = cpu_to_le32(NSEC_PER_SEC); - req.ptp_freq_adj_ext_up = 0; - req.ptp_freq_adj_ext_phase_lower = cpu_to_le32(delta); - - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->enables = cpu_to_le16(enables); + req->ptp_pps_event = 0; + req->ptp_freq_adj_dll_source = 0; + req->ptp_freq_adj_dll_phase = 0; + req->ptp_freq_adj_ext_period = cpu_to_le32(NSEC_PER_SEC); + req->ptp_freq_adj_ext_up = 0; + req->ptp_freq_adj_ext_phase_lower = cpu_to_le32(delta); + + return hwrm_req_send(bp, req); } static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info, @@ -362,11 +379,15 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info, static int bnxt_hwrm_ptp_cfg(struct bnxt *bp) { - struct hwrm_port_mac_cfg_input req = {0}; struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct hwrm_port_mac_cfg_input *req; u32 flags = 0; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1); if (ptp->rx_filter) flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE; else @@ -375,11 +396,11 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp) flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE; else flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE; - req.flags = cpu_to_le32(flags); - req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE); - req.rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl); + req->flags = cpu_to_le32(flags); + req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE); + req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) @@ -630,11 +651,10 @@ static int bnxt_ptp_verify(struct ptp_clock_info *ptp_info, unsigned int pin, return -EOPNOTSUPP; } -/* bp->hwrm_cmd_lock held by the caller */ static int bnxt_ptp_pps_init(struct bnxt *bp) { - struct hwrm_func_ptp_pin_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_ptp_pin_qcfg_input req = {0}; + struct hwrm_func_ptp_pin_qcfg_output *resp; + struct hwrm_func_ptp_pin_qcfg_input *req; struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; struct ptp_clock_info *ptp_info; struct bnxt_pps *pps_info; @@ -642,11 +662,16 @@ static int bnxt_ptp_pps_init(struct bnxt *bp) u32 i, rc; /* Query current/default PIN CFG */ - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_PIN_QCFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_PIN_QCFG); + if (rc) + return rc; - rc = _hwrm_send_message(bp, &req, sizeof(req), 
HWRM_CMD_TIMEOUT); - if (rc || !resp->num_pins) + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc || !resp->num_pins) { + hwrm_req_drop(bp, req); return -EOPNOTSUPP; + } ptp_info = &ptp->ptp_info; pps_info = &ptp->pps_info; @@ -655,8 +680,10 @@ static int bnxt_ptp_pps_init(struct bnxt *bp) ptp_info->pin_config = kcalloc(ptp_info->n_pins, sizeof(*ptp_info->pin_config), GFP_KERNEL); - if (!ptp_info->pin_config) + if (!ptp_info->pin_config) { + hwrm_req_drop(bp, req); return -ENOMEM; + } /* Report the TSIO capability to kernel */ pin_usg = &resp->pin0_usage; @@ -674,6 +701,7 @@ static int bnxt_ptp_pps_init(struct bnxt *bp) pps_info->pins[i].usage = *pin_usg; } + hwrm_req_drop(bp, req); /* Only 1 each of ext_ts and per_out pins is available in HW */ ptp_info->n_ext_ts = 1; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 7fa881e1cd80..70d8ca3039dc 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -17,6 +17,7 @@ #include <linux/etherdevice.h> #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_ulp.h" #include "bnxt_sriov.h" #include "bnxt_vfr.h" @@ -26,21 +27,26 @@ static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp, struct bnxt_vf_info *vf, u16 event_id) { - struct hwrm_fwd_async_event_cmpl_input req = {0}; + struct hwrm_fwd_async_event_cmpl_input *req; struct hwrm_async_event_cmpl *async_cmpl; int rc = 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FWD_ASYNC_EVENT_CMPL); + if (rc) + goto exit; + if (vf) - req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid); + req->encap_async_event_target_id = cpu_to_le16(vf->fw_fid); else /* broadcast this async event to all VFs */ - req.encap_async_event_target_id = cpu_to_le16(0xffff); - async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl; + req->encap_async_event_target_id = cpu_to_le16(0xffff); + async_cmpl = + (struct hwrm_async_event_cmpl *)req->encap_async_event_cmpl; async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT); async_cmpl->event_id = cpu_to_le16(event_id); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); +exit: if (rc) netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. 
rc:%d\n", rc); @@ -62,10 +68,10 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) { - struct hwrm_func_cfg_input req = {0}; struct bnxt *bp = netdev_priv(dev); - struct bnxt_vf_info *vf; + struct hwrm_func_cfg_input *req; bool old_setting = false; + struct bnxt_vf_info *vf; u32 func_flags; int rc; @@ -89,36 +95,38 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) /*TODO: if the driver supports VLAN filter on guest VLAN, * the spoof check should also include vlan anti-spoofing */ - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); - req.flags = cpu_to_le32(func_flags); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); if (!rc) { - if (setting) - vf->flags |= BNXT_VF_SPOOFCHK; - else - vf->flags &= ~BNXT_VF_SPOOFCHK; + req->fid = cpu_to_le16(vf->fw_fid); + req->flags = cpu_to_le32(func_flags); + rc = hwrm_req_send(bp, req); + if (!rc) { + if (setting) + vf->flags |= BNXT_VF_SPOOFCHK; + else + vf->flags &= ~BNXT_VF_SPOOFCHK; + } } return rc; } static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf) { - struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); - req.fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) return rc; - } - vf->func_qcfg_flags = le16_to_cpu(resp->flags); - mutex_unlock(&bp->hwrm_cmd_lock); - return 0; + + req->fid = cpu_to_le16(BNXT_PF(bp) ? 
vf->fw_fid : 0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) + vf->func_qcfg_flags = le16_to_cpu(resp->flags); + hwrm_req_drop(bp, req); + return rc; } bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) @@ -132,18 +140,22 @@ bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; + int rc; if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(vf->fw_fid); if (vf->flags & BNXT_VF_TRUST) - req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE); + req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE); else - req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE); + return hwrm_req_send(bp, req); } int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted) @@ -203,8 +215,8 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id, int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) { - struct hwrm_func_cfg_input req = {0}; struct bnxt *bp = netdev_priv(dev); + struct hwrm_func_cfg_input *req; struct bnxt_vf_info *vf; int rc; @@ -220,19 +232,23 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) } vf = &bp->pf.vf[vf_id]; + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; + memcpy(vf->mac_addr, mac, ETH_ALEN); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); - memcpy(req.dflt_mac_addr, mac, ETH_ALEN); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + req->fid = cpu_to_le16(vf->fw_fid); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req->dflt_mac_addr, mac, ETH_ALEN); + return hwrm_req_send(bp, req); } int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos, __be16 vlan_proto) { - struct hwrm_func_cfg_input req = {0}; struct bnxt *bp = netdev_priv(dev); + struct hwrm_func_cfg_input *req; struct bnxt_vf_info *vf; u16 vlan_tag; int rc; @@ -258,21 +274,23 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos, if (vlan_tag == vf->vlan) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); - req.dflt_vlan = cpu_to_le16(vlan_tag); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) - vf->vlan = vlan_tag; + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (!rc) { + req->fid = cpu_to_le16(vf->fw_fid); + req->dflt_vlan = cpu_to_le16(vlan_tag); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); + rc = hwrm_req_send(bp, req); + if (!rc) + vf->vlan = vlan_tag; + } return rc; } int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, int max_tx_rate) { - struct hwrm_func_cfg_input req = {0}; struct bnxt *bp = netdev_priv(dev); + struct hwrm_func_cfg_input *req; struct bnxt_vf_info *vf; u32 pf_link_speed; int rc; @@ -296,16 +314,18 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, } if (min_tx_rate == 
vf->min_tx_rate && max_tx_rate == vf->max_tx_rate) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); - req.max_bw = cpu_to_le32(max_tx_rate); - req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); - req.min_bw = cpu_to_le32(min_tx_rate); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); if (!rc) { - vf->min_tx_rate = min_tx_rate; - vf->max_tx_rate = max_tx_rate; + req->fid = cpu_to_le16(vf->fw_fid); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW | + FUNC_CFG_REQ_ENABLES_MIN_BW); + req->max_bw = cpu_to_le32(max_tx_rate); + req->min_bw = cpu_to_le32(min_tx_rate); + rc = hwrm_req_send(bp, req); + if (!rc) { + vf->min_tx_rate = min_tx_rate; + vf->max_tx_rate = max_tx_rate; + } } return rc; } @@ -358,21 +378,22 @@ static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs) static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs) { - int i, rc = 0; + struct hwrm_func_vf_resc_free_input *req; struct bnxt_pf_info *pf = &bp->pf; - struct hwrm_func_vf_resc_free_input req = {0}; + int i, rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); + hwrm_req_hold(bp, req); for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) { - req.vf_id = cpu_to_le16(i); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->vf_id = cpu_to_le16(i); + rc = hwrm_req_send(bp, req); if (rc) break; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -446,51 +467,55 @@ static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs) static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) { - struct hwrm_func_buf_rgtr_input req = {0}; + struct hwrm_func_buf_rgtr_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_BUF_RGTR); + if (rc) + return rc; - req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages); - req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT); - req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE); - req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]); - req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]); - req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]); - req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]); + req->req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages); + req->req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT); + req->req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE); + req->req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]); + req->req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]); + req->req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]); + req->req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } -/* Caller holds bp->hwrm_cmd_lock mutex lock */ -static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id) +static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; struct bnxt_vf_info *vf; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; vf = &bp->pf.vf[vf_id]; - 
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); + req->fid = cpu_to_le16(vf->fw_fid); if (is_valid_ether_addr(vf->mac_addr)) { - req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); - memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN); + req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req->dflt_mac_addr, vf->mac_addr, ETH_ALEN); } if (vf->vlan) { - req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); - req.dflt_vlan = cpu_to_le16(vf->vlan); + req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); + req->dflt_vlan = cpu_to_le16(vf->vlan); } if (vf->max_tx_rate) { - req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); - req.max_bw = cpu_to_le32(vf->max_tx_rate); -#ifdef HAVE_IFLA_TX_RATE - req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); - req.min_bw = cpu_to_le32(vf->min_tx_rate); -#endif + req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW | + FUNC_CFG_REQ_ENABLES_MIN_BW); + req->max_bw = cpu_to_le32(vf->max_tx_rate); + req->min_bw = cpu_to_le32(vf->min_tx_rate); } if (vf->flags & BNXT_VF_TRUST) - req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE); + req->flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE); - _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } /* Only called by PF to reserve resources for VFs, returns actual number of @@ -498,7 +523,7 @@ static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id) */ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) { - struct hwrm_func_vf_resource_cfg_input req = {0}; + struct hwrm_func_vf_resource_cfg_input *req; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; u16 vf_tx_rings, vf_rx_rings, vf_cp_rings; u16 vf_stat_ctx, vf_vnics, vf_ring_grps; @@ -507,7 +532,9 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) u16 vf_msix = 0; u16 vf_rss; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESOURCE_CFG); + if (rc) + return rc; if (bp->flags & BNXT_FLAG_CHIP_P5) { vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp); @@ -526,21 +553,21 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs; - req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX); + req->min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX); if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) { min = 0; - req.min_rsscos_ctx = cpu_to_le16(min); + req->min_rsscos_ctx = cpu_to_le16(min); } if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL || pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) { - req.min_cmpl_rings = cpu_to_le16(min); - req.min_tx_rings = cpu_to_le16(min); - req.min_rx_rings = cpu_to_le16(min); - req.min_l2_ctxs = cpu_to_le16(min); - req.min_vnics = cpu_to_le16(min); - req.min_stat_ctx = cpu_to_le16(min); + req->min_cmpl_rings = cpu_to_le16(min); + req->min_tx_rings = cpu_to_le16(min); + req->min_rx_rings = cpu_to_le16(min); + req->min_l2_ctxs = cpu_to_le16(min); + req->min_vnics = cpu_to_le16(min); + req->min_stat_ctx = cpu_to_le16(min); if (!(bp->flags & BNXT_FLAG_CHIP_P5)) - req.min_hw_ring_grps = cpu_to_le16(min); + req->min_hw_ring_grps = cpu_to_le16(min); } else { vf_cp_rings /= num_vfs; vf_tx_rings /= num_vfs; @@ -550,56 +577,57 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool 
reset) vf_ring_grps /= num_vfs; vf_rss /= num_vfs; - req.min_cmpl_rings = cpu_to_le16(vf_cp_rings); - req.min_tx_rings = cpu_to_le16(vf_tx_rings); - req.min_rx_rings = cpu_to_le16(vf_rx_rings); - req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); - req.min_vnics = cpu_to_le16(vf_vnics); - req.min_stat_ctx = cpu_to_le16(vf_stat_ctx); - req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps); - req.min_rsscos_ctx = cpu_to_le16(vf_rss); + req->min_cmpl_rings = cpu_to_le16(vf_cp_rings); + req->min_tx_rings = cpu_to_le16(vf_tx_rings); + req->min_rx_rings = cpu_to_le16(vf_rx_rings); + req->min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); + req->min_vnics = cpu_to_le16(vf_vnics); + req->min_stat_ctx = cpu_to_le16(vf_stat_ctx); + req->min_hw_ring_grps = cpu_to_le16(vf_ring_grps); + req->min_rsscos_ctx = cpu_to_le16(vf_rss); } - req.max_cmpl_rings = cpu_to_le16(vf_cp_rings); - req.max_tx_rings = cpu_to_le16(vf_tx_rings); - req.max_rx_rings = cpu_to_le16(vf_rx_rings); - req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); - req.max_vnics = cpu_to_le16(vf_vnics); - req.max_stat_ctx = cpu_to_le16(vf_stat_ctx); - req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps); - req.max_rsscos_ctx = cpu_to_le16(vf_rss); + req->max_cmpl_rings = cpu_to_le16(vf_cp_rings); + req->max_tx_rings = cpu_to_le16(vf_tx_rings); + req->max_rx_rings = cpu_to_le16(vf_rx_rings); + req->max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); + req->max_vnics = cpu_to_le16(vf_vnics); + req->max_stat_ctx = cpu_to_le16(vf_stat_ctx); + req->max_hw_ring_grps = cpu_to_le16(vf_ring_grps); + req->max_rsscos_ctx = cpu_to_le16(vf_rss); if (bp->flags & BNXT_FLAG_CHIP_P5) - req.max_msix = cpu_to_le16(vf_msix / num_vfs); + req->max_msix = cpu_to_le16(vf_msix / num_vfs); - mutex_lock(&bp->hwrm_cmd_lock); + hwrm_req_hold(bp, req); for (i = 0; i < num_vfs; i++) { if (reset) __bnxt_set_vf_params(bp, i); - req.vf_id = cpu_to_le16(pf->first_vf_id + i); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->vf_id = cpu_to_le16(pf->first_vf_id + i); + rc = hwrm_req_send(bp, req); if (rc) break; pf->active_vfs = i + 1; pf->vf[i].fw_fid = pf->first_vf_id + i; } - mutex_unlock(&bp->hwrm_cmd_lock); + if (pf->active_vfs) { u16 n = pf->active_vfs; - hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n; - hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n; - hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) * - n; - hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n; - hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n; - hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n; - hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n; + hw_resc->max_tx_rings -= le16_to_cpu(req->min_tx_rings) * n; + hw_resc->max_rx_rings -= le16_to_cpu(req->min_rx_rings) * n; + hw_resc->max_hw_ring_grps -= + le16_to_cpu(req->min_hw_ring_grps) * n; + hw_resc->max_cp_rings -= le16_to_cpu(req->min_cmpl_rings) * n; + hw_resc->max_rsscos_ctxs -= + le16_to_cpu(req->min_rsscos_ctx) * n; + hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n; + hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n; if (bp->flags & BNXT_FLAG_CHIP_P5) hw_resc->max_irqs -= vf_msix * n; rc = pf->active_vfs; } + hwrm_req_drop(bp, req); return rc; } @@ -608,15 +636,18 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) */ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) { - u32 rc = 0, mtu, i; u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - 
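When one request is reissued in a loop, as in the per-VF resource functions here, the conversion takes a single hwrm_req_hold() up front, rewrites only the per-iteration fields, and ends with one hwrm_req_drop(); reading back req->min_* after the loop is safe only because the hold keeps the buffer alive. A condensed restatement of that shape, with first_vf_id and num_vfs standing in for the values used in the hunk:

    hwrm_req_hold(bp, req);         /* pin the request across repeated sends */
    for (i = 0; i < num_vfs; i++) {
            req->vf_id = cpu_to_le16(first_vf_id + i);
            rc = hwrm_req_send(bp, req);    /* a held request is not released */
            if (rc)
                    break;
    }
    hwrm_req_drop(bp, req);         /* single release, on success or failure */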
struct hwrm_func_cfg_input req = {0}; struct bnxt_pf_info *pf = &bp->pf; + struct hwrm_func_cfg_input *req; int total_vf_tx_rings = 0; u16 vf_ring_grps; + u32 mtu, i; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; /* Remaining rings are distributed equally amongs VF's for now */ vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs; @@ -632,50 +663,49 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs; vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU | - FUNC_CFG_REQ_ENABLES_MRU | - FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS | - FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS | - FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | - FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS | - FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | - FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS | - FUNC_CFG_REQ_ENABLES_NUM_VNICS | - FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU | + FUNC_CFG_REQ_ENABLES_MRU | + FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS | + FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS | + FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS | + FUNC_CFG_REQ_ENABLES_NUM_VNICS | + FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS); mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; - req.mru = cpu_to_le16(mtu); - req.admin_mtu = cpu_to_le16(mtu); + req->mru = cpu_to_le16(mtu); + req->admin_mtu = cpu_to_le16(mtu); - req.num_rsscos_ctxs = cpu_to_le16(1); - req.num_cmpl_rings = cpu_to_le16(vf_cp_rings); - req.num_tx_rings = cpu_to_le16(vf_tx_rings); - req.num_rx_rings = cpu_to_le16(vf_rx_rings); - req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps); - req.num_l2_ctxs = cpu_to_le16(4); + req->num_rsscos_ctxs = cpu_to_le16(1); + req->num_cmpl_rings = cpu_to_le16(vf_cp_rings); + req->num_tx_rings = cpu_to_le16(vf_tx_rings); + req->num_rx_rings = cpu_to_le16(vf_rx_rings); + req->num_hw_ring_grps = cpu_to_le16(vf_ring_grps); + req->num_l2_ctxs = cpu_to_le16(4); - req.num_vnics = cpu_to_le16(vf_vnics); + req->num_vnics = cpu_to_le16(vf_vnics); /* FIXME spec currently uses 1 bit for stats ctx */ - req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx); + req->num_stat_ctxs = cpu_to_le16(vf_stat_ctx); - mutex_lock(&bp->hwrm_cmd_lock); + hwrm_req_hold(bp, req); for (i = 0; i < num_vfs; i++) { int vf_tx_rsvd = vf_tx_rings; - req.fid = cpu_to_le16(pf->first_vf_id + i); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->fid = cpu_to_le16(pf->first_vf_id + i); + rc = hwrm_req_send(bp, req); if (rc) break; pf->active_vfs = i + 1; - pf->vf[i].fw_fid = le16_to_cpu(req.fid); + pf->vf[i].fw_fid = le16_to_cpu(req->fid); rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid, &vf_tx_rsvd); if (rc) break; total_vf_tx_rings += vf_tx_rsvd; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); if (pf->active_vfs) { hw_resc->max_tx_rings -= total_vf_tx_rings; hw_resc->max_rx_rings -= vf_rx_rings * num_vfs; @@ -893,23 +923,24 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, void *encap_resp, __le64 encap_resp_addr, __le16 encap_resp_cpr, u32 msg_size) { - int rc = 0; - struct hwrm_fwd_resp_input req = {0}; + struct hwrm_fwd_resp_input *req; + int rc; if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) return -EINVAL; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1); - - /* Set the new target id */ - req.target_id = 
cpu_to_le16(vf->fw_fid); - req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); - req.encap_resp_len = cpu_to_le16(msg_size); - req.encap_resp_addr = encap_resp_addr; - req.encap_resp_cmpl_ring = encap_resp_cpr; - memcpy(req.encap_resp, encap_resp, msg_size); - - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FWD_RESP); + if (!rc) { + /* Set the new target id */ + req->target_id = cpu_to_le16(vf->fw_fid); + req->encap_resp_target_id = cpu_to_le16(vf->fw_fid); + req->encap_resp_len = cpu_to_le16(msg_size); + req->encap_resp_addr = encap_resp_addr; + req->encap_resp_cmpl_ring = encap_resp_cpr; + memcpy(req->encap_resp, encap_resp, msg_size); + + rc = hwrm_req_send(bp, req); + } if (rc) netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc); return rc; @@ -918,19 +949,21 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, u32 msg_size) { - int rc = 0; - struct hwrm_reject_fwd_resp_input req = {0}; + struct hwrm_reject_fwd_resp_input *req; + int rc; if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size)) return -EINVAL; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1); - /* Set the new target id */ - req.target_id = cpu_to_le16(vf->fw_fid); - req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); - memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); + rc = hwrm_req_init(bp, req, HWRM_REJECT_FWD_RESP); + if (!rc) { + /* Set the new target id */ + req->target_id = cpu_to_le16(vf->fw_fid); + req->encap_resp_target_id = cpu_to_le16(vf->fw_fid); + memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); + } if (rc) netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc); return rc; @@ -939,19 +972,21 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, u32 msg_size) { - int rc = 0; - struct hwrm_exec_fwd_resp_input req = {0}; + struct hwrm_exec_fwd_resp_input *req; + int rc; if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size)) return -EINVAL; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1); - /* Set the new target id */ - req.target_id = cpu_to_le16(vf->fw_fid); - req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); - memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); + rc = hwrm_req_init(bp, req, HWRM_EXEC_FWD_RESP); + if (!rc) { + /* Set the new target id */ + req->target_id = cpu_to_le16(vf->fw_fid); + req->encap_resp_target_id = cpu_to_le16(vf->fw_fid); + memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); + } if (rc) netdev_err(bp->dev, "hwrm_exec_fw_resp failed. 
rc:%d\n", rc); return rc; @@ -1031,10 +1066,10 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) phy_qcfg_req = (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr; - mutex_lock(&bp->hwrm_cmd_lock); + mutex_lock(&bp->link_lock); memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp, sizeof(phy_qcfg_resp)); - mutex_unlock(&bp->hwrm_cmd_lock); + mutex_unlock(&bp->link_lock); phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp)); phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id; phy_qcfg_resp.valid = 1; @@ -1118,7 +1153,7 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) { - struct hwrm_func_vf_cfg_input req = {0}; + struct hwrm_func_vf_cfg_input *req; int rc = 0; if (!BNXT_VF(bp)) @@ -1129,10 +1164,16 @@ int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) rc = -EADDRNOTAVAIL; goto mac_done; } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); - req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR); - memcpy(req.dflt_mac_addr, mac, ETH_ALEN); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG); + if (rc) + goto mac_done; + + req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req->dflt_mac_addr, mac, ETH_ALEN); + if (!strict) + hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT); + rc = hwrm_req_send(bp, req); mac_done: if (rc && strict) { rc = -EADDRNOTAVAIL; @@ -1145,15 +1186,17 @@ mac_done: void bnxt_update_vf_mac(struct bnxt *bp) { - struct hwrm_func_qcaps_input req = {0}; - struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_qcaps_output *resp; + struct hwrm_func_qcaps_input *req; bool inform_pf = false; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); - req.fid = cpu_to_le16(0xffff); + if (hwrm_req_init(bp, req, HWRM_FUNC_QCAPS)) + return; + + req->fid = cpu_to_le16(0xffff); - mutex_lock(&bp->hwrm_cmd_lock); - if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) + resp = hwrm_req_hold(bp, req); + if (hwrm_req_send(bp, req)) goto update_vf_mac_exit; /* Store MAC address from the firmware. 
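bnxt_approve_mac() above shows the quiet-probe variant: a request that may legitimately be refused by firmware is tagged with BNXT_HWRM_CTX_SILENT so the core skips the error log, while strict callers still act on rc. Condensed from the hunk:

    rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
    if (rc)
            goto mac_done;

    req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
    memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
    if (!strict)
            /* Failure is expected here; suppress the core's error log. */
            hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
    rc = hwrm_req_send(bp, req);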
There are 2 cases: @@ -1176,7 +1219,7 @@ void bnxt_update_vf_mac(struct bnxt *bp) if (is_valid_ether_addr(bp->vf.mac_addr)) memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); update_vf_mac_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); if (inform_pf) bnxt_approve_mac(bp, bp->dev->dev_addr, false); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 5e4429b14b8c..46fae1acbeed 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -22,6 +22,7 @@ #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_sriov.h" #include "bnxt_tc.h" #include "bnxt_vfr.h" @@ -502,16 +503,18 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, struct bnxt_tc_flow_node *flow_node) { - struct hwrm_cfa_flow_free_input req = { 0 }; + struct hwrm_cfa_flow_free_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1); - if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) - req.ext_flow_handle = flow_node->ext_flow_handle; - else - req.flow_handle = flow_node->flow_handle; + rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_FREE); + if (!rc) { + if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) + req->ext_flow_handle = flow_node->ext_flow_handle; + else + req->flow_handle = flow_node->flow_handle; - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); + } if (rc) netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); @@ -587,20 +590,22 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, struct bnxt_tc_actions *actions = &flow->actions; struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask; struct bnxt_tc_l3_key *l3_key = &flow->l3_key; - struct hwrm_cfa_flow_alloc_input req = { 0 }; struct hwrm_cfa_flow_alloc_output *resp; + struct hwrm_cfa_flow_alloc_input *req; u16 flow_flags = 0, action_flags = 0; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_ALLOC); + if (rc) + return rc; - req.src_fid = cpu_to_le16(flow->src_fid); - req.ref_flow_handle = ref_flow_handle; + req->src_fid = cpu_to_le16(flow->src_fid); + req->ref_flow_handle = ref_flow_handle; if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) { - memcpy(req.l2_rewrite_dmac, actions->l2_rewrite_dmac, + memcpy(req->l2_rewrite_dmac, actions->l2_rewrite_dmac, ETH_ALEN); - memcpy(req.l2_rewrite_smac, actions->l2_rewrite_smac, + memcpy(req->l2_rewrite_smac, actions->l2_rewrite_smac, ETH_ALEN); action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE; @@ -615,71 +620,71 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC; /* L3 source rewrite */ - req.nat_ip_address[0] = + req->nat_ip_address[0] = actions->nat.l3.ipv4.saddr.s_addr; /* L4 source port */ if (actions->nat.l4.ports.sport) - req.nat_port = + req->nat_port = actions->nat.l4.ports.sport; } else { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST; /* L3 destination rewrite */ - req.nat_ip_address[0] = + req->nat_ip_address[0] = actions->nat.l3.ipv4.daddr.s_addr; /* L4 destination port */ if (actions->nat.l4.ports.dport) - req.nat_port = + req->nat_port = actions->nat.l4.ports.dport; } netdev_dbg(bp->dev, - "req.nat_ip_address: %pI4 src_xlate: %d req.nat_port: %x\n", - req.nat_ip_address, actions->nat.src_xlate, - req.nat_port); + "req->nat_ip_address: 
%pI4 src_xlate: %d req->nat_port: %x\n", + req->nat_ip_address, actions->nat.src_xlate, + req->nat_port); } else { if (actions->nat.src_xlate) { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC; /* L3 source rewrite */ - memcpy(req.nat_ip_address, + memcpy(req->nat_ip_address, actions->nat.l3.ipv6.saddr.s6_addr32, - sizeof(req.nat_ip_address)); + sizeof(req->nat_ip_address)); /* L4 source port */ if (actions->nat.l4.ports.sport) - req.nat_port = + req->nat_port = actions->nat.l4.ports.sport; } else { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST; /* L3 destination rewrite */ - memcpy(req.nat_ip_address, + memcpy(req->nat_ip_address, actions->nat.l3.ipv6.daddr.s6_addr32, - sizeof(req.nat_ip_address)); + sizeof(req->nat_ip_address)); /* L4 destination port */ if (actions->nat.l4.ports.dport) - req.nat_port = + req->nat_port = actions->nat.l4.ports.dport; } netdev_dbg(bp->dev, - "req.nat_ip_address: %pI6 src_xlate: %d req.nat_port: %x\n", - req.nat_ip_address, actions->nat.src_xlate, - req.nat_port); + "req->nat_ip_address: %pI6 src_xlate: %d req->nat_port: %x\n", + req->nat_ip_address, actions->nat.src_xlate, + req->nat_port); } } if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP || actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) { - req.tunnel_handle = tunnel_handle; + req->tunnel_handle = tunnel_handle; flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL; action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL; } - req.ethertype = flow->l2_key.ether_type; - req.ip_proto = flow->l4_key.ip_proto; + req->ethertype = flow->l2_key.ether_type; + req->ip_proto = flow->l4_key.ip_proto; if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) { - memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN); - memcpy(req.smac, flow->l2_key.smac, ETH_ALEN); + memcpy(req->dmac, flow->l2_key.dmac, ETH_ALEN); + memcpy(req->smac, flow->l2_key.smac, ETH_ALEN); } if (flow->l2_key.num_vlans > 0) { @@ -688,7 +693,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, * in outer_vlan_tci when num_vlans is 1 (which is * always the case in TC.) 
*/ - req.outer_vlan_tci = flow->l2_key.inner_vlan_tci; + req->outer_vlan_tci = flow->l2_key.inner_vlan_tci; } /* If all IP and L4 fields are wildcarded then this is an L2 flow */ @@ -701,68 +706,67 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6; if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) { - req.ip_dst[0] = l3_key->ipv4.daddr.s_addr; - req.ip_dst_mask_len = + req->ip_dst[0] = l3_key->ipv4.daddr.s_addr; + req->ip_dst_mask_len = inet_mask_len(l3_mask->ipv4.daddr.s_addr); - req.ip_src[0] = l3_key->ipv4.saddr.s_addr; - req.ip_src_mask_len = + req->ip_src[0] = l3_key->ipv4.saddr.s_addr; + req->ip_src_mask_len = inet_mask_len(l3_mask->ipv4.saddr.s_addr); } else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) { - memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32, - sizeof(req.ip_dst)); - req.ip_dst_mask_len = + memcpy(req->ip_dst, l3_key->ipv6.daddr.s6_addr32, + sizeof(req->ip_dst)); + req->ip_dst_mask_len = ipv6_mask_len(&l3_mask->ipv6.daddr); - memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32, - sizeof(req.ip_src)); - req.ip_src_mask_len = + memcpy(req->ip_src, l3_key->ipv6.saddr.s6_addr32, + sizeof(req->ip_src)); + req->ip_src_mask_len = ipv6_mask_len(&l3_mask->ipv6.saddr); } } if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) { - req.l4_src_port = flow->l4_key.ports.sport; - req.l4_src_port_mask = flow->l4_mask.ports.sport; - req.l4_dst_port = flow->l4_key.ports.dport; - req.l4_dst_port_mask = flow->l4_mask.ports.dport; + req->l4_src_port = flow->l4_key.ports.sport; + req->l4_src_port_mask = flow->l4_mask.ports.sport; + req->l4_dst_port = flow->l4_key.ports.dport; + req->l4_dst_port_mask = flow->l4_mask.ports.dport; } else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) { /* l4 ports serve as type/code when ip_proto is ICMP */ - req.l4_src_port = htons(flow->l4_key.icmp.type); - req.l4_src_port_mask = htons(flow->l4_mask.icmp.type); - req.l4_dst_port = htons(flow->l4_key.icmp.code); - req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code); + req->l4_src_port = htons(flow->l4_key.icmp.type); + req->l4_src_port_mask = htons(flow->l4_mask.icmp.type); + req->l4_dst_port = htons(flow->l4_key.icmp.code); + req->l4_dst_port_mask = htons(flow->l4_mask.icmp.code); } - req.flags = cpu_to_le16(flow_flags); + req->flags = cpu_to_le16(flow_flags); if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP; } else { if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD; - req.dst_fid = cpu_to_le16(actions->dst_fid); + req->dst_fid = cpu_to_le16(actions->dst_fid); } if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE; - req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid; - req.l2_rewrite_vlan_tci = actions->push_vlan_tci; - memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN); - memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN); + req->l2_rewrite_vlan_tpid = actions->push_vlan_tpid; + req->l2_rewrite_vlan_tci = actions->push_vlan_tci; + memcpy(&req->l2_rewrite_dmac, &req->dmac, ETH_ALEN); + memcpy(&req->l2_rewrite_smac, &req->smac, ETH_ALEN); } if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE; /* Rewrite config with tpid = 0 implies vlan pop */ - req.l2_rewrite_vlan_tpid = 0; - memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN); - memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN); + req->l2_rewrite_vlan_tpid = 0; + 
memcpy(&req->l2_rewrite_dmac, &req->dmac, ETH_ALEN); + memcpy(&req->l2_rewrite_smac, &req->smac, ETH_ALEN); } } - req.action_flags = cpu_to_le16(action_flags); + req->action_flags = cpu_to_le16(action_flags); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (!rc) { - resp = bnxt_get_hwrm_resp_addr(bp, &req); /* CFA_FLOW_ALLOC response interpretation: * fw with fw with * 16-bit 64-bit @@ -778,7 +782,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, flow_node->flow_id = resp->flow_id; } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -788,67 +792,69 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, __le32 ref_decap_handle, __le32 *decap_filter_handle) { - struct hwrm_cfa_decap_filter_alloc_input req = { 0 }; struct hwrm_cfa_decap_filter_alloc_output *resp; struct ip_tunnel_key *tun_key = &flow->tun_key; + struct hwrm_cfa_decap_filter_alloc_input *req; u32 enables = 0; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_CFA_DECAP_FILTER_ALLOC); + if (rc) + goto exit; - req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL); + req->flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL); enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE | CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL; - req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; - req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP; + req->tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; + req->ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP; if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) { enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID; /* tunnel_id is wrongly defined in hsi defn. as __le32 */ - req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id); + req->tunnel_id = tunnel_id_to_key32(tun_key->tun_id); } if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) { enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR; - ether_addr_copy(req.dst_macaddr, l2_info->dmac); + ether_addr_copy(req->dst_macaddr, l2_info->dmac); } if (l2_info->num_vlans) { enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID; - req.t_ivlan_vid = l2_info->inner_vlan_tci; + req->t_ivlan_vid = l2_info->inner_vlan_tci; } enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE; - req.ethertype = htons(ETH_P_IP); + req->ethertype = htons(ETH_P_IP); if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) { enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE; - req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; - req.dst_ipaddr[0] = tun_key->u.ipv4.dst; - req.src_ipaddr[0] = tun_key->u.ipv4.src; + req->ip_addr_type = + CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; + req->dst_ipaddr[0] = tun_key->u.ipv4.dst; + req->src_ipaddr[0] = tun_key->u.ipv4.src; } if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) { enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT; - req.dst_port = tun_key->tp_dst; + req->dst_port = tun_key->tp_dst; } /* Eventhough the decap_handle returned by hwrm_cfa_decap_filter_alloc * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16. 
*/ - req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle; - req.enables = cpu_to_le32(enables); + req->l2_ctxt_ref_id = (__force __le16)ref_decap_handle; + req->enables = cpu_to_le32(enables); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) { - resp = bnxt_get_hwrm_resp_addr(bp, &req); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (!rc) *decap_filter_handle = resp->decap_filter_id; - } else { + hwrm_req_drop(bp, req); +exit: + if (rc) netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); - } - mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -856,13 +862,14 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, static int hwrm_cfa_decap_filter_free(struct bnxt *bp, __le32 decap_filter_handle) { - struct hwrm_cfa_decap_filter_free_input req = { 0 }; + struct hwrm_cfa_decap_filter_free_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1); - req.decap_filter_id = decap_filter_handle; - - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_CFA_DECAP_FILTER_FREE); + if (!rc) { + req->decap_filter_id = decap_filter_handle; + rc = hwrm_req_send(bp, req); + } if (rc) netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); @@ -874,18 +881,18 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, struct bnxt_tc_l2_key *l2_info, __le32 *encap_record_handle) { - struct hwrm_cfa_encap_record_alloc_input req = { 0 }; struct hwrm_cfa_encap_record_alloc_output *resp; - struct hwrm_cfa_encap_data_vxlan *encap = - (struct hwrm_cfa_encap_data_vxlan *)&req.encap_data; - struct hwrm_vxlan_ipv4_hdr *encap_ipv4 = - (struct hwrm_vxlan_ipv4_hdr *)encap->l3; + struct hwrm_cfa_encap_record_alloc_input *req; + struct hwrm_cfa_encap_data_vxlan *encap; + struct hwrm_vxlan_ipv4_hdr *encap_ipv4; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1); - - req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN; + rc = hwrm_req_init(bp, req, HWRM_CFA_ENCAP_RECORD_ALLOC); + if (rc) + goto exit; + encap = (struct hwrm_cfa_encap_data_vxlan *)&req->encap_data; + req->encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN; ether_addr_copy(encap->dst_mac_addr, l2_info->dmac); ether_addr_copy(encap->src_mac_addr, l2_info->smac); if (l2_info->num_vlans) { @@ -894,6 +901,7 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, encap->ovlan_tpid = l2_info->inner_vlan_tpid; } + encap_ipv4 = (struct hwrm_vxlan_ipv4_hdr *)encap->l3; encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT; encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT; encap_ipv4->ttl = encap_key->ttl; @@ -905,15 +913,14 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, encap->dst_port = encap_key->tp_dst; encap->vni = tunnel_id_to_key32(encap_key->tun_id); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) { - resp = bnxt_get_hwrm_resp_addr(bp, &req); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (!rc) *encap_record_handle = resp->encap_record_id; - } else { + hwrm_req_drop(bp, req); +exit: + if (rc) netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); - } - mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -921,13 +928,14 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, static int hwrm_cfa_encap_record_free(struct bnxt *bp, __le32 encap_record_handle) { - struct hwrm_cfa_encap_record_free_input 
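The *_alloc conversions in bnxt_tc.c settle on one error-handling shape: hold the request to obtain a stable response pointer, send with hwrm_req_send_silent() since these allocations can fail without it being a driver error, copy the returned id out while the hold is live, drop, and funnel every failure through a single exit label for the log message. Restated with the names from the decap-filter hunk:

    resp = hwrm_req_hold(bp, req);          /* resp stays valid until drop */
    rc = hwrm_req_send_silent(bp, req);     /* no core error logging */
    if (!rc)
            *decap_filter_handle = resp->decap_filter_id;
    hwrm_req_drop(bp, req);
exit:
    if (rc)
            netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);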
req = { 0 }; + struct hwrm_cfa_encap_record_free_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1); - req.encap_record_id = encap_record_handle; - - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_CFA_ENCAP_RECORD_FREE); + if (!rc) { + req->encap_record_id = encap_record_handle; + rc = hwrm_req_send(bp, req); + } if (rc) netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); @@ -1673,14 +1681,20 @@ static int bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, struct bnxt_tc_stats_batch stats_batch[]) { - struct hwrm_cfa_flow_stats_input req = { 0 }; struct hwrm_cfa_flow_stats_output *resp; - __le16 *req_flow_handles = &req.flow_handle_0; - __le32 *req_flow_ids = &req.flow_id_0; + struct hwrm_cfa_flow_stats_input *req; + __le16 *req_flow_handles; + __le32 *req_flow_ids; int rc, i; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1); - req.num_flows = cpu_to_le16(num_flows); + rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_STATS); + if (rc) + goto exit; + + req_flow_handles = &req->flow_handle_0; + req_flow_ids = &req->flow_id_0; + + req->num_flows = cpu_to_le16(num_flows); for (i = 0; i < num_flows; i++) { struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node; @@ -1688,13 +1702,12 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, &req_flow_handles[i], &req_flow_ids[i]); } - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { __le64 *resp_packets; __le64 *resp_bytes; - resp = bnxt_get_hwrm_resp_addr(bp, &req); resp_packets = &resp->packet_0; resp_bytes = &resp->byte_0; @@ -1704,10 +1717,11 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, stats_batch[i].hw_stats.bytes = le64_to_cpu(resp_bytes[i]); } - } else { - netdev_info(bp->dev, "error rc=%d\n", rc); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); +exit: + if (rc) + netdev_info(bp->dev, "error rc=%d\n", rc); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 187ff643ad2a..fde0c3e8ac57 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -22,6 +22,7 @@ #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_ulp.h" static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id, @@ -237,27 +238,33 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id, { struct net_device *dev = edev->net; struct bnxt *bp = netdev_priv(dev); + struct output *resp; struct input *req; + u32 resp_len; int rc; if (ulp_id != BNXT_ROCE_ULP && bp->fw_reset_state) return -EBUSY; - mutex_lock(&bp->hwrm_cmd_lock); - req = fw_msg->msg; - req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); - rc = _hwrm_send_message(bp, fw_msg->msg, fw_msg->msg_len, - fw_msg->timeout); - if (!rc) { - struct output *resp = bp->hwrm_cmd_resp_addr; - u32 len = le16_to_cpu(resp->resp_len); + rc = hwrm_req_init(bp, req, 0 /* don't care */); + if (rc) + return rc; - if (fw_msg->resp_max_len < len) - len = fw_msg->resp_max_len; + rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len); + if (rc) + return rc; - memcpy(fw_msg->resp, resp, len); + hwrm_req_timeout(bp, req, fw_msg->timeout); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + resp_len = le16_to_cpu(resp->resp_len); + if (resp_len) { + if 
(fw_msg->resp_max_len < resp_len) + resp_len = fw_msg->resp_max_len; + + memcpy(fw_msg->resp, resp, resp_len); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index dd66302343a2..9401936b74fa 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -15,6 +15,7 @@ #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_vfr.h" #include "bnxt_devlink.h" #include "bnxt_tc.h" @@ -27,38 +28,40 @@ static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx, u16 *tx_cfa_action, u16 *rx_cfa_code) { - struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_cfa_vfr_alloc_input req = { 0 }; + struct hwrm_cfa_vfr_alloc_output *resp; + struct hwrm_cfa_vfr_alloc_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_ALLOC, -1, -1); - req.vf_id = cpu_to_le16(vf_idx); - sprintf(req.vfr_name, "vfr%d", vf_idx); - - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_ALLOC); if (!rc) { - *tx_cfa_action = le16_to_cpu(resp->tx_cfa_action); - *rx_cfa_code = le16_to_cpu(resp->rx_cfa_code); - netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x", - *tx_cfa_action, *rx_cfa_code); - } else { - netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc); + req->vf_id = cpu_to_le16(vf_idx); + sprintf(req->vfr_name, "vfr%d", vf_idx); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + *tx_cfa_action = le16_to_cpu(resp->tx_cfa_action); + *rx_cfa_code = le16_to_cpu(resp->rx_cfa_code); + netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x", + *tx_cfa_action, *rx_cfa_code); + } + hwrm_req_drop(bp, req); } - - mutex_unlock(&bp->hwrm_cmd_lock); + if (rc) + netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc); return rc; } static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx) { - struct hwrm_cfa_vfr_free_input req = { 0 }; + struct hwrm_cfa_vfr_free_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_FREE, -1, -1); - sprintf(req.vfr_name, "vfr%d", vf_idx); - - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_FREE); + if (!rc) { + sprintf(req->vfr_name, "vfr%d", vf_idx); + rc = hwrm_req_send(bp, req); + } if (rc) netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc); return rc; @@ -67,17 +70,18 @@ static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx) static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, u16 *max_mtu) { - struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; u16 mtu; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); - req.fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid); - - mutex_lock(&bp->hwrm_cmd_lock); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { mtu = le16_to_cpu(resp->max_mtu_configured); if (!mtu) @@ -85,7 +89,7 @@ static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, else *max_mtu = mtu; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, 
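bnxt_send_msg() above is the pass-through case: the ULP hands over a fully formed message, so the driver initializes a request with a placeholder opcode, adopts the caller's payload with hwrm_req_replace(), and applies the caller's timeout before sending. A sketch of just that sequencing, mirroring the hunk (error unwinding trimmed to the paths shown there):

    rc = hwrm_req_init(bp, req, 0 /* opcode is overwritten below */);
    if (rc)
            return rc;

    /* Replace the request body with the ULP's pre-built message. */
    rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
    if (rc)
            return rc;

    hwrm_req_timeout(bp, req, fw_msg->timeout);
    resp = hwrm_req_hold(bp, req);
    rc = hwrm_req_send(bp, req);
    resp_len = le16_to_cpu(resp->resp_len);
    if (resp_len)
            memcpy(fw_msg->resp, resp,
                   min_t(u32, resp_len, fw_msg->resp_max_len));
    hwrm_req_drop(bp, req);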
req); return rc; } diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index 5c368a9cbbbc..c2e1f163bb14 100644 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -275,6 +275,12 @@ void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb, if (GEM_BFEXT(DMA_RXVALID, desc->addr)) { desc_ptp = macb_ptp_desc(bp, desc); + /* Unlikely but check */ + if (!desc_ptp) { + dev_warn_ratelimited(&bp->pdev->dev, + "Timestamp not supported in BD\n"); + return; + } gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts); memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); @@ -307,8 +313,11 @@ int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb, if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0) return -ENOMEM; - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; desc_ptp = macb_ptp_desc(queue->bp, desc); + /* Unlikely but check */ + if (!desc_ptp) + return -EINVAL; + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_timestamp = &queue->tx_timestamps[head]; tx_timestamp->skb = skb; /* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index efa6c98d7459..0d9cda4ab303 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -5068,6 +5068,7 @@ static int adap_init0(struct adapter *adap, int vpd_skip) ret = -ENOMEM; goto bye; } + bitmap_zero(adap->sge.blocked_fl, adap->sge.egr_sz); #endif params[0] = FW_PARAM_PFVF(CLIP_START); @@ -6781,13 +6782,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) setup_memwin(adapter); err = adap_init0(adapter, 0); -#ifdef CONFIG_DEBUG_FS - bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz); -#endif - setup_memwin_rdma(adapter); if (err) goto out_unmap_bar; + setup_memwin_rdma(adapter); + /* configure SGE_STAT_CFG_A to read WC stats */ if (!is_t4(adapter->params.chip)) t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) | diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index aa86a81c8f4a..c2bd2584201f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -9,7 +9,7 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_RESET = 0x01, /* (VF -> PF) assert reset */ - HCLGE_MBX_ASSERTING_RESET, /* (PF -> VF) PF is asserting reset*/ + HCLGE_MBX_ASSERTING_RESET, /* (PF -> VF) PF is asserting reset */ HCLGE_MBX_SET_UNICAST, /* (VF -> PF) set UC addr */ HCLGE_MBX_SET_MULTICAST, /* (VF -> PF) set MC addr */ HCLGE_MBX_SET_VLAN, /* (VF -> PF) set VLAN */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 532523069d74..1ec91435d0b4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -38,9 +38,8 @@ static struct hns3_dbg_dentry_info hns3_dbg_dentry[] = { }, }; -static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, unsigned int cmd); -static int hns3_dbg_common_file_init(struct hnae3_handle *handle, - unsigned int cmd); +static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd); +static int hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd); static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = { { @@ -696,7 +695,7 @@ static int 
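The macb_ptp.c hunk above fixes an ordering hazard along with the missing NULL check: SKBTX_IN_PROGRESS is now set only after macb_ptp_desc() is known to have returned a descriptor, so a rejected timestamp request can no longer leave the flag set on an skb that will never receive a timestamp. The guard, restated:

    desc_ptp = macb_ptp_desc(queue->bp, desc);
    if (!desc_ptp)          /* this BD carries no timestamp words */
            return -EINVAL;
    /* Only now is it safe to promise a hardware timestamp. */
    skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;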
hns3_dbg_queue_map(struct hnae3_handle *h, char *buf, int len) sprintf(result[j++], "%u", i); sprintf(result[j++], "%u", h->ae_algo->ops->get_global_queue_id(h, i)); - sprintf(result[j++], "%u", + sprintf(result[j++], "%d", priv->ring[i].tqp_vector->vector_irq); hns3_dbg_fill_content(content, sizeof(content), queue_map_items, (const char **)result, @@ -868,7 +867,7 @@ static void hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); - static const char * const str[] = {"no", "yes"}; + const char * const str[] = {"no", "yes"}; unsigned long *caps = ae_dev->caps; u32 i, state; @@ -938,20 +937,19 @@ static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len) return 0; } -static int hns3_dbg_get_cmd_index(struct hnae3_handle *handle, - const unsigned char *name, u32 *index) +static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index) { u32 i; for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) { - if (!strncmp(name, hns3_dbg_cmd[i].name, - strlen(hns3_dbg_cmd[i].name))) { + if (hns3_dbg_cmd[i].cmd == dbg_data->cmd) { *index = i; return 0; } } - dev_err(&handle->pdev->dev, "unknown command(%s)\n", name); + dev_err(&dbg_data->handle->pdev->dev, "unknown command(%d)\n", + dbg_data->cmd); return -EINVAL; } @@ -1019,8 +1017,7 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer, u32 index; int ret; - ret = hns3_dbg_get_cmd_index(handle, filp->f_path.dentry->d_iname, - &index); + ret = hns3_dbg_get_cmd_index(dbg_data, &index); if (ret) return ret; @@ -1090,6 +1087,7 @@ static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd) char name[HNS3_DBG_FILE_NAME_LEN]; data[i].handle = handle; + data[i].cmd = hns3_dbg_cmd[cmd].cmd; data[i].qid = i; sprintf(name, "%s%u", hns3_dbg_cmd[cmd].name, i); debugfs_create_file(name, 0400, entry_dir, &data[i], @@ -1110,6 +1108,7 @@ hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd) return -ENOMEM; data->handle = handle; + data->cmd = hns3_dbg_cmd[cmd].cmd; entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry; debugfs_create_file(hns3_dbg_cmd[cmd].name, 0400, entry_dir, data, &hns3_dbg_fops); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h index f3766ff38bb7..bd8801065e02 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h @@ -22,6 +22,7 @@ struct hns3_dbg_item { struct hns3_dbg_data { struct hnae3_handle *handle; + enum hnae3_dbg_cmd cmd; u16 qid; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 39d01ca026da..0680d22485b9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -971,8 +971,7 @@ static u32 hns3_tx_spare_space(struct hns3_enet_ring *ring) /* The free tx buffer is divided into two part, so pick the * larger one. */ - return (ntc > (tx_spare->len - ntu) ? 
ntc : - (tx_spare->len - ntu)) - 1; + return max(ntc, tx_spare->len - ntu) - 1; } static void hns3_tx_spare_update(struct hns3_enet_ring *ring) @@ -5063,6 +5062,24 @@ void hns3_cq_period_mode_init(struct hns3_nic_priv *priv, hns3_set_cq_period_mode(priv, rx_mode, false); } +static void hns3_state_init(struct hnae3_handle *handle) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + struct net_device *netdev = handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); + + set_bit(HNS3_NIC_STATE_INITED, &priv->state); + + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) + set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags); + + if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) + set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state); + + if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev)) + set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state); +} + static int hns3_client_init(struct hnae3_handle *handle) { struct pci_dev *pdev = handle->pdev; @@ -5166,16 +5183,7 @@ static int hns3_client_init(struct hnae3_handle *handle) netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size); - if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) - set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state); - - if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev)) - set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state); - - set_bit(HNS3_NIC_STATE_INITED, &priv->state); - - if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) - set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags); + hns3_state_init(handle); ret = register_netdev(netdev); if (ret) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index dfad9060c284..299802995091 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -348,7 +348,7 @@ enum hns3_pkt_l3type { HNS3_L3_TYPE_LLDP, HNS3_L3_TYPE_BPDU, HNS3_L3_TYPE_MAC_PAUSE, - HNS3_L3_TYPE_PFC_PAUSE,/* 0x9*/ + HNS3_L3_TYPE_PFC_PAUSE, /* 0x9 */ /* reserved for 0xA~0xB */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index b8d9851aefc5..7ea511d59e91 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -298,33 +298,8 @@ out: return ret_val; } -/** - * hns3_self_test - self test - * @ndev: net device - * @eth_test: test cmd - * @data: test result - */ -static void hns3_self_test(struct net_device *ndev, - struct ethtool_test *eth_test, u64 *data) +static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2]) { - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = priv->ae_handle; - int st_param[HNS3_SELF_TEST_TYPE_NUM][2]; - bool if_running = netif_running(ndev); - int test_index = 0; - u32 i; - - if (hns3_nic_resetting(ndev)) { - netdev_err(ndev, "dev resetting!"); - return; - } - - /* Only do offline selftest, or pass by default */ - if (eth_test->flags != ETH_TEST_FL_OFFLINE) - return; - - netif_dbg(h, drv, ndev, "self test start"); - st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP; st_param[HNAE3_LOOP_APP][1] = h->flags & HNAE3_SUPPORT_APP_LOOPBACK; @@ -341,6 +316,18 @@ static void hns3_self_test(struct net_device *ndev, st_param[HNAE3_LOOP_PHY][0] = HNAE3_LOOP_PHY; st_param[HNAE3_LOOP_PHY][1] = h->flags & HNAE3_SUPPORT_PHY_LOOPBACK; +} + +static void hns3_selftest_prepare(struct net_device *ndev, + bool if_running, int 
(*st_param)[2]) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + + if (netif_msg_ifdown(h)) + netdev_info(ndev, "self test start\n"); + + hns3_set_selftest_param(h, st_param); if (if_running) ndev->netdev_ops->ndo_stop(ndev); @@ -359,6 +346,35 @@ static void hns3_self_test(struct net_device *ndev, h->ae_algo->ops->halt_autoneg(h, true); set_bit(HNS3_NIC_STATE_TESTING, &priv->state); +} + +static void hns3_selftest_restore(struct net_device *ndev, bool if_running) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + + clear_bit(HNS3_NIC_STATE_TESTING, &priv->state); + + if (h->ae_algo->ops->halt_autoneg) + h->ae_algo->ops->halt_autoneg(h, false); + +#if IS_ENABLED(CONFIG_VLAN_8021Q) + if (h->ae_algo->ops->enable_vlan_filter) + h->ae_algo->ops->enable_vlan_filter(h, true); +#endif + + if (if_running) + ndev->netdev_ops->ndo_open(ndev); + + if (netif_msg_ifdown(h)) + netdev_info(ndev, "self test end\n"); +} + +static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2], + struct ethtool_test *eth_test, u64 *data) +{ + int test_index = 0; + u32 i; for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) { enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0]; @@ -377,21 +393,32 @@ static void hns3_self_test(struct net_device *ndev, test_index++; } +} - clear_bit(HNS3_NIC_STATE_TESTING, &priv->state); - - if (h->ae_algo->ops->halt_autoneg) - h->ae_algo->ops->halt_autoneg(h, false); +/** + * hns3_nic_self_test - self test + * @ndev: net device + * @eth_test: test cmd + * @data: test result + */ +static void hns3_self_test(struct net_device *ndev, + struct ethtool_test *eth_test, u64 *data) +{ + int st_param[HNS3_SELF_TEST_TYPE_NUM][2]; + bool if_running = netif_running(ndev); -#if IS_ENABLED(CONFIG_VLAN_8021Q) - if (h->ae_algo->ops->enable_vlan_filter) - h->ae_algo->ops->enable_vlan_filter(h, true); -#endif + if (hns3_nic_resetting(ndev)) { + netdev_err(ndev, "dev resetting!"); + return; + } - if (if_running) - ndev->netdev_ops->ndo_open(ndev); + /* Only do offline selftest, or pass by default */ + if (eth_test->flags != ETH_TEST_FL_OFFLINE) + return; - netif_dbg(h, drv, ndev, "self test end\n"); + hns3_selftest_prepare(ndev, if_running, st_param); + hns3_do_selftest(ndev, st_param, eth_test, data); + hns3_selftest_restore(ndev, if_running); } static void hns3_update_limit_promisc_mode(struct net_device *netdev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 13042f1cac6f..474c6d1664e7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -362,41 +362,34 @@ static void hclge_set_default_capability(struct hclge_dev *hdev) } } +const struct hclge_caps_bit_map hclge_cmd_caps_bit_map0[] = { + {HCLGE_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B}, + {HCLGE_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B}, + {HCLGE_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B}, + {HCLGE_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B}, + {HCLGE_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B}, + {HCLGE_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B}, + {HCLGE_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B}, + {HCLGE_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B}, + {HCLGE_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B}, + {HCLGE_CAP_PHY_IMP_B, HNAE3_DEV_SUPPORT_PHY_IMP_B}, + {HCLGE_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B}, + {HCLGE_CAP_RXD_ADV_LAYOUT_B, 
HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B}, + {HCLGE_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B}, + {HCLGE_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B}, +}; + static void hclge_parse_capability(struct hclge_dev *hdev, struct hclge_query_version_cmd *cmd) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - u32 caps; + u32 caps, i; caps = __le32_to_cpu(cmd->caps[0]); - if (hnae3_get_bit(caps, HCLGE_CAP_UDP_GSO_B)) - set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_PTP_B)) - set_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_INT_QL_B)) - set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_TQP_TXRX_INDEP_B)) - set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_HW_TX_CSUM_B)) - set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_UDP_TUNNEL_CSUM_B)) - set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_FD_FORWARD_TC_B)) - set_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_FEC_B)) - set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_PAUSE_B)) - set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_PHY_IMP_B)) - set_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_RAS_IMP_B)) - set_bit(HNAE3_DEV_SUPPORT_RAS_IMP_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_RXD_ADV_LAYOUT_B)) - set_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_PORT_VLAN_BYPASS_B)) { - set_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps); - set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps); - } + for (i = 0; i < ARRAY_SIZE(hclge_cmd_caps_bit_map0); i++) + if (hnae3_get_bit(caps, hclge_cmd_caps_bit_map0[i].imp_bit)) + set_bit(hclge_cmd_caps_bit_map0[i].local_bit, + ae_dev->caps); } static __le32 hclge_build_api_caps(void) @@ -575,9 +568,13 @@ static void hclge_cmd_uninit_regs(struct hclge_hw *hw) void hclge_cmd_uninit(struct hclge_dev *hdev) { + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + /* wait to ensure that the firmware completes the possible left + * over commands. 
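The hclge_cmd.c hunk above replaces a run of hand-written if/set_bit pairs with a table of {imp_bit, local_bit} mappings plus one loop, so a new capability becomes a one-line table entry. Note that HCLGE_CAP_PORT_VLAN_BYPASS_B intentionally appears twice, preserving the old behaviour of setting both PORT_VLAN_BYPASS and VLAN_FLTR_MDF from the one firmware bit. The loop, restated from the hunk:

    for (i = 0; i < ARRAY_SIZE(hclge_cmd_caps_bit_map0); i++)
            if (hnae3_get_bit(caps, hclge_cmd_caps_bit_map0[i].imp_bit))
                    set_bit(hclge_cmd_caps_bit_map0[i].local_bit,
                            ae_dev->caps);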
+ */ + msleep(HCLGE_CMDQ_CLEAR_WAIT_TIME); spin_lock_bh(&hdev->hw.cmq.csq.lock); spin_lock(&hdev->hw.cmq.crq.lock); - set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); hclge_cmd_uninit_regs(&hdev->hw); spin_unlock(&hdev->hw.cmq.crq.lock); spin_unlock_bh(&hdev->hw.cmq.csq.lock); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 8e5be127909b..33244472e0d0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -9,6 +9,7 @@ #include "hnae3.h" #define HCLGE_CMDQ_TX_TIMEOUT 30000 +#define HCLGE_CMDQ_CLEAR_WAIT_TIME 200 #define HCLGE_DESC_DATA_LEN 6 struct hclge_dev; @@ -270,6 +271,9 @@ enum hclge_opcode_type { /* Led command */ HCLGE_OPC_LED_STATUS_CFG = 0xB000, + /* clear hardware resource command */ + HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B, + /* NCL config command */ HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011, @@ -449,7 +453,7 @@ struct hclge_tc_thrd { }; struct hclge_priv_buf { - struct hclge_waterline wl; /* Waterline for low and high*/ + struct hclge_waterline wl; /* Waterline for low and high */ u32 buf_size; /* TC private buffer size */ u32 tx_buf_size; u32 enable; /* Enable TC private buffer or not */ @@ -1013,16 +1017,6 @@ struct hclge_common_lb_cmd { #define HCLGE_TYPE_CRQ 0 #define HCLGE_TYPE_CSQ 1 -#define HCLGE_NIC_CSQ_BASEADDR_L_REG 0x27000 -#define HCLGE_NIC_CSQ_BASEADDR_H_REG 0x27004 -#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008 -#define HCLGE_NIC_CSQ_TAIL_REG 0x27010 -#define HCLGE_NIC_CSQ_HEAD_REG 0x27014 -#define HCLGE_NIC_CRQ_BASEADDR_L_REG 0x27018 -#define HCLGE_NIC_CRQ_BASEADDR_H_REG 0x2701c -#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020 -#define HCLGE_NIC_CRQ_TAIL_REG 0x27024 -#define HCLGE_NIC_CRQ_HEAD_REG 0x27028 /* this bit indicates that the driver is ready for hardware reset */ #define HCLGE_NIC_SW_RST_RDY_B 16 @@ -1197,6 +1191,19 @@ struct hclge_dev_specs_1_cmd { u8 rsv1[18]; }; +/* mac speed type defined in firmware command */ +enum HCLGE_FIRMWARE_MAC_SPEED { + HCLGE_FW_MAC_SPEED_1G, + HCLGE_FW_MAC_SPEED_10G, + HCLGE_FW_MAC_SPEED_25G, + HCLGE_FW_MAC_SPEED_40G, + HCLGE_FW_MAC_SPEED_50G, + HCLGE_FW_MAC_SPEED_100G, + HCLGE_FW_MAC_SPEED_10M, + HCLGE_FW_MAC_SPEED_100M, + HCLGE_FW_MAC_SPEED_200G, +}; + #define HCLGE_PHY_LINK_SETTING_BD_NUM 2 struct hclge_phy_link_ksetting_0_cmd { @@ -1227,6 +1234,12 @@ struct hclge_phy_reg_cmd { u8 rsv1[18]; }; +/* capabilities bits map between imp firmware and local driver */ +struct hclge_caps_bit_map { + u16 imp_bit; + u16 local_bit; +}; + int hclge_cmd_init(struct hclge_dev *hdev); static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index 5bf5db91d16c..4a619e5d3f35 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -104,26 +104,30 @@ static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc, return 0; } -static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, - u8 *tc, bool *changed) +static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets, + bool *changed) { - bool has_ets_tc = false; - u32 total_ets_bw = 0; - u8 max_tc = 0; - int ret; + u8 max_tc_id = 0; u8 i; for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i]) *changed = true; - if (ets->prio_tc[i] > max_tc) - max_tc = 
ets->prio_tc[i]; + if (ets->prio_tc[i] > max_tc_id) + max_tc_id = ets->prio_tc[i]; } - ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc); - if (ret) - return ret; + /* return the max tc number, i.e. max tc id plus 1 */ + return max_tc_id + 1; +} + +static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev, + struct ieee_ets *ets, bool *changed) +{ + bool has_ets_tc = false; + u32 total_ets_bw = 0; + u8 i; for (i = 0; i < hdev->tc_max; i++) { switch (ets->tc_tsa[i]) { @@ -148,7 +152,26 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, if (has_ets_tc && total_ets_bw != BW_PERCENT) return -EINVAL; - *tc = max_tc + 1; + return 0; +} + +static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, + u8 *tc, bool *changed) +{ + u8 tc_num; + int ret; + + tc_num = hclge_ets_tc_changed(hdev, ets, changed); + + ret = hclge_dcb_common_validate(hdev, tc_num, ets->prio_tc); + if (ret) + return ret; + + ret = hclge_ets_sch_mode_validate(hdev, ets, changed); + if (ret) + return ret; + + *tc = tc_num; if (*tc != hdev->tm_info.num_tc) *changed = true; @@ -234,9 +257,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) if (ret) goto err_out; - ret = hclge_notify_init_up(hdev); - if (ret) - return ret; + return hclge_notify_init_up(hdev); } return hclge_tm_dwrr_cfg(hdev); @@ -255,21 +276,12 @@ static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC]; struct hclge_vport *vport = hclge_get_vport(h); struct hclge_dev *hdev = vport->back; - u8 i, j, pfc_map, *prio_tc; int ret; + u8 i; memset(pfc, 0, sizeof(*pfc)); pfc->pfc_cap = hdev->pfc_max; - prio_tc = hdev->tm_info.prio_tc; - pfc_map = hdev->tm_info.hw_pfc_map; - - /* Pfc setting is based on TC */ - for (i = 0; i < hdev->tm_info.num_tc; i++) { - for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) { - if ((prio_tc[j] == i) && (pfc_map & BIT(i))) - pfc->pfc_en |= BIT(j); - } - } + pfc->pfc_en = hdev->tm_info.pfc_en; ret = hclge_pfc_tx_stats_get(hdev, requests); if (ret) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 288788186ecc..68ed1715ac52 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -926,26 +926,45 @@ static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len) return 0; } +static const struct hclge_dbg_item tm_pri_items[] = { + { "ID", 4 }, + { "MODE", 2 }, + { "DWRR", 2 }, + { "C_IR_B", 2 }, + { "C_IR_U", 2 }, + { "C_IR_S", 2 }, + { "C_BS_B", 2 }, + { "C_BS_S", 2 }, + { "C_FLAG", 2 }, + { "C_RATE(Mbps)", 2 }, + { "P_IR_B", 2 }, + { "P_IR_U", 2 }, + { "P_IR_S", 2 }, + { "P_BS_B", 2 }, + { "P_BS_S", 2 }, + { "P_FLAG", 2 }, + { "P_RATE(Mbps)", 0 } +}; + static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len) { - struct hclge_tm_shaper_para c_shaper_para; - struct hclge_tm_shaper_para p_shaper_para; - u8 pri_num, sch_mode, weight; - char *sch_mode_str; - int pos = 0; - int ret; - u8 i; + char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN]; + struct hclge_tm_shaper_para c_shaper_para, p_shaper_para; + char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str; + char content[HCLGE_DBG_TM_INFO_LEN]; + u8 pri_num, sch_mode, weight, i, j; + int pos, ret; ret = hclge_tm_get_pri_num(hdev, &pri_num); if (ret) return ret; - pos += scnprintf(buf + pos, len - pos, - "ID MODE DWRR C_IR_B C_IR_U 
C_IR_S C_BS_B "); - pos += scnprintf(buf + pos, len - pos, - "C_BS_S C_FLAG C_RATE(Mbps) P_IR_B P_IR_U "); - pos += scnprintf(buf + pos, len - pos, - "P_IR_S P_BS_B P_BS_S P_FLAG P_RATE(Mbps)\n"); + for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++) + result[i] = &data_str[i][0]; + + hclge_dbg_fill_content(content, sizeof(content), tm_pri_items, + NULL, ARRAY_SIZE(tm_pri_items)); + pos = scnprintf(buf, len, "%s", content); for (i = 0; i < pri_num; i++) { ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode); @@ -971,21 +990,16 @@ static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len) sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" : "sp"; - pos += scnprintf(buf + pos, len - pos, - "%04u %4s %3u %3u %3u %3u ", - i, sch_mode_str, weight, c_shaper_para.ir_b, - c_shaper_para.ir_u, c_shaper_para.ir_s); - pos += scnprintf(buf + pos, len - pos, - "%3u %3u %1u %6u ", - c_shaper_para.bs_b, c_shaper_para.bs_s, - c_shaper_para.flag, c_shaper_para.rate); - pos += scnprintf(buf + pos, len - pos, - "%3u %3u %3u %3u %3u ", - p_shaper_para.ir_b, p_shaper_para.ir_u, - p_shaper_para.ir_s, p_shaper_para.bs_b, - p_shaper_para.bs_s); - pos += scnprintf(buf + pos, len - pos, "%1u %6u\n", - p_shaper_para.flag, p_shaper_para.rate); + j = 0; + sprintf(result[j++], "%04u", i); + sprintf(result[j++], "%4s", sch_mode_str); + sprintf(result[j++], "%3u", weight); + hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j); + hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j); + hclge_dbg_fill_content(content, sizeof(content), tm_pri_items, + (const char **)result, + ARRAY_SIZE(tm_pri_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); } return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index ec9a7f8bc3fe..718c16d686fa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -4,468 +4,895 @@ #include "hclge_err.h" static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = { - { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(1), + .msg = "imp_itcm0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "imp_itcm1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(5), + .msg = "imp_itcm2_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(7), + .msg = "imp_itcm3_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(9), + .msg = "imp_dtcm0_mem0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(11), + .msg = "imp_dtcm0_mem1_ecc_mbit_err", + .reset_level = 
HNAE3_NONE_RESET + }, { + .int_msk = BIT(13), + .msg = "imp_dtcm1_mem0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(15), + .msg = "imp_dtcm1_mem1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(17), + .msg = "imp_itcm4_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = { - { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(1), + .msg = "cmdq_nic_rx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "cmdq_nic_tx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(5), + .msg = "cmdq_nic_rx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(7), + .msg = "cmdq_nic_tx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(9), + .msg = "cmdq_nic_rx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(11), + .msg = "cmdq_nic_tx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(13), + .msg = "cmdq_nic_rx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(15), + .msg = "cmdq_nic_tx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(17), + .msg = "cmdq_rocee_rx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(19), + .msg = "cmdq_rocee_tx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(21), + .msg = "cmdq_rocee_rx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(23), + .msg = "cmdq_rocee_tx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(25), + .msg = "cmdq_rocee_rx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(27), + .msg = "cmdq_rocee_tx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, 
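The error tables being reindented here are terminated by an all-zero /* sentinel */ entry rather than an explicit element count. A minimal sketch of how such a table is walked, assuming the sentinel's NULL .msg ends the loop (hypothetical helper name; the driver's own hclge_log_error() follows the same pattern):

static void log_hw_errors(struct device *dev, const char *reg,
			  const struct hclge_hw_error *err, u32 err_sts)
{
	/* stop at the sentinel entry, whose .msg is NULL */
	while (err->msg) {
		if (err->int_msk & err_sts)
			dev_err(dev, "%s %s found [error status=0x%x]\n",
				reg, err->msg, err_sts);
		err++;
	}
}
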
{ + .int_msk = BIT(29), + .msg = "cmdq_rocee_rx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(31), + .msg = "cmdq_rocee_tx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = { - { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(6), + .msg = "tqp_int_cfg_even_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(7), + .msg = "tqp_int_cfg_odd_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(8), + .msg = "tqp_int_ctrl_even_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(9), + .msg = "tqp_int_ctrl_odd_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(10), + .msg = "tx_que_scan_int_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(11), + .msg = "rx_que_scan_int_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = { - { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(1), + .msg = "msix_nic_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "msix_rocee_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_igu_int[] = { - { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "igu_rx_buf0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "igu_rx_buf1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = { - { .int_msk = BIT(0), .msg = "rx_buf_overflow", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "tx_buf_overflow", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "tx_buf_underrun", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "rx_buf_overflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "rx_stp_fifo_overflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "rx_stp_fifo_underflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "tx_buf_overflow", + .reset_level = 
HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "tx_buf_underrun", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "rx_stp_buf_overflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ncsi_err_int[] = { - { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(1), + .msg = "ncsi_tx_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = { - { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(29), .msg = 
"rx_vlan_tag_memory_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "vf_vlan_ad_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "umv_mcast_group_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "umv_key_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "umv_key_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "umv_key_mem2_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "umv_key_mem3_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "umv_ad_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "rss_tc_mode_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "rss_idt_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "rss_idt_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "rss_idt_mem2_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "rss_idt_mem3_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "rss_idt_mem4_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "rss_idt_mem5_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "rss_idt_mem6_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "rss_idt_mem7_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "rss_idt_mem8_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "rss_idt_mem9_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(18), + .msg = "rss_idt_mem10_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "rss_idt_mem11_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(20), + .msg = "rss_idt_mem12_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "rss_idt_mem13_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(22), + .msg = "rss_idt_mem14_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(23), + .msg = "rss_idt_mem15_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(24), + .msg = "port_vlan_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(25), + .msg = "mcast_linear_table_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(26), + .msg = "mcast_result_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(27), + .msg = "flow_director_ad_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(28), + .msg = "flow_director_ad_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(29), + .msg = "rx_vlan_tag_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(30), + .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = { - { 
.int_msk = BIT(0), .msg = "tx_vlan_tag_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err", - .reset_level = HNAE3_NONE_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "tx_vlan_tag_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(1), + .msg = "rss_list_tc_unassigned_queue_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = { - { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "hfs_fifo_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "rslt_descr_fifo_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "tx_vlan_tag_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "FD_CN0_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "FD_CN1_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "GRO_AD_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_tm_sch_rint[] = { - { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "tm_sch_pg_pshap_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = 
"tm_sch_pg_cshap_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(1), + .msg = "tm_sch_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "tm_sch_port_shap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "tm_sch_port_shap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "tm_sch_pg_pshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "tm_sch_pg_pshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "tm_sch_pg_cshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "tm_sch_pg_cshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "tm_sch_pri_pshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "tm_sch_pri_pshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "tm_sch_pri_cshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "tm_sch_pri_cshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "tm_sch_port_shap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "tm_sch_port_shap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "tm_sch_pg_pshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "tm_sch_pg_pshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "tm_sch_pg_cshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "tm_sch_pg_cshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(18), + .msg = "tm_sch_pri_pshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = 
"tm_sch_pri_pshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(20), + .msg = "tm_sch_pri_cshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "tm_sch_pri_cshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(22), + .msg = "tm_sch_rq_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(23), + .msg = "tm_sch_rq_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(24), + .msg = "tm_sch_nq_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(25), + .msg = "tm_sch_nq_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(26), + .msg = "tm_sch_roce_up_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(27), + .msg = "tm_sch_roce_up_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(28), + .msg = "tm_sch_rcb_byte_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(29), + .msg = "tm_sch_rcb_byte_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(30), + .msg = "tm_sch_ssu_byte_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(31), + .msg = "tm_sch_ssu_byte_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_qcn_fifo_rint[] = { - { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "qcn_shap_gp0_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "qcn_shap_gp0_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = 
"qcn_shap_gp1_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "qcn_shap_gp1_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "qcn_shap_gp2_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "qcn_shap_gp2_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "qcn_shap_gp3_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "qcn_shap_gp3_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "qcn_shap_gp0_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "qcn_shap_gp0_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "qcn_shap_gp1_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "qcn_shap_gp1_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "qcn_shap_gp2_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "qcn_shap_gp2_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "qcn_shap_gp3_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "qcn_shap_gp3_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "qcn_byte_info_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "qcn_byte_info_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_qcn_ecc_rint[] = { - { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(1), + .msg = "qcn_byte_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "qcn_time_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "qcn_fb_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "qcn_link_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "qcn_rate_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "qcn_tmplt_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = 
"qcn_shap_cfg_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "qcn_gp0_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "qcn_gp1_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "qcn_gp2_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "qcn_gp3_barral_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = { - { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "egu_cge_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(1), + .msg = "egu_cge_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "egu_lge_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "egu_lge_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "cge_igu_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(5), + .msg = "cge_igu_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "lge_igu_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(7), + .msg = "lge_igu_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "cge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "lge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "egu_cge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "egu_lge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "egu_ge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "ge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = { - { .int_msk = 
BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(26), .msg = "rd_bus_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(27), .msg = "wr_bus_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(28), .msg = "reg_search_miss", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(29), .msg = "rx_q_search_miss", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(13), + .msg = "rpu_rx_pkt_bit32_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "rpu_rx_pkt_bit33_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "rpu_rx_pkt_bit34_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "rpu_rx_pkt_bit35_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "rcb_tx_ring_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(18), + .msg = "rcb_rx_ring_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "rcb_tx_fbd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(20), + .msg = "rcb_rx_ebd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "rcb_tso_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(22), + .msg = "rcb_tx_int_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(23), + .msg = "rcb_rx_int_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(24), + .msg = "tpu_tx_pkt_0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(25), + .msg = "tpu_tx_pkt_1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(26), + .msg = "rd_bus_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(27), + .msg = "wr_bus_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(28), + .msg = "reg_search_miss", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(29), + .msg = "rx_q_search_miss", + .reset_level = HNAE3_NONE_RESET + }, 
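Each entry also records the reset level a flagged error demands. Since the hnae3 reset enum is not ordered strictly by severity (HNAE3_NONE_RESET sits at the high end), demanded levels are collected into a request bitmap rather than compared numerically; a hedged sketch of that accumulation, with a hypothetical helper name mirroring what hclge_log_error() does with its reset_requests argument:

static void collect_reset_requests(const struct hclge_hw_error *err,
				   u32 err_sts,
				   unsigned long *reset_requests)
{
	/* record every reset level demanded by a flagged error bit */
	for (; err->msg; err++)
		if ((err->int_msk & err_sts) &&
		    err->reset_level != HNAE3_NONE_RESET)
			set_bit(err->reset_level, reset_requests);
}
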
{ + .int_msk = BIT(30), + .msg = "ooo_ecc_err_detect", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(31), + .msg = "ooo_ecc_err_multpl", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = { - { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(4), + .msg = "gro_bd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "gro_context_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "rx_stash_cfg_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "axi_rd_fbd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = { - { .int_msk = BIT(0), .msg = "over_8bd_no_fe", - .reset_level = HNAE3_FUNC_RESET }, - { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison", - .reset_level = HNAE3_FUNC_RESET }, - { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison", - .reset_level = HNAE3_FUNC_RESET }, - { .int_msk = BIT(5), .msg = "buf_wait_timeout", - .reset_level = HNAE3_NONE_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "over_8bd_no_fe", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(1), + .msg = "tso_mss_cmp_min_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(2), + .msg = "tso_mss_cmp_max_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "tx_rd_fbd_poison", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(4), + .msg = "rx_rd_ebd_poison", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(5), + .msg = "buf_wait_timeout", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ssu_com_err_int[] = { - { .int_msk = BIT(0), .msg = "buf_sum_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(1), .msg = "ppp_mb_num_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(2), .msg = "ppp_mbid_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "ppp_rlt_host_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "cks_edit_position_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "cks_edit_condition_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "vlan_edit_condition_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "vlan_num_ot_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "vlan_num_in_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "buf_sum_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(1), + .msg = "ppp_mb_num_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(2), + .msg = "ppp_mbid_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + 
.int_msk = BIT(3), + .msg = "ppp_rlt_mac_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "ppp_rlt_host_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "cks_edit_position_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "cks_edit_condition_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "vlan_edit_condition_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "vlan_num_ot_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "vlan_num_in_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; #define HCLGE_SSU_MEM_ECC_ERR(x) \ - { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err", \ - .reset_level = HNAE3_GLOBAL_RESET } +{ \ + .int_msk = BIT(x), \ + .msg = "ssu_mem" #x "_ecc_mbit_err", \ + .reset_level = HNAE3_GLOBAL_RESET \ +} static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = { HCLGE_SSU_MEM_ECC_ERR(0), @@ -504,131 +931,269 @@ static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = { }; static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = { - { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port", - .reset_level = HNAE3_FUNC_RESET }, - { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "roc_pkt_without_key_port", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(1), + .msg = "tpu_pkt_without_key_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "igu_pkt_without_key_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "roc_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "tpu_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "igu_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "roc_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "tpu_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "igu_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "ets_rd_int_rx_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "ets_wr_int_rx_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + 
.msg = "ets_rd_int_tx_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "ets_wr_int_tx_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = { - { .int_msk = BIT(0), .msg = "ig_mac_inf_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "ig_host_inf_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "ig_roc_buf_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "qm_eof_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "host_cmd_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "ig_mac_inf_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "ig_host_inf_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "ig_roc_buf_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "ig_host_data_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "ig_host_key_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "tx_qcn_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "rx_qcn_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "tx_pf_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "rx_pf_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "qm_eof_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "mb_rlt_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + 
.msg = "dup_uncopy_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "dup_cnt_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "dup_cnt_drop_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "dup_cnt_wrb_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "host_cmd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "mac_cmd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "host_cmd_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(18), + .msg = "mac_cmd_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "dup_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(20), + .msg = "out_queue_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "bank2_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(22), + .msg = "bank1_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(23), + .msg = "bank0_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = { - { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "ets_rd_int_rx_tcg", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "ets_wr_int_rx_tcg", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "ets_rd_int_tx_tcg", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "ets_wr_int_tx_tcg", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = { - { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port", - .reset_level = HNAE3_FUNC_RESET }, - { .int_msk = BIT(9), .msg = "low_water_line_err_port", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(10), .msg = "hi_water_line_err_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "roc_pkt_without_key_port", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(9), + .msg = "low_water_line_err_port", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(10), + .msg = "hi_water_line_err_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = { - { .int_msk = 0, .msg = "rocee qmm ovf: sgid invalid err" }, - { .int_msk = 0x4, .msg = "rocee qmm ovf: sgid ovf err" }, - { .int_msk = 0x8, .msg = "rocee qmm ovf: smac invalid err" }, - { .int_msk = 0xC, .msg = "rocee qmm ovf: smac ovf err" }, - { .int_msk = 0x10, .msg = "rocee qmm ovf: cqc invalid err" }, - { .int_msk = 0x11, .msg = "rocee qmm ovf: cqc ovf err" }, - { .int_msk = 0x12, .msg = "rocee qmm ovf: cqc hopnum err" }, - { .int_msk = 0x13, .msg = "rocee qmm ovf: cqc ba0 err" }, - { .int_msk = 0x14, .msg = "rocee qmm ovf: srqc invalid err" }, - { .int_msk = 0x15, .msg = "rocee qmm ovf: srqc ovf err" }, - { .int_msk = 0x16, .msg = 
"rocee qmm ovf: srqc hopnum err" }, - { .int_msk = 0x17, .msg = "rocee qmm ovf: srqc ba0 err" }, - { .int_msk = 0x18, .msg = "rocee qmm ovf: mpt invalid err" }, - { .int_msk = 0x19, .msg = "rocee qmm ovf: mpt ovf err" }, - { .int_msk = 0x1A, .msg = "rocee qmm ovf: mpt hopnum err" }, - { .int_msk = 0x1B, .msg = "rocee qmm ovf: mpt ba0 err" }, - { .int_msk = 0x1C, .msg = "rocee qmm ovf: qpc invalid err" }, - { .int_msk = 0x1D, .msg = "rocee qmm ovf: qpc ovf err" }, - { .int_msk = 0x1E, .msg = "rocee qmm ovf: qpc hopnum err" }, - { .int_msk = 0x1F, .msg = "rocee qmm ovf: qpc ba0 err" }, - { /* sentinel */ } + { + .int_msk = 0, + .msg = "rocee qmm ovf: sgid invalid err" + }, { + .int_msk = 0x4, + .msg = "rocee qmm ovf: sgid ovf err" + }, { + .int_msk = 0x8, + .msg = "rocee qmm ovf: smac invalid err" + }, { + .int_msk = 0xC, + .msg = "rocee qmm ovf: smac ovf err" + }, { + .int_msk = 0x10, + .msg = "rocee qmm ovf: cqc invalid err" + }, { + .int_msk = 0x11, + .msg = "rocee qmm ovf: cqc ovf err" + }, { + .int_msk = 0x12, + .msg = "rocee qmm ovf: cqc hopnum err" + }, { + .int_msk = 0x13, + .msg = "rocee qmm ovf: cqc ba0 err" + }, { + .int_msk = 0x14, + .msg = "rocee qmm ovf: srqc invalid err" + }, { + .int_msk = 0x15, + .msg = "rocee qmm ovf: srqc ovf err" + }, { + .int_msk = 0x16, + .msg = "rocee qmm ovf: srqc hopnum err" + }, { + .int_msk = 0x17, + .msg = "rocee qmm ovf: srqc ba0 err" + }, { + .int_msk = 0x18, + .msg = "rocee qmm ovf: mpt invalid err" + }, { + .int_msk = 0x19, + .msg = "rocee qmm ovf: mpt ovf err" + }, { + .int_msk = 0x1A, + .msg = "rocee qmm ovf: mpt hopnum err" + }, { + .int_msk = 0x1B, + .msg = "rocee qmm ovf: mpt ba0 err" + }, { + .int_msk = 0x1C, + .msg = "rocee qmm ovf: qpc invalid err" + }, { + .int_msk = 0x1D, + .msg = "rocee qmm ovf: qpc ovf err" + }, { + .int_msk = 0x1E, + .msg = "rocee qmm ovf: qpc hopnum err" + }, { + .int_msk = 0x1F, + .msg = "rocee qmm ovf: qpc ba0 err" + }, { + /* sentinel */ + } }; static const struct hclge_hw_module_id hclge_hw_module_id_st[] = { @@ -1709,34 +2274,36 @@ static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev) static const struct hclge_hw_blk hw_blk[] = { { - .msk = BIT(0), .name = "IGU_EGU", - .config_err_int = hclge_config_igu_egu_hw_err_int, - }, - { - .msk = BIT(1), .name = "PPP", - .config_err_int = hclge_config_ppp_hw_err_int, - }, - { - .msk = BIT(2), .name = "SSU", - .config_err_int = hclge_config_ssu_hw_err_int, - }, - { - .msk = BIT(3), .name = "PPU", - .config_err_int = hclge_config_ppu_hw_err_int, - }, - { - .msk = BIT(4), .name = "TM", - .config_err_int = hclge_config_tm_hw_err_int, - }, - { - .msk = BIT(5), .name = "COMMON", - .config_err_int = hclge_config_common_hw_err_int, - }, - { - .msk = BIT(8), .name = "MAC", - .config_err_int = hclge_config_mac_err_int, - }, - { /* sentinel */ } + .msk = BIT(0), + .name = "IGU_EGU", + .config_err_int = hclge_config_igu_egu_hw_err_int, + }, { + .msk = BIT(1), + .name = "PPP", + .config_err_int = hclge_config_ppp_hw_err_int, + }, { + .msk = BIT(2), + .name = "SSU", + .config_err_int = hclge_config_ssu_hw_err_int, + }, { + .msk = BIT(3), + .name = "PPU", + .config_err_int = hclge_config_ppu_hw_err_int, + }, { + .msk = BIT(4), + .name = "TM", + .config_err_int = hclge_config_tm_hw_err_int, + }, { + .msk = BIT(5), + .name = "COMMON", + .config_err_int = hclge_config_common_hw_err_int, + }, { + .msk = BIT(8), + .name = "MAC", + .config_err_int = hclge_config_mac_err_int, + }, { + /* sentinel */ + } }; static void hclge_config_all_msix_error(struct hclge_dev *hdev, bool 
enable) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index f6882090d38e..fb1c33cac2a8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -92,23 +92,23 @@ static const struct pci_device_id ae_algo_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); -static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG, - HCLGE_CMDQ_TX_ADDR_H_REG, - HCLGE_CMDQ_TX_DEPTH_REG, - HCLGE_CMDQ_TX_TAIL_REG, - HCLGE_CMDQ_TX_HEAD_REG, - HCLGE_CMDQ_RX_ADDR_L_REG, - HCLGE_CMDQ_RX_ADDR_H_REG, - HCLGE_CMDQ_RX_DEPTH_REG, - HCLGE_CMDQ_RX_TAIL_REG, - HCLGE_CMDQ_RX_HEAD_REG, +static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG, + HCLGE_NIC_CSQ_BASEADDR_H_REG, + HCLGE_NIC_CSQ_DEPTH_REG, + HCLGE_NIC_CSQ_TAIL_REG, + HCLGE_NIC_CSQ_HEAD_REG, + HCLGE_NIC_CRQ_BASEADDR_L_REG, + HCLGE_NIC_CRQ_BASEADDR_H_REG, + HCLGE_NIC_CRQ_DEPTH_REG, + HCLGE_NIC_CRQ_TAIL_REG, + HCLGE_NIC_CRQ_HEAD_REG, HCLGE_VECTOR0_CMDQ_SRC_REG, HCLGE_CMDQ_INTR_STS_REG, HCLGE_CMDQ_INTR_EN_REG, HCLGE_CMDQ_INTR_GEN_REG}; static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE, - HCLGE_VECTOR0_OTER_EN_REG, + HCLGE_PF_OTHER_INT_REG, HCLGE_MISC_RESET_STS_REG, HCLGE_MISC_VECTOR_INT_STS, HCLGE_GLOBAL_RESET_REG, @@ -959,31 +959,31 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) static int hclge_parse_speed(u8 speed_cmd, u32 *speed) { switch (speed_cmd) { - case 6: + case HCLGE_FW_MAC_SPEED_10M: *speed = HCLGE_MAC_SPEED_10M; break; - case 7: + case HCLGE_FW_MAC_SPEED_100M: *speed = HCLGE_MAC_SPEED_100M; break; - case 0: + case HCLGE_FW_MAC_SPEED_1G: *speed = HCLGE_MAC_SPEED_1G; break; - case 1: + case HCLGE_FW_MAC_SPEED_10G: *speed = HCLGE_MAC_SPEED_10G; break; - case 2: + case HCLGE_FW_MAC_SPEED_25G: *speed = HCLGE_MAC_SPEED_25G; break; - case 3: + case HCLGE_FW_MAC_SPEED_40G: *speed = HCLGE_MAC_SPEED_40G; break; - case 4: + case HCLGE_FW_MAC_SPEED_50G: *speed = HCLGE_MAC_SPEED_50G; break; - case 5: + case HCLGE_FW_MAC_SPEED_100G: *speed = HCLGE_MAC_SPEED_100G; break; - case 8: + case HCLGE_FW_MAC_SPEED_200G: *speed = HCLGE_MAC_SPEED_200G; break; default: @@ -993,44 +993,43 @@ static int hclge_parse_speed(u8 speed_cmd, u32 *speed) return 0; } +static const struct hclge_speed_bit_map speed_bit_map[] = { + {HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT}, + {HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT}, + {HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT}, + {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT}, + {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT}, + {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT}, + {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT}, + {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT}, + {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT}, +}; + +static int hclge_get_speed_bit(u32 speed, u32 *speed_bit) +{ + u16 i; + + for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) { + if (speed == speed_bit_map[i].speed) { + *speed_bit = speed_bit_map[i].speed_bit; + return 0; + } + } + + return -EINVAL; +} + static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; u32 speed_ability = hdev->hw.mac.speed_ability; u32 speed_bit = 0; + int ret; - switch (speed) { - case HCLGE_MAC_SPEED_10M: - speed_bit = HCLGE_SUPPORT_10M_BIT; - break; - case HCLGE_MAC_SPEED_100M: - speed_bit = HCLGE_SUPPORT_100M_BIT; - break; - case HCLGE_MAC_SPEED_1G: - speed_bit = 
HCLGE_SUPPORT_1G_BIT; - break; - case HCLGE_MAC_SPEED_10G: - speed_bit = HCLGE_SUPPORT_10G_BIT; - break; - case HCLGE_MAC_SPEED_25G: - speed_bit = HCLGE_SUPPORT_25G_BIT; - break; - case HCLGE_MAC_SPEED_40G: - speed_bit = HCLGE_SUPPORT_40G_BIT; - break; - case HCLGE_MAC_SPEED_50G: - speed_bit = HCLGE_SUPPORT_50G_BIT; - break; - case HCLGE_MAC_SPEED_100G: - speed_bit = HCLGE_SUPPORT_100G_BIT; - break; - case HCLGE_MAC_SPEED_200G: - speed_bit = HCLGE_SUPPORT_200G_BIT; - break; - default: - return -EINVAL; - } + ret = hclge_get_speed_bit(speed, &speed_bit); + if (ret) + return ret; if (speed_bit & speed_ability) return 0; @@ -1551,6 +1550,7 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->tm_info.hw_pfc_map = 0; hdev->wanted_umv_size = cfg.umv_space; hdev->tx_spare_buf_size = cfg.tx_spare_buf_size; + hdev->gro_en = true; if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF) set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps); @@ -1619,7 +1619,7 @@ static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min, return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_config_gro(struct hclge_dev *hdev, bool en) +static int hclge_config_gro(struct hclge_dev *hdev) { struct hclge_cfg_gro_status_cmd *req; struct hclge_desc desc; @@ -1631,7 +1631,7 @@ static int hclge_config_gro(struct hclge_dev *hdev, bool en) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false); req = (struct hclge_cfg_gro_status_cmd *)desc.data; - req->gro_en = en ? 1 : 0; + req->gro_en = hdev->gro_en ? 1 : 0; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) @@ -2581,39 +2581,39 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, switch (speed) { case HCLGE_MAC_SPEED_10M: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 6); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M); break; case HCLGE_MAC_SPEED_100M: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 7); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M); break; case HCLGE_MAC_SPEED_1G: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 0); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G); break; case HCLGE_MAC_SPEED_10G: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 1); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G); break; case HCLGE_MAC_SPEED_25G: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 2); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G); break; case HCLGE_MAC_SPEED_40G: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 3); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G); break; case HCLGE_MAC_SPEED_50G: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 4); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G); break; case HCLGE_MAC_SPEED_100G: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 5); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G); break; case HCLGE_MAC_SPEED_200G: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 8); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G); break; default: dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); @@ -2954,12 +2954,12 @@ static void hclge_update_link_status(struct hclge_dev *hdev) } if (state != hdev->hw.mac.link) { + hdev->hw.mac.link = state; client->ops->link_status_change(handle, state); hclge_config_mac_tnl_int(hdev, state); if (rclient && rclient->ops->link_status_change) rclient->ops->link_status_change(rhandle, state); - hdev->hw.mac.link = state; 
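The speed_bit_map/hclge_get_speed_bit change above replaces a nine-case switch with a const lookup table, so supporting a new speed becomes a one-line table edit instead of another switch arm. A minimal, self-contained sketch of the same pattern (simplified names and illustrative values, not the driver's actual definitions):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct speed_bit_map {
        unsigned int speed;     /* link speed in Mbps */
        unsigned int speed_bit; /* matching capability bit */
};

static const struct speed_bit_map map[] = {
        { 10,   1u << 0 },
        { 100,  1u << 1 },
        { 1000, 1u << 2 },
};

/* Return 0 and set *bit on a hit, -1 on an unknown speed
 * (the driver returns -EINVAL in the same situation).
 */
static int get_speed_bit(unsigned int speed, unsigned int *bit)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(map); i++) {
                if (map[i].speed == speed) {
                        *bit = map[i].speed_bit;
                        return 0;
                }
        }
        return -1;
}

int main(void)
{
        unsigned int bit;

        if (get_speed_bit(1000, &bit) == 0)
                printf("1000 Mbps -> bit 0x%x\n", bit); /* prints 0x4 */
        return 0;
}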
hclge_push_link_status(hdev); } @@ -3421,7 +3421,7 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data) hclge_enable_vector(&hdev->misc_vector, false); event_cause = hclge_check_event_cause(hdev, &clearval); - /* vector 0 interrupt is shared with reset and mailbox source events.*/ + /* vector 0 interrupt is shared with reset and mailbox source events. */ switch (event_cause) { case HCLGE_VECTOR0_EVENT_ERR: hclge_errhand_task_schedule(hdev); @@ -10081,7 +10081,11 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, bool writen_to_tbl) { - struct hclge_vport_vlan_cfg *vlan; + struct hclge_vport_vlan_cfg *vlan, *tmp; + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) + if (vlan->vlan_id == vlan_id) + return; vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); if (!vlan) @@ -11451,6 +11455,28 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev) } } +static int hclge_clear_hw_resource(struct hclge_dev *hdev) +{ + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + /* This new command is only supported by new firmware; it will + * fail with older firmware. The error value -EOPNOTSUPP can only be + * returned by older firmware running this command, so to keep the + * code backward compatible we override this value and return + * success. + */ + if (ret && ret != -EOPNOTSUPP) { + dev_err(&hdev->pdev->dev, + "failed to clear hw resource, ret = %d\n", ret); + return ret; + } + return 0; +} + static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev) { if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) @@ -11504,6 +11530,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) if (ret) goto err_cmd_uninit; + ret = hclge_clear_hw_resource(hdev); + if (ret) + goto err_cmd_uninit; + ret = hclge_get_cap(hdev); if (ret) goto err_cmd_uninit; @@ -11568,7 +11598,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_mdiobus_unreg; } - ret = hclge_config_gro(hdev, true); + ret = hclge_config_gro(hdev); if (ret) goto err_mdiobus_unreg; @@ -11951,7 +11981,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - ret = hclge_config_gro(hdev, true); + ret = hclge_config_gro(hdev); if (ret) return ret; @@ -12686,8 +12716,15 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + bool gro_en_old = hdev->gro_en; + int ret; - return hclge_config_gro(hdev, enable); + hdev->gro_en = enable; + ret = hclge_config_gro(hdev); + if (ret) + hdev->gro_en = gro_en_old; + + return ret; } static void hclge_sync_promisc_mode(struct hclge_dev *hdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index ada5c68f2851..de6afbcbfbac 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -38,22 +38,22 @@ #define HCLGE_VECTOR_REG_OFFSET_H 0x1000 #define HCLGE_VECTOR_VF_OFFSET 0x100000 -#define HCLGE_CMDQ_TX_ADDR_L_REG 0x27000 -#define HCLGE_CMDQ_TX_ADDR_H_REG 0x27004 -#define HCLGE_CMDQ_TX_DEPTH_REG 0x27008 -#define HCLGE_CMDQ_TX_TAIL_REG 0x27010 -#define HCLGE_CMDQ_TX_HEAD_REG 0x27014 -#define HCLGE_CMDQ_RX_ADDR_L_REG 0x27018 -#define HCLGE_CMDQ_RX_ADDR_H_REG 0x2701C -#define HCLGE_CMDQ_RX_DEPTH_REG 0x27020 
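hclge_clear_hw_resource() above is a common firmware-compatibility idiom: issue the new command unconditionally and treat -EOPNOTSUPP from older firmware as success, so one driver binary works across firmware generations. A hedged, userspace stand-in for the idiom (send_cmd() is a hypothetical helper, not the driver's command path):

#include <errno.h>
#include <stdio.h>

/* Pretend old firmware rejects opcode 42 with -EOPNOTSUPP. */
static int send_cmd(int opcode)
{
        return opcode == 42 ? -EOPNOTSUPP : 0;
}

static int clear_hw_resource(void)
{
        int ret = send_cmd(42);

        /* Older firmware does not know the command; that is fine. */
        if (ret && ret != -EOPNOTSUPP) {
                fprintf(stderr, "failed to clear hw resource, ret = %d\n", ret);
                return ret;
        }
        return 0;
}

int main(void)
{
        return clear_hw_resource(); /* 0 even on "old firmware" */
}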
-#define HCLGE_CMDQ_RX_TAIL_REG 0x27024 -#define HCLGE_CMDQ_RX_HEAD_REG 0x27028 +#define HCLGE_NIC_CSQ_BASEADDR_L_REG 0x27000 +#define HCLGE_NIC_CSQ_BASEADDR_H_REG 0x27004 +#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008 +#define HCLGE_NIC_CSQ_TAIL_REG 0x27010 +#define HCLGE_NIC_CSQ_HEAD_REG 0x27014 +#define HCLGE_NIC_CRQ_BASEADDR_L_REG 0x27018 +#define HCLGE_NIC_CRQ_BASEADDR_H_REG 0x2701C +#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020 +#define HCLGE_NIC_CRQ_TAIL_REG 0x27024 +#define HCLGE_NIC_CRQ_HEAD_REG 0x27028 + #define HCLGE_CMDQ_INTR_STS_REG 0x27104 #define HCLGE_CMDQ_INTR_EN_REG 0x27108 #define HCLGE_CMDQ_INTR_GEN_REG 0x2710C /* bar registers for common func */ -#define HCLGE_VECTOR0_OTER_EN_REG 0x20600 #define HCLGE_GRO_EN_REG 0x28000 #define HCLGE_RXD_ADV_LAYOUT_EN_REG 0x28008 @@ -929,6 +929,7 @@ struct hclge_dev { unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)]; enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type; u8 fd_en; + bool gro_en; u16 wanted_umv_size; /* max available unicast mac vlan space */ @@ -1057,6 +1058,11 @@ struct hclge_vport { struct list_head vlan_list; /* Store VF vlan table */ }; +struct hclge_speed_bit_map { + u32 speed; + u32 speed_bit; +}; + int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, bool en_mc_pmc, bool en_bc_pmc); int hclge_add_uc_addr_common(struct hclge_vport *vport, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index c0a478ae9583..0315d8312af3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -66,6 +66,8 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport, memcpy(resp_pf_to_vf->msg.resp_data, resp_msg->data, resp_msg->len); + trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf); + status = hclge_cmd_send(&hdev->hw, &desc, 1); if (status) dev_err(&hdev->pdev->dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h index dbf5f4c08019..7a9b77de632a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h @@ -127,7 +127,7 @@ static inline struct hclge_dev *hclge_ptp_get_hdev(struct ptp_clock_info *info) } bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb); -void hclge_ptp_clean_tx_hwts(struct hclge_dev *dev); +void hclge_ptp_clean_tx_hwts(struct hclge_dev *hdev); void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb, u32 nsec, u32 sec); int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index bd19a2d89f6c..59772b0e9531 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -71,7 +71,7 @@ static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw) static bool hclgevf_is_special_opcode(u16 opcode) { - static const u16 spec_opcode[] = {0x30, 0x31, 0x32}; + const u16 spec_opcode[] = {0x30, 0x31, 0x32}; int i; for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { @@ -342,25 +342,26 @@ static void hclgevf_set_default_capability(struct hclgevf_dev *hdev) set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); } +const struct hclgevf_caps_bit_map hclgevf_cmd_caps_bit_map0[] = { + {HCLGEVF_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B}, + {HCLGEVF_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B}, + 
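The {firmware bit, driver bit} table that begins above and continues below drives hclgevf_parse_capability(): instead of one if/set_bit pair per feature, the driver walks the map and copies each advertised firmware bit to the matching driver capability bit. A standalone sketch of that translation loop (illustrative bit numbers, not the real HCLGEVF_CAP_* values):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct caps_bit_map {
        unsigned int imp_bit;   /* bit position reported by firmware */
        unsigned int local_bit; /* bit position used by the driver */
};

static const struct caps_bit_map caps_map[] = {
        { 0, 3 },       /* e.g. fw bit 0 -> driver "UDP GSO" bit 3 */
        { 1, 7 },
        { 2, 9 },
};

static void parse_capability(unsigned int fw_caps, unsigned long *drv_caps)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(caps_map); i++)
                if (fw_caps & (1u << caps_map[i].imp_bit))
                        *drv_caps |= 1ul << caps_map[i].local_bit;
}

int main(void)
{
        unsigned long drv_caps = 0;

        parse_capability(0x5, &drv_caps);        /* fw bits 0 and 2 set */
        printf("driver caps: 0x%lx\n", drv_caps); /* 0x208 */
        return 0;
}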
{HCLGEVF_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B}, + {HCLGEVF_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B}, + {HCLGEVF_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B}, + {HCLGEVF_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B}, +}; + static void hclgevf_parse_capability(struct hclgevf_dev *hdev, struct hclgevf_query_version_cmd *cmd) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - u32 caps; + u32 caps, i; caps = __le32_to_cpu(cmd->caps[0]); - if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_GSO_B)) - set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGEVF_CAP_INT_QL_B)) - set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGEVF_CAP_TQP_TXRX_INDEP_B)) - set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGEVF_CAP_HW_TX_CSUM_B)) - set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_TUNNEL_CSUM_B)) - set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGEVF_CAP_RXD_ADV_LAYOUT_B)) - set_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, ae_dev->caps); + for (i = 0; i < ARRAY_SIZE(hclgevf_cmd_caps_bit_map0); i++) + if (hnae3_get_bit(caps, hclgevf_cmd_caps_bit_map0[i].imp_bit)) + set_bit(hclgevf_cmd_caps_bit_map0[i].local_bit, + ae_dev->caps); } static __le32 hclgevf_build_api_caps(void) @@ -507,12 +508,17 @@ static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw) void hclgevf_cmd_uninit(struct hclgevf_dev *hdev) { + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + /* wait to ensure that the firmware completes the possible left + * over commands. + */ + msleep(HCLGEVF_CMDQ_CLEAR_WAIT_TIME); spin_lock_bh(&hdev->hw.cmq.csq.lock); spin_lock(&hdev->hw.cmq.crq.lock); - set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); hclgevf_cmd_uninit_regs(&hdev->hw); spin_unlock(&hdev->hw.cmq.crq.lock); spin_unlock_bh(&hdev->hw.cmq.csq.lock); + hclgevf_free_cmd_desc(&hdev->hw.cmq.csq); hclgevf_free_cmd_desc(&hdev->hw.cmq.crq); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index 202feb70dba5..39d0b589c720 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -8,6 +8,7 @@ #include "hnae3.h" #define HCLGEVF_CMDQ_TX_TIMEOUT 30000 +#define HCLGEVF_CMDQ_CLEAR_WAIT_TIME 200 #define HCLGEVF_CMDQ_RX_INVLD_B 0 #define HCLGEVF_CMDQ_RX_OUTVLD_B 1 @@ -265,16 +266,6 @@ struct hclgevf_cfg_tx_queue_pointer_cmd { #define HCLGEVF_TYPE_CRQ 0 #define HCLGEVF_TYPE_CSQ 1 -#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000 -#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004 -#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008 -#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010 -#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014 -#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018 -#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701c -#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020 -#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024 -#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028 /* this bit indicates that the driver is ready for hardware reset */ #define HCLGEVF_NIC_SW_RST_RDY_B 16 @@ -305,6 +296,12 @@ struct hclgevf_dev_specs_1_cmd { u8 rsv1[18]; }; +/* capabilities bits map between imp firmware and local driver */ +struct hclgevf_caps_bit_map { + u16 imp_bit; + u16 local_bit; +}; + static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value) { writel(value, base + reg); diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index ff651739f16b..82e727020120 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -40,16 +40,16 @@ static const u8 hclgevf_hash_key[] = { MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); -static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, - HCLGEVF_CMDQ_TX_ADDR_H_REG, - HCLGEVF_CMDQ_TX_DEPTH_REG, - HCLGEVF_CMDQ_TX_TAIL_REG, - HCLGEVF_CMDQ_TX_HEAD_REG, - HCLGEVF_CMDQ_RX_ADDR_L_REG, - HCLGEVF_CMDQ_RX_ADDR_H_REG, - HCLGEVF_CMDQ_RX_DEPTH_REG, - HCLGEVF_CMDQ_RX_TAIL_REG, - HCLGEVF_CMDQ_RX_HEAD_REG, +static const u32 cmdq_reg_addr_list[] = {HCLGEVF_NIC_CSQ_BASEADDR_L_REG, + HCLGEVF_NIC_CSQ_BASEADDR_H_REG, + HCLGEVF_NIC_CSQ_DEPTH_REG, + HCLGEVF_NIC_CSQ_TAIL_REG, + HCLGEVF_NIC_CSQ_HEAD_REG, + HCLGEVF_NIC_CRQ_BASEADDR_L_REG, + HCLGEVF_NIC_CRQ_BASEADDR_H_REG, + HCLGEVF_NIC_CRQ_DEPTH_REG, + HCLGEVF_NIC_CRQ_TAIL_REG, + HCLGEVF_NIC_CRQ_HEAD_REG, HCLGEVF_VECTOR0_CMDQ_SRC_REG, HCLGEVF_VECTOR0_CMDQ_STATE_REG, HCLGEVF_CMDQ_INTR_EN_REG, @@ -507,10 +507,10 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) link_state = test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; if (link_state != hdev->hw.mac.link) { + hdev->hw.mac.link = link_state; client->ops->link_status_change(handle, !!link_state); if (rclient && rclient->ops->link_status_change) rclient->ops->link_status_change(rhandle, !!link_state); - hdev->hw.mac.link = link_state; } clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); @@ -1963,7 +1963,7 @@ static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG)); dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", - hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG)); + hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG)); dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); @@ -2489,6 +2489,8 @@ static int hclgevf_configure(struct hclgevf_dev *hdev) { int ret; + hdev->gro_en = true; + ret = hclgevf_get_basic_info(hdev); if (ret) return ret; @@ -2551,7 +2553,7 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) return 0; } -static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) +static int hclgevf_config_gro(struct hclgevf_dev *hdev) { struct hclgevf_cfg_gro_status_cmd *req; struct hclgevf_desc desc; @@ -2564,7 +2566,7 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) false); req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; - req->gro_en = en ? 1 : 0; + req->gro_en = hdev->gro_en ? 
1 : 0; ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (ret) @@ -3310,7 +3312,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) return ret; } - ret = hclgevf_config_gro(hdev, true); + ret = hclgevf_config_gro(hdev); if (ret) return ret; @@ -3395,7 +3397,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) if (ret) goto err_config; - ret = hclgevf_config_gro(hdev, true); + ret = hclgevf_config_gro(hdev); if (ret) goto err_config; @@ -3647,8 +3649,15 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + bool gro_en_old = hdev->gro_en; + int ret; - return hclgevf_config_gro(hdev, enable); + hdev->gro_en = enable; + ret = hclgevf_config_gro(hdev); + if (ret) + hdev->gro_en = gro_en_old; + + return ret; } static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 6f222a3a0bf2..883130a9b48f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -33,16 +33,17 @@ #define HCLGEVF_VECTOR_VF_OFFSET 0x100000 /* bar registers for cmdq */ -#define HCLGEVF_CMDQ_TX_ADDR_L_REG 0x27000 -#define HCLGEVF_CMDQ_TX_ADDR_H_REG 0x27004 -#define HCLGEVF_CMDQ_TX_DEPTH_REG 0x27008 -#define HCLGEVF_CMDQ_TX_TAIL_REG 0x27010 -#define HCLGEVF_CMDQ_TX_HEAD_REG 0x27014 -#define HCLGEVF_CMDQ_RX_ADDR_L_REG 0x27018 -#define HCLGEVF_CMDQ_RX_ADDR_H_REG 0x2701C -#define HCLGEVF_CMDQ_RX_DEPTH_REG 0x27020 -#define HCLGEVF_CMDQ_RX_TAIL_REG 0x27024 -#define HCLGEVF_CMDQ_RX_HEAD_REG 0x27028 +#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000 +#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004 +#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008 +#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010 +#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014 +#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018 +#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701C +#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020 +#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024 +#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028 + #define HCLGEVF_CMDQ_INTR_EN_REG 0x27108 #define HCLGEVF_CMDQ_INTR_GEN_REG 0x2710C @@ -311,11 +312,12 @@ struct hclgevf_dev { u16 *vector_status; int *vector_irq; + bool gro_en; + unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)]; struct hclgevf_mac_table_cfg mac_table; - bool mbx_event_pending; struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */ struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index 772b2f8acd2e..fdc66fae0960 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -155,18 +155,66 @@ static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw) return tail == hw->cmq.crq.next_to_use; } +static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev, + struct hclge_mbx_pf_to_vf_cmd *req) +{ + struct hclgevf_mbx_resp_status *resp = &hdev->mbx_resp; + + if (resp->received_resp) + dev_warn(&hdev->pdev->dev, + "VF mbx resp flag not clear(%u)\n", + req->msg.vf_mbx_msg_code); + + resp->origin_mbx_msg = + (req->msg.vf_mbx_msg_code << 16); + resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode; + resp->resp_status = + hclgevf_resp_to_errno(req->msg.resp_status); + 
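Both the PF and VF GRO paths above now share a cache-and-roll-back shape: record the old value, update the cached field, push it to hardware, and restore the cache if the push fails, so the cached state never disagrees with the device. A simplified sketch of that shape (push_to_hw() is a hypothetical stand-in for the firmware command):

#include <stdbool.h>
#include <stdio.h>

struct dev_state {
        bool gro_en;    /* cached copy of what hardware was told */
};

/* Stand-in for the firmware command; fails when asked to disable. */
static int push_to_hw(bool en)
{
        return en ? 0 : -1;
}

static int set_gro(struct dev_state *st, bool enable)
{
        bool old = st->gro_en;
        int ret;

        st->gro_en = enable;
        ret = push_to_hw(st->gro_en);
        if (ret)
                st->gro_en = old;   /* keep cache consistent on failure */
        return ret;
}

int main(void)
{
        struct dev_state st = { .gro_en = true };

        if (set_gro(&st, false))
                printf("disable failed, cache still %d\n", st.gro_en);
        return 0;
}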
memcpy(resp->additional_info, req->msg.resp_data, + HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8)); + if (req->match_id) { + /* If match_id is not zero, it means the PF supports match_id. + * If the match_id is right, the VF gets the right response; + * otherwise the response is ignored. The driver will clear + * hdev->mbx_resp when sending the next message that needs a + * response. + */ + if (req->match_id == resp->match_id) + resp->received_resp = true; + } else { + resp->received_resp = true; + } +} + +static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev, + struct hclge_mbx_pf_to_vf_cmd *req) +{ + /* we will drop the async msg if we find the ARQ full + * and continue with the next message + */ + if (atomic_read(&hdev->arq.count) >= + HCLGE_MBX_MAX_ARQ_MSG_NUM) { + dev_warn(&hdev->pdev->dev, + "Async Q full, dropping msg(%u)\n", + req->msg.code); + return; + } + + /* queue the async message at the tail of the arq */ + memcpy(hdev->arq.msg_q[hdev->arq.tail], &req->msg, + HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16)); + hclge_mbx_tail_ptr_move_arq(hdev->arq); + atomic_inc(&hdev->arq.count); + + hclgevf_mbx_task_schedule(hdev); +} + void hclgevf_mbx_handler(struct hclgevf_dev *hdev) { - struct hclgevf_mbx_resp_status *resp; struct hclge_mbx_pf_to_vf_cmd *req; struct hclgevf_cmq_ring *crq; struct hclgevf_desc *desc; - u16 *msg_q; u16 flag; - u8 *temp; - int i; - resp = &hdev->mbx_resp; crq = &hdev->hw.cmq.crq; while (!hclgevf_cmd_crq_empty(&hdev->hw)) { @@ -200,69 +248,14 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) */ switch (req->msg.code) { case HCLGE_MBX_PF_VF_RESP: - if (resp->received_resp) - dev_warn(&hdev->pdev->dev, - "VF mbx resp flag not clear(%u)\n", - req->msg.vf_mbx_msg_code); - resp->received_resp = true; - - resp->origin_mbx_msg = - (req->msg.vf_mbx_msg_code << 16); - resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode; - resp->resp_status = - hclgevf_resp_to_errno(req->msg.resp_status); - - temp = (u8 *)req->msg.resp_data; - for (i = 0; i < HCLGE_MBX_MAX_RESP_DATA_SIZE; i++) { - resp->additional_info[i] = *temp; - temp++; - } - - /* If match_id is not zero, it means PF support - * match_id. If the match_id is right, VF get the - * right response, otherwise ignore the response. - * Driver will clear hdev->mbx_resp when send - * next message which need response. - */ - if (req->match_id) { - if (req->match_id == resp->match_id) - resp->received_resp = true; - } else { - resp->received_resp = true; - } + hclgevf_handle_mbx_response(hdev, req); break; case HCLGE_MBX_LINK_STAT_CHANGE: case HCLGE_MBX_ASSERTING_RESET: case HCLGE_MBX_LINK_STAT_MODE: case HCLGE_MBX_PUSH_VLAN_INFO: case HCLGE_MBX_PUSH_PROMISC_INFO: - /* set this mbx event as pending. This is required as we - * might loose interrupt event when mbx task is busy - * handling. This shall be cleared when mbx task just - * enters handling state. 
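The match_id handling above is a small request/response correlation protocol: the VF stamps each request, and a response is accepted only when the PF echoes the same stamp back; a zero stamp means the PF predates the feature, so every response is accepted. A toy model of just the acceptance rule:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Accept a response only if the PF echoed our match_id; a zero
 * req_match_id means the PF does not support matching at all.
 */
static bool accept_response(uint16_t req_match_id, uint16_t our_match_id)
{
        if (req_match_id)
                return req_match_id == our_match_id;
        return true;    /* legacy PF: accept unconditionally */
}

int main(void)
{
        printf("%d\n", accept_response(7, 7)); /* 1: matched */
        printf("%d\n", accept_response(7, 9)); /* 0: stale, ignored */
        printf("%d\n", accept_response(0, 9)); /* 1: legacy PF */
        return 0;
}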
- */ - hdev->mbx_event_pending = true; - - /* we will drop the async msg if we find ARQ as full - * and continue with next message - */ - if (atomic_read(&hdev->arq.count) >= - HCLGE_MBX_MAX_ARQ_MSG_NUM) { - dev_warn(&hdev->pdev->dev, - "Async Q full, dropping msg(%u)\n", - req->msg.code); - break; - } - - /* tail the async message in arq */ - msg_q = hdev->arq.msg_q[hdev->arq.tail]; - memcpy(&msg_q[0], &req->msg, - HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16)); - hclge_mbx_tail_ptr_move_arq(hdev->arq); - atomic_inc(&hdev->arq.count); - - hclgevf_mbx_task_schedule(hdev); - + hclgevf_handle_mbx_msg(hdev, req); break; default: dev_err(&hdev->pdev->dev, @@ -298,11 +291,6 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) u8 flag; u8 idx; - /* we can safely clear it now as we are at start of the async message - * processing - */ - hdev->mbx_event_pending = false; - tail = hdev->arq.tail; /* process all the async queue messages */ @@ -323,8 +311,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) flag = (u8)msg_q[5]; /* update upper layer with new link link status */ - hclgevf_update_link_status(hdev, link_status); hclgevf_update_speed_duplex(hdev, speed, duplex); + hclgevf_update_link_status(hdev, link_status); if (flag & HCLGE_MBX_PUSH_LINK_STATUS_EN) set_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 2f97c9f5611d..60c582a16821 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1009,6 +1009,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) { u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) | link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND; + u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */ + u16 lat_enc_d = 0; /* latency decoded */ u16 lat_enc = 0; /* latency encoded */ if (link) { @@ -1062,7 +1064,17 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop); max_ltr_enc = max_t(u16, max_snoop, max_nosnoop); - if (lat_enc > max_ltr_enc) + lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) * + (1U << (E1000_LTRV_SCALE_FACTOR * + ((lat_enc & E1000_LTRV_SCALE_MASK) + >> E1000_LTRV_SCALE_SHIFT))); + + max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) * + (1U << (E1000_LTRV_SCALE_FACTOR * + ((max_ltr_enc & E1000_LTRV_SCALE_MASK) + >> E1000_LTRV_SCALE_SHIFT))); + + if (lat_enc_d > max_ltr_enc_d) lat_enc = max_ltr_enc; } @@ -4124,13 +4136,17 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) return ret_val; if (!(data & valid_csum_mask)) { - data |= valid_csum_mask; - ret_val = e1000_write_nvm(hw, word, 1, &data); - if (ret_val) - return ret_val; - ret_val = e1000e_update_nvm_checksum(hw); - if (ret_val) - return ret_val; + e_dbg("NVM Checksum Invalid\n"); + + if (hw->mac.type < e1000_pch_cnp) { + data |= valid_csum_mask; + ret_val = e1000_write_nvm(hw, word, 1, &data); + if (ret_val) + return ret_val; + ret_val = e1000e_update_nvm_checksum(hw); + if (ret_val) + return ret_val; + } } return e1000e_validate_nvm_checksum_generic(hw); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 9b145f6248a8..d6a092e5ee74 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -277,8 +277,11 @@ /* Latency Tolerance Reporting */ #define E1000_LTRV 0x000F8 +#define E1000_LTRV_VALUE_MASK 0x000003FF #define 
E1000_LTRV_SCALE_MAX 5 #define E1000_LTRV_SCALE_FACTOR 5 +#define E1000_LTRV_SCALE_SHIFT 10 +#define E1000_LTRV_SCALE_MASK 0x00001C00 #define E1000_LTRV_REQ_SHIFT 15 #define E1000_LTRV_NOSNOOP_SHIFT 16 #define E1000_LTRV_SEND (1 << 30) diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index 8c863d64930b..14afce82ef63 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -42,7 +42,9 @@ static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx) status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf)); if (status) - return -EIO; + /* We failed to locate the PBA, so just skip this entry */ + dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n", + ice_stat_str(status)); return 0; } diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 2d17a6da63cf..3e386c38d016 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -98,6 +98,13 @@ struct igc_ring { u32 start_time; u32 end_time; + /* CBS parameters */ + bool cbs_enable; /* indicates if CBS is enabled */ + s32 idleslope; /* idleSlope in kbps */ + s32 sendslope; /* sendSlope in kbps */ + s32 hicredit; /* hiCredit in bytes */ + s32 locredit; /* loCredit in bytes */ + /* everything past this point are written often */ u16 next_to_clean; u16 next_to_use; @@ -290,6 +297,10 @@ extern char igc_driver_name[]; #define IGC_FLAG_VLAN_PROMISC BIT(15) #define IGC_FLAG_RX_LEGACY BIT(16) #define IGC_FLAG_TSN_QBV_ENABLED BIT(17) +#define IGC_FLAG_TSN_QAV_ENABLED BIT(18) + +#define IGC_FLAG_TSN_ANY_ENABLED \ + (IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED) #define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6) #define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7) diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index c40563350a5e..a4bbee748798 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -518,6 +518,14 @@ #define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001 #define IGC_TXQCTL_STRICT_CYCLE 0x00000002 #define IGC_TXQCTL_STRICT_END 0x00000004 +#define IGC_TXQCTL_QAV_SEL_MASK 0x000000C0 +#define IGC_TXQCTL_QAV_SEL_CBS0 0x00000080 +#define IGC_TXQCTL_QAV_SEL_CBS1 0x000000C0 + +#define IGC_TQAVCC_IDLESLOPE_MASK 0xFFFF +#define IGC_TQAVCC_KEEP_CREDITS BIT(30) + +#define IGC_MAX_SR_QUEUES 2 /* Receive Checksum Control */ #define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index db1c63e8802a..b877efae61df 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -120,7 +120,7 @@ void igc_reset(struct igc_adapter *adapter) igc_ptp_reset(adapter); /* Re-enable TSN offloading, where applicable. 
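The e1000e change above fixes a real comparison bug: LTR values are a 10-bit mantissa plus a 3-bit scale, so comparing raw encodings can order them incorrectly; both sides must first be decoded to value * 2^(5 * scale). The decode in isolation, using the mask and shift values from the ich8lan.h hunk:

#include <stdint.h>
#include <stdio.h>

#define LTRV_VALUE_MASK   0x03FF /* 10-bit latency value */
#define LTRV_SCALE_MASK   0x1C00 /* 3-bit scale */
#define LTRV_SCALE_SHIFT  10
#define LTRV_SCALE_FACTOR 5      /* each scale step multiplies by 2^5 */

static uint32_t ltr_decode(uint16_t enc)
{
        uint16_t value = enc & LTRV_VALUE_MASK;
        uint16_t scale = (enc & LTRV_SCALE_MASK) >> LTRV_SCALE_SHIFT;

        return value * (1u << (LTRV_SCALE_FACTOR * scale));
}

int main(void)
{
        /* Raw 0x0401 > raw 0x03FF, yet decoded 32 < 1023: comparing
         * the encodings directly inverts the real ordering.
         */
        printf("%u vs %u\n", ltr_decode(0x0401), ltr_decode(0x03FF));
        return 0;
}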
*/ - igc_tsn_offload_apply(adapter); + igc_tsn_reset(adapter); igc_get_phy_info(hw); } @@ -151,6 +151,9 @@ static void igc_release_hw_control(struct igc_adapter *adapter) struct igc_hw *hw = &adapter->hw; u32 ctrl_ext; + if (!pci_device_is_present(adapter->pdev)) + return; + /* Let firmware take over control of h/w */ ctrl_ext = rd32(IGC_CTRL_EXT); wr32(IGC_CTRL_EXT, @@ -4765,26 +4768,29 @@ void igc_down(struct igc_adapter *adapter) igc_ptp_suspend(adapter); - /* disable receives in the hardware */ - rctl = rd32(IGC_RCTL); - wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); - /* flush and sleep below */ - + if (pci_device_is_present(adapter->pdev)) { + /* disable receives in the hardware */ + rctl = rd32(IGC_RCTL); + wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); + /* flush and sleep below */ + } /* set trans_start so we don't get spurious watchdogs during reset */ netif_trans_update(netdev); netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); - /* disable transmits in the hardware */ - tctl = rd32(IGC_TCTL); - tctl &= ~IGC_TCTL_EN; - wr32(IGC_TCTL, tctl); - /* flush both disables and wait for them to finish */ - wrfl(); - usleep_range(10000, 20000); + if (pci_device_is_present(adapter->pdev)) { + /* disable transmits in the hardware */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_EN; + wr32(IGC_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + usleep_range(10000, 20000); - igc_irq_disable(adapter); + igc_irq_disable(adapter); + } adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; @@ -5743,7 +5749,6 @@ static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, bool enable) { struct igc_ring *ring; - int i; if (queue < 0 || queue >= adapter->num_tx_queues) return -EINVAL; @@ -5751,17 +5756,6 @@ static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, ring = adapter->tx_ring[queue]; ring->launchtime_enable = enable; - if (adapter->base_time) - return 0; - - adapter->cycle_time = NSEC_PER_SEC; - - for (i = 0; i < adapter->num_tx_queues; i++) { - ring = adapter->tx_ring[i]; - ring->start_time = 0; - ring->end_time = NSEC_PER_SEC; - } - return 0; } @@ -5806,7 +5800,7 @@ static bool validate_schedule(struct igc_adapter *adapter, if (e->command != TC_TAPRIO_CMD_SET_GATES) return false; - for (i = 0; i < IGC_MAX_TX_QUEUES; i++) { + for (i = 0; i < adapter->num_tx_queues; i++) { if (e->gate_mask & BIT(i)) queue_uses[i]++; @@ -5834,16 +5828,31 @@ static int igc_tsn_enable_launchtime(struct igc_adapter *adapter, return igc_tsn_offload_apply(adapter); } +static int igc_tsn_clear_schedule(struct igc_adapter *adapter) +{ + int i; + + adapter->base_time = 0; + adapter->cycle_time = NSEC_PER_SEC; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + ring->start_time = 0; + ring->end_time = NSEC_PER_SEC; + } + + return 0; +} + static int igc_save_qbv_schedule(struct igc_adapter *adapter, struct tc_taprio_qopt_offload *qopt) { u32 start_time = 0, end_time = 0; size_t n; - if (!qopt->enable) { - adapter->base_time = 0; - return 0; - } + if (!qopt->enable) + return igc_tsn_clear_schedule(adapter); if (adapter->base_time) return -EALREADY; @@ -5863,7 +5872,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, end_time += e->interval; - for (i = 0; i < IGC_MAX_TX_QUEUES; i++) { + for (i = 0; i < adapter->num_tx_queues; i++) { struct igc_ring *ring = adapter->tx_ring[i]; if (!(e->gate_mask & BIT(i))) @@ -5895,6 +5904,74 @@ static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter, return 
igc_tsn_offload_apply(adapter); } +static int igc_save_cbs_params(struct igc_adapter *adapter, int queue, + bool enable, int idleslope, int sendslope, + int hicredit, int locredit) +{ + bool cbs_status[IGC_MAX_SR_QUEUES] = { false }; + struct net_device *netdev = adapter->netdev; + struct igc_ring *ring; + int i; + + /* i225 has two sets of credit-based shaper logic. + * Supporting it only on the top two priority queues + */ + if (queue < 0 || queue > 1) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + + for (i = 0; i < IGC_MAX_SR_QUEUES; i++) + if (adapter->tx_ring[i]) + cbs_status[i] = adapter->tx_ring[i]->cbs_enable; + + /* CBS should be enabled on the highest priority queue first in order + * for the CBS algorithm to operate as intended. + */ + if (enable) { + if (queue == 1 && !cbs_status[0]) { + netdev_err(netdev, + "Enabling CBS on queue1 before queue0\n"); + return -EINVAL; + } + } else { + if (queue == 0 && cbs_status[1]) { + netdev_err(netdev, + "Disabling CBS on queue0 before queue1\n"); + return -EINVAL; + } + } + + ring->cbs_enable = enable; + ring->idleslope = idleslope; + ring->sendslope = sendslope; + ring->hicredit = hicredit; + ring->locredit = locredit; + + return 0; +} + +static int igc_tsn_enable_cbs(struct igc_adapter *adapter, + struct tc_cbs_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable, + qopt->idleslope, qopt->sendslope, + qopt->hicredit, qopt->locredit); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { @@ -5907,6 +5984,9 @@ static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, case TC_SETUP_QDISC_ETF: return igc_tsn_enable_launchtime(adapter, type_data); + case TC_SETUP_QDISC_CBS: + return igc_tsn_enable_cbs(adapter, type_data); + default: return -EOPNOTSUPP; } @@ -6333,6 +6413,8 @@ static int igc_probe(struct pci_dev *pdev, igc_ptp_init(adapter); + igc_tsn_clear_schedule(adapter); + /* reset the hardware with the new settings */ igc_reset(adapter); diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index f6848181cdbd..0f021909b430 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -1000,7 +1000,8 @@ void igc_ptp_suspend(struct igc_adapter *adapter) adapter->ptp_tx_skb = NULL; clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); - igc_ptp_time_save(adapter); + if (pci_device_is_present(adapter->pdev)) + igc_ptp_time_save(adapter); } /** diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index dbba2eb2a247..e197a33d93a0 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -236,6 +236,9 @@ #define IGC_ENDQT(_n) (0x3334 + 0x4 * (_n)) #define IGC_DTXMXPKTSZ 0x355C +#define IGC_TQAVCC(_n) (0x3004 + ((_n) * 0x40)) +#define IGC_TQAVHC(_n) (0x300C + ((_n) * 0x40)) + /* System Time Registers */ #define IGC_SYSTIML 0x0B600 /* System time register Low - RO */ #define IGC_SYSTIMH 0x0B604 /* System time register High - RO */ diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c index 4dbbb8a32ce9..0fce22de2ab8 100644 --- a/drivers/net/ethernet/intel/igc/igc_tsn.c +++ 
b/drivers/net/ethernet/intel/igc/igc_tsn.c @@ -18,8 +18,38 @@ static bool is_any_launchtime(struct igc_adapter *adapter) return false; } +static bool is_cbs_enabled(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (ring->cbs_enable) + return true; + } + + return false; +} + +static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter) +{ + unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED; + + if (adapter->base_time) + new_flags |= IGC_FLAG_TSN_QBV_ENABLED; + + if (is_any_launchtime(adapter)) + new_flags |= IGC_FLAG_TSN_QBV_ENABLED; + + if (is_cbs_enabled(adapter)) + new_flags |= IGC_FLAG_TSN_QAV_ENABLED; + + return new_flags; +} + /* Returns the TSN specific registers to their default values after - * TSN offloading is disabled. + * the adapter is reset. */ static int igc_tsn_disable_offload(struct igc_adapter *adapter) { @@ -27,11 +57,6 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter) u32 tqavctrl; int i; - if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED)) - return 0; - - adapter->cycle_time = 0; - wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT); @@ -41,12 +66,6 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter) wr32(IGC_TQAVCTRL, tqavctrl); for (i = 0; i < adapter->num_tx_queues; i++) { - struct igc_ring *ring = adapter->tx_ring[i]; - - ring->start_time = 0; - ring->end_time = 0; - ring->launchtime_enable = false; - wr32(IGC_TXQCTL(i), 0); wr32(IGC_STQT(i), 0); wr32(IGC_ENDQT(i), NSEC_PER_SEC); @@ -68,9 +87,6 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) ktime_t base_time, systim; int i; - if (adapter->flags & IGC_FLAG_TSN_QBV_ENABLED) - return 0; - cycle = adapter->cycle_time; base_time = adapter->base_time; @@ -88,6 +104,8 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) { struct igc_ring *ring = adapter->tx_ring[i]; u32 txqctl = 0; + u16 cbs_value; + u32 tqavcc; wr32(IGC_STQT(i), ring->start_time); wr32(IGC_ENDQT(i), ring->end_time); @@ -105,6 +123,90 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) if (ring->launchtime_enable) txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT; + /* Skip configuring CBS for Q2 and Q3 */ + if (i > 1) + goto skip_cbs; + + if (ring->cbs_enable) { + if (i == 0) + txqctl |= IGC_TXQCTL_QAV_SEL_CBS0; + else + txqctl |= IGC_TXQCTL_QAV_SEL_CBS1; + + /* According to i225 datasheet section 7.5.2.7, we + * should set the 'idleSlope' field from TQAVCC + * register following the equation: + * + * value = link-speed 0x7736 * BW * 0.2 + * ---------- * ----------------- (E1) + * 100Mbps 2.5 + * + * Note that 'link-speed' is in Mbps. + * + * 'BW' is the percentage bandwidth out of full + * link speed which can be found with the + * following equation. Note that idleSlope here + * is the parameter from this function + * which is in kbps. + * + * BW = idleSlope + * ----------------- (E2) + * link-speed * 1000 + * + * That said, we can come up with a generic + * equation to calculate the value we should set + * it TQAVCC register by replacing 'BW' in E1 by E2. + * The resulting equation is: + * + * value = link-speed * 0x7736 * idleSlope * 0.2 + * ------------------------------------- (E3) + * 100 * 2.5 * link-speed * 1000 + * + * 'link-speed' is present in both sides of the + * fraction so it is canceled out. 
The final + * equation is the following: + * + * value = idleSlope * 61036 + * ----------------- (E4) + * 2500000 + * + * NOTE: For i225, given the above, we can see + * that idleslope is represented in + * 40.959433 kbps units by the value at + * the TQAVCC register (2.5Gbps / 61036), + * which reduces the granularity for + * idleslope increments. + * + * In i225 controller, the sendSlope and loCredit + * parameters from CBS are not configurable + * by software so we don't do any + * 'controller configuration' in respect to + * these parameters. + */ + cbs_value = DIV_ROUND_UP_ULL(ring->idleslope + * 61036ULL, 2500000); + + tqavcc = rd32(IGC_TQAVCC(i)); + tqavcc &= ~IGC_TQAVCC_IDLESLOPE_MASK; + tqavcc |= cbs_value | IGC_TQAVCC_KEEP_CREDITS; + wr32(IGC_TQAVCC(i), tqavcc); + + wr32(IGC_TQAVHC(i), + 0x80000000 + ring->hicredit * 0x7735); + } else { + /* Disable any CBS for the queue */ + txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK); + + /* Set idleSlope to zero. */ + tqavcc = rd32(IGC_TQAVCC(i)); + tqavcc &= ~(IGC_TQAVCC_IDLESLOPE_MASK | + IGC_TQAVCC_KEEP_CREDITS); + wr32(IGC_TQAVCC(i), tqavcc); + + /* Set hiCredit to zero. */ + wr32(IGC_TQAVHC(i), 0); + } +skip_cbs: wr32(IGC_TXQCTL(i), txqctl); } @@ -125,33 +227,41 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) wr32(IGC_BASET_H, baset_h); wr32(IGC_BASET_L, baset_l); - adapter->flags |= IGC_FLAG_TSN_QBV_ENABLED; - return 0; } -int igc_tsn_offload_apply(struct igc_adapter *adapter) +int igc_tsn_reset(struct igc_adapter *adapter) { - bool is_any_enabled = adapter->base_time || is_any_launchtime(adapter); + unsigned int new_flags; + int err = 0; - if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED) && !is_any_enabled) - return 0; + new_flags = igc_tsn_new_flags(adapter); - if (!is_any_enabled) { - int err = igc_tsn_disable_offload(adapter); + if (!(new_flags & IGC_FLAG_TSN_ANY_ENABLED)) + return igc_tsn_disable_offload(adapter); - if (err < 0) - return err; + err = igc_tsn_enable_offload(adapter); + if (err < 0) + return err; - /* The BASET registers aren't cleared when writing - * into them, force a reset if the interface is - * running. - */ - if (netif_running(adapter->netdev)) - schedule_work(&adapter->reset_task); + adapter->flags = new_flags; + return err; +} + +int igc_tsn_offload_apply(struct igc_adapter *adapter) +{ + int err; + + if (netif_running(adapter->netdev)) { + schedule_work(&adapter->reset_task); return 0; } - return igc_tsn_enable_offload(adapter); + err = igc_tsn_enable_offload(adapter); + if (err < 0) + return err; + + adapter->flags = igc_tsn_new_flags(adapter); + return 0; } diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h index f76bc86ddccd..1512307f5a52 100644 --- a/drivers/net/ethernet/intel/igc/igc_tsn.h +++ b/drivers/net/ethernet/intel/igc/igc_tsn.h @@ -5,5 +5,6 @@ #define _IGC_TSN_H_ int igc_tsn_offload_apply(struct igc_adapter *adapter); +int igc_tsn_reset(struct igc_adapter *adapter); #endif /* _IGC_BASE_H */ diff --git a/drivers/net/ethernet/litex/Kconfig b/drivers/net/ethernet/litex/Kconfig new file mode 100644 index 000000000000..265dba414b41 --- /dev/null +++ b/drivers/net/ethernet/litex/Kconfig @@ -0,0 +1,27 @@ +# +# LiteX device configuration +# + +config NET_VENDOR_LITEX + bool "LiteX devices" + default y + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about LiteX devices. 
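Plugging numbers into equation (E4) above makes the i225 idleSlope granularity concrete: one TQAVCC unit is 2.5 Gbps / 61036, roughly 40.96 kbps, and the round-up means the hardware always reserves at least the requested bandwidth. A checkable sketch of the same arithmetic (userspace DIV_ROUND_UP stands in for the kernel's DIV_ROUND_UP_ULL):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Equation (E4): TQAVCC idleSlope field for the i225. */
static uint32_t tqavcc_idleslope(uint64_t idleslope_kbps)
{
        return (uint32_t)DIV_ROUND_UP(idleslope_kbps * 61036ull, 2500000ull);
}

int main(void)
{
        /* 20 Mbps request -> 489 units ~= 20029 kbps reserved */
        printf("%u\n", tqavcc_idleslope(20000));
        /* 750 Mbps, a typical class-A ceiling on 1G links -> 18311 */
        printf("%u\n", tqavcc_idleslope(750000));
        return 0;
}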
If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_LITEX + +config LITEX_LITEETH + tristate "LiteX Ethernet support" + help + If you wish to compile a kernel for hardware with a LiteX LiteEth + device then you should answer Y to this. + + LiteX is a soft system-on-chip that targets FPGAs. LiteETH is a basic + network device that is commonly used in LiteX designs. + +endif # NET_VENDOR_LITEX diff --git a/drivers/net/ethernet/litex/Makefile b/drivers/net/ethernet/litex/Makefile new file mode 100644 index 000000000000..9343b73b8e49 --- /dev/null +++ b/drivers/net/ethernet/litex/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the LiteX network device drivers. +# + +obj-$(CONFIG_LITEX_LITEETH) += litex_liteeth.o diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c new file mode 100644 index 000000000000..10e6f2dedfad --- /dev/null +++ b/drivers/net/ethernet/litex/litex_liteeth.c @@ -0,0 +1,317 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * LiteX Liteeth Ethernet + * + * Copyright 2017 Joel Stanley <[email protected]> + * + */ + +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/litex.h> +#include <linux/module.h> +#include <linux/of_net.h> +#include <linux/platform_device.h> + +#define LITEETH_WRITER_SLOT 0x00 +#define LITEETH_WRITER_LENGTH 0x04 +#define LITEETH_WRITER_ERRORS 0x08 +#define LITEETH_WRITER_EV_STATUS 0x0C +#define LITEETH_WRITER_EV_PENDING 0x10 +#define LITEETH_WRITER_EV_ENABLE 0x14 +#define LITEETH_READER_START 0x18 +#define LITEETH_READER_READY 0x1C +#define LITEETH_READER_LEVEL 0x20 +#define LITEETH_READER_SLOT 0x24 +#define LITEETH_READER_LENGTH 0x28 +#define LITEETH_READER_EV_STATUS 0x2C +#define LITEETH_READER_EV_PENDING 0x30 +#define LITEETH_READER_EV_ENABLE 0x34 +#define LITEETH_PREAMBLE_CRC 0x38 +#define LITEETH_PREAMBLE_ERRORS 0x3C +#define LITEETH_CRC_ERRORS 0x40 + +#define LITEETH_PHY_CRG_RESET 0x00 +#define LITEETH_MDIO_W 0x04 +#define LITEETH_MDIO_R 0x0C + +#define DRV_NAME "liteeth" + +struct liteeth { + void __iomem *base; + struct net_device *netdev; + struct device *dev; + u32 slot_size; + + /* Tx */ + u32 tx_slot; + u32 num_tx_slots; + void __iomem *tx_base; + + /* Rx */ + u32 rx_slot; + u32 num_rx_slots; + void __iomem *rx_base; +}; + +static int liteeth_rx(struct net_device *netdev) +{ + struct liteeth *priv = netdev_priv(netdev); + struct sk_buff *skb; + unsigned char *data; + u8 rx_slot; + int len; + + rx_slot = litex_read8(priv->base + LITEETH_WRITER_SLOT); + len = litex_read32(priv->base + LITEETH_WRITER_LENGTH); + + if (len == 0 || len > 2048) + goto rx_drop; + + skb = netdev_alloc_skb_ip_align(netdev, len); + if (!skb) { + netdev_err(netdev, "couldn't get memory\n"); + goto rx_drop; + } + + data = skb_put(skb, len); + memcpy_fromio(data, priv->rx_base + rx_slot * priv->slot_size, len); + skb->protocol = eth_type_trans(skb, netdev); + + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += len; + + return netif_rx(skb); + +rx_drop: + netdev->stats.rx_dropped++; + netdev->stats.rx_errors++; + + return NET_RX_DROP; +} + +static irqreturn_t liteeth_interrupt(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct liteeth *priv = netdev_priv(netdev); + u8 reg; + + reg = litex_read8(priv->base + LITEETH_READER_EV_PENDING); + if (reg) { + if (netif_queue_stopped(netdev)) + netif_wake_queue(netdev); + litex_write8(priv->base + LITEETH_READER_EV_PENDING, reg); + } + + reg = litex_read8(priv->base + 
LITEETH_WRITER_EV_PENDING); + if (reg) { + liteeth_rx(netdev); + litex_write8(priv->base + LITEETH_WRITER_EV_PENDING, reg); + } + + return IRQ_HANDLED; +} + +static int liteeth_open(struct net_device *netdev) +{ + struct liteeth *priv = netdev_priv(netdev); + int err; + + /* Clear pending events */ + litex_write8(priv->base + LITEETH_WRITER_EV_PENDING, 1); + litex_write8(priv->base + LITEETH_READER_EV_PENDING, 1); + + err = request_irq(netdev->irq, liteeth_interrupt, 0, netdev->name, netdev); + if (err) { + netdev_err(netdev, "failed to request irq %d\n", netdev->irq); + return err; + } + + /* Enable IRQs */ + litex_write8(priv->base + LITEETH_WRITER_EV_ENABLE, 1); + litex_write8(priv->base + LITEETH_READER_EV_ENABLE, 1); + + netif_carrier_on(netdev); + netif_start_queue(netdev); + + return 0; +} + +static int liteeth_stop(struct net_device *netdev) +{ + struct liteeth *priv = netdev_priv(netdev); + + netif_stop_queue(netdev); + netif_carrier_off(netdev); + + litex_write8(priv->base + LITEETH_WRITER_EV_ENABLE, 0); + litex_write8(priv->base + LITEETH_READER_EV_ENABLE, 0); + + free_irq(netdev->irq, netdev); + + return 0; +} + +static int liteeth_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct liteeth *priv = netdev_priv(netdev); + void __iomem *txbuffer; + + if (!litex_read8(priv->base + LITEETH_READER_READY)) { + if (net_ratelimit()) + netdev_err(netdev, "LITEETH_READER_READY not ready\n"); + + netif_stop_queue(netdev); + + return NETDEV_TX_BUSY; + } + + /* Reject oversize packets */ + if (unlikely(skb->len > priv->slot_size)) { + if (net_ratelimit()) + netdev_err(netdev, "tx packet too big\n"); + + dev_kfree_skb_any(skb); + netdev->stats.tx_dropped++; + netdev->stats.tx_errors++; + + return NETDEV_TX_OK; + } + + txbuffer = priv->tx_base + priv->tx_slot * priv->slot_size; + memcpy_toio(txbuffer, skb->data, skb->len); + litex_write8(priv->base + LITEETH_READER_SLOT, priv->tx_slot); + litex_write16(priv->base + LITEETH_READER_LENGTH, skb->len); + litex_write8(priv->base + LITEETH_READER_START, 1); + + netdev->stats.tx_bytes += skb->len; + netdev->stats.tx_packets++; + + priv->tx_slot = (priv->tx_slot + 1) % priv->num_tx_slots; + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; +} + +static const struct net_device_ops liteeth_netdev_ops = { + .ndo_open = liteeth_open, + .ndo_stop = liteeth_stop, + .ndo_start_xmit = liteeth_start_xmit, +}; + +static void liteeth_setup_slots(struct liteeth *priv) +{ + struct device_node *np = priv->dev->of_node; + int err; + + err = of_property_read_u32(np, "litex,rx-slots", &priv->num_rx_slots); + if (err) { + dev_dbg(priv->dev, "unable to get litex,rx-slots, using 2\n"); + priv->num_rx_slots = 2; + } + + err = of_property_read_u32(np, "litex,tx-slots", &priv->num_tx_slots); + if (err) { + dev_dbg(priv->dev, "unable to get litex,tx-slots, using 2\n"); + priv->num_tx_slots = 2; + } + + err = of_property_read_u32(np, "litex,slot-size", &priv->slot_size); + if (err) { + dev_dbg(priv->dev, "unable to get litex,slot-size, using 0x800\n"); + priv->slot_size = 0x800; + } +} + +static int liteeth_probe(struct platform_device *pdev) +{ + struct net_device *netdev; + void __iomem *buf_base; + struct resource *res; + struct liteeth *priv; + int irq, err; + + netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv)); + if (!netdev) + return -ENOMEM; + + SET_NETDEV_DEV(netdev, &pdev->dev); + platform_set_drvdata(pdev, netdev); + + priv = netdev_priv(netdev); + priv->netdev = netdev; + priv->dev = &pdev->dev; + + irq = platform_get_irq(pdev, 0); + if 
(irq < 0) { + dev_err(&pdev->dev, "Failed to get IRQ %d\n", irq); + return irq; + } + netdev->irq = irq; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac"); + priv->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "buffer"); + buf_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(buf_base)) + return PTR_ERR(buf_base); + + liteeth_setup_slots(priv); + + /* Rx slots */ + priv->rx_base = buf_base; + priv->rx_slot = 0; + + /* Tx slots come after Rx slots */ + priv->tx_base = buf_base + priv->num_rx_slots * priv->slot_size; + priv->tx_slot = 0; + + err = of_get_mac_address(pdev->dev.of_node, netdev->dev_addr); + if (err) + eth_hw_addr_random(netdev); + + netdev->netdev_ops = &liteeth_netdev_ops; + + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "Failed to register netdev %d\n", err); + return err; + } + + netdev_info(netdev, "irq %d slots: tx %d rx %d size %d\n", + netdev->irq, priv->num_tx_slots, priv->num_rx_slots, priv->slot_size); + + return 0; +} + +static int liteeth_remove(struct platform_device *pdev) +{ + struct net_device *netdev = platform_get_drvdata(pdev); + + unregister_netdev(netdev); + free_netdev(netdev); + + return 0; +} + +static const struct of_device_id liteeth_of_match[] = { + { .compatible = "litex,liteeth" }, + { } +}; +MODULE_DEVICE_TABLE(of, liteeth_of_match); + +static struct platform_driver liteeth_driver = { + .probe = liteeth_probe, + .remove = liteeth_remove, + .driver = { + .name = DRV_NAME, + .of_match_table = liteeth_of_match, + }, +}; +module_platform_driver(liteeth_driver); + +MODULE_AUTHOR("Joel Stanley <[email protected]>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 0e6d40701862..9d460a270601 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -105,7 +105,7 @@ #define MVNETA_VLAN_PRIO_TO_RXQ 0x2440 #define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3)) #define MVNETA_PORT_STATUS 0x2444 -#define MVNETA_TX_IN_PRGRS BIT(1) +#define MVNETA_TX_IN_PRGRS BIT(0) #define MVNETA_TX_FIFO_EMPTY BIT(8) #define MVNETA_RX_MIN_FRAME_SIZE 0x247c /* Only exists on Armada XP and Armada 370 */ diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig index 2aa0ae8abfbb..3f982ccf2c85 100644 --- a/drivers/net/ethernet/marvell/octeontx2/Kconfig +++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only # -# Marvell OcteonTX2 drivers configuration +# Marvell RVU Network drivers configuration # config OCTEONTX2_MBOX diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile index cc8ac36cf687..7f4a4ca9af78 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 # -# Makefile for Marvell's OcteonTX2 RVU Admin Function driver +# Makefile for Marvell's RVU Admin Function driver # ccflags-y += -I$(src) @@ -10,4 +10,5 @@ obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o rvu_mbox-y := mbox.o rvu_trace.o rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ - rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o + rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \ + 
rvu_sdp.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 544c96c8fe1d..7f3d01059e19 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Marvell OcteonTx2 CGX driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/acpi.h> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index 237ba2b56210..ab1e4abdea38 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 CGX driver +/* Marvell OcteonTx2 CGX driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef CGX_H diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h index aa4e42f78f13..f72ec0e2506f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 CGX driver +/* Marvell OcteonTx2 CGX driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef __CGX_FW_INTF_H__ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index da824805c2ba..d9bea13f15b8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -1,11 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. + * Copyright (C) 2018 Marvell. */ #ifndef COMMON_H @@ -185,13 +181,16 @@ enum nix_scheduler { #define NIX_INTF_TYPE_CGX 0 #define NIX_INTF_TYPE_LBK 1 +#define NIX_INTF_TYPE_SDP 2 #define MAX_LMAC_PKIND 12 #define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b)) #define NIX_LINK_LBK(a) (12 + (a)) #define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c)) #define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b)) -#define NIX_CHAN_SDP_CH_START (0x700ull) +#define NIX_CHAN_SDP_CH_START (0x700ull) +#define NIX_CHAN_SDP_CHX(a) (NIX_CHAN_SDP_CH_START + (a)) +#define NIX_CHAN_SDP_NUM_CHANS 256 /* The mask is to extract lower 10-bits of channel number * which CPT will pass to X2P. 
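As context for the new SDP channel macros in common.h above: on silicon without programmable channels they carve a fixed 256-channel window for SDP rings out of the NIX channel space, starting at base 0x700 (on parts with programmable channels the base instead comes from hw->sdp_chan_base; see rvu_nix_chan_sdp() later in this series). A minimal stand-alone sketch, using only the macro definitions added in this hunk:

#include <stdio.h>

#define NIX_CHAN_SDP_CH_START	(0x700ull)
#define NIX_CHAN_SDP_CHX(a)	(NIX_CHAN_SDP_CH_START + (a))
#define NIX_CHAN_SDP_NUM_CHANS	256

int main(void)
{
	/* SDP ring 5 lands on NIX channel 0x705; valid ring indices are
	 * 0..NIX_CHAN_SDP_NUM_CHANS - 1
	 */
	printf("chan = 0x%llx\n", NIX_CHAN_SDP_CHX(5));
	return 0;
}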
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h index a8b7b1c7a1d5..c38306b3384a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h @@ -1,7 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RPM driver +/* Marvell CN10K RPM driver * * Copyright (C) 2020 Marvell. + * */ #ifndef LMAC_COMMON_H diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c index 0a37ca96aab8..2898931d5260 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/module.h> @@ -412,5 +409,5 @@ const char *otx2_mbox_id2name(u16 id) } EXPORT_SYMBOL(otx2_mbox_id2name); -MODULE_AUTHOR("Marvell International Ltd."); +MODULE_AUTHOR("Marvell."); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index add4a39edced..154877706a0e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #ifndef MBOX_H @@ -87,7 +84,7 @@ struct mbox_msghdr { #define OTX2_MBOX_REQ_SIG (0xdead) #define OTX2_MBOX_RSP_SIG (0xbeef) u16 sig; /* Signature, for validating corrupted msgs */ -#define OTX2_MBOX_VERSION (0x0007) +#define OTX2_MBOX_VERSION (0x0009) u16 ver; /* Version of msg's structure for this ID */ u16 next_msgoff; /* Offset of next msg within mailbox region */ int rc; /* Msg process'ed response code */ @@ -130,6 +127,7 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox, M(READY, 0x001, ready, msg_req, ready_msg_rsp) \ M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \ M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \ +M(FREE_RSRC_CNT, 0x004, free_rsrc_cnt, msg_req, free_rsrcs_rsp) \ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \ M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \ M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \ @@ -191,6 +189,9 @@ M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \ M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \ M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \ msg_rsp) \ +/* SDP mbox IDs (range 0x1000 - 0x11FF) */ \ +M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \ +M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \ /* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \ M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\ npc_mcam_alloc_entry_rsp) \ @@ -243,7 +244,8 @@ M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, \ M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, \ nix_txsch_alloc_req, nix_txsch_alloc_rsp) \ M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \ -M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp) \ +M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, \ + nix_txschq_config) \ M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \ M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, \ nix_vtag_config_rsp) \ @@ -268,13 +270,15 @@ M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \ nix_bp_cfg_rsp) \ M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \ M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \ -M(NIX_CN10K_AQ_ENQ, 0x8019, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \ +M(NIX_CN10K_AQ_ENQ, 0x801b, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \ nix_cn10k_aq_enq_rsp) \ M(NIX_GET_HW_INFO, 0x801c, nix_get_hw_info, msg_req, nix_hw_info) \ M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc, nix_bandprof_alloc_req, \ nix_bandprof_alloc_rsp) \ M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \ - msg_rsp) + msg_rsp) \ +M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \ + nix_bandprof_get_hwinfo_rsp) /* Messages initiated by AF (range 0xC00 - 0xDFF) */ #define MBOX_UP_CGX_MESSAGES \ @@ -363,6 +367,25 @@ struct rsrc_detach { u8 cptlfs:1; }; +/* Number of resources available to the caller. + * In reply to MBOX_MSG_FREE_RSRC_CNT. 
+ */ +struct free_rsrcs_rsp { + struct mbox_msghdr hdr; + u16 schq[NIX_TXSCH_LVL_CNT]; + u16 sso; + u16 tim; + u16 ssow; + u16 cpt; + u8 npa; + u8 nix; + u16 schq_nix1[NIX_TXSCH_LVL_CNT]; + u8 nix1; + u8 cpt1; + u8 ree0; + u8 ree1; +}; + #define MSIX_VECTOR_INVALID 0xFFFF #define MAX_RVU_BLKLF_CNT 256 @@ -370,16 +393,20 @@ struct msix_offset_rsp { struct mbox_msghdr hdr; u16 npa_msixoff; u16 nix_msixoff; - u8 sso; - u8 ssow; - u8 timlfs; - u8 cptlfs; + u16 sso; + u16 ssow; + u16 timlfs; + u16 cptlfs; u16 sso_msixoff[MAX_RVU_BLKLF_CNT]; u16 ssow_msixoff[MAX_RVU_BLKLF_CNT]; u16 timlf_msixoff[MAX_RVU_BLKLF_CNT]; u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT]; - u8 cpt1_lfs; + u16 cpt1_lfs; + u16 ree0_lfs; + u16 ree1_lfs; u16 cpt1_lf_msixoff[MAX_RVU_BLKLF_CNT]; + u16 ree0_lf_msixoff[MAX_RVU_BLKLF_CNT]; + u16 ree1_lf_msixoff[MAX_RVU_BLKLF_CNT]; }; struct get_hw_cap_rsp { @@ -594,6 +621,7 @@ struct npa_lf_alloc_rsp { u32 stack_pg_ptrs; /* No of ptrs per stack page */ u32 stack_pg_bytes; /* Size of stack page */ u16 qints; /* NPA_AF_CONST::QINTS */ + u8 cache_lines; /* BATCH ALLOC DMA */ }; /* NPA AQ enqueue msg */ @@ -698,6 +726,9 @@ struct nix_lf_alloc_req { u16 sso_func; u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */ u64 way_mask; +#define NIX_LF_RSS_TAG_LSB_AS_ADDER BIT_ULL(0) +#define NIX_LF_LBK_BLK_SEL BIT_ULL(1) + u64 flags; }; struct nix_lf_alloc_rsp { @@ -717,6 +748,7 @@ struct nix_lf_alloc_rsp { u8 cgx_links; /* No. of CGX links present in HW */ u8 lbk_links; /* No. of LBK links present in HW */ u8 sdp_links; /* No. of SDP links present in HW */ + u8 tx_link; /* Transmit channel link number */ }; struct nix_lf_free_req { @@ -835,6 +867,7 @@ struct nix_txsch_free_req { struct nix_txschq_config { struct mbox_msghdr hdr; u8 lvl; /* SMQ/MDQ/TL4/TL3/TL2/TL1 */ + u8 read; #define TXSCHQ_IDX_SHIFT 16 #define TXSCHQ_IDX_MASK (BIT_ULL(10) - 1) #define TXSCHQ_IDX(reg, shift) (((reg) >> (shift)) & TXSCHQ_IDX_MASK) @@ -842,6 +875,8 @@ struct nix_txschq_config { #define MAX_REGS_PER_MBOX_MSG 20 u64 reg[MAX_REGS_PER_MBOX_MSG]; u64 regval[MAX_REGS_PER_MBOX_MSG]; + /* All 0's => overwrite with new value */ + u64 regval_mask[MAX_REGS_PER_MBOX_MSG]; }; struct nix_vtag_config { @@ -1065,6 +1100,12 @@ struct nix_bandprof_free_req { u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC]; }; +struct nix_bandprof_get_hwinfo_rsp { + struct mbox_msghdr hdr; + u16 prof_count[BAND_PROF_NUM_LAYERS]; + u32 policer_timeunit; +}; + /* NPC mbox message structs */ #define NPC_MCAM_ENTRY_INVALID 0xFFFF @@ -1339,6 +1380,10 @@ struct set_vf_perm { struct lmtst_tbl_setup_req { struct mbox_msghdr hdr; + u64 dis_sched_early_comp :1; + u64 sch_ena :1; + u64 dis_line_pref :1; + u64 ssow_pf_func :13; u16 base_pcifunc; u8 use_local_lmt_region; u64 lmt_iova; @@ -1433,6 +1478,27 @@ struct cpt_rxc_time_cfg_req { u16 active_limit; }; +struct sdp_node_info { + /* Node to which this PF belongs */ + u8 node_id; + u8 max_vfs; + u8 num_pf_rings; + u8 pf_srn; +#define SDP_MAX_VFS 128 + u8 vf_rings[SDP_MAX_VFS]; +}; + +struct sdp_chan_info_msg { + struct mbox_msghdr hdr; + struct sdp_node_info info; +}; + +struct sdp_get_chan_info_msg { + struct mbox_msghdr hdr; + u16 chan_base; + u16 num_chan; +}; + /* CGX mailbox error codes * Range 1101 - 1200.
*/ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index 243cf8070e77..3a819b24accc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef NPC_H @@ -172,6 +169,8 @@ enum key_fields { NPC_DMAC, NPC_SMAC, NPC_ETYPE, + NPC_VLAN_ETYPE_CTAG, /* 0x8100 */ + NPC_VLAN_ETYPE_STAG, /* 0x88A8 */ NPC_OUTER_VID, NPC_TOS, NPC_SIP_IPV4, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h index fee655cc7523..588822a0cf21 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef NPC_PROFILE_H diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c index 1ee37853f338..9b8e59f4c206 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Marvell PTP driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. + * */ #include <linux/bitfield.h> @@ -19,12 +20,11 @@ #define PCI_SUBSYS_DEVID_OCTX2_98xx_PTP 0xB100 #define PCI_SUBSYS_DEVID_OCTX2_96XX_PTP 0xB200 #define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP 0xB300 -#define PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP 0xB400 +#define PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP 0xB400 #define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP 0xB500 -#define PCI_SUBSYS_DEVID_CN10K_A_PTP 0xB900 -#define PCI_SUBSYS_DEVID_CNF10K_A_PTP 0xBA00 -#define PCI_SUBSYS_DEVID_CNF10K_B_PTP 0xBC00 +#define PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP 0xB600 #define PCI_DEVID_OCTEONTX2_RST 0xA085 +#define PCI_DEVID_CN10K_PTP 0xA09E #define PCI_PTP_BAR_NO 0 #define PCI_RST_BAR_NO 0 @@ -39,6 +39,9 @@ #define RST_MUL_BITS GENMASK_ULL(38, 33) #define CLOCK_BASE_RATE 50000000ULL +static struct ptp *first_ptp_block; +static const struct pci_device_id ptp_id_table[]; + static u64 get_clock_rate(void) { u64 cfg, ret = CLOCK_BASE_RATE * 16; @@ -74,23 +77,14 @@ error: struct ptp *ptp_get(void) { - struct pci_dev *pdev; - struct ptp *ptp; + struct ptp *ptp = first_ptp_block; - /* If the PTP pci device is found on the system and ptp - * driver is bound to it then the PTP pci device is returned - * to the caller(rvu driver). 
- */ - pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, - PCI_DEVID_OCTEONTX2_PTP, NULL); - if (!pdev) + /* Check PTP block is present in hardware */ + if (!pci_dev_present(ptp_id_table)) return ERR_PTR(-ENODEV); - - ptp = pci_get_drvdata(pdev); + /* Check driver is bound to PTP block */ if (!ptp) ptp = ERR_PTR(-EPROBE_DEFER); - if (IS_ERR(ptp)) - pci_dev_put(pdev); return ptp; } @@ -190,6 +184,8 @@ static int ptp_probe(struct pci_dev *pdev, writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP); pci_set_drvdata(pdev, ptp); + if (!first_ptp_block) + first_ptp_block = ptp; return 0; @@ -204,6 +200,9 @@ error: * `dev->driver_data`. */ pci_set_drvdata(pdev, ERR_PTR(err)); + if (!first_ptp_block) + first_ptp_block = ERR_PTR(err); + return 0; } @@ -233,19 +232,14 @@ static const struct pci_device_id ptp_id_table[] = { PCI_SUBSYS_DEVID_OCTX2_95XX_PTP) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, PCI_VENDOR_ID_CAVIUM, - PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP) }, + PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, PCI_VENDOR_ID_CAVIUM, - PCI_SUBSYS_DEVID_CN10K_A_PTP) }, - { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, - PCI_VENDOR_ID_CAVIUM, - PCI_SUBSYS_DEVID_CNF10K_A_PTP) }, - { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, - PCI_VENDOR_ID_CAVIUM, - PCI_SUBSYS_DEVID_CNF10K_B_PTP) }, + PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP) }, + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_PTP) }, { 0, } }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h index 878bc395d28f..76d404b24552 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h @@ -1,7 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Marvell PTP driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. + * */ #ifndef PTP_H diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c index a91ccdc59403..07b0eafccad8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RPM driver +/* Marvell CN10K RPM driver * * Copyright (C) 2020 Marvell. * diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h index d32e74bd5964..f0b069442dcc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RPM driver +/* Marvell CN10K RPM driver * * Copyright (C) 2020 Marvell. * diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 84f0aaa8665d..ce647e037f4d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/module.h> @@ -70,18 +67,21 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu) hw->cap.nix_shaping = true; hw->cap.nix_tx_link_bp = true; hw->cap.nix_rx_multicast = true; + hw->cap.nix_shaper_toggle_wait = false; hw->rvu = rvu; - if (is_rvu_96xx_B0(rvu)) { + if (is_rvu_pre_96xx_C0(rvu)) { hw->cap.nix_fixed_txschq_mapping = true; hw->cap.nix_txsch_per_cgx_lmac = 4; hw->cap.nix_txsch_per_lbk_lmac = 132; hw->cap.nix_txsch_per_sdp_lmac = 76; hw->cap.nix_shaping = false; hw->cap.nix_tx_link_bp = false; - if (is_rvu_96xx_A0(rvu)) + if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu)) hw->cap.nix_rx_multicast = false; } + if (!is_rvu_pre_96xx_C0(rvu)) + hw->cap.nix_shaper_toggle_wait = true; if (!is_rvu_otx2(rvu)) hw->cap.per_pf_mbox_regs = true; @@ -1115,6 +1115,12 @@ cpt: goto nix_err; } + err = rvu_sdp_init(rvu); + if (err) { + dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__); + goto nix_err; + } + rvu_program_channels(rvu); return 0; @@ -1367,9 +1373,10 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) int blkaddr = BLKADDR_NIX0, vf; struct rvu_pfvf *pf; + pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); + /* All CGX mapped PFs are set with assigned NIX block during init */ if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { - pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); blkaddr = pf->nix_blkaddr; } else if (is_afvf(pcifunc)) { vf = pcifunc - 1; @@ -1382,6 +1389,10 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) blkaddr = BLKADDR_NIX0; } + /* if SDP1 then the blkaddr is NIX1 */ + if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1) + blkaddr = BLKADDR_NIX1; + switch (blkaddr) { case BLKADDR_NIX1: pfvf->nix_blkaddr = BLKADDR_NIX1; @@ -1782,6 +1793,99 @@ int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req, return 0; } +int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req, + struct free_rsrcs_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + struct nix_txsch *txsch; + struct nix_hw *nix_hw; + + mutex_lock(&rvu->rsrc_lock); + + block = &hw->block[BLKADDR_NPA]; + rsp->npa = rvu_rsrc_free_count(&block->lf); + + block = &hw->block[BLKADDR_NIX0]; + rsp->nix = rvu_rsrc_free_count(&block->lf); + + block = &hw->block[BLKADDR_NIX1]; + rsp->nix1 = rvu_rsrc_free_count(&block->lf); + + block = &hw->block[BLKADDR_SSO]; + rsp->sso = rvu_rsrc_free_count(&block->lf); + + block = &hw->block[BLKADDR_SSOW]; + rsp->ssow = rvu_rsrc_free_count(&block->lf); + + block = &hw->block[BLKADDR_TIM]; + rsp->tim = rvu_rsrc_free_count(&block->lf); + + block = &hw->block[BLKADDR_CPT0]; + rsp->cpt = rvu_rsrc_free_count(&block->lf); + + block = &hw->block[BLKADDR_CPT1]; + rsp->cpt1 = rvu_rsrc_free_count(&block->lf); + + if (rvu->hw->cap.nix_fixed_txschq_mapping) { + rsp->schq[NIX_TXSCH_LVL_SMQ] = 1; + rsp->schq[NIX_TXSCH_LVL_TL4] = 1; + rsp->schq[NIX_TXSCH_LVL_TL3] = 1; + rsp->schq[NIX_TXSCH_LVL_TL2] = 1; + /* NIX1 */ + if (!is_block_implemented(rvu->hw, BLKADDR_NIX1)) + goto out; + rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1; + rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1; + rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1; + rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1; + } else { + nix_hw = get_nix_hw(hw, BLKADDR_NIX0); + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; + rsp->schq[NIX_TXSCH_LVL_SMQ] = + rvu_rsrc_free_count(&txsch->schq); + + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4]; + rsp->schq[NIX_TXSCH_LVL_TL4] = + rvu_rsrc_free_count(&txsch->schq); + + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3]; + rsp->schq[NIX_TXSCH_LVL_TL3] = + 
rvu_rsrc_free_count(&txsch->schq); + + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2]; + rsp->schq[NIX_TXSCH_LVL_TL2] = + rvu_rsrc_free_count(&txsch->schq); + + if (!is_block_implemented(rvu->hw, BLKADDR_NIX1)) + goto out; + + nix_hw = get_nix_hw(hw, BLKADDR_NIX1); + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; + rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = + rvu_rsrc_free_count(&txsch->schq); + + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4]; + rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = + rvu_rsrc_free_count(&txsch->schq); + + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3]; + rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = + rvu_rsrc_free_count(&txsch->schq); + + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2]; + rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = + rvu_rsrc_free_count(&txsch->schq); + } + + rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1; +out: + rsp->schq[NIX_TXSCH_LVL_TL1] = 1; + mutex_unlock(&rvu->rsrc_lock); + + return 0; +} + int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { @@ -2447,11 +2551,12 @@ static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs) for (vf = 0; vf < numvfs; vf++) { if (!(intr & BIT_ULL(vf))) continue; - dev = vf + start_vf + rvu->hw->total_pfs; - queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work); /* Clear and disable the interrupt */ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf)); rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf)); + + dev = vf + start_vf + rvu->hw->total_pfs; + queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work); } } @@ -2467,14 +2572,14 @@ static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq) for (pf = 0; pf < rvu->hw->total_pfs; pf++) { if (intr & (1ULL << pf)) { - /* PF is already dead do only AF related operations */ - queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work); /* clear interrupt */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT, BIT_ULL(pf)); /* Disable the interrupt */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C, BIT_ULL(pf)); + /* PF is already dead do only AF related operations */ + queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work); } } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index d88f595e63b0..d38e5c980c30 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #ifndef RVU_H @@ -243,8 +240,11 @@ struct rvu_pfvf { u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */ u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */ u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */ + u8 lbkid; /* NIX0/1 lbk link ID */ u64 lmt_base_addr; /* Preserving the pcifunc's lmtst base addr */ + u64 lmt_map_ent_w1; /* Preserving the word1 of lmtst map table entry */ unsigned long flags; + struct sdp_node_info *sdp_info; }; enum rvu_pfvf_flags { @@ -314,6 +314,7 @@ struct nix_hw { struct nix_lso lso; struct nix_txvlan txvlan; struct nix_ipolicer *ipolicer; + u64 *tx_credits; }; /* RVU block's capabilities or functionality, @@ -327,6 +328,7 @@ struct hw_cap { u16 nix_txsch_per_sdp_lmac; /* Max Q's transmitting to SDP LMAC */ bool nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */ bool nix_shaping; /* Is shaping and coloring supported */ + bool nix_shaper_toggle_wait; /* Shaping toggle needs poll/wait */ bool nix_tx_link_bp; /* Can link backpressure TL queues ? */ bool nix_rx_multicast; /* Rx packet replication support */ bool nix_common_dwrr_mtu; /* Common DWRR MTU for quantum config */ @@ -516,20 +518,34 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset) } /* Silicon revisions */ +static inline bool is_rvu_pre_96xx_C0(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + /* 96XX A0/B0, 95XX A0/A1/B0 chips */ + return ((pdev->revision == 0x00) || (pdev->revision == 0x01) || + (pdev->revision == 0x10) || (pdev->revision == 0x11) || + (pdev->revision == 0x14)); +} + static inline bool is_rvu_96xx_A0(struct rvu *rvu) { struct pci_dev *pdev = rvu->pdev; - return (pdev->revision == 0x00) && - (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX); + return (pdev->revision == 0x00); } static inline bool is_rvu_96xx_B0(struct rvu *rvu) { struct pci_dev *pdev = rvu->pdev; - return ((pdev->revision == 0x00) || (pdev->revision == 0x01)) && - (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX); + return (pdev->revision == 0x00) || (pdev->revision == 0x01); +} + +static inline bool is_rvu_95xx_A0(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + + return (pdev->revision == 0x10) || (pdev->revision == 0x11); } /* REVID for PCIe devices.
@@ -538,9 +554,10 @@ static inline bool is_rvu_96xx_B0(struct rvu *rvu) */ #define PCI_REVISION_ID_96XX 0x00 #define PCI_REVISION_ID_95XX 0x10 -#define PCI_REVISION_ID_LOKI 0x20 +#define PCI_REVISION_ID_95XXN 0x20 #define PCI_REVISION_ID_98XX 0x30 #define PCI_REVISION_ID_95XXMM 0x40 +#define PCI_REVISION_ID_95XXO 0xE0 static inline bool is_rvu_otx2(struct rvu *rvu) { @@ -549,8 +566,8 @@ static inline bool is_rvu_otx2(struct rvu *rvu) u8 midr = pdev->revision & 0xF0; return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX || - midr == PCI_REVISION_ID_LOKI || midr == PCI_REVISION_ID_98XX || - midr == PCI_REVISION_ID_95XXMM); + midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX || + midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO); } static inline u16 rvu_nix_chan_cgx(struct rvu *rvu, u8 cgxid, @@ -580,6 +597,16 @@ static inline u16 rvu_nix_chan_lbk(struct rvu *rvu, u8 lbkid, return rvu->hw->lbk_chan_base + lbkid * lbk_chans + chan; } +static inline u16 rvu_nix_chan_sdp(struct rvu *rvu, u8 chan) +{ + struct rvu_hwinfo *hw = rvu->hw; + + if (!hw->cap.programmable_chans) + return NIX_CHAN_SDP_CHX(chan); + + return hw->sdp_chan_base + chan; +} + static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan) { return rvu->hw->cpt_chan_base + chan; @@ -642,10 +669,17 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue, int qsize, int inst_size, int res_size); void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq); +/* SDP APIs */ +int rvu_sdp_init(struct rvu *rvu); +bool is_sdp_pfvf(u16 pcifunc); +bool is_sdp_pf(u16 pcifunc); +bool is_sdp_vf(u16 pcifunc); + /* CGX APIs */ static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf) { - return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs); + return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs) && + !is_sdp_pf(pf << RVU_PFVF_PF_SHIFT); } static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id) @@ -749,7 +783,6 @@ bool is_npc_intf_tx(u8 intf); bool is_npc_intf_rx(u8 intf); bool is_npc_interface_valid(struct rvu *rvu, u8 intf); int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena); -int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel); int npc_flow_steering_init(struct rvu *rvu, int blkaddr); const char *npc_get_field_name(u8 hdr); int npc_get_bank(struct npc_mcam *mcam, int index); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index d34e59525a09..81e8ea9ee30e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/types.h> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c index dbe9149a215e..46a41cfff575 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell RPM CN10K driver +/* Marvell RPM CN10K driver * * Copyright (C) 2020 Marvell. 
*/ @@ -49,6 +49,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, return 0; } +#define LMT_MAP_TBL_W1_OFF 8 static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) { return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) + @@ -131,9 +132,11 @@ int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu, struct lmtst_tbl_setup_req *req, struct msg_rsp *rsp) { - u64 lmt_addr, val; - u32 pri_tbl_idx; + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); + u32 pri_tbl_idx, tbl_idx; + u64 lmt_addr; int err = 0; + u64 val; /* Check if PF_FUNC wants to use its own local memory as LMTLINE * region, if so, convert that IOVA to physical address and @@ -170,7 +173,7 @@ int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu, dev_err(rvu->dev, "Failed to read LMT map table: index 0x%x err %d\n", pri_tbl_idx, err); - return err; + goto error; } /* Update the base lmt addr of secondary with primary's base @@ -181,7 +184,53 @@ int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu, return err; } - return 0; + /* This mailbox can also be used to update word1 of APR_LMT_MAP_ENTRY_S, + * like enabling scheduled LMTST, disabling LMTLINE prefetch and + * disabling early completion for ordered LMTST. + */ + if (req->sch_ena || req->dis_sched_early_comp || req->dis_line_pref) { + tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->hdr.pcifunc); + err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF, + &val, LMT_TBL_OP_READ); + if (err) { + dev_err(rvu->dev, + "Failed to read LMT map table: index 0x%x err %d\n", + tbl_idx + LMT_MAP_TBL_W1_OFF, err); + goto error; + } + + /* Storing lmt map table entry word1 default value as this needs + * to be reverted in FLR. Also making sure this default value + * doesn't get overwritten on multiple calls to this mailbox. + */ + if (!pfvf->lmt_map_ent_w1) + pfvf->lmt_map_ent_w1 = val; + + /* Disable early completion for Ordered LMTSTs. */ + if (req->dis_sched_early_comp) + val |= (req->dis_sched_early_comp << + APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT); + /* Enable scheduled LMTST */ + if (req->sch_ena) + val |= (req->sch_ena << APR_LMT_MAP_ENT_SCH_ENA_SHIFT) | + req->ssow_pf_func; + /* Disables LMTLINE prefetch before receiving store data. */ + if (req->dis_line_pref) + val |= (req->dis_line_pref << + APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT); + + err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF, + &val, LMT_TBL_OP_WRITE); + if (err) { + dev_err(rvu->dev, + "Failed to update LMT map table: index 0x%x err %d\n", + tbl_idx + LMT_MAP_TBL_W1_OFF, err); + goto error; + } + } + +error: + return err; } /* Resetting the lmtst map table to original base addresses */ @@ -194,19 +243,36 @@ void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc) if (is_rvu_otx2(rvu)) return; - if (pfvf->lmt_base_addr) { + if (pfvf->lmt_base_addr || pfvf->lmt_map_ent_w1) { /* This corresponds to lmt map table index */ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc); /* Reverting back the original lmt base addr for the respective * pcifunc.
*/ - err = lmtst_map_table_ops(rvu, tbl_idx, &pfvf->lmt_base_addr, - LMT_TBL_OP_WRITE); - if (err) - dev_err(rvu->dev, - "Failed to update LMT map table: index 0x%x err %d\n", - tbl_idx, err); - pfvf->lmt_base_addr = 0; + if (pfvf->lmt_base_addr) { + err = lmtst_map_table_ops(rvu, tbl_idx, + &pfvf->lmt_base_addr, + LMT_TBL_OP_WRITE); + if (err) + dev_err(rvu->dev, + "Failed to update LMT map table: index 0x%x err %d\n", + tbl_idx, err); + pfvf->lmt_base_addr = 0; + } + /* Reverting back to the original word1 val of lmtst map table entry + * which underwent changes. + */ + if (pfvf->lmt_map_ent_w1) { + err = lmtst_map_table_ops(rvu, + tbl_idx + LMT_MAP_TBL_W1_OFF, + &pfvf->lmt_map_ent_w1, + LMT_TBL_OP_WRITE); + if (err) + dev_err(rvu->dev, + "Failed to update LMT map table: index 0x%x err %d\n", + tbl_idx + LMT_MAP_TBL_W1_OFF, err); + pfvf->lmt_map_ent_w1 = 0; + } } } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c index 89253f7bdadb..1f90a7403392 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c @@ -1,5 +1,9 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (C) 2020 Marvell. */ +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2020 Marvell. + * + */ #include <linux/bitfield.h> #include <linux/pci.h> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 9b2dfbf90e51..9338765da048 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2019 Marvell International Ltd. + * Copyright (C) 2019 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifdef CONFIG_DEBUG_FS diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index a55b46ad162d..274d3abe30eb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Devlink +/* Marvell RVU Admin Function Devlink * * Copyright (C) 2020 Marvell. * diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h index 471e57dedb20..51efe88dce11 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Devlink +/* Marvell RVU Admin Function Devlink * * Copyright (C) 2020 Marvell. * diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index c5e3f90e562d..8f37477e0cb5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell.
* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/module.h> @@ -290,16 +287,22 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr, return true; } -static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) +static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf, + struct nix_lf_alloc_rsp *rsp, bool loop) { - struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc); + u16 req_chan_base, req_chan_end, req_chan_cnt; + struct rvu_hwinfo *hw = rvu->hw; + struct sdp_node_info *sdp_info; + int pkind, pf, vf, lbkid, vfid; struct mac_ops *mac_ops; - int pkind, pf, vf, lbkid; u8 cgx_id, lmac_id; + bool from_vf; int err; pf = rvu_get_pf(pcifunc); - if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) + if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && + type != NIX_INTF_TYPE_SDP) return 0; switch (type) { @@ -317,10 +320,13 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) pfvf->tx_chan_base = pfvf->rx_chan_base; pfvf->rx_chan_cnt = 1; pfvf->tx_chan_cnt = 1; + rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id; + cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind); rvu_npc_set_pkind(rvu, pkind, pfvf); mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu)); + /* By default we enable pause frames */ if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0) mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id, @@ -340,6 +346,25 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) if (rvu->hw->lbk_links > 1) lbkid = vf & 0x1 ? 0 : 1; + /* By default NIX0 is configured to send a packet on lbk link 1 + * (which corresponds to LBK1); the same packet will be received + * on NIX1 over lbk link 0. If NIX1 sends a packet on lbk link 0 + * (which corresponds to LBK2), the packet will be received on + * NIX0 lbk link 1. + * But if the lbk links for NIX0 and NIX1 are negated, i.e. NIX0 + * transmits and receives on lbk link 0, which corresponds + * to the LBK1 block, back-to-back connectivity between NIX and + * LBK can be achieved (which is similar to 96xx) + * + * RX TX + * NIX0 lbk link 1 (LBK2) 1 (LBK1) + * NIX0 lbk link 0 (LBK0) 0 (LBK0) + * NIX1 lbk link 0 (LBK1) 0 (LBK2) + * NIX1 lbk link 1 (LBK3) 1 (LBK3) + */ + if (loop) + lbkid = !lbkid; + /* Note that AF's VFs work in pairs and talk over consecutive * loopback channels. Therefore if an odd number of AF VFs are * enabled then the last VF remains with no pair.
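To make the link table in the comment above concrete, here is an illustrative user-space sketch (not kernel code; pick_lbkid() is a hypothetical helper) of the loopback link selection nix_interface_init() now performs: on two-NIX silicon AF VFs alternate between LBK links, and the new NIX_LF_LBK_BLK_SEL request flag (the 'loop' parameter) negates the choice to get 96xx-style back-to-back NIX<->LBK connectivity:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the lbkid selection shown in the hunk above; 'vf' is the
 * AF-VF index and 'loop' reflects NIX_LF_LBK_BLK_SEL in the request.
 */
static int pick_lbkid(int lbk_links, int vf, bool loop)
{
	int lbkid = 0;

	if (lbk_links > 1)	/* two-NIX silicon: VFs alternate links */
		lbkid = (vf & 0x1) ? 0 : 1;
	if (loop)		/* negate for back-to-back connectivity */
		lbkid = !lbkid;
	return lbkid;
}

int main(void)
{
	printf("vf0=%d vf1=%d vf0(loop)=%d\n",
	       pick_lbkid(2, 0, false), pick_lbkid(2, 1, false),
	       pick_lbkid(2, 0, true));
	return 0;
}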
@@ -350,10 +375,51 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) rvu_nix_chan_lbk(rvu, lbkid, vf + 1); pfvf->rx_chan_cnt = 1; pfvf->tx_chan_cnt = 1; + rsp->tx_link = hw->cgx_links + lbkid; + pfvf->lbkid = lbkid; rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf); rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base, pfvf->rx_chan_cnt); + + break; + case NIX_INTF_TYPE_SDP: + from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); + parent_pf = &rvu->pf[rvu_get_pf(pcifunc)]; + sdp_info = parent_pf->sdp_info; + if (!sdp_info) { + dev_err(rvu->dev, "Invalid sdp_info pointer\n"); + return -EINVAL; + } + if (from_vf) { + req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn + + sdp_info->num_pf_rings; + vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; + for (vfid = 0; vfid < vf; vfid++) + req_chan_base += sdp_info->vf_rings[vfid]; + req_chan_cnt = sdp_info->vf_rings[vf]; + req_chan_end = req_chan_base + req_chan_cnt - 1; + if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) || + req_chan_end > rvu_nix_chan_sdp(rvu, 255)) { + dev_err(rvu->dev, + "PF_Func 0x%x: Invalid channel base and count\n", + pcifunc); + return -EINVAL; + } + } else { + req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn; + req_chan_cnt = sdp_info->num_pf_rings; + } + + pfvf->rx_chan_base = req_chan_base; + pfvf->rx_chan_cnt = req_chan_cnt; + pfvf->tx_chan_base = pfvf->rx_chan_base; + pfvf->tx_chan_cnt = pfvf->rx_chan_cnt; + + rsp->tx_link = hw->cgx_links + hw->lbk_links; + rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, + pfvf->rx_chan_base, + pfvf->rx_chan_cnt); break; } @@ -434,9 +500,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, int type, int chan_id) { - int bpid, blkaddr, lmac_chan_cnt; + int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt; + u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt; struct rvu_hwinfo *hw = rvu->hw; - u16 cgx_bpid_cnt, lbk_bpid_cnt; struct rvu_pfvf *pfvf; u8 cgx_id, lmac_id; u64 cfg; @@ -445,8 +511,12 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); lmac_chan_cnt = cfg & 0xFF; + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); + sdp_chan_cnt = cfg & 0xFFF; + cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt; lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF); + sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt; pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); @@ -484,6 +554,17 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt)) return -EINVAL; break; + case NIX_INTF_TYPE_SDP: + if ((req->chan_base + req->chan_cnt) > 255) + return -EINVAL; + + bpid = sdp_bpid_cnt + req->chan_base; + if (req->bpid_per_chan) + bpid += chan_id; + + if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt)) + return -EINVAL; + break; default: return -EINVAL; } @@ -503,9 +584,12 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, pf = rvu_get_pf(pcifunc); type = is_afvf(pcifunc) ? 
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; + if (is_sdp_pfvf(pcifunc)) + type = NIX_INTF_TYPE_SDP; - /* Enable backpressure only for CGX mapped PFs and LBK interface */ - if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) + /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */ + if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && + type != NIX_INTF_TYPE_SDP) return 0; pfvf = rvu_get_pfvf(rvu, pcifunc); @@ -522,8 +606,9 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, } cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); + cfg &= ~GENMASK_ULL(8, 0); rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), - cfg | (bpid & 0xFF) | BIT_ULL(16)); + cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16)); chan_id++; bpid = rvu_nix_get_bpid(rvu, req, type, chan_id); } @@ -671,9 +756,10 @@ static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, struct rvu_pfvf *pfvf, int nixlf, int rss_sz, int rss_grps, int hwctx_size, - u64 way_mask) + u64 way_mask, bool tag_lsb_as_adder) { int err, grp, num_indices; + u64 val; /* RSS is not requested for this NIXLF */ if (!rss_sz) @@ -689,10 +775,13 @@ static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, (u64)pfvf->rss_ctx->iova); /* Config full RSS table size, enable RSS and caching */ - rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), - BIT_ULL(36) | BIT_ULL(4) | - ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) | - way_mask << 20); + val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 | + ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE); + + if (tag_lsb_as_adder) + val |= BIT_ULL(5); + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val); /* Config RSS group offset and sizes */ for (grp = 0; grp < rss_grps; grp++) rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp), @@ -1241,7 +1330,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, /* Initialize receive side scaling (RSS) */ hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF); err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz, - req->rss_grps, hwctx_size, req->way_mask); + req->rss_grps, hwctx_size, req->way_mask, + !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER)); if (err) goto free_mem; @@ -1299,7 +1389,11 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg); intf = is_afvf(pcifunc) ? 
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; - err = nix_interface_init(rvu, pcifunc, intf, nixlf); + if (is_sdp_pfvf(pcifunc)) + intf = NIX_INTF_TYPE_SDP; + + err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp, + !!(req->flags & NIX_LF_LBK_BLK_SEL)); if (err) goto free_mem; @@ -1423,12 +1517,104 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, return 0; } +/* Handle shaper update specially for few revisions */ +static bool +handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf, + int lvl, u64 reg, u64 regval) +{ + u64 regbase, oldval, sw_xoff = 0; + u64 dbgval, md_debug0 = 0; + unsigned long poll_tmo; + bool rate_reg = 0; + u32 schq; + + regbase = reg & 0xFFFF; + schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); + + /* Check for rate register */ + switch (lvl) { + case NIX_TXSCH_LVL_TL1: + md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq); + sw_xoff = NIX_AF_TL1X_SW_XOFF(schq); + + rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0)); + break; + case NIX_TXSCH_LVL_TL2: + md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq); + sw_xoff = NIX_AF_TL2X_SW_XOFF(schq); + + rate_reg = (regbase == NIX_AF_TL2X_CIR(0) || + regbase == NIX_AF_TL2X_PIR(0)); + break; + case NIX_TXSCH_LVL_TL3: + md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq); + sw_xoff = NIX_AF_TL3X_SW_XOFF(schq); + + rate_reg = (regbase == NIX_AF_TL3X_CIR(0) || + regbase == NIX_AF_TL3X_PIR(0)); + break; + case NIX_TXSCH_LVL_TL4: + md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq); + sw_xoff = NIX_AF_TL4X_SW_XOFF(schq); + + rate_reg = (regbase == NIX_AF_TL4X_CIR(0) || + regbase == NIX_AF_TL4X_PIR(0)); + break; + case NIX_TXSCH_LVL_MDQ: + sw_xoff = NIX_AF_MDQX_SW_XOFF(schq); + rate_reg = (regbase == NIX_AF_MDQX_CIR(0) || + regbase == NIX_AF_MDQX_PIR(0)); + break; + } + + if (!rate_reg) + return false; + + /* Nothing special to do when state is not toggled */ + oldval = rvu_read64(rvu, blkaddr, reg); + if ((oldval & 0x1) == (regval & 0x1)) { + rvu_write64(rvu, blkaddr, reg, regval); + return true; + } + + /* PIR/CIR disable */ + if (!(regval & 0x1)) { + rvu_write64(rvu, blkaddr, sw_xoff, 1); + rvu_write64(rvu, blkaddr, reg, 0); + udelay(4); + rvu_write64(rvu, blkaddr, sw_xoff, 0); + return true; + } + + /* PIR/CIR enable */ + rvu_write64(rvu, blkaddr, sw_xoff, 1); + if (md_debug0) { + poll_tmo = jiffies + usecs_to_jiffies(10000); + /* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */ + do { + if (time_after(jiffies, poll_tmo)) { + dev_err(rvu->dev, + "NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n", + nixlf, schq, lvl); + goto exit; + } + usleep_range(1, 5); + dbgval = rvu_read64(rvu, blkaddr, md_debug0); + } while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48))); + } + rvu_write64(rvu, blkaddr, reg, regval); +exit: + rvu_write64(rvu, blkaddr, sw_xoff, 0); + return true; +} + /* Disable shaping of pkts by a scheduler queue * at a given scheduler level. 
*/ static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, - int lvl, int schq) + int nixlf, int lvl, int schq) { + struct rvu_hwinfo *hw = rvu->hw; u64 cir_reg = 0, pir_reg = 0; u64 cfg; @@ -1449,6 +1635,21 @@ static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, cir_reg = NIX_AF_TL4X_CIR(schq); pir_reg = NIX_AF_TL4X_PIR(schq); break; + case NIX_TXSCH_LVL_MDQ: + cir_reg = NIX_AF_MDQX_CIR(schq); + pir_reg = NIX_AF_MDQX_PIR(schq); + break; + } + + /* Shaper state toggle needs wait/poll */ + if (hw->cap.nix_shaper_toggle_wait) { + if (cir_reg) + handle_txschq_shaper_update(rvu, blkaddr, nixlf, + lvl, cir_reg, 0); + if (pir_reg) + handle_txschq_shaper_update(rvu, blkaddr, nixlf, + lvl, pir_reg, 0); + return; } if (!cir_reg) @@ -1466,6 +1667,7 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, int lvl, int schq) { struct rvu_hwinfo *hw = rvu->hw; + int link_level; int link; if (lvl >= hw->cap.nix_tx_aggr_lvl) @@ -1475,7 +1677,9 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, if (lvl == NIX_TXSCH_LVL_TL4) rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00); - if (lvl != NIX_TXSCH_LVL_TL2) + link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? + NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; + if (lvl != link_level) return; /* Reset TL2's CGX or LBK link config */ @@ -1484,6 +1688,40 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); } +static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr, + int lvl, int schq) +{ + struct rvu_hwinfo *hw = rvu->hw; + u64 reg; + + /* Skip this if shaping is not supported */ + if (!hw->cap.nix_shaping) + return; + + /* Clear level specific SW_XOFF */ + switch (lvl) { + case NIX_TXSCH_LVL_TL1: + reg = NIX_AF_TL1X_SW_XOFF(schq); + break; + case NIX_TXSCH_LVL_TL2: + reg = NIX_AF_TL2X_SW_XOFF(schq); + break; + case NIX_TXSCH_LVL_TL3: + reg = NIX_AF_TL3X_SW_XOFF(schq); + break; + case NIX_TXSCH_LVL_TL4: + reg = NIX_AF_TL4X_SW_XOFF(schq); + break; + case NIX_TXSCH_LVL_MDQ: + reg = NIX_AF_MDQX_SW_XOFF(schq); + break; + default: + return; + } + + rvu_write64(rvu, blkaddr, reg, 0x0); +} + static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) { struct rvu_hwinfo *hw = rvu->hw; @@ -1661,15 +1899,14 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, int link, blkaddr, rc = 0; int lvl, idx, start, end; struct nix_txsch *txsch; - struct rvu_pfvf *pfvf; struct nix_hw *nix_hw; u32 *pfvf_map; + int nixlf; u16 schq; - pfvf = rvu_get_pfvf(rvu, pcifunc); - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (!pfvf->nixlf || blkaddr < 0) - return NIX_AF_ERR_AF_LF_INVALID; + rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (rc) + return rc; nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) @@ -1718,7 +1955,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, NIX_TXSCHQ_CFG_DONE)) pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); - nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); + nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); } for (idx = 0; idx < req->schq[lvl]; idx++) { @@ -1727,7 +1964,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, NIX_TXSCHQ_CFG_DONE)) pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); - nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); + nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); } } @@ -1744,8 +1981,8 @@ exit: return rc; } -static void nix_smq_flush(struct rvu *rvu, int blkaddr, - int smq, u16 
pcifunc, int nixlf) +static int nix_smq_flush(struct rvu *rvu, int blkaddr, + int smq, u16 pcifunc, int nixlf) { int pf = rvu_get_pf(pcifunc); u8 cgx_id = 0, lmac_id = 0; @@ -1780,6 +2017,7 @@ static void nix_smq_flush(struct rvu *rvu, int blkaddr, /* restore cgx tx state */ if (restore_tx_en) cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); + return err; } static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) @@ -1788,6 +2026,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) struct rvu_hwinfo *hw = rvu->hw; struct nix_txsch *txsch; struct nix_hw *nix_hw; + u16 map_func; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) @@ -1801,19 +2040,36 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) if (nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; - /* Disable TL2/3 queue links before SMQ flush*/ + /* Disable TL2/3 queue links and all XOFF's before SMQ flush*/ mutex_lock(&rvu->rsrc_lock); - for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) { - if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4) + for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + txsch = &nix_hw->txsch[lvl]; + + if (lvl >= hw->cap.nix_tx_aggr_lvl) continue; - txsch = &nix_hw->txsch[lvl]; for (schq = 0; schq < txsch->schq.max; schq++) { if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) continue; nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); + nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); } } + nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1, + nix_get_tx_link(rvu, pcifunc)); + + /* On PF cleanup, clear cfg done flag as + * PF would have changed default config. + */ + if (!(pcifunc & RVU_PFVF_FUNC_MASK)) { + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1]; + schq = nix_get_tx_link(rvu, pcifunc); + /* Do not clear pcifunc in txsch->pfvf_map[schq] because + * VF might be using this TL1 queue + */ + map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); + txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0); + } /* Flush SMQs */ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; @@ -1859,6 +2115,7 @@ static int nix_txschq_free_one(struct rvu *rvu, struct nix_txsch *txsch; struct nix_hw *nix_hw; u32 *pfvf_map; + int rc; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) @@ -1883,15 +2140,24 @@ static int nix_txschq_free_one(struct rvu *rvu, mutex_lock(&rvu->rsrc_lock); if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) { - mutex_unlock(&rvu->rsrc_lock); + rc = NIX_AF_ERR_TLX_INVALID; goto err; } + /* Clear SW_XOFF of this resource only. + * For SMQ level, all path XOFF's + * need to be made clear by user + */ + nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); + /* Flush if it is a SMQ. 
Onus of disabling * TL2/3 queue links before SMQ flush is on user */ - if (lvl == NIX_TXSCH_LVL_SMQ) - nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); + if (lvl == NIX_TXSCH_LVL_SMQ && + nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) { + rc = NIX_AF_SMQ_FLUSH_FAILED; + goto err; + } /* Free the resource */ rvu_free_rsrc(&txsch->schq, schq); @@ -1899,7 +2165,8 @@ static int nix_txschq_free_one(struct rvu *rvu, mutex_unlock(&rvu->rsrc_lock); return 0; err: - return NIX_AF_ERR_TLX_INVALID; + mutex_unlock(&rvu->rsrc_lock); + return rc; } int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, @@ -1982,6 +2249,11 @@ static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg) regbase == NIX_AF_TL4X_PIR(0)) return false; break; + case NIX_TXSCH_LVL_MDQ: + if (regbase == NIX_AF_MDQX_CIR(0) || + regbase == NIX_AF_MDQX_PIR(0)) + return false; + break; } return true; } @@ -2014,6 +2286,33 @@ static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); } +/* Register offset - [15:0] + * Scheduler Queue number - [25:16] + */ +#define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0) + +static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw, + int blkaddr, struct nix_txschq_config *req, + struct nix_txschq_config *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + int idx, schq; + u64 reg; + + for (idx = 0; idx < req->num_regs; idx++) { + reg = req->reg[idx]; + reg &= NIX_TX_SCHQ_MASK; + schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); + if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) || + !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq)) + return NIX_AF_INVAL_TXSCHQ_CFG; + rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg); + } + rsp->lvl = req->lvl; + rsp->num_regs = req->num_regs; + return 0; +} + static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc, struct nix_txsch *txsch) { @@ -2045,11 +2344,11 @@ static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, struct nix_txschq_config *req, - struct msg_rsp *rsp) + struct nix_txschq_config *rsp) { + u64 reg, val, regval, schq_regbase, val_mask; struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; - u64 reg, regval, schq_regbase; struct nix_txsch *txsch; struct nix_hw *nix_hw; int blkaddr, idx, err; @@ -2068,6 +2367,9 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; + if (req->read) + return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp); + txsch = &nix_hw->txsch[req->lvl]; pfvf_map = txsch->pfvf_map; @@ -2082,8 +2384,10 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, for (idx = 0; idx < req->num_regs; idx++) { reg = req->reg[idx]; + reg &= NIX_TX_SCHQ_MASK; regval = req->regval[idx]; schq_regbase = reg & 0xFFFF; + val_mask = req->regval_mask[idx]; if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr, txsch->lvl, reg, regval)) @@ -2093,6 +2397,15 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, if (!is_txschq_shaping_valid(hw, req->lvl, reg)) continue; + val = rvu_read64(rvu, blkaddr, reg); + regval = (val & val_mask) | (regval & ~val_mask); + + /* Handle shaping state toggle specially */ + if (hw->cap.nix_shaper_toggle_wait && + handle_txschq_shaper_update(rvu, blkaddr, nixlf, + req->lvl, reg, regval)) + continue; + /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ if (schq_regbase == NIX_AF_SMQX_CFG(0)) { nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], @@ -2133,7 +2446,6 @@ int 
rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc, &nix_hw->txsch[NIX_TXSCH_LVL_TL2]); - return 0; } @@ -2523,14 +2835,19 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_hwinfo *hw = rvu->hw; struct nix_mce_list *mce_list; + int pf; - /* skip multicast pkt replication for AF's VFs */ - if (is_afvf(pcifunc)) + /* skip multicast pkt replication for AF's VFs & SDP links */ + if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc)) return 0; if (!hw->cap.nix_rx_multicast) return 0; + pf = rvu_get_pf(pcifunc); + if (!is_pf_cgxmapped(rvu, pf)) + return 0; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) return -EINVAL; @@ -3457,6 +3774,77 @@ static void nix_find_link_frs(struct rvu *rvu, req->minlen = minlen; } +static int +nix_config_link_credits(struct rvu *rvu, int blkaddr, int link, + u16 pcifunc, u64 tx_credits) +{ + struct rvu_hwinfo *hw = rvu->hw; + int pf = rvu_get_pf(pcifunc); + u8 cgx_id = 0, lmac_id = 0; + unsigned long poll_tmo; + bool restore_tx_en = 0; + struct nix_hw *nix_hw; + u64 cfg, sw_xoff = 0; + u32 schq = 0; + u32 credits; + int rc; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + + if (tx_credits == nix_hw->tx_credits[link]) + return 0; + + /* Enable cgx tx if disabled for credits to be back */ + if (is_pf_cgxmapped(rvu, pf)) { + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), + lmac_id, true); + } + + mutex_lock(&rvu->rsrc_lock); + /* Disable new traffic to link */ + if (hw->cap.nix_shaping) { + schq = nix_get_tx_link(rvu, pcifunc); + sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)); + rvu_write64(rvu, blkaddr, + NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0)); + } + + rc = -EBUSY; + poll_tmo = jiffies + usecs_to_jiffies(10000); + /* Wait for credits to return */ + do { + if (time_after(jiffies, poll_tmo)) + goto exit; + usleep_range(100, 200); + + cfg = rvu_read64(rvu, blkaddr, + NIX_AF_TX_LINKX_NORM_CREDIT(link)); + credits = (cfg >> 12) & 0xFFFFFULL; + } while (credits != nix_hw->tx_credits[link]); + + cfg &= ~(0xFFFFFULL << 12); + cfg |= (tx_credits << 12); + rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); + rc = 0; + + nix_hw->tx_credits[link] = tx_credits; + +exit: + /* Enable traffic back */ + if (hw->cap.nix_shaping && !sw_xoff) + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0); + + /* Restore state of cgx tx */ + if (restore_tx_en) + cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); + + mutex_unlock(&rvu->rsrc_lock); + return rc; +} + int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, struct msg_rsp *rsp) { @@ -3467,6 +3855,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, struct nix_txsch *txsch; u64 cfg, lmac_fifo_len; struct nix_hw *nix_hw; + struct rvu_pfvf *pfvf; u8 cgx = 0, lmac = 0; u16 max_mtu; @@ -3523,7 +3912,8 @@ rx_frscfg: link = (cgx * hw->lmac_per_cgx) + lmac; } else if (pf == 0) { /* For VFs of PF0 ingress is LBK port, so config LBK link */ - link = hw->cgx_links; + pfvf = rvu_get_pfvf(rvu, pcifunc); + link = hw->cgx_links + pfvf->lbkid; } if (link < 0) @@ -3545,11 +3935,8 @@ linkcfg: lmac_fifo_len = rvu_cgx_get_fifolen(rvu) / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); - cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link)); - cfg &= ~(0xFFFFFULL << 12); - cfg |= ((lmac_fifo_len - 
req->maxlen) / 16) << 12; - rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); - return 0; + return nix_config_link_credits(rvu, blkaddr, link, pcifunc, + (lmac_fifo_len - req->maxlen) / 16); } int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, @@ -3593,12 +3980,13 @@ static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs) return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */ } -static void nix_link_config(struct rvu *rvu, int blkaddr) +static void nix_link_config(struct rvu *rvu, int blkaddr, + struct nix_hw *nix_hw) { struct rvu_hwinfo *hw = rvu->hw; int cgx, lmac_cnt, slink, link; u16 lbk_max_frs, lmac_max_frs; - u64 tx_credits; + u64 tx_credits, cfg; rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs); rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs); @@ -3629,15 +4017,18 @@ static void nix_link_config(struct rvu *rvu, int blkaddr) */ for (cgx = 0; cgx < hw->cgx; cgx++) { lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); + /* Skip when cgx is not available or lmac cnt is zero */ + if (lmac_cnt <= 0) + continue; tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) - lmac_max_frs) / 16; /* Enable credits and set credit pkt count to max allowed */ - tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); + cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); slink = cgx * hw->lmac_per_cgx; for (link = slink; link < (slink + lmac_cnt); link++) { + nix_hw->tx_credits[link] = tx_credits; rvu_write64(rvu, blkaddr, - NIX_AF_TX_LINKX_NORM_CREDIT(link), - tx_credits); + NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); } } @@ -3645,6 +4036,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr) slink = hw->cgx_links; for (link = slink; link < (slink + hw->lbk_links); link++) { tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs); + nix_hw->tx_credits[link] = tx_credits; /* Enable credits and set credit pkt count to max allowed */ tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); rvu_write64(rvu, blkaddr, @@ -3908,8 +4300,13 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) if (err) return err; + nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links, + sizeof(u64), GFP_KERNEL); + if (!nix_hw->tx_credits) + return -ENOMEM; + /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ - nix_link_config(rvu, blkaddr); + nix_link_config(rvu, blkaddr, nix_hw); /* Enable Channel backpressure */ rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0)); @@ -3965,6 +4362,8 @@ static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr, kfree(txsch->schq.bmap); } + kfree(nix_hw->tx_credits); + nix_ipolicer_freemem(rvu, nix_hw); vlan = &nix_hw->txvlan; @@ -4771,3 +5170,36 @@ static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, rvu_free_rsrc(&ipolicer->band_prof, mid_prof); } } + +int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req, + struct nix_bandprof_get_hwinfo_rsp *rsp) +{ + struct nix_ipolicer *ipolicer; + int blkaddr, layer, err; + struct nix_hw *nix_hw; + u64 tu; + + if (!rvu->hw->cap.ipolicer) + return NIX_AF_ERR_IPOLICER_NOTSUPP; + + err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); + if (err) + return err; + + /* Return number of bandwidth profiles free at each layer */ + mutex_lock(&rvu->rsrc_lock); + for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { + if (layer == BAND_PROF_INVAL_LAYER) + continue; + + ipolicer = &nix_hw->ipolicer[layer]; + rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof); + } + 
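/* Editor's aside, not part of the patch: a minimal sketch of the policer
 * timeunit conversion performed just below, assuming only what this handler
 * itself reads. NIX_AF_PL_TS[9:0] is a zero-based count of 100 ns steps, so
 * the reported policer tick is (tu + 1) * 100 nanoseconds; e.g. a raw field
 * value of 9 yields (9 + 1) * 100 = 1000 ns, i.e. a 1 us tick. The local
 * name policer_tick_ns is hypothetical:
 *
 *	u64 tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) &
 *		 GENMASK_ULL(9, 0);
 *	u64 policer_tick_ns = (tu + 1) * 100;
 */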
mutex_unlock(&rvu->rsrc_lock); + + /* Set the policer timeunit in nanosec */ + tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0); + rsp->policer_timeunit = (tu + 1) * 100; + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c index 24c2bfdfec4e..70bd036ed76e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/module.h> @@ -419,6 +416,10 @@ exit: rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF; rsp->stack_pg_bytes = cfg & 0xFF; rsp->qints = (cfg >> 28) & 0xFFF; + if (!is_rvu_otx2(rvu)) { + cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL); + rsp->cache_lines = (cfg >> 1) & 0x3F; + } return rc; } @@ -478,6 +479,13 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block) #endif rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg); + /* For CN10K NPA BATCH DMA set 35 cache lines */ + if (!is_rvu_otx2(rvu)) { + cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL); + cfg &= ~0x7EULL; + cfg |= BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1); + rvu_write64(rvu, block->addr, NPA_AF_BATCH_CTL, cfg); + } /* Result structure can be followed by Aura/Pool context at * RES + 128bytes and a write mask at RES + 256 bytes, depending on * operation type. Alloc sufficient result memory for all operations. diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 6f231008c8a4..b95485882dc9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/bitfield.h> @@ -85,36 +82,6 @@ static int npc_mcam_verify_pf_func(struct rvu *rvu, return 0; } -int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel) -{ - int pf = rvu_get_pf(pcifunc); - u8 cgx_id, lmac_id; - int base = 0, end; - - if (is_npc_intf_tx(intf)) - return 0; - - /* return in case of AF installed rules */ - if (is_pffunc_af(pcifunc)) - return 0; - - if (is_afvf(pcifunc)) { - end = rvu_get_num_lbk_chans(); - if (end < 0) - return -EINVAL; - } else { - rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); - base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0x0); - /* CGX mapped functions has maximum of 16 channels */ - end = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0xF); - } - - if (channel < base || channel > end) - return -EINVAL; - - return 0; -} - void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf) { int blkaddr; @@ -634,8 +601,8 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, struct nix_rx_action action; int blkaddr, index; - /* AF's VFs work in promiscuous mode */ - if (is_afvf(pcifunc)) + /* AF's and SDP VFs work in promiscuous mode */ + if (is_afvf(pcifunc) || is_sdp_vf(pcifunc)) return; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); @@ -863,7 +830,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u16 vf_func; /* Only CGX PF/VF can add allmulticast entry */ - if (is_afvf(pcifunc)) + if (is_afvf(pcifunc) || is_sdp_vf(pcifunc)) return; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); @@ -2706,7 +2673,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); struct npc_mcam *mcam = &rvu->hw->mcam; u16 pcifunc = req->hdr.pcifunc; - u16 channel, chan_mask; int blkaddr, rc; u8 nix_intf; @@ -2714,10 +2680,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; - chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK; - channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK; - channel &= chan_mask; - mutex_lock(&mcam->lock); rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); if (rc) @@ -2740,12 +2702,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, nix_intf = pfvf->nix_rx_intf; if (!is_pffunc_af(pcifunc) && - npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) { - rc = NPC_MCAM_INVALID_REQ; - goto exit; - } - - if (!is_pffunc_af(pcifunc) && npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) { rc = NPC_MCAM_INVALID_REQ; goto exit; @@ -3091,7 +3047,6 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, struct npc_mcam *mcam = &rvu->hw->mcam; u16 entry = NPC_MCAM_ENTRY_INVALID; u16 cntr = NPC_MCAM_ENTRY_INVALID; - u16 channel, chan_mask; int blkaddr, rc; u8 nix_intf; @@ -3102,13 +3057,6 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, if (!is_npc_interface_valid(rvu, req->intf)) return NPC_MCAM_INVALID_REQ; - chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK; - channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK; - channel &= chan_mask; - - if (npc_mcam_verify_channel(rvu, req->hdr.pcifunc, req->intf, channel)) - return NPC_MCAM_INVALID_REQ; - if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, req->hdr.pcifunc)) return NPC_MCAM_INVALID_REQ; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index 9bde1bb7e148..51ddc7b81d0b 100644 ---
a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * * Copyright (C) 2020 Marvell. */ @@ -20,6 +20,8 @@ static const char * const npc_flow_names[] = { [NPC_DMAC] = "dmac", [NPC_SMAC] = "smac", [NPC_ETYPE] = "ether type", + [NPC_VLAN_ETYPE_CTAG] = "vlan ether type ctag", + [NPC_VLAN_ETYPE_STAG] = "vlan ether type stag", [NPC_OUTER_VID] = "outer vlan id", [NPC_TOS] = "tos", [NPC_SIP_IPV4] = "ipv4 source ip", @@ -492,6 +494,11 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf) if (*features & BIT_ULL(NPC_OUTER_VID)) if (!npc_check_field(rvu, blkaddr, NPC_LB, intf)) *features &= ~BIT_ULL(NPC_OUTER_VID); + + /* for vlan ethertypes corresponding layer type should be in the key */ + if (npc_check_field(rvu, blkaddr, NPC_LB, intf)) + *features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG) | + BIT_ULL(NPC_VLAN_ETYPE_STAG); } /* Scan key extraction profile and record how fields of our interest @@ -747,6 +754,28 @@ static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry, } } +static void npc_update_vlan_features(struct rvu *rvu, struct mcam_entry *entry, + u64 features, u8 intf) +{ + bool ctag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_CTAG)); + bool stag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_STAG)); + bool vid = !!(features & BIT_ULL(NPC_OUTER_VID)); + + /* If only VLAN id is given then always match outer VLAN id */ + if (vid && !ctag && !stag) { + npc_update_entry(rvu, NPC_LB, entry, + NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0, + NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf); + return; + } + if (ctag) + npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_CTAG, 0, + ~0ULL, 0, intf); + if (stag) + npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_STAG_QINQ, 0, + ~0ULL, 0, intf); +} + static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry, u64 features, struct flow_msg *pkt, struct flow_msg *mask, @@ -779,11 +808,6 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry, npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP6, 0, ~0ULL, 0, intf); - if (features & BIT_ULL(NPC_OUTER_VID)) - npc_update_entry(rvu, NPC_LB, entry, - NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0, - NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf); - /* For AH, LTYPE should be present in entry */ if (features & BIT_ULL(NPC_IPPROTO_AH)) npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_AH, @@ -829,6 +853,7 @@ do { \ ntohs(mask->vlan_tci), 0); npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf); + npc_update_vlan_features(rvu, entry, features, intf); } static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam, @@ -1173,11 +1198,6 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, if (err) return NPC_FLOW_NOT_SUPPORTED; - /* Skip channel validation if AF is installing */ - if (!is_pffunc_af(req->hdr.pcifunc) && - npc_mcam_verify_channel(rvu, target, req->intf, req->channel)) - return NPC_FLOW_CHAN_INVALID; - pfvf = rvu_get_pfvf(rvu, target); /* PF installing for its VF */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c index e266f0c49559..b3150f053291 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU 
Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/module.h> @@ -33,8 +30,8 @@ static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = { {NIX_TXSCH_LVL_SMQ, 2, 0xFFFF, {{0x0700, 0x0708}, {0x1400, 0x14C8} } }, {NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18}, {0x1200, 0x12E0} } }, - {NIX_TXSCH_LVL_TL3, 3, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608}, - {0x1610, 0x1618} } }, + {NIX_TXSCH_LVL_TL3, 4, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608}, + {0x1610, 0x1618}, {0x1700, 0x17B0} } }, {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17B0} } }, {NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } }, }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 960ee1c2e178..576b037a00f0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef RVU_REG_H @@ -156,6 +153,7 @@ #define NPA_AF_AQ_DONE_INT_W1S (0x0688) #define NPA_AF_AQ_DONE_ENA_W1S (0x0690) #define NPA_AF_AQ_DONE_ENA_W1C (0x0698) +#define NPA_AF_BATCH_CTL (0x06a0) #define NPA_AF_LFX_AURAS_CFG(a) (0x4000 | (a) << 18) #define NPA_AF_LFX_LOC_AURAS_BASE(a) (0x4010 | (a) << 18) #define NPA_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 18) @@ -703,5 +701,8 @@ #define APR_AF_LMT_CFG (0x000ull) #define APR_AF_LMT_MAP_BASE (0x008ull) #define APR_AF_LMT_CTL (0x010ull) +#define APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT 23 +#define APR_LMT_MAP_ENT_SCH_ENA_SHIFT 22 +#define APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT 21 #endif /* RVU_REG_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c new file mode 100644 index 000000000000..b04fb226f708 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2021 Marvell. 
+ * + */ + +#include <linux/pci.h> +#include "rvu.h" + +/* SDP PF device id */ +#define PCI_DEVID_OTX2_SDP_PF 0xA0F6 + +/* Maximum SDP blocks in a chip */ +#define MAX_SDP 2 + +/* SDP PF number */ +static int sdp_pf_num[MAX_SDP] = {-1, -1}; + +bool is_sdp_pfvf(u16 pcifunc) +{ + u16 pf = rvu_get_pf(pcifunc); + u32 found = 0, i = 0; + + while (i < MAX_SDP) { + if (pf == sdp_pf_num[i]) + found = 1; + i++; + } + + if (!found) + return false; + + return true; +} + +bool is_sdp_pf(u16 pcifunc) +{ + return (is_sdp_pfvf(pcifunc) && + !(pcifunc & RVU_PFVF_FUNC_MASK)); +} + +bool is_sdp_vf(u16 pcifunc) +{ + return (is_sdp_pfvf(pcifunc) && + !!(pcifunc & RVU_PFVF_FUNC_MASK)); +} + +int rvu_sdp_init(struct rvu *rvu) +{ + struct pci_dev *pdev = NULL; + struct rvu_pfvf *pfvf; + u32 i = 0; + + while ((i < MAX_SDP) && (pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_OTX2_SDP_PF, + pdev)) != NULL) { + /* The RVU PF number is one less than bus number */ + sdp_pf_num[i] = pdev->bus->number - 1; + pfvf = &rvu->pf[sdp_pf_num[i]]; + + pfvf->sdp_info = devm_kzalloc(rvu->dev, + sizeof(struct sdp_node_info), + GFP_KERNEL); + if (!pfvf->sdp_info) + return -ENOMEM; + + dev_info(rvu->dev, "SDP PF number:%d\n", sdp_pf_num[i]); + + put_device(&pdev->dev); + i++; + } + + return 0; +} + +int +rvu_mbox_handler_set_sdp_chan_info(struct rvu *rvu, + struct sdp_chan_info_msg *req, + struct msg_rsp *rsp) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); + + memcpy(pfvf->sdp_info, &req->info, sizeof(struct sdp_node_info)); + dev_info(rvu->dev, "AF: SDP%d max_vfs %d num_pf_rings %d pf_srn %d\n", + req->info.node_id, req->info.max_vfs, req->info.num_pf_rings, + req->info.pf_srn); + return 0; +} + +int +rvu_mbox_handler_get_sdp_chan_info(struct rvu *rvu, struct msg_req *req, + struct sdp_get_chan_info_msg *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + int blkaddr; + + if (!hw->cap.programmable_chans) { + rsp->chan_base = NIX_CHAN_SDP_CH_START; + rsp->num_chan = NIX_CHAN_SDP_NUM_CHANS; + } else { + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); + rsp->chan_base = hw->sdp_chan_base; + rsp->num_chan = rvu_read64(rvu, blkaddr, NIX_AF_CONST1) & 0xFFFUL; + } + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index 5bbe6727d11d..77ac96693f04 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef RVU_STRUCT_H diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c index 820adf390b8e..3392487f6b47 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * * Copyright (C) 2021 Marvell. 
+ * */ #include <linux/bitfield.h> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c index 56f90cf9c4c0..775fd4c35794 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver tracepoints +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2020 Marvell. * - * Copyright (C) 2020 Marvell International Ltd. */ #define CREATE_TRACE_POINTS diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h index 6af97ce69443..28984d0e848a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h @@ -1,7 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver tracepoints +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2020 Marvell. * - * Copyright (C) 2020 Marvell International Ltd. */ #undef TRACE_SYSTEM diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index fcaa7df404f3..b92c267628b8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 # -# Makefile for Marvell's OcteonTX2 ethernet device drivers +# Makefile for Marvell's RVU Ethernet device drivers # obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index ccffddad1233..3cc76f14d2fd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Physcial Function ethernet driver +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2021 Marvell. * - * Copyright (C) 2020 Marvell. */ #include "cn10k.h" diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h index e07723d71a26..8ae96815865e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h @@ -1,7 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Marvell OcteonTx2 RVU Ethernet driver +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2021 Marvell. * - * Copyright (C) 2020 Marvell. */ #ifndef CN10K_H diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index f630e5713025..ce25c2744435 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Ethernet driver +/* Marvell RVU Ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/interrupt.h> @@ -289,8 +286,10 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf) rsp = (struct nix_rss_flowkey_cfg_rsp *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); - if (IS_ERR(rsp)) + if (IS_ERR(rsp)) { + err = PTR_ERR(rsp); goto fail; + } pfvf->hw.flowkey_alg_idx = rsp->alg_idx; fail: @@ -584,25 +583,6 @@ void otx2_get_mac_from_af(struct net_device *netdev) } EXPORT_SYMBOL(otx2_get_mac_from_af); -static int otx2_get_link(struct otx2_nic *pfvf) -{ - int link = 0; - u16 map; - - /* cgx lmac link */ - if (pfvf->hw.tx_chan_base >= CGX_CHAN_BASE) { - map = pfvf->hw.tx_chan_base & 0x7FF; - link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF); - } - /* LBK channel */ - if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE) { - map = pfvf->hw.tx_chan_base & 0x7FF; - link = pfvf->hw.cgx_links | ((map >> 8) & 0xF); - } - - return link; -} - int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) { struct otx2_hw *hw = &pfvf->hw; @@ -661,8 +641,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val; req->num_regs++; - req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, - otx2_get_link(pfvf)); + req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link); /* Enable this queue and backpressure */ req->regval[2] = BIT_ULL(13) | BIT_ULL(12); @@ -1610,6 +1589,7 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf, pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx; pfvf->hw.cgx_links = rsp->cgx_links; pfvf->hw.lbk_links = rsp->lbk_links; + pfvf->hw.tx_link = rsp->tx_link; } EXPORT_SYMBOL(mbox_handler_nix_lf_alloc); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 1a97b76a12e1..48227cec06ee 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Ethernet driver +/* Marvell RVU Ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef OTX2_COMMON_H @@ -218,6 +215,7 @@ struct otx2_hw { u64 cgx_fec_uncorr_blks; u8 cgx_links; /* No. of CGX links present in HW */ u8 lbk_links; /* No. of LBK links present in HW */ + u8 tx_link; /* Transmit channel link number */ #define HW_TSO 0 #define CN10K_MBOX 1 #define CN10K_LMTST 2 @@ -837,8 +835,6 @@ int otx2_get_all_flows(struct otx2_nic *pfvf, int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc); int otx2_remove_flow(struct otx2_nic *pfvf, u32 location); -int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, - struct npc_install_flow_req *req); int otx2_get_maxflows(struct otx2_flow_config *flow_cfg); void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id); int otx2_del_macfilter(struct net_device *netdev, const u8 *mac); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c index 383a6b5cb698..2ec800f741d8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Physcial Function ethernet driver +/* Marvell RVU Ethernet driver * * Copyright (C) 2021 Marvell. 
+ * */ #include "otx2_common.h" diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 5ce087686a1f..799486c72177 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Ethernet driver +/* Marvell RVU Ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/pci.h> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index e949001a6e55..77a13fb555fb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Physical Function ethernet driver +/* Marvell RVU Ethernet driver * * Copyright (C) 2020 Marvell. + * */ #include <net/ipv6.h> @@ -762,7 +763,7 @@ static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp, return 0; } -int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, +static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, struct npc_install_flow_req *req) { struct ethhdr *eth_mask = &fsp->m_u.ether_spec; @@ -818,8 +819,30 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, return -EOPNOTSUPP; } if (fsp->flow_type & FLOW_EXT) { - if (fsp->m_ext.vlan_etype) - return -EINVAL; + u16 vlan_etype; + + if (fsp->m_ext.vlan_etype) { + /* Partial masks not supported */ + if (be16_to_cpu(fsp->m_ext.vlan_etype) != 0xFFFF) + return -EINVAL; + + vlan_etype = be16_to_cpu(fsp->h_ext.vlan_etype); + /* Only ETH_P_8021Q and ETH_P_802AD types supported */ + if (vlan_etype != ETH_P_8021Q && + vlan_etype != ETH_P_8021AD) + return -EINVAL; + + memcpy(&pkt->vlan_etype, &fsp->h_ext.vlan_etype, + sizeof(pkt->vlan_etype)); + memcpy(&pmask->vlan_etype, &fsp->m_ext.vlan_etype, + sizeof(pmask->vlan_etype)); + + if (vlan_etype == ETH_P_8021Q) + req->features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG); + else + req->features |= BIT_ULL(NPC_VLAN_ETYPE_STAG); + } + if (fsp->m_ext.vlan_tci) { memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci, sizeof(pkt->vlan_tci)); @@ -995,6 +1018,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) if (!flow) return -ENOMEM; flow->location = fsp->location; + flow->entry = flow_cfg->flow_ent[flow->location]; new = true; } /* struct copy */ @@ -1046,7 +1070,6 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) flow_cfg->max_flows - 1); err = -EINVAL; } else { - flow->entry = flow_cfg->flow_ent[flow->location]; err = otx2_add_flow_msg(pfvf, flow); } } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 6fe6b8d9daa1..2f2e8a3d7924 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Physical Function ethernet driver +/* Marvell RVU Physical Function ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. 
* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/module.h> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c index 56390a664517..ec9e49985c2c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 PTP support for ethernet driver +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2020 Marvell. * - * Copyright (C) 2020 Marvell International Ltd. */ #include "otx2_common.h" diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h index 706d63a43ae1..6ff284211d7b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h @@ -1,5 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 PTP support for ethernet driver */ +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2020 Marvell. + * + */ #ifndef OTX2_PTP_H #define OTX2_PTP_H diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h index f4fd72ee9a25..1b967eaf948b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Ethernet driver +/* Marvell RVU Ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef OTX2_REG_H diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h index 1f49b3caf5d4..4bbd12ff26e6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Ethernet driver +/* Marvell RVU Ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef OTX2_STRUCT_H diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index 0aa2149ae968..626961a41089 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -1,8 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Physcial Function ethernet driver +/* Marvell RVU Ethernet driver * * Copyright (C) 2021 Marvell. 
+ * */ + #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/inetdevice.h> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 22ec03a618b1..f42b1d4e0c67 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Ethernet driver +/* Marvell RVU Ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/etherdevice.h> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index 2f144e2cf436..869de5f59e73 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Ethernet driver +/* Marvell RVU Ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef OTX2_TXRX_H diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 58b912653ac2..03b4ec630432 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -1,5 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Virtual Function ethernet driver */ +/* Marvell RVU Virtual Function ethernet driver + * + * Copyright (C) 2020 Marvell. 
+ * + */ #include <linux/etherdevice.h> #include <linux/module.h> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 6475ba35cf6b..a5b9f65db23c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -18,12 +18,39 @@ enum dr_action_valid_state { DR_ACTION_STATE_ENCAP, DR_ACTION_STATE_DECAP, DR_ACTION_STATE_MODIFY_HDR, - DR_ACTION_STATE_MODIFY_VLAN, + DR_ACTION_STATE_POP_VLAN, + DR_ACTION_STATE_PUSH_VLAN, DR_ACTION_STATE_NON_TERM, DR_ACTION_STATE_TERM, DR_ACTION_STATE_MAX, }; +static const char * const action_type_to_str[] = { + [DR_ACTION_TYP_TNL_L2_TO_L2] = "DR_ACTION_TYP_TNL_L2_TO_L2", + [DR_ACTION_TYP_L2_TO_TNL_L2] = "DR_ACTION_TYP_L2_TO_TNL_L2", + [DR_ACTION_TYP_TNL_L3_TO_L2] = "DR_ACTION_TYP_TNL_L3_TO_L2", + [DR_ACTION_TYP_L2_TO_TNL_L3] = "DR_ACTION_TYP_L2_TO_TNL_L3", + [DR_ACTION_TYP_DROP] = "DR_ACTION_TYP_DROP", + [DR_ACTION_TYP_QP] = "DR_ACTION_TYP_QP", + [DR_ACTION_TYP_FT] = "DR_ACTION_TYP_FT", + [DR_ACTION_TYP_CTR] = "DR_ACTION_TYP_CTR", + [DR_ACTION_TYP_TAG] = "DR_ACTION_TYP_TAG", + [DR_ACTION_TYP_MODIFY_HDR] = "DR_ACTION_TYP_MODIFY_HDR", + [DR_ACTION_TYP_VPORT] = "DR_ACTION_TYP_VPORT", + [DR_ACTION_TYP_POP_VLAN] = "DR_ACTION_TYP_POP_VLAN", + [DR_ACTION_TYP_PUSH_VLAN] = "DR_ACTION_TYP_PUSH_VLAN", + [DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR", + [DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR", + [DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN", +}; + +static const char *dr_action_id_to_str(enum mlx5dr_action_type action_id) +{ + if (action_id > DR_ACTION_TYP_MAX) + action_id = DR_ACTION_TYP_MAX; + return action_type_to_str[action_id]; +} + static const enum dr_action_valid_state next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] = { [DR_ACTION_DOMAIN_NIC_INGRESS] = { @@ -39,8 +66,10 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, }, [DR_ACTION_STATE_DECAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -53,7 +82,8 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, }, [DR_ACTION_STATE_ENCAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -73,20 +103,31 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, }, - [DR_ACTION_STATE_MODIFY_VLAN] = { + [DR_ACTION_STATE_POP_VLAN] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_FT] = 
DR_ACTION_STATE_TERM, [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, - [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_MODIFY_VLAN, - [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, }, + [DR_ACTION_STATE_PUSH_VLAN] = { + [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, + }, [DR_ACTION_STATE_NON_TERM] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM, @@ -99,8 +140,10 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, }, [DR_ACTION_STATE_TERM] = { [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM, @@ -115,8 +158,16 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + }, + [DR_ACTION_STATE_DECAP] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP, }, [DR_ACTION_STATE_ENCAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -132,14 +183,25 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + }, + [DR_ACTION_STATE_POP_VLAN] = { + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, }, - 
[DR_ACTION_STATE_MODIFY_VLAN] = { + [DR_ACTION_STATE_PUSH_VLAN] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, - [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, @@ -152,8 +214,10 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, }, [DR_ACTION_STATE_TERM] = { [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM, @@ -170,8 +234,10 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, }, [DR_ACTION_STATE_DECAP] = { @@ -180,11 +246,12 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, }, [DR_ACTION_STATE_ENCAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -203,13 +270,26 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, }, - [DR_ACTION_STATE_MODIFY_VLAN] = { + [DR_ACTION_STATE_POP_VLAN] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, - [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + }, + 
[DR_ACTION_STATE_PUSH_VLAN] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, @@ -226,8 +306,10 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, }, [DR_ACTION_STATE_TERM] = { @@ -244,8 +326,17 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + }, + [DR_ACTION_STATE_DECAP] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, }, [DR_ACTION_STATE_ENCAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -262,15 +353,27 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + }, + [DR_ACTION_STATE_POP_VLAN] = { + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, }, - [DR_ACTION_STATE_MODIFY_VLAN] = { + [DR_ACTION_STATE_PUSH_VLAN] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, - [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN, [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, @@ -285,7 +388,9 @@ 
next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, }, [DR_ACTION_STATE_TERM] = { @@ -314,6 +419,9 @@ dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type case DR_ACTION_REFORMAT_TYP_INSERT_HDR: *action_type = DR_ACTION_TYP_INSERT_HDR; break; + case DR_ACTION_REFORMAT_TYP_REMOVE_HDR: + *action_type = DR_ACTION_TYP_REMOVE_HDR; + break; default: return -EINVAL; } @@ -326,7 +434,7 @@ dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type * the new size of the STEs array, rule with actions. */ static void dr_actions_apply(struct mlx5dr_domain *dmn, - enum mlx5dr_ste_entry_type ste_type, + enum mlx5dr_domain_nic_type nic_type, u8 *action_type_set, u8 *last_ste, struct mlx5dr_ste_actions_attr *attr, @@ -335,7 +443,7 @@ static void dr_actions_apply(struct mlx5dr_domain *dmn, struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx; u32 added_stes = 0; - if (ste_type == MLX5DR_STE_TYPE_RX) + if (nic_type == DR_DOMAIN_NIC_TYPE_RX) mlx5dr_ste_set_actions_rx(ste_ctx, dmn, action_type_set, last_ste, attr, &added_stes); else @@ -347,7 +455,7 @@ static void dr_actions_apply(struct mlx5dr_domain *dmn, static enum dr_action_domain dr_action_get_action_domain(enum mlx5dr_domain_type domain, - enum mlx5dr_ste_entry_type ste_type) + enum mlx5dr_domain_nic_type nic_type) { switch (domain) { case MLX5DR_DOMAIN_TYPE_NIC_RX: @@ -355,7 +463,7 @@ dr_action_get_action_domain(enum mlx5dr_domain_type domain, case MLX5DR_DOMAIN_TYPE_NIC_TX: return DR_ACTION_DOMAIN_NIC_EGRESS; case MLX5DR_DOMAIN_TYPE_FDB: - if (ste_type == MLX5DR_STE_TYPE_RX) + if (nic_type == DR_DOMAIN_NIC_TYPE_RX) return DR_ACTION_DOMAIN_FDB_INGRESS; return DR_ACTION_DOMAIN_FDB_EGRESS; default: @@ -421,6 +529,18 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn, return 0; } +static void dr_action_print_sequence(struct mlx5dr_domain *dmn, + struct mlx5dr_action *actions[], + int last_idx) +{ + int i; + + for (i = 0; i <= last_idx; i++) + mlx5dr_err(dmn, "< %s (%d) > ", + dr_action_id_to_str(actions[i]->action_type), + actions[i]->action_type); +} + #define WITH_VLAN_NUM_HW_ACTIONS 6 int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, @@ -431,7 +551,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, u32 *new_hw_ste_arr_sz) { struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn; - bool rx_rule = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX; + bool rx_rule = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX; struct mlx5dr_domain *dmn = matcher->tbl->dmn; u8 action_type_set[DR_ACTION_TYP_MAX] = {}; struct mlx5dr_ste_actions_attr attr = {}; @@ -445,7 +565,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, attr.gvmi = dmn->info.caps.gvmi; attr.hit_gvmi = dmn->info.caps.gvmi; attr.final_icm_addr = nic_dmn->default_icm_addr; - action_domain = dr_action_get_action_domain(dmn->type, nic_dmn->ste_type); + action_domain = dr_action_get_action_domain(dmn->type, nic_dmn->type); for (i = 0; i < num_actions; i++) { struct mlx5dr_action_dest_tbl *dest_tbl; @@ -467,11 +587,11 @@ int 
mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, if (dest_tbl->tbl->dmn != dmn) { mlx5dr_err(dmn, "Destination table belongs to a different domain\n"); - goto out_invalid_arg; + return -EINVAL; } if (dest_tbl->tbl->level <= matcher->tbl->level) { - mlx5_core_warn_once(dmn->mdev, - "Connecting table to a lower/same level destination table\n"); + mlx5_core_dbg_once(dmn->mdev, + "Connecting table to a lower/same level destination table\n"); mlx5dr_dbg(dmn, "Connecting table at level %d to a destination table at level %d\n", matcher->tbl->level, @@ -509,7 +629,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, break; case DR_ACTION_TYP_QP: mlx5dr_info(dmn, "Domain doesn't support QP\n"); - goto out_invalid_arg; + return -EOPNOTSUPP; case DR_ACTION_TYP_CTR: attr.ctr_id = action->ctr->ctr_id + action->ctr->offeset; @@ -536,7 +656,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, if (rx_rule && !(dmn->ste_ctx->actions_caps & DR_STE_CTX_ACTION_CAP_RX_ENCAP)) { mlx5dr_info(dmn, "Device doesn't support Encap on RX\n"); - goto out_invalid_arg; + return -EOPNOTSUPP; } attr.reformat.size = action->reformat->size; attr.reformat.id = action->reformat->id; @@ -549,48 +669,66 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, attr.hit_gvmi = action->vport->caps->vhca_gvmi; dest_action = action; if (rx_rule) { - /* Loopback on WIRE vport is not supported */ - if (action->vport->caps->num == WIRE_PORT) - goto out_invalid_arg; - + if (action->vport->caps->num == WIRE_PORT) { + mlx5dr_dbg(dmn, "Device doesn't support Loopback on WIRE vport\n"); + return -EOPNOTSUPP; + } attr.final_icm_addr = action->vport->caps->icm_address_rx; } else { attr.final_icm_addr = action->vport->caps->icm_address_tx; } break; case DR_ACTION_TYP_POP_VLAN: + if (!rx_rule && !(dmn->ste_ctx->actions_caps & + DR_STE_CTX_ACTION_CAP_TX_POP)) { + mlx5dr_dbg(dmn, "Device doesn't support POP VLAN action on TX\n"); + return -EOPNOTSUPP; + } + max_actions_type = MLX5DR_MAX_VLANS; attr.vlans.count++; break; case DR_ACTION_TYP_PUSH_VLAN: + if (rx_rule && !(dmn->ste_ctx->actions_caps & + DR_STE_CTX_ACTION_CAP_RX_PUSH)) { + mlx5dr_dbg(dmn, "Device doesn't support PUSH VLAN action on RX\n"); + return -EOPNOTSUPP; + } + max_actions_type = MLX5DR_MAX_VLANS; - if (attr.vlans.count == MLX5DR_MAX_VLANS) + if (attr.vlans.count == MLX5DR_MAX_VLANS) { + mlx5dr_dbg(dmn, "Max VLAN push/pop count exceeded\n"); return -EINVAL; + } attr.vlans.headers[attr.vlans.count++] = action->push_vlan->vlan_hdr; break; case DR_ACTION_TYP_INSERT_HDR: + case DR_ACTION_TYP_REMOVE_HDR: attr.reformat.size = action->reformat->size; attr.reformat.id = action->reformat->id; attr.reformat.param_0 = action->reformat->param_0; attr.reformat.param_1 = action->reformat->param_1; break; default: - goto out_invalid_arg; + mlx5dr_err(dmn, "Unsupported action type %d\n", action_type); + return -EINVAL; } /* Check action duplication */ if (++action_type_set[action_type] > max_actions_type) { mlx5dr_err(dmn, "Action type %d supports only max %d time(s)\n", action_type, max_actions_type); - goto out_invalid_arg; + return -EINVAL; } /* Check action state machine is valid */ if (dr_action_validate_and_get_next_state(action_domain, action_type, &state)) { - mlx5dr_err(dmn, "Invalid action sequence provided\n"); + mlx5dr_err(dmn, "Invalid action (gvmi: %d, is_rx: %d) sequence provided:", + attr.gvmi, rx_rule); + dr_action_print_sequence(dmn, actions, i); return -EOPNOTSUPP; } } @@ -614,16 +752,13 @@ int 
mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, } dr_actions_apply(dmn, - nic_dmn->ste_type, + nic_dmn->type, action_type_set, last_ste, &attr, new_hw_ste_arr_sz); return 0; - -out_invalid_arg: - return -EINVAL; } static unsigned int action_size[DR_ACTION_TYP_MAX] = { @@ -638,6 +773,7 @@ static unsigned int action_size[DR_ACTION_TYP_MAX] = { [DR_ACTION_TYP_VPORT] = sizeof(struct mlx5dr_action_vport), [DR_ACTION_TYP_PUSH_VLAN] = sizeof(struct mlx5dr_action_push_vlan), [DR_ACTION_TYP_INSERT_HDR] = sizeof(struct mlx5dr_action_reformat), + [DR_ACTION_TYP_REMOVE_HDR] = sizeof(struct mlx5dr_action_reformat), [DR_ACTION_TYP_SAMPLER] = sizeof(struct mlx5dr_action_sampler), }; @@ -709,7 +845,8 @@ dec_ref: struct mlx5dr_action * mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, struct mlx5dr_action_dest *dests, - u32 num_of_dests) + u32 num_of_dests, + bool ignore_flow_level) { struct mlx5dr_cmd_flow_destination_hw_info *hw_dests; struct mlx5dr_action **ref_actions; @@ -776,7 +913,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, num_of_dests, reformat_req, &action->dest_tbl->fw_tbl.id, - &action->dest_tbl->fw_tbl.group_id); + &action->dest_tbl->fw_tbl.group_id, + ignore_flow_level); if (ret) goto free_action; @@ -884,11 +1022,23 @@ dr_action_verify_reformat_params(enum mlx5dr_action_type reformat_type, size_t data_sz, void *data) { - if ((!data && data_sz) || (data && !data_sz) || - ((reformat_param_0 || reformat_param_1) && - reformat_type != DR_ACTION_TYP_INSERT_HDR) || - reformat_type > DR_ACTION_TYP_INSERT_HDR) { - mlx5dr_dbg(dmn, "Invalid reformat parameter!\n"); + if (reformat_type == DR_ACTION_TYP_INSERT_HDR) { + if ((!data && data_sz) || (data && !data_sz) || + MLX5_CAP_GEN_2(dmn->mdev, max_reformat_insert_size) < data_sz || + MLX5_CAP_GEN_2(dmn->mdev, max_reformat_insert_offset) < reformat_param_1) { + mlx5dr_dbg(dmn, "Invalid reformat parameters for INSERT_HDR\n"); + goto out_err; + } + } else if (reformat_type == DR_ACTION_TYP_REMOVE_HDR) { + if (data || + MLX5_CAP_GEN_2(dmn->mdev, max_reformat_remove_size) < data_sz || + MLX5_CAP_GEN_2(dmn->mdev, max_reformat_remove_offset) < reformat_param_1) { + mlx5dr_dbg(dmn, "Invalid reformat parameters for REMOVE_HDR\n"); + goto out_err; + } + } else if (reformat_param_0 || reformat_param_1 || + reformat_type > DR_ACTION_TYP_REMOVE_HDR) { + mlx5dr_dbg(dmn, "Invalid reformat parameters\n"); goto out_err; } @@ -987,7 +1137,6 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, return 0; } case DR_ACTION_TYP_INSERT_HDR: - { ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev, MLX5_REFORMAT_TYPE_INSERT_HDR, reformat_param_0, @@ -1002,7 +1151,12 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, action->reformat->param_0 = reformat_param_0; action->reformat->param_1 = reformat_param_1; return 0; - } + case DR_ACTION_TYP_REMOVE_HDR: + action->reformat->id = 0; + action->reformat->size = data_sz; + action->reformat->param_0 = reformat_param_0; + action->reformat->param_1 = reformat_param_1; + return 0; default: mlx5dr_info(dmn, "Reformat type is not supported %d\n", action->action_type); return -EINVAL; @@ -1658,6 +1812,7 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action) } break; case DR_ACTION_TYP_TNL_L2_TO_L2: + case DR_ACTION_TYP_REMOVE_HDR: refcount_dec(&action->reformat->dmn->refcount); break; case DR_ACTION_TYP_TNL_L3_TO_L2: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 
54e1f5438bbe..56307283bf9b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -655,6 +655,7 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(set_fte_in, in, table_type, ft->type); MLX5_SET(set_fte_in, in, table_id, ft->id); MLX5_SET(set_fte_in, in, flow_index, fte->index); + MLX5_SET(set_fte_in, in, ignore_flow_level, fte->ignore_flow_level); if (ft->vport) { MLX5_SET(set_fte_in, in, vport_number, ft->vport); MLX5_SET(set_fte_in, in, other_vport, 1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c index 7091b1be84ef..0fe159809ba1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c @@ -245,7 +245,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, return -ENOTSUPP; dmn->info.supp_sw_steering = true; - dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX; + dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX; dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address; dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address; break; @@ -254,7 +254,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, return -ENOTSUPP; dmn->info.supp_sw_steering = true; - dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX; + dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX; dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address; dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address; break; @@ -265,8 +265,8 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb)) return -ENOTSUPP; - dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX; - dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX; + dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX; + dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX; vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0); if (!vport_cap) { mlx5dr_err(dmn, "Failed to get esw manager vport\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c index 7ccfd40586ce..0d6f86eb248b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c @@ -103,7 +103,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn, int num_dest, bool reformat_req, u32 *tbl_id, - u32 *group_id) + u32 *group_id, + bool ignore_flow_level) { struct mlx5dr_cmd_create_flow_table_attr ft_attr = {}; struct mlx5dr_cmd_fte_info fte_info = {}; @@ -137,6 +138,7 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn, fte_info.dests_size = num_dest; fte_info.val = val; fte_info.dest_arr = dest; + fte_info.ignore_flow_level = ignore_flow_level; ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info); if (ret) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c index 6f6191d1d5a6..b5409cc021d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -396,13 +396,14 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, struct mlx5dr_domain *dmn = matcher->tbl->dmn; struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx; struct mlx5dr_match_param mask = {}; + bool allow_empty_match = false; struct mlx5dr_ste_build *sb; bool inner, rx; int idx = 0; int ret, i; 
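The ignore_flow_level changes in dr_cmd.c and dr_fw.c above are pure plumbing: a boolean born from the flow-act flags (translated in fs_dr.c further down) rides through the multi-destination table helpers into the set_fte command. A reduced sketch of the path (the bit value of the flag is illustrative):

    #include <stdbool.h>

    #define FLOW_ACT_IGNORE_FLOW_LEVEL (1u << 3) /* illustrative bit value */

    struct fte_info {
        bool ignore_flow_level;
    };

    /* dr_fw: the md-table helper just forwards the flag into the FTE info,
     * from where the command layer copies it verbatim into the FW layout,
     * as the new MLX5_SET(set_fte_in, ..., ignore_flow_level, ...) line does.
     */
    static int create_md_tbl(struct fte_info *fte, bool ignore_flow_level)
    {
        fte->ignore_flow_level = ignore_flow_level;
        return 0;
    }

    /* fs_dr: translate the generic flow-act flag once, at the API boundary */
    static int create_mult_dest_tbl(struct fte_info *fte, unsigned int act_flags)
    {
        return create_md_tbl(fte, !!(act_flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
    }

This is also likely why the lower/same-level destination warning earlier in dr_action.c was demoted from mlx5_core_warn_once() to mlx5_core_dbg_once(): with ignore_flow_level set, that topology is a supported configuration rather than a suspicious one.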
sb = nic_matcher->ste_builder_arr[outer_ipv][inner_ipv]; - rx = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX; + rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX; /* Create a temporary mask to track and clear used mask fields */ if (matcher->match_criteria & DR_MATCHER_CRITERIA_OUTER) @@ -428,6 +429,16 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, if (ret) return ret; + /* Optimize RX pipe by reducing source port match, since + * the FDB RX part is connected only to the wire. + */ + if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB && + rx && mask.misc.source_port) { + mask.misc.source_port = 0; + mask.misc.source_eswitch_owner_vhca_id = 0; + allow_empty_match = true; + } + /* Outer */ if (matcher->match_criteria & (DR_MATCHER_CRITERIA_OUTER | DR_MATCHER_CRITERIA_MISC | @@ -619,7 +630,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, } /* Empty matcher, takes all */ - if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY) + if ((!idx && allow_empty_match) || + matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY) mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx); if (idx == 0) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c index 43356fad53de..a1c8ac0ecc23 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -81,6 +81,7 @@ dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher, } ste->ste_chain_location = orig_ste->ste_chain_location; + ste->htbl->pointing_ste = orig_ste->htbl->pointing_ste; /* In collision entry, all members share the same miss_list_head */ ste->htbl->miss_list = mlx5dr_ste_get_miss_list(orig_ste); @@ -185,6 +186,9 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher, if (!new_ste) return NULL; + /* Update collision pointing STE */ + new_ste->htbl->pointing_ste = col_ste->htbl->pointing_ste; + /* In collision entry, all members share the same miss_list_head */ new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste); @@ -212,7 +216,7 @@ static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher, new_ste->next_htbl = cur_ste->next_htbl; new_ste->ste_chain_location = cur_ste->ste_chain_location; - if (!mlx5dr_ste_is_last_in_rule(nic_matcher, new_ste->ste_chain_location)) + if (new_ste->next_htbl) new_ste->next_htbl->pointing_ste = new_ste; /* We need to copy the refcount since this ste @@ -220,10 +224,8 @@ static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher, */ new_ste->refcount = cur_ste->refcount; - /* Link old STEs rule_mem list to the new ste */ - mlx5dr_rule_update_rule_member(cur_ste, new_ste); - INIT_LIST_HEAD(&new_ste->rule_list); - list_splice_tail_init(&cur_ste->rule_list, &new_ste->rule_list); + /* Link old STEs rule to the new ste */ + mlx5dr_rule_set_last_member(cur_ste->rule_rx_tx, new_ste, false); } static struct mlx5dr_ste * @@ -404,7 +406,7 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule, info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr; mlx5dr_ste_set_formatted_ste(dmn->ste_ctx, dmn->info.caps.gvmi, - nic_dmn, + nic_dmn->type, new_htbl, formatted_ste, &info); @@ -581,34 +583,66 @@ free_action_members: return -ENOMEM; } -/* While the pointer of ste is no longer valid, like while moving ste to be - * the first in the miss_list, and to be in the origin table, - * all rule-members that are attached to this ste should update their ste member - * to the new pointer - */ -void 
mlx5dr_rule_update_rule_member(struct mlx5dr_ste *ste, - struct mlx5dr_ste *new_ste) +void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule, + struct mlx5dr_ste *ste, + bool force) +{ + /* Update rule member is usually done for the last STE or during rule + * creation to recover from mid-creation failure (for this peruse the + * force flag is used) + */ + if (ste->next_htbl && !force) + return; + + /* Update is required since each rule keeps track of its last STE */ + ste->rule_rx_tx = nic_rule; + nic_rule->last_rule_ste = ste; +} + +static struct mlx5dr_ste *dr_rule_get_pointed_ste(struct mlx5dr_ste *curr_ste) +{ + struct mlx5dr_ste *first_ste; + + first_ste = list_first_entry(mlx5dr_ste_get_miss_list(curr_ste), + struct mlx5dr_ste, miss_list_node); + + return first_ste->htbl->pointing_ste; +} + +int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr, + struct mlx5dr_ste *curr_ste, + int *num_of_stes) { - struct mlx5dr_rule_member *rule_mem; + bool first = false; + + *num_of_stes = 0; + + if (!curr_ste) + return -ENOENT; + + /* Iterate from last to first */ + while (!first) { + first = curr_ste->ste_chain_location == 1; + ste_arr[*num_of_stes] = curr_ste; + *num_of_stes += 1; + curr_ste = dr_rule_get_pointed_ste(curr_ste); + } - list_for_each_entry(rule_mem, &ste->rule_list, use_ste_list) - rule_mem->ste = new_ste; + return 0; } static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule, struct mlx5dr_rule_rx_tx *nic_rule) { - struct mlx5dr_rule_member *rule_mem; - struct mlx5dr_rule_member *tmp_mem; + struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES]; + struct mlx5dr_ste *curr_ste = nic_rule->last_rule_ste; + int i; - if (list_empty(&nic_rule->rule_members_list)) + if (mlx5dr_rule_get_reverse_rule_members(ste_arr, curr_ste, &i)) return; - list_for_each_entry_safe(rule_mem, tmp_mem, &nic_rule->rule_members_list, list) { - list_del(&rule_mem->list); - list_del(&rule_mem->use_ste_list); - mlx5dr_ste_put(rule_mem->ste, rule->matcher, nic_rule->nic_matcher); - kvfree(rule_mem); - } + + while (i--) + mlx5dr_ste_put(ste_arr[i], rule->matcher, nic_rule->nic_matcher); } static u16 dr_get_bits_per_mask(u16 byte_mask) @@ -628,43 +662,25 @@ static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl, struct mlx5dr_domain_rx_tx *nic_dmn) { struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl; + int threshold; if (dmn->info.max_log_sw_icm_sz <= htbl->chunk_size) return false; - if (!ctrl->may_grow) + if (!mlx5dr_ste_htbl_may_grow(htbl)) return false; if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size) return false; - if (ctrl->num_of_collisions >= ctrl->increase_threshold && - (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold) + threshold = mlx5dr_ste_htbl_increase_threshold(htbl); + if (ctrl->num_of_collisions >= threshold && + (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= threshold) return true; return false; } -static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule, - struct mlx5dr_ste *ste) -{ - struct mlx5dr_rule_member *rule_mem; - - rule_mem = kvzalloc(sizeof(*rule_mem), GFP_KERNEL); - if (!rule_mem) - return -ENOMEM; - - INIT_LIST_HEAD(&rule_mem->list); - INIT_LIST_HEAD(&rule_mem->use_ste_list); - - rule_mem->ste = ste; - list_add_tail(&rule_mem->list, &nic_rule->rule_members_list); - - list_add_tail(&rule_mem->use_ste_list, &ste->rule_list); - - return 0; -} - static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule, struct mlx5dr_rule_rx_tx *nic_rule, 
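The helpers above replace the per-rule rule_members list with a back-pointer scheme: a rule remembers only its last STE, every hash table remembers the STE whose hit pointer targets it, and the full chain is recovered by walking backwards until ste_chain_location reaches 1. A condensed model of the walk (types pared down to the fields it touches):

    #include <errno.h>

    struct ste;

    struct htbl {
        struct ste *pointing_ste;   /* STE whose hit pointer targets this table */
    };

    struct ste {
        struct htbl *htbl;          /* table this STE lives in */
        struct ste *miss_list_head; /* collision entries share one miss list */
        int ste_chain_location;     /* 1-based position within the rule */
    };

    static int reverse_rule_members(struct ste **arr, struct ste *curr, int *num)
    {
        *num = 0;
        if (!curr)
            return -ENOENT;

        for (;;) {
            arr[(*num)++] = curr;
            if (curr->ste_chain_location == 1)
                return 0;       /* reached the rule's first STE */
            /* only the miss-list head's table carries the back-pointer */
            curr = curr->miss_list_head->htbl->pointing_ste;
        }
    }

dr_rule_clean_rule_members() then walks the collected array in reverse, so the STEs are released in their original first-to-last order, and no per-member allocation is needed anymore.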
struct list_head *send_ste_list, @@ -679,15 +695,13 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule, struct mlx5dr_domain *dmn = matcher->tbl->dmn; u8 *curr_hw_ste, *prev_hw_ste; struct mlx5dr_ste *action_ste; - int i, k, ret; + int i, k; /* Two cases: * 1. num_of_builders is equal to new_hw_ste_arr_sz, the action in the ste * 2. num_of_builders is less then new_hw_ste_arr_sz, new ste was added * to support the action. */ - if (num_of_builders == new_hw_ste_arr_sz) - return 0; for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) { curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE; @@ -700,6 +714,10 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule, mlx5dr_ste_get(action_ste); + action_ste->htbl->pointing_ste = last_ste; + last_ste->next_htbl = action_ste->htbl; + last_ste = action_ste; + /* While free ste we go over the miss list, so add this ste to the list */ list_add_tail(&action_ste->miss_list_node, mlx5dr_ste_get_miss_list(action_ste)); @@ -713,21 +731,19 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule, mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx, prev_hw_ste, action_ste->htbl); - ret = dr_rule_add_member(nic_rule, action_ste); - if (ret) { - mlx5dr_dbg(dmn, "Failed adding rule member\n"); - goto free_ste_info; - } + + mlx5dr_rule_set_last_member(nic_rule, action_ste, true); + mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0, curr_hw_ste, ste_info_arr[k], send_ste_list, false); } + last_ste->next_htbl = NULL; + return 0; -free_ste_info: - kfree(ste_info_arr[k]); err_exit: mlx5dr_ste_put(action_ste, matcher, nic_matcher); return -ENOMEM; @@ -1015,12 +1031,12 @@ static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec) } static bool dr_rule_skip(enum mlx5dr_domain_type domain, - enum mlx5dr_ste_entry_type ste_type, + enum mlx5dr_domain_nic_type nic_type, struct mlx5dr_match_param *mask, struct mlx5dr_match_param *value, u32 flow_source) { - bool rx = ste_type == MLX5DR_STE_TYPE_RX; + bool rx = nic_type == DR_DOMAIN_NIC_TYPE_RX; if (domain != MLX5DR_DOMAIN_TYPE_FDB) return false; @@ -1065,9 +1081,7 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule, nic_matcher = nic_rule->nic_matcher; nic_dmn = nic_matcher->nic_tbl->nic_dmn; - INIT_LIST_HEAD(&nic_rule->rule_members_list); - - if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param, + if (dr_rule_skip(dmn->type, nic_dmn->type, &matcher->mask, param, rule->flow_source)) return 0; @@ -1121,14 +1135,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule, cur_htbl = ste->next_htbl; - /* Keep all STEs in the rule struct */ - ret = dr_rule_add_member(nic_rule, ste); - if (ret) { - mlx5dr_dbg(dmn, "Failed adding rule member index %d\n", i); - goto free_ste; - } - mlx5dr_ste_get(ste); + mlx5dr_rule_set_last_member(nic_rule, ste, true); } /* Connect actions */ @@ -1153,8 +1161,6 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule, return 0; -free_ste: - mlx5dr_ste_put(ste, matcher, nic_matcher); free_rule: dr_rule_clean_rule_members(rule, nic_rule); /* Clean all ste_info's */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 8a1623a4d8bc..bfb14b4b1906 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -325,10 +325,14 @@ static int dr_handle_pending_wc(struct mlx5dr_domain *dmn, do { ne = dr_poll_cq(send_ring->cq, 1); - if (ne < 0) + if (unlikely(ne < 0)) { + 
mlx5_core_warn_once(dmn->mdev, "SMFS QPN 0x%x is disabled/limited", + send_ring->qp->qpn); + send_ring->err_state = true; return ne; - else if (ne == 1) + } else if (ne == 1) { send_ring->pending_wqe -= send_ring->signal_th; + } } while (is_drain && send_ring->pending_wqe); return 0; @@ -361,6 +365,14 @@ static int dr_postsend_icm_data(struct mlx5dr_domain *dmn, u32 buff_offset; int ret; + if (unlikely(dmn->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR || + send_ring->err_state)) { + mlx5_core_dbg_once(dmn->mdev, + "Skipping post send: QP err state: %d, device state: %d\n", + send_ring->err_state, dmn->mdev->state); + return 0; + } + spin_lock(&send_ring->lock); ret = dr_handle_pending_wc(dmn, send_ring); @@ -620,6 +632,7 @@ static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev, MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt); MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry); + MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x8); /* ~1ms */ MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP); MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index 9b1529137cba..1cdfe4fccc7a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -172,9 +172,6 @@ static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src) dst->next_htbl->pointing_ste = dst; dst->refcount = src->refcount; - - INIT_LIST_HEAD(&dst->rule_list); - list_splice_tail_init(&src->rule_list, &dst->rule_list); } /* Free ste which is the head and the only one in miss_list */ @@ -233,12 +230,12 @@ dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher, /* Remove from the miss_list the next_ste before copy */ list_del_init(&next_ste->miss_list_node); - /* All rule-members that use next_ste should know about that */ - mlx5dr_rule_update_rule_member(next_ste, ste); - /* Move data from next into ste */ dr_ste_replace(ste, next_ste); + /* Update the rule on STE change */ + mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false); + /* Copy all 64 hw_ste bytes */ memcpy(hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED); sb_idx = ste->ste_chain_location - 1; @@ -382,14 +379,15 @@ void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx, /* Init one ste as a pattern for ste data array */ void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx, u16 gvmi, - struct mlx5dr_domain_rx_tx *nic_dmn, + enum mlx5dr_domain_nic_type nic_type, struct mlx5dr_ste_htbl *htbl, u8 *formatted_ste, struct mlx5dr_htbl_connect_info *connect_info) { + bool is_rx = nic_type == DR_DOMAIN_NIC_TYPE_RX; struct mlx5dr_ste ste = {}; - ste_ctx->ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi); + ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi); ste.hw_ste = formatted_ste; if (connect_info->type == CONNECT_HIT) @@ -408,7 +406,7 @@ int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn, mlx5dr_ste_set_formatted_ste(dmn->ste_ctx, dmn->info.caps.gvmi, - nic_dmn, + nic_dmn->type, htbl, formatted_ste, connect_info); @@ -466,21 +464,6 @@ free_table: return -ENOENT; } -static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl) -{ - struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl; - int num_of_entries; - - htbl->ctrl.may_grow = true; - - if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask) - htbl->ctrl.may_grow = false; - - /* Threshold is 50%, one is added to table 
of size 1 */ - num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size); - ctrl->increase_threshold = (num_of_entries + 1) / 2; -} - struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool, enum mlx5dr_icm_chunk_size chunk_size, u16 lu_type, u16 byte_mask) @@ -513,11 +496,9 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool, ste->refcount = 0; INIT_LIST_HEAD(&ste->miss_list_node); INIT_LIST_HEAD(&htbl->miss_list[i]); - INIT_LIST_HEAD(&ste->rule_list); } htbl->chunk_size = chunk_size; - dr_ste_set_ctrl(htbl); return htbl; out_free_htbl: @@ -649,6 +630,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher, u8 *ste_arr) { struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn; + bool is_rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX; struct mlx5dr_domain *dmn = matcher->tbl->dmn; struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx; struct mlx5dr_ste_build *sb; @@ -663,7 +645,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher, for (i = 0; i < nic_matcher->num_of_builders; i++) { ste_ctx->ste_init(ste_arr, sb->lu_type, - nic_dmn->ste_type, + is_rx, dmn->info.caps.gvmi); mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h index 12a8bbbf944b..2d52d065dc8b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h @@ -146,7 +146,7 @@ struct mlx5dr_ste_ctx { /* Getters and Setters */ void (*ste_init)(u8 *hw_ste_p, u16 lu_type, - u8 entry_type, u16 gvmi); + bool is_rx, u16 gvmi); void (*set_next_lu_type)(u8 *hw_ste_p, u16 lu_type); u16 (*get_next_lu_type)(u8 *hw_ste_p); void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c index e4dd4eed5aee..9c704bce3c12 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c @@ -8,6 +8,12 @@ #define SVLAN_ETHERTYPE 0x88a8 #define DR_STE_ENABLE_FLOW_TAG BIT(31) +enum dr_ste_v0_entry_type { + DR_STE_TYPE_TX = 1, + DR_STE_TYPE_RX = 2, + DR_STE_TYPE_MODIFY_PKT = 6, +}; + enum dr_ste_v0_action_tunl { DR_STE_TUNL_ACTION_NONE = 0, DR_STE_TUNL_ACTION_ENABLE = 1, @@ -292,8 +298,8 @@ static void dr_ste_v0_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size) MLX5_SET(ste_general, hw_ste_p, next_table_base_31_5_size, index); } -static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type, - u8 entry_type, u16 gvmi) +static void dr_ste_v0_init_full(u8 *hw_ste_p, u16 lu_type, + enum dr_ste_v0_entry_type entry_type, u16 gvmi) { dr_ste_v0_set_entry_type(hw_ste_p, entry_type); dr_ste_v0_set_lu_type(hw_ste_p, lu_type); @@ -307,6 +313,15 @@ static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type, MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi); } +static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type, + bool is_rx, u16 gvmi) +{ + enum dr_ste_v0_entry_type entry_type; + + entry_type = is_rx ? 
DR_STE_TYPE_RX : DR_STE_TYPE_TX; + dr_ste_v0_init_full(hw_ste_p, lu_type, entry_type, gvmi); +} + static void dr_ste_v0_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag) { MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer, @@ -380,13 +395,13 @@ static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions, static void dr_ste_v0_arr_init_next(u8 **last_ste, u32 *added_stes, - enum mlx5dr_ste_entry_type entry_type, + enum dr_ste_v0_entry_type entry_type, u16 gvmi) { (*added_stes)++; *last_ste += DR_STE_SIZE; - dr_ste_v0_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, - entry_type, gvmi); + dr_ste_v0_init_full(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, + entry_type, gvmi); } static void @@ -404,7 +419,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn, * modify headers for outer headers only */ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) { - dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT); + dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT); dr_ste_v0_set_rewrite_actions(last_ste, attr->modify_actions, attr->modify_index); @@ -417,7 +432,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn, if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR]) dr_ste_v0_arr_init_next(&last_ste, added_stes, - MLX5DR_STE_TYPE_TX, + DR_STE_TYPE_TX, attr->gvmi); dr_ste_v0_set_tx_push_vlan(last_ste, @@ -435,7 +450,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn, action_type_set[DR_ACTION_TYP_PUSH_VLAN]) dr_ste_v0_arr_init_next(&last_ste, added_stes, - MLX5DR_STE_TYPE_TX, + DR_STE_TYPE_TX, attr->gvmi); dr_ste_v0_set_tx_encap(last_ste, @@ -469,7 +484,7 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn, dr_ste_v0_set_counter_id(last_ste, attr->ctr_id); if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) { - dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT); + dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT); dr_ste_v0_set_rx_decap_l3(last_ste, attr->decap_with_vlan); dr_ste_v0_set_rewrite_actions(last_ste, attr->decap_actions, @@ -488,7 +503,7 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn, action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) dr_ste_v0_arr_init_next(&last_ste, added_stes, - MLX5DR_STE_TYPE_RX, + DR_STE_TYPE_RX, attr->gvmi); dr_ste_v0_set_rx_pop_vlan(last_ste); @@ -496,13 +511,13 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn, } if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) { - if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT) + if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT) dr_ste_v0_arr_init_next(&last_ste, added_stes, - MLX5DR_STE_TYPE_MODIFY_PKT, + DR_STE_TYPE_MODIFY_PKT, attr->gvmi); else - dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT); + dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT); dr_ste_v0_set_rewrite_actions(last_ste, attr->modify_actions, @@ -510,10 +525,10 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn, } if (action_type_set[DR_ACTION_TYP_TAG]) { - if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT) + if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT) dr_ste_v0_arr_init_next(&last_ste, added_stes, - MLX5DR_STE_TYPE_RX, + DR_STE_TYPE_RX, attr->gvmi); dr_ste_v0_rx_set_flow_tag(last_ste, attr->flow_tag); @@ -1157,6 +1172,7 @@ dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value, u8 *tag) { struct mlx5dr_match_spec *spec = sb->inner ? 
&value->inner : &value->outer; + struct mlx5dr_match_misc *misc = &value->misc; DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport); DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport); @@ -1168,6 +1184,11 @@ dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value, DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn); DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit); + if (sb->inner) + DR_STE_SET_TAG(eth_l4, tag, flow_label, misc, inner_ipv6_flow_label); + else + DR_STE_SET_TAG(eth_l4, tag, flow_label, misc, outer_ipv6_flow_label); + if (spec->tcp_flags) { DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec); spec->tcp_flags = 0; @@ -1772,7 +1793,7 @@ dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb, static int dr_ste_v0_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { struct mlx5dr_match_misc3 *misc3 = &value->misc3; @@ -1802,7 +1823,7 @@ static void dr_ste_v0_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *s static int dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0)) DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3); @@ -1829,7 +1850,7 @@ dr_ste_v0_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb, static int dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0)) DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index 4aaca8eb7597..b2481c99da79 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -322,7 +322,7 @@ static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size) } static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, - u8 entry_type, u16 gvmi) + bool is_rx, u16 gvmi) { dr_ste_v1_set_lu_type(hw_ste_p, lu_type); dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE); @@ -402,8 +402,23 @@ static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action, dr_ste_v1_set_reparse(hw_ste_p); } -static void dr_ste_v1_set_tx_push_vlan(u8 *hw_ste_p, u8 *d_action, - u32 vlan_hdr) +static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action, + u8 anchor, u8 offset, + int size) +{ + MLX5_SET(ste_single_action_remove_header_size_v1, s_action, + action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE); + MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_anchor, anchor); + + /* The hardware expects here size and offset in words (2 byte) */ + MLX5_SET(ste_single_action_remove_header_size_v1, s_action, remove_size, size / 2); + MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_offset, offset / 2); + + dr_ste_v1_set_reparse(hw_ste_p); +} + +static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, + u32 vlan_hdr) { MLX5_SET(ste_double_action_insert_with_inline_v1, d_action, action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE); @@ -416,7 +431,7 @@ static void dr_ste_v1_set_tx_push_vlan(u8 *hw_ste_p, u8 *d_action, dr_ste_v1_set_reparse(hw_ste_p); } -static void dr_ste_v1_set_rx_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num) +static void 
dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num) { MLX5_SET(ste_single_action_remove_header_size_v1, s_action, action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE); @@ -503,13 +518,28 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, { u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action); u8 action_sz = DR_STE_ACTION_DOUBLE_SZ; + bool allow_modify_hdr = true; bool allow_encap = true; + if (action_type_set[DR_ACTION_TYP_POP_VLAN]) { + if (action_sz < DR_STE_ACTION_SINGLE_SZ) { + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, + attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, + last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + } + dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count); + action_sz -= DR_STE_ACTION_SINGLE_SZ; + action += DR_STE_ACTION_SINGLE_SZ; + allow_modify_hdr = false; + } + if (action_type_set[DR_ACTION_TYP_CTR]) dr_ste_v1_set_counter_id(last_ste, attr->ctr_id); if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) { - if (action_sz < DR_STE_ACTION_DOUBLE_SZ) { + if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) { dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); action = MLX5_ADDR_OF(ste_mask_and_match_v1, @@ -534,7 +564,8 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, action_sz = DR_STE_ACTION_TRIPLE_SZ; allow_encap = true; } - dr_ste_v1_set_tx_push_vlan(last_ste, action, attr->vlans.headers[i]); + dr_ste_v1_set_push_vlan(last_ste, action, + attr->vlans.headers[i]); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; } @@ -579,6 +610,18 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, attr->reformat.size); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; + } else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) { + if (action_sz < DR_STE_ACTION_SINGLE_SZ) { + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + } + dr_ste_v1_set_remove_hdr(last_ste, action, + attr->reformat.param_0, + attr->reformat.param_1, + attr->reformat.size); + action_sz -= DR_STE_ACTION_SINGLE_SZ; + action += DR_STE_ACTION_SINGLE_SZ; } dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi); @@ -635,7 +678,7 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, allow_ctr = false; } - dr_ste_v1_set_rx_pop_vlan(last_ste, action, attr->vlans.count); + dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count); action_sz -= DR_STE_ACTION_SINGLE_SZ; action += DR_STE_ACTION_SINGLE_SZ; } @@ -656,6 +699,26 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, action += DR_STE_ACTION_DOUBLE_SZ; } + if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) { + int i; + + for (i = 0; i < attr->vlans.count; i++) { + if (action_sz < DR_STE_ACTION_DOUBLE_SZ || + !allow_modify_hdr) { + dr_ste_v1_arr_init_next_match(&last_ste, + added_stes, + attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, + last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + } + dr_ste_v1_set_push_vlan(last_ste, action, + attr->vlans.headers[i]); + action_sz -= DR_STE_ACTION_DOUBLE_SZ; + action += DR_STE_ACTION_DOUBLE_SZ; + } + } + if (action_type_set[DR_ACTION_TYP_CTR]) { /* Counter action set after decap and before insert_hdr * to exclude decaped / encaped header respectively. 
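Both the TX pop-VLAN block just added and the remove-header branches that follow repeat one pattern: check whether the remaining action_sz budget fits the next action (and whether an ordering flag such as allow_modify_hdr still permits it), otherwise chain a fresh match STE and reset the budget. A toy version of that bookkeeping (sizes symbolic, not the real STEv1 layout):

    #define ACTION_SINGLE_SZ 1
    #define ACTION_DOUBLE_SZ 2
    #define ACTION_TRIPLE_SZ 3      /* capacity of a freshly chained STE */

    static unsigned char stes[16][64]; /* toy STE storage; slot 0 is the first STE */

    struct ste_writer {
        unsigned char *action;      /* write cursor in the current STE */
        int action_sz;              /* remaining budget */
        unsigned int added_stes;
    };

    /* stand-in for dr_ste_v1_arr_init_next_match(): chain a new match STE */
    static void start_new_ste(struct ste_writer *w)
    {
        w->action = stes[++w->added_stes];
        w->action_sz = ACTION_TRIPLE_SZ;
    }

    /* the first STE starts with a DOUBLE budget, as in set_actions_tx/rx:
     *   struct ste_writer w = { .action = stes[0], .action_sz = ACTION_DOUBLE_SZ };
     */
    static unsigned char *reserve_action(struct ste_writer *w, int sz, int allowed)
    {
        if (w->action_sz < sz || !allowed)
            start_new_ste(w);

        w->action_sz -= sz;
        w->action += sz;
        return w->action - sz;
    }

reserve_action(&w, ACTION_DOUBLE_SZ, allow_modify_hdr) then mirrors the modify-header branch, and the single-size remove-header branch follows the same shape.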
@@ -714,6 +777,20 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; allow_modify_hdr = false; + } else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) { + if (action_sz < DR_STE_ACTION_SINGLE_SZ) { + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + allow_modify_hdr = true; + allow_ctr = true; + } + dr_ste_v1_set_remove_hdr(last_ste, action, + attr->reformat.param_0, + attr->reformat.param_1, + attr->reformat.size); + action_sz -= DR_STE_ACTION_SINGLE_SZ; + action += DR_STE_ACTION_SINGLE_SZ; } dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi); @@ -1844,7 +1921,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb, static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { struct mlx5dr_match_misc3 *misc3 = &value->misc3; @@ -1868,7 +1945,7 @@ static void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *s static int dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0)) DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3); @@ -1895,7 +1972,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb, static int dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0)) DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3); @@ -1960,7 +2037,9 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = { .set_byte_mask = &dr_ste_v1_set_byte_mask, .get_byte_mask = &dr_ste_v1_get_byte_mask, /* Actions */ - .actions_caps = DR_STE_CTX_ACTION_CAP_RX_ENCAP, + .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP | + DR_STE_CTX_ACTION_CAP_RX_PUSH | + DR_STE_CTX_ACTION_CAP_RX_ENCAP, .set_actions_rx = &dr_ste_v1_set_actions_rx, .set_actions_tx = &dr_ste_v1_set_actions_tx, .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v1_action_modify_field_arr), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index f5e93fa87aff..b20e8aabb861 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -83,15 +83,14 @@ enum { DR_STE_SIZE_CTRL = 32, DR_STE_SIZE_TAG = 16, DR_STE_SIZE_MASK = 16, -}; - -enum { DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK, }; enum mlx5dr_ste_ctx_action_cap { DR_STE_CTX_ACTION_CAP_NONE = 0, - DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 0, + DR_STE_CTX_ACTION_CAP_TX_POP = 1 << 0, + DR_STE_CTX_ACTION_CAP_RX_PUSH = 1 << 1, + DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 2, }; enum { @@ -124,6 +123,7 @@ enum mlx5dr_action_type { DR_ACTION_TYP_POP_VLAN, DR_ACTION_TYP_PUSH_VLAN, DR_ACTION_TYP_INSERT_HDR, + DR_ACTION_TYP_REMOVE_HDR, DR_ACTION_TYP_SAMPLER, DR_ACTION_TYP_MAX, }; @@ -140,6 +140,7 @@ struct mlx5dr_icm_buddy_mem; struct mlx5dr_ste_htbl; struct mlx5dr_match_param; struct mlx5dr_cmd_caps; +struct mlx5dr_rule_rx_tx; struct mlx5dr_matcher_rx_tx; struct mlx5dr_ste_ctx; @@ -151,14 +152,14 @@ struct mlx5dr_ste { /* attached to the miss_list head at each htbl entry */ struct list_head miss_list_node; - /* 
each rule member that uses this ste attached here */ - struct list_head rule_list; - /* this ste is member of htbl */ struct mlx5dr_ste_htbl *htbl; struct mlx5dr_ste_htbl *next_htbl; + /* The rule this STE belongs to */ + struct mlx5dr_rule_rx_tx *rule_rx_tx; + /* this ste is part of a rule, located in ste's chain */ u8 ste_chain_location; }; @@ -171,8 +172,6 @@ struct mlx5dr_ste_htbl_ctrl { /* total number of collisions entries attached to this table */ unsigned int num_of_collisions; - unsigned int increase_threshold; - u8 may_grow:1; }; struct mlx5dr_ste_htbl { @@ -804,10 +803,15 @@ struct mlx5dr_cmd_caps { u8 isolate_vl_tc:1; }; +enum mlx5dr_domain_nic_type { + DR_DOMAIN_NIC_TYPE_RX, + DR_DOMAIN_NIC_TYPE_TX, +}; + struct mlx5dr_domain_rx_tx { u64 drop_icm_addr; u64 default_icm_addr; - enum mlx5dr_ste_entry_type ste_type; + enum mlx5dr_domain_nic_type type; struct mutex mutex; /* protect rx/tx domain */ }; @@ -885,14 +889,6 @@ struct mlx5dr_matcher { struct mlx5dv_flow_matcher *dv_matcher; }; -struct mlx5dr_rule_member { - struct mlx5dr_ste *ste; - /* attached to mlx5dr_rule via this */ - struct list_head list; - /* attached to mlx5dr_ste via this */ - struct list_head use_ste_list; -}; - struct mlx5dr_ste_action_modify_field { u16 hw_field; u8 start; @@ -993,8 +989,8 @@ struct mlx5dr_htbl_connect_info { }; struct mlx5dr_rule_rx_tx { - struct list_head rule_members_list; struct mlx5dr_matcher_rx_tx *nic_matcher; + struct mlx5dr_ste *last_rule_ste; }; struct mlx5dr_rule { @@ -1005,8 +1001,12 @@ struct mlx5dr_rule { u32 flow_source; }; -void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *new_ste, - struct mlx5dr_ste *ste); +void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule, + struct mlx5dr_ste *ste, + bool force); +int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr, + struct mlx5dr_ste *curr_ste, + int *num_of_stes); struct mlx5dr_icm_chunk { struct mlx5dr_icm_buddy_mem *buddy_mem; @@ -1083,6 +1083,25 @@ mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size, return entry_size * num_of_entries; } +static inline int +mlx5dr_ste_htbl_increase_threshold(struct mlx5dr_ste_htbl *htbl) +{ + int num_of_entries = + mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size); + + /* Threshold is 50%, one is added to table of size 1 */ + return (num_of_entries + 1) / 2; +} + +static inline bool +mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl *htbl) +{ + if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask) + return false; + + return true; +} + static inline struct mlx5dr_cmd_vport_cap * mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport) { @@ -1216,7 +1235,7 @@ int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn, bool update_hw_ste); void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx, u16 gvmi, - struct mlx5dr_domain_rx_tx *nic_dmn, + enum mlx5dr_domain_nic_type nic_type, struct mlx5dr_ste_htbl *htbl, u8 *formatted_ste, struct mlx5dr_htbl_connect_info *connect_info); @@ -1282,6 +1301,7 @@ struct mlx5dr_send_ring { u8 sync_buff[MIN_READ_SYNC]; struct mlx5dr_mr *sync_mr; spinlock_t lock; /* Protect the data path of the send ring */ + bool err_state; /* send_ring is not usable in err state */ }; int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn); @@ -1333,6 +1353,7 @@ struct mlx5dr_cmd_fte_info { u32 *val; struct mlx5_flow_act action; struct mlx5dr_cmd_flow_destination_hw_info *dest_arr; + bool ignore_flow_level; }; int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev, @@ -1362,7 +1383,8 @@ int 
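The dr_ste_set_ctrl() logic removed from dr_ste.c earlier reappears here as two stateless inline helpers, letting dr_rule_need_enlarge_hash() compute the threshold on demand instead of caching may_grow and increase_threshold in every table. The arithmetic, runnable on its own:

    #include <stdbool.h>

    /* 50% threshold; the +1 makes a one-entry table grow after one entry */
    static int increase_threshold(int num_of_entries)
    {
        return (num_of_entries + 1) / 2;
    }

    static bool need_enlarge(int num_of_entries, int num_valid, int num_collisions)
    {
        int threshold = increase_threshold(num_of_entries);

        return num_collisions >= threshold &&
               (num_valid - num_collisions) >= threshold;
    }

The real check additionally bails out when the chunk is already at maximum size or the byte mask is empty, which is what mlx5dr_ste_htbl_may_grow() encapsulates.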
mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn, int num_dest, bool reformat_req, u32 *tbl_id, - u32 *group_id); + u32 *group_id, + bool ignore_flow_level); void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id, u32 group_id); #endif /* _DR_TYPES_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index d5926dd7e972..7e58f4e594b7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -133,6 +133,9 @@ static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct mlx5_flow_table *next_ft) { + if (mlx5_dr_is_fw_table(ft->flags)) + return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft); + return set_miss_action(ns, ft, next_ft); } @@ -487,9 +490,13 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, actions[num_actions++] = term_actions->dest; } else if (num_term_actions > 1) { + bool ignore_flow_level = + !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL); + tmp_action = mlx5dr_action_create_mult_dest_tbl(domain, term_actions, - num_term_actions); + num_term_actions, + ignore_flow_level); if (!tmp_action) { err = -EOPNOTSUPP; goto free_actions; @@ -557,6 +564,9 @@ static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns case MLX5_REFORMAT_TYPE_INSERT_HDR: dr_reformat = DR_ACTION_REFORMAT_TYP_INSERT_HDR; break; + case MLX5_REFORMAT_TYPE_REMOVE_HDR: + dr_reformat = DR_ACTION_REFORMAT_TYP_REMOVE_HDR; + break; default: mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n", params->type); @@ -615,15 +625,6 @@ static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *n mlx5dr_action_destroy(modify_hdr->action.dr_action); } -static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct mlx5_flow_group *group, - int modify_mask, - struct fs_fte *fte) -{ - return -EOPNOTSUPP; -} - static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct fs_fte *fte) @@ -648,6 +649,36 @@ static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns, return 0; } +static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *group, + int modify_mask, + struct fs_fte *fte) +{ + struct fs_fte fte_tmp = {}; + int ret; + + if (mlx5_dr_is_fw_table(ft->flags)) + return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group, modify_mask, fte); + + /* Backup current dr rule details */ + fte_tmp.fs_dr_rule = fte->fs_dr_rule; + memset(&fte->fs_dr_rule, 0, sizeof(struct mlx5_fs_dr_rule)); + + /* First add the new updated rule, then delete the old rule */ + ret = mlx5_cmd_dr_create_fte(ns, ft, group, fte); + if (ret) + goto restore_fte; + + ret = mlx5_cmd_dr_delete_fte(ns, ft, &fte_tmp); + WARN_ONCE(ret, "dr update fte duplicate rule deletion failed\n"); + return ret; + +restore_fte: + fte->fs_dr_rule = fte_tmp.fs_dr_rule; + return ret; +} + static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_root_namespace *peer_ns) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h index 9643ee647f57..d2a937f69784 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h +++ 
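The new mlx5_cmd_dr_update_fte() above replaces the old -EOPNOTSUPP stub with a make-before-break update: back up the current SW-steering rule handle, create the rule with the new contents, and only then delete the backed-up one, restoring the backup if the create fails. The control flow reduced to a compilable sketch (handle type and stubs are placeholders):

    #include <string.h>

    struct dr_rule_handle { void *rule; };
    struct fte_sketch { struct dr_rule_handle fs_dr_rule; };

    /* stand-ins for mlx5_cmd_dr_create_fte()/_delete_fte() */
    static int dr_create_fte(struct fte_sketch *fte) { (void)fte; return 0; }
    static int dr_delete_fte(struct fte_sketch *fte) { (void)fte; return 0; }

    static int dr_update_fte(struct fte_sketch *fte)
    {
        struct fte_sketch fte_tmp = { .fs_dr_rule = fte->fs_dr_rule };
        int ret;

        /* detach the old handle so the create starts from a clean slate */
        memset(&fte->fs_dr_rule, 0, sizeof(fte->fs_dr_rule));

        ret = dr_create_fte(fte);       /* add the updated rule first */
        if (ret) {
            fte->fs_dr_rule = fte_tmp.fs_dr_rule; /* roll back */
            return ret;
        }
        return dr_delete_fte(&fte_tmp); /* then remove the old rule */
    }

Creating first means a transient moment with both rules installed (safe, since they match the same flow); the opposite order would open a window with no rule installed at all.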
b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h @@ -8,12 +8,6 @@ enum { MLX5DR_STE_LU_TYPE_DONT_CARE = 0x0f, }; -enum mlx5dr_ste_entry_type { - MLX5DR_STE_TYPE_TX = 1, - MLX5DR_STE_TYPE_RX = 2, - MLX5DR_STE_TYPE_MODIFY_PKT = 6, -}; - struct mlx5_ifc_ste_general_bits { u8 entry_type[0x4]; u8 reserved_at_4[0x4]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index bbfe101d4e57..c5a8b1601999 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -27,6 +27,7 @@ enum mlx5dr_action_reformat_type { DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2, DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3, DR_ACTION_REFORMAT_TYP_INSERT_HDR, + DR_ACTION_REFORMAT_TYP_REMOVE_HDR, }; struct mlx5dr_match_parameters { @@ -94,7 +95,8 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain, struct mlx5dr_action * mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, struct mlx5dr_action_dest *dests, - u32 num_of_dests); + u32 num_of_dests, + bool ignore_flow_level); struct mlx5dr_action *mlx5dr_action_create_drop(void); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c index 9aac647290f7..0d6858ab511c 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -15,6 +15,7 @@ static void ionic_watchdog_cb(struct timer_list *t) { struct ionic *ionic = from_timer(ionic, t, watchdog_timer); struct ionic_lif *lif = ionic->lif; + struct ionic_deferred_work *work; int hb; mod_timer(&ionic->watchdog_timer, @@ -31,6 +32,18 @@ static void ionic_watchdog_cb(struct timer_list *t) if (hb >= 0 && !test_bit(IONIC_LIF_F_FW_RESET, lif->state)) ionic_link_status_check_request(lif, CAN_NOT_SLEEP); + + if (test_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state)) { + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + netdev_err(lif->netdev, "rxmode change dropped\n"); + return; + } + + work->type = IONIC_DW_TYPE_RX_MODE; + netdev_dbg(lif->netdev, "deferred: rx_mode\n"); + ionic_lif_deferred_enqueue(&lif->deferred, work); + } } void ionic_init_devinfo(struct ionic *ionic) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index f52c47a71f4b..23c9e196a784 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -30,9 +30,6 @@ static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = { */ }; -static void ionic_lif_rx_mode(struct ionic_lif *lif); -static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr); -static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr); static void ionic_link_status_check(struct ionic_lif *lif); static void ionic_lif_handle_fw_down(struct ionic_lif *lif); static void ionic_lif_handle_fw_up(struct ionic_lif *lif); @@ -92,20 +89,21 @@ static void ionic_lif_deferred_work(struct work_struct *work) case IONIC_DW_TYPE_RX_MODE: ionic_lif_rx_mode(lif); break; - case IONIC_DW_TYPE_RX_ADDR_ADD: - ionic_lif_addr_add(lif, w->addr); - break; - case IONIC_DW_TYPE_RX_ADDR_DEL: - ionic_lif_addr_del(lif, w->addr); - break; case IONIC_DW_TYPE_LINK_STATUS: ionic_link_status_check(lif); break; case IONIC_DW_TYPE_LIF_RESET: - if (w->fw_status) + if (w->fw_status) { ionic_lif_handle_fw_up(lif); - else + } else { ionic_lif_handle_fw_down(lif); + + /* Fire off another watchdog to see + * if the FW is 
already back rather than + * waiting another whole cycle + */ + mod_timer(&lif->ionic->watchdog_timer, jiffies + 1); + } break; default: break; @@ -851,10 +849,8 @@ int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif) u64 features; int err; - mutex_lock(&lif->queue_lock); - if (lif->hwstamp_txq) - goto out; + return 0; features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP; @@ -896,9 +892,6 @@ int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif) } } -out: - mutex_unlock(&lif->queue_lock); - return 0; err_qcq_enable: @@ -909,7 +902,6 @@ err_qcq_init: ionic_qcq_free(lif, txq); devm_kfree(lif->ionic->dev, txq); err_qcq_alloc: - mutex_unlock(&lif->queue_lock); return err; } @@ -921,10 +913,8 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif) u64 features; int err; - mutex_lock(&lif->queue_lock); - if (lif->hwstamp_rxq) - goto out; + return 0; features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP; @@ -962,9 +952,6 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif) } } -out: - mutex_unlock(&lif->queue_lock); - return 0; err_qcq_enable: @@ -975,7 +962,6 @@ err_qcq_init: ionic_qcq_free(lif, rxq); devm_kfree(lif->ionic->dev, rxq); err_qcq_alloc: - mutex_unlock(&lif->queue_lock); return err; } @@ -1078,7 +1064,11 @@ static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class) if (err && err != -EEXIST) return err; - return ionic_rx_filter_save(lif, 0, qid, 0, &ctx); + spin_lock_bh(&lif->rx_filters.lock); + err = ionic_rx_filter_save(lif, 0, qid, 0, &ctx, IONIC_FILTER_STATE_SYNCED); + spin_unlock_bh(&lif->rx_filters.lock); + + return err; } int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class) @@ -1251,7 +1241,7 @@ void ionic_get_stats64(struct net_device *netdev, ns->tx_errors = ns->tx_aborted_errors; } -static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) +int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) { struct ionic_admin_ctx ctx = { .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), @@ -1261,27 +1251,83 @@ static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), }, }; + int nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); + bool mc = is_multicast_ether_addr(addr); struct ionic_rx_filter *f; - int err; + int err = 0; + + memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); - /* don't bother if we already have it */ spin_lock_bh(&lif->rx_filters.lock); f = ionic_rx_filter_by_addr(lif, addr); + if (f) { + /* don't bother if we already have it and it is sync'd */ + if (f->state == IONIC_FILTER_STATE_SYNCED) { + spin_unlock_bh(&lif->rx_filters.lock); + return 0; + } + + /* mark preemptively as sync'd to block any parallel attempts */ + f->state = IONIC_FILTER_STATE_SYNCED; + } else { + /* save as SYNCED to catch any DEL requests while processing */ + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_SYNCED); + } spin_unlock_bh(&lif->rx_filters.lock); - if (f) - return 0; + if (err) + return err; netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr); - memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); - err = ionic_adminq_post_wait(lif, &ctx); - if (err && err != -EEXIST) - return err; + /* Don't bother with the write to FW if we know there's no room, + * we can try again on the next sync attempt. 
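The rewritten ionic_lif_addr_add()/addr_del() pair implements a small per-filter state machine so the ndo path, the deferred rx_mode work, and the watchdog-driven sync can race safely: adds are marked SYNCED up front to block parallel attempts, a failed FW write drops back to NEW for retry, and a delete that lands mid-add leaves an OLD record for the next sync cycle. Reduced to its states and transitions (names simplified; the real code holds rx_filters.lock around each step):

    #include <errno.h>

    enum filter_state_sketch {
        FILTER_STATE_NEW,    /* in the driver list, not yet written to FW */
        FILTER_STATE_SYNCED, /* in the driver list and in the FW */
        FILTER_STATE_OLD,    /* delete requested while an add was in flight */
    };

    struct filter {
        enum filter_state_sketch state;
    };

    /* add path: claim the filter before talking to the FW */
    static int filter_claim_for_add(struct filter *f)
    {
        if (f->state == FILTER_STATE_SYNCED)
            return -EEXIST;              /* already there, nothing to do */
        f->state = FILTER_STATE_SYNCED;  /* block parallel add attempts */
        return 0;
    }

    /* add path: the AdminQ write came back */
    static void filter_add_done(struct filter *f, int err)
    {
        /* -ENOSPC is reported to the stack as success: the filter simply
         * stays NEW and the next sync attempt retries when room frees up
         */
        if (err && err != -EEXIST)
            f->state = FILTER_STATE_NEW;
    }

    /* delete path: only never-written NEW filters skip the FW round-trip */
    static int filter_needs_fw_delete(const struct filter *f)
    {
        return f->state != FILTER_STATE_NEW;
    }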
+ */ + if ((lif->nucast + lif->nmcast) >= nfilters) + err = -ENOSPC; + else + err = ionic_adminq_post_wait(lif, &ctx); + + spin_lock_bh(&lif->rx_filters.lock); + if (err && err != -EEXIST) { + /* set the state back to NEW so we can try again later */ + f = ionic_rx_filter_by_addr(lif, addr); + if (f && f->state == IONIC_FILTER_STATE_SYNCED) + f->state = IONIC_FILTER_STATE_NEW; + + spin_unlock_bh(&lif->rx_filters.lock); - return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); + if (err == -ENOSPC) + return 0; + else + return err; + } + + if (mc) + lif->nmcast++; + else + lif->nucast++; + + f = ionic_rx_filter_by_addr(lif, addr); + if (f && f->state == IONIC_FILTER_STATE_OLD) { + /* Someone requested a delete while we were adding + * so update the filter info with the results from the add + * and the data will be there for the delete on the next + * sync cycle. + */ + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_OLD); + } else { + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_SYNCED); + } + + spin_unlock_bh(&lif->rx_filters.lock); + + return err; } -static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) +int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) { struct ionic_admin_ctx ctx = { .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), @@ -1291,6 +1337,7 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) }, }; struct ionic_rx_filter *f; + int state; int err; spin_lock_bh(&lif->rx_filters.lock); @@ -1303,65 +1350,37 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr, f->filter_id); + state = f->state; ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); ionic_rx_filter_free(lif, f); - spin_unlock_bh(&lif->rx_filters.lock); - - err = ionic_adminq_post_wait(lif, &ctx); - if (err && err != -EEXIST) - return err; - return 0; -} + if (is_multicast_ether_addr(addr) && lif->nmcast) + lif->nmcast--; + else if (!is_multicast_ether_addr(addr) && lif->nucast) + lif->nucast--; -static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add) -{ - unsigned int nmfilters; - unsigned int nufilters; + spin_unlock_bh(&lif->rx_filters.lock); - if (add) { - /* Do we have space for this filter? We test the counters - * here before checking the need for deferral so that we - * can return an overflow error to the stack. - */ - nmfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters); - nufilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); - - if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters)) - lif->nmcast++; - else if (!is_multicast_ether_addr(addr) && - lif->nucast < nufilters) - lif->nucast++; - else - return -ENOSPC; - } else { - if (is_multicast_ether_addr(addr) && lif->nmcast) - lif->nmcast--; - else if (!is_multicast_ether_addr(addr) && lif->nucast) - lif->nucast--; + if (state != IONIC_FILTER_STATE_NEW) { + err = ionic_adminq_post_wait(lif, &ctx); + if (err && err != -EEXIST) + return err; } - netdev_dbg(lif->netdev, "rx_filter %s %pM\n", - add ? 
"add" : "del", addr); - if (add) - return ionic_lif_addr_add(lif, addr); - else - return ionic_lif_addr_del(lif, addr); - return 0; } static int ionic_addr_add(struct net_device *netdev, const u8 *addr) { - return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR); + return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR); } static int ionic_addr_del(struct net_device *netdev, const u8 *addr) { - return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR); + return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR); } -static void ionic_lif_rx_mode(struct ionic_lif *lif) +void ionic_lif_rx_mode(struct ionic_lif *lif) { struct net_device *netdev = lif->netdev; unsigned int nfilters; @@ -1382,32 +1401,26 @@ static void ionic_lif_rx_mode(struct ionic_lif *lif) rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0; rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0; - /* sync unicast addresses - * next check to see if we're in an overflow state + /* sync the mac filters */ + ionic_rx_filter_sync(lif); + + /* check for overflow state * if so, we track that we overflowed and enable NIC PROMISC * else if the overflow is set and not needed * we remove our overflow flag and check the netdev flags * to see if we can disable NIC PROMISC */ - __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del); nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); - if (netdev_uc_count(netdev) + 1 > nfilters) { + if ((lif->nucast + lif->nmcast) >= nfilters) { rx_mode |= IONIC_RX_MODE_F_PROMISC; + rx_mode |= IONIC_RX_MODE_F_ALLMULTI; lif->uc_overflow = true; + lif->mc_overflow = true; } else if (lif->uc_overflow) { lif->uc_overflow = false; + lif->mc_overflow = false; if (!(nd_flags & IFF_PROMISC)) rx_mode &= ~IONIC_RX_MODE_F_PROMISC; - } - - /* same for multicast */ - __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del); - nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters); - if (netdev_mc_count(netdev) > nfilters) { - rx_mode |= IONIC_RX_MODE_F_ALLMULTI; - lif->mc_overflow = true; - } else if (lif->mc_overflow) { - lif->mc_overflow = false; if (!(nd_flags & IFF_ALLMULTI)) rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI; } @@ -1450,28 +1463,26 @@ static void ionic_lif_rx_mode(struct ionic_lif *lif) mutex_unlock(&lif->config_lock); } -static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep) +static void ionic_ndo_set_rx_mode(struct net_device *netdev) { struct ionic_lif *lif = netdev_priv(netdev); struct ionic_deferred_work *work; - if (!can_sleep) { - work = kzalloc(sizeof(*work), GFP_ATOMIC); - if (!work) { - netdev_err(lif->netdev, "rxmode change dropped\n"); - return; - } - work->type = IONIC_DW_TYPE_RX_MODE; - netdev_dbg(lif->netdev, "deferred: rx_mode\n"); - ionic_lif_deferred_enqueue(&lif->deferred, work); - } else { - ionic_lif_rx_mode(lif); - } -} + /* Sync the kernel filter list with the driver filter list */ + __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del); + __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del); -static void ionic_ndo_set_rx_mode(struct net_device *netdev) -{ - ionic_set_rx_mode(netdev, CAN_NOT_SLEEP); + /* Shove off the rest of the rxmode work to the work task + * which will include syncing the filters to the firmware. 
+ */ + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + netdev_err(lif->netdev, "rxmode change dropped\n"); + return; + } + work->type = IONIC_DW_TYPE_RX_MODE; + netdev_dbg(lif->netdev, "deferred: rx_mode\n"); + ionic_lif_deferred_enqueue(&lif->deferred, work); } static __le64 ionic_netdev_features_to_nic(netdev_features_t features) @@ -1692,13 +1703,13 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa) if (!is_zero_ether_addr(netdev->dev_addr)) { netdev_info(netdev, "deleting mac addr %pM\n", netdev->dev_addr); - ionic_addr_del(netdev, netdev->dev_addr); + ionic_lif_addr_del(netdev_priv(netdev), netdev->dev_addr); } eth_commit_mac_addr_change(netdev, addr); netdev_info(netdev, "updating mac addr %pM\n", mac); - return ionic_addr_add(netdev, mac); + return ionic_lif_addr_add(netdev_priv(netdev), mac); } static void ionic_stop_queues_reconfig(struct ionic_lif *lif) @@ -1804,7 +1815,12 @@ static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, if (err) return err; - return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); + spin_lock_bh(&lif->rx_filters.lock); + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_SYNCED); + spin_unlock_bh(&lif->rx_filters.lock); + + return err; } static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, @@ -2107,7 +2123,7 @@ static int ionic_txrx_init(struct ionic_lif *lif) if (lif->netdev->features & NETIF_F_RXHASH) ionic_lif_rss_init(lif); - ionic_set_rx_mode(lif->netdev, CAN_SLEEP); + ionic_lif_rx_mode(lif); return 0; @@ -2205,9 +2221,11 @@ static int ionic_open(struct net_device *netdev) if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state)) netdev_info(netdev, "clearing broken state\n"); + mutex_lock(&lif->queue_lock); + err = ionic_txrx_alloc(lif); if (err) - return err; + goto err_unlock; err = ionic_txrx_init(lif); if (err) @@ -2228,12 +2246,21 @@ static int ionic_open(struct net_device *netdev) goto err_txrx_deinit; } + /* If hardware timestamping is enabled, but the queues were freed by + * ionic_stop, those need to be reallocated and initialized, too. 
+ */ + ionic_lif_hwstamp_recreate_queues(lif); + + mutex_unlock(&lif->queue_lock); + return 0; err_txrx_deinit: ionic_txrx_deinit(lif); err_txrx_free: ionic_txrx_free(lif); +err_unlock: + mutex_unlock(&lif->queue_lock); return err; } @@ -2253,9 +2280,11 @@ static int ionic_stop(struct net_device *netdev) if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) return 0; + mutex_lock(&lif->queue_lock); ionic_stop_queues(lif); ionic_txrx_deinit(lif); ionic_txrx_free(lif); + mutex_unlock(&lif->queue_lock); return 0; } @@ -3195,7 +3224,7 @@ static int ionic_station_set(struct ionic_lif *lif) */ if (!ether_addr_equal(ctx.comp.lif_getattr.mac, netdev->dev_addr)) - ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR); + ionic_lif_addr_add(lif, netdev->dev_addr); } else { /* Update the netdev mac with the device's mac */ memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len); @@ -3212,7 +3241,7 @@ static int ionic_station_set(struct ionic_lif *lif) netdev_dbg(lif->netdev, "adding station MAC addr %pM\n", netdev->dev_addr); - ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR); + ionic_lif_addr_add(lif, netdev->dev_addr); return 0; } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index 69ab59fedb6c..4915184f3efb 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -98,8 +98,6 @@ struct ionic_qcq { enum ionic_deferred_work_type { IONIC_DW_TYPE_RX_MODE, - IONIC_DW_TYPE_RX_ADDR_ADD, - IONIC_DW_TYPE_RX_ADDR_DEL, IONIC_DW_TYPE_LINK_STATUS, IONIC_DW_TYPE_LIF_RESET, }; @@ -147,6 +145,7 @@ enum ionic_lif_state_flags { IONIC_LIF_F_SW_DEBUG_STATS, IONIC_LIF_F_UP, IONIC_LIF_F_LINK_CHECK_REQUESTED, + IONIC_LIF_F_FILTER_SYNC_NEEDED, IONIC_LIF_F_FW_RESET, IONIC_LIF_F_SPLIT_INTR, IONIC_LIF_F_BROKEN, @@ -295,6 +294,10 @@ int ionic_lif_alloc(struct ionic *ionic); int ionic_lif_init(struct ionic_lif *lif); void ionic_lif_free(struct ionic_lif *lif); void ionic_lif_deinit(struct ionic_lif *lif); + +int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr); +int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr); + int ionic_lif_register(struct ionic_lif *lif); void ionic_lif_unregister(struct ionic_lif *lif); int ionic_lif_identify(struct ionic *ionic, u8 lif_type, @@ -303,6 +306,7 @@ int ionic_lif_size(struct ionic *ionic); #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) void ionic_lif_hwstamp_replay(struct ionic_lif *lif); +void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif); int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr); int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr); ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 counter); @@ -312,6 +316,7 @@ void ionic_lif_alloc_phc(struct ionic_lif *lif); void ionic_lif_free_phc(struct ionic_lif *lif); #else static inline void ionic_lif_hwstamp_replay(struct ionic_lif *lif) {} +static inline void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif) {} static inline int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr) { @@ -342,6 +347,7 @@ int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class); int ionic_lif_rss_config(struct ionic_lif *lif, u16 types, const u8 *key, const u32 *indir); +void ionic_lif_rx_mode(struct ionic_lif *lif); int ionic_reconfigure_queues(struct ionic_lif *lif, struct ionic_queue_params *qparam); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index 5f1e5b6e85c3..6f07bf509efe 
100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -375,8 +375,8 @@ try_again: * heartbeat check but is still alive and will process this * request, so don't clean the dev_cmd in this case. */ - dev_warn(ionic->dev, "DEVCMD %s (%d) failed - FW halted\n", - ionic_opcode_to_str(opcode), opcode); + dev_dbg(ionic->dev, "DEVCMD %s (%d) failed - FW halted\n", + ionic_opcode_to_str(opcode), opcode); return -ENXIO; } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c index afc45da399d4..eed2db69d708 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c @@ -194,7 +194,9 @@ int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; + mutex_lock(&lif->queue_lock); err = ionic_lif_hwstamp_set_ts_config(lif, &config); + mutex_unlock(&lif->queue_lock); if (err) { netdev_info(lif->netdev, "hwstamp set failed: %d\n", err); return err; @@ -213,11 +215,37 @@ void ionic_lif_hwstamp_replay(struct ionic_lif *lif) if (!lif->phc || !lif->phc->ptp) return; + mutex_lock(&lif->queue_lock); err = ionic_lif_hwstamp_set_ts_config(lif, NULL); + mutex_unlock(&lif->queue_lock); if (err) netdev_info(lif->netdev, "hwstamp replay failed: %d\n", err); } +void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif) +{ + int err; + + if (!lif->phc || !lif->phc->ptp) + return; + + mutex_lock(&lif->phc->config_lock); + + if (lif->phc->ts_config_tx_mode) { + err = ionic_lif_create_hwstamp_txq(lif); + if (err) + netdev_info(lif->netdev, "hwstamp recreate txq failed: %d\n", err); + } + + if (lif->phc->ts_config_rx_filt) { + err = ionic_lif_create_hwstamp_rxq(lif); + if (err) + netdev_info(lif->netdev, "hwstamp recreate rxq failed: %d\n", err); + } + + mutex_unlock(&lif->phc->config_lock); +} + int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr) { struct hwtstamp_config config; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c index d71316d9ded2..7e3a5634c161 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c @@ -4,6 +4,7 @@ #include <linux/netdevice.h> #include <linux/dynamic_debug.h> #include <linux/etherdevice.h> +#include <linux/list.h> #include "ionic.h" #include "ionic_lif.h" @@ -120,11 +121,12 @@ void ionic_rx_filters_deinit(struct ionic_lif *lif) } int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, - u32 hash, struct ionic_admin_ctx *ctx) + u32 hash, struct ionic_admin_ctx *ctx, + enum ionic_filter_state state) { struct device *dev = lif->ionic->dev; struct ionic_rx_filter_add_cmd *ac; - struct ionic_rx_filter *f; + struct ionic_rx_filter *f = NULL; struct hlist_head *head; unsigned int key; @@ -133,9 +135,11 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, switch (le16_to_cpu(ac->match)) { case IONIC_RX_FILTER_MATCH_VLAN: key = le16_to_cpu(ac->vlan.vlan); + f = ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan)); break; case IONIC_RX_FILTER_MATCH_MAC: key = *(u32 *)ac->mac.addr; + f = ionic_rx_filter_by_addr(lif, ac->mac.addr); break; case IONIC_RX_FILTER_MATCH_MAC_VLAN: key = le16_to_cpu(ac->mac_vlan.vlan); @@ -147,12 +151,19 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, return -EINVAL; } - f = 
devm_kzalloc(dev, sizeof(*f), GFP_KERNEL); - if (!f) - return -ENOMEM; + if (f) { + /* remove from current linking so we can refresh it */ + hlist_del(&f->by_id); + hlist_del(&f->by_hash); + } else { + f = devm_kzalloc(dev, sizeof(*f), GFP_ATOMIC); + if (!f) + return -ENOMEM; + } f->flow_id = flow_id; f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id); + f->state = state; f->rxq_index = rxq_index; memcpy(&f->cmd, ac, sizeof(f->cmd)); netdev_dbg(lif->netdev, "rx_filter add filter_id %d\n", f->filter_id); @@ -160,8 +171,6 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, INIT_HLIST_NODE(&f->by_hash); INIT_HLIST_NODE(&f->by_id); - spin_lock_bh(&lif->rx_filters.lock); - key = hash_32(key, IONIC_RX_FILTER_HASH_BITS); head = &lif->rx_filters.by_hash[key]; hlist_add_head(&f->by_hash, head); @@ -170,8 +179,6 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, head = &lif->rx_filters.by_id[key]; hlist_add_head(&f->by_id, head); - spin_unlock_bh(&lif->rx_filters.lock); - return 0; } @@ -231,3 +238,121 @@ struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif) return NULL; } + +int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode) +{ + struct ionic_rx_filter *f; + int err; + + spin_lock_bh(&lif->rx_filters.lock); + + f = ionic_rx_filter_by_addr(lif, addr); + if (mode == ADD_ADDR && !f) { + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_filter_add = { + .opcode = IONIC_CMD_RX_FILTER_ADD, + .lif_index = cpu_to_le16(lif->index), + .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), + }, + }; + + memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_NEW); + if (err) { + spin_unlock_bh(&lif->rx_filters.lock); + return err; + } + + } else if (mode == ADD_ADDR && f) { + if (f->state == IONIC_FILTER_STATE_OLD) + f->state = IONIC_FILTER_STATE_SYNCED; + + } else if (mode == DEL_ADDR && f) { + if (f->state == IONIC_FILTER_STATE_NEW) + ionic_rx_filter_free(lif, f); + else if (f->state == IONIC_FILTER_STATE_SYNCED) + f->state = IONIC_FILTER_STATE_OLD; + } else if (mode == DEL_ADDR && !f) { + spin_unlock_bh(&lif->rx_filters.lock); + return -ENOENT; + } + + spin_unlock_bh(&lif->rx_filters.lock); + + set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state); + + return 0; +} + +struct sync_item { + struct list_head list; + struct ionic_rx_filter f; +}; + +void ionic_rx_filter_sync(struct ionic_lif *lif) +{ + struct device *dev = lif->ionic->dev; + struct list_head sync_add_list; + struct list_head sync_del_list; + struct sync_item *sync_item; + struct ionic_rx_filter *f; + struct hlist_head *head; + struct hlist_node *tmp; + struct sync_item *spos; + unsigned int i; + + INIT_LIST_HEAD(&sync_add_list); + INIT_LIST_HEAD(&sync_del_list); + + clear_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state); + + /* Copy the filters to be added and deleted + * into a separate local list that needs no locking. 
+ */ + spin_lock_bh(&lif->rx_filters.lock); + for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { + head = &lif->rx_filters.by_id[i]; + hlist_for_each_entry_safe(f, tmp, head, by_id) { + if (f->state == IONIC_FILTER_STATE_NEW || + f->state == IONIC_FILTER_STATE_OLD) { + sync_item = devm_kzalloc(dev, sizeof(*sync_item), + GFP_KERNEL); + if (!sync_item) + goto loop_out; + + sync_item->f = *f; + + if (f->state == IONIC_FILTER_STATE_NEW) + list_add(&sync_item->list, &sync_add_list); + else + list_add(&sync_item->list, &sync_del_list); + } + } + } +loop_out: + spin_unlock_bh(&lif->rx_filters.lock); + + /* If the add or delete fails, it won't get marked as sync'd + * and will be tried again in the next sync action. + * Do the deletes first in case we're in an overflow state and + * they can clear room for some new filters + */ + list_for_each_entry_safe(sync_item, spos, &sync_del_list, list) { + (void)ionic_lif_addr_del(lif, sync_item->f.cmd.mac.addr); + + list_del(&sync_item->list); + devm_kfree(dev, sync_item); + } + + list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) { + (void)ionic_lif_addr_add(lif, sync_item->f.cmd.mac.addr); + + if (sync_item->f.state != IONIC_FILTER_STATE_SYNCED) + set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state); + + list_del(&sync_item->list); + devm_kfree(dev, sync_item); + } +} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h index 1ead48be3c83..a66e35f0833b 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h @@ -5,10 +5,18 @@ #define _IONIC_RX_FILTER_H_ #define IONIC_RXQ_INDEX_ANY (0xFFFF) + +enum ionic_filter_state { + IONIC_FILTER_STATE_SYNCED, + IONIC_FILTER_STATE_NEW, + IONIC_FILTER_STATE_OLD, +}; + struct ionic_rx_filter { u32 flow_id; u32 filter_id; u16 rxq_index; + enum ionic_filter_state state; struct ionic_rx_filter_add_cmd cmd; struct hlist_node by_hash; struct hlist_node by_id; @@ -28,9 +36,13 @@ void ionic_rx_filter_replay(struct ionic_lif *lif); int ionic_rx_filters_init(struct ionic_lif *lif); void ionic_rx_filters_deinit(struct ionic_lif *lif); int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, - u32 hash, struct ionic_admin_ctx *ctx); + u32 hash, struct ionic_admin_ctx *ctx, + enum ionic_filter_state state); struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, u16 vid); struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif, const u8 *addr); struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif); +void ionic_rx_filter_sync(struct ionic_lif *lif); +int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode); +int ionic_rx_filters_need_sync(struct ionic_lif *lif); #endif /* _IONIC_RX_FILTER_H_ */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 6871d892eabf..15ef59aa34ff 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -615,7 +615,12 @@ static int qed_enable_msix(struct qed_dev *cdev, rc = cnt; } - if (rc > 0) { + /* For VFs, we should return with an error in case we didn't get the + * exact number of msix vectors as we requested. + * Not doing that will lead to a crash when starting queues for + * this VF. 
+ */ + if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) { /* MSI-x configuration was achieved */ int_params->out.int_mode = QED_INT_MODE_MSIX; int_params->out.num_vectors = rc; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 4877cb88c31a..9837bdb89cd4 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1866,6 +1866,7 @@ static void qede_sync_free_irqs(struct qede_dev *edev) } edev->int_info.used_cnt = 0; + edev->int_info.msix_cnt = 0; } static int qede_req_msix_irqs(struct qede_dev *edev) @@ -2434,7 +2435,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, goto out; err4: qede_sync_free_irqs(edev); - memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info)); err3: qede_napi_disable_remove(edev); err2: diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 1225d27330f8..46a6ff9a782d 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -2669,6 +2669,34 @@ static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp) RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Rdy_to_L23); } +static void rtl_enable_exit_l1(struct rtl8169_private *tp) +{ + /* Bits control which events trigger ASPM L1 exit: + * Bit 12: rxdv + * Bit 11: ltr_msg + * Bit 10: txdma_poll + * Bit 9: xadm + * Bit 8: pktavi + * Bit 7: txpla + */ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36: + rtl_eri_set_bits(tp, 0xd4, 0x1f00); + break; + case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38: + rtl_eri_set_bits(tp, 0xd4, 0x0c00); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53: + rtl_eri_set_bits(tp, 0xd4, 0x1f80); + break; + case RTL_GIGA_MAC_VER_60 ... 
RTL_GIGA_MAC_VER_63: + r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80); + break; + default: + break; + } +} + static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) { /* Don't enable ASPM in the chip if OS can't control ASPM */ @@ -2857,7 +2885,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000); rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); - rtl_eri_set_bits(tp, 0x0d4, 0x1f00); rtl_eri_set_bits(tp, 0x1d0, BIT(1)); rtl_reset_packet_filter(tp); rtl_eri_set_bits(tp, 0x1b0, BIT(4)); @@ -2914,8 +2941,6 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) rtl_hw_start_8168f(tp); rtl_ephy_init(tp, e_info_8168f_1); - - rtl_eri_set_bits(tp, 0x0d4, 0x1f00); } static void rtl_hw_start_8411(struct rtl8169_private *tp) @@ -2932,8 +2957,6 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp) rtl_pcie_state_l2l3_disable(tp); rtl_ephy_init(tp, e_info_8168f_1); - - rtl_eri_set_bits(tp, 0x0d4, 0x0c00); } static void rtl_hw_start_8168g(struct rtl8169_private *tp) @@ -2950,7 +2973,6 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp) rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); - rtl_eri_set_bits(tp, 0x0d4, 0x1f80); rtl8168_config_eee_mac(tp); @@ -3181,7 +3203,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) rtl_reset_packet_filter(tp); - rtl_eri_set_bits(tp, 0xd4, 0x1f00); rtl_eri_set_bits(tp, 0xdc, 0x001c); rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); @@ -3235,8 +3256,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp) rtl_reset_packet_filter(tp); - rtl_eri_set_bits(tp, 0xd4, 0x1f80); - rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); @@ -3338,7 +3357,7 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp) rtl_reset_packet_filter(tp); - rtl_eri_set_bits(tp, 0xd4, 0x1f90); + rtl_eri_set_bits(tp, 0xd4, 0x0010); rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); @@ -3569,7 +3588,6 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp) r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001); r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403); r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068); - r8168_mac_ocp_modify(tp, 0xc0ac, 0x0080, 0x1f00); r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f); r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000); @@ -3792,6 +3810,7 @@ static void rtl_hw_start(struct rtl8169_private *tp) else rtl_hw_start_8168(tp); + rtl_enable_exit_l1(tp); rtl_set_rx_max_size(tp); rtl_set_rx_tx_desc_registers(tp); rtl_lock_config_regs(tp); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 280ac0129572..ed817011a94a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -21,7 +21,6 @@ #include <linux/delay.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> -#include <linux/pm_runtime.h> #include "stmmac_platform.h" @@ -1529,9 +1528,6 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv) return ret; } - pm_runtime_enable(dev); - pm_runtime_get_sync(dev); - if (bsp_priv->integrated_phy) rk_gmac_integrated_phy_powerup(bsp_priv); @@ -1540,14 +1536,9 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv) static void rk_gmac_powerdown(struct rk_priv_data *gmac) { - struct device *dev = &gmac->pdev->dev; - if (gmac->integrated_phy) 
rk_gmac_integrated_phy_powerdown(gmac); - pm_runtime_put_sync(dev); - pm_runtime_disable(dev); - phy_power_on(gmac, false); gmac_clk_enable(gmac, false); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index fcdb1d20389b..43eead726886 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -339,9 +339,9 @@ static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv) static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv) { if (stmmac_xdp_is_enabled(priv)) - return XDP_PACKET_HEADROOM + NET_IP_ALIGN; + return XDP_PACKET_HEADROOM; - return NET_SKB_PAD + NET_IP_ALIGN; + return 0; } void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 7b3fcf558603..ed0cd3920171 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -4915,6 +4915,10 @@ read_again: prefetch(np); + /* Ensure a valid XSK buffer before proceed */ + if (!buf->xdp) + break; + if (priv->extend_desc) stmmac_rx_extended_status(priv, &priv->dev->stats, &priv->xstats, @@ -4935,10 +4939,6 @@ read_again: continue; } - /* Ensure a valid XSK buffer before proceed */ - if (!buf->xdp) - break; - /* XSK pool expects RX frame 1:1 mapped to XSK buffer */ if (likely(status & rx_not_ls)) { xsk_buff_free(buf->xdp); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 4f3b6437b114..8160087ee92f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -884,11 +884,13 @@ static int tc_setup_taprio(struct stmmac_priv *priv, return 0; disable: - mutex_lock(&priv->plat->est->lock); - priv->plat->est->enable = false; - stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, - priv->plat->clk_ptp_rate); - mutex_unlock(&priv->plat->est->lock); + if (priv->plat->est) { + mutex_lock(&priv->plat->est->lock); + priv->plat->est->enable = false; + stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, + priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); + } priv->plat->fpe_cfg->enable = false; stmmac_fpe_configure(priv, priv->ioaddr, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c index 105821b53020..2a616c6f7cd0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c @@ -34,18 +34,18 @@ static int stmmac_xdp_enable_pool(struct stmmac_priv *priv, need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv); if (need_update) { - stmmac_disable_rx_queue(priv, queue); - stmmac_disable_tx_queue(priv, queue); napi_disable(&ch->rx_napi); napi_disable(&ch->tx_napi); + stmmac_disable_rx_queue(priv, queue); + stmmac_disable_tx_queue(priv, queue); } set_bit(queue, priv->af_xdp_zc_qps); if (need_update) { - napi_enable(&ch->rxtx_napi); stmmac_enable_rx_queue(priv, queue); stmmac_enable_tx_queue(priv, queue); + napi_enable(&ch->rxtx_napi); err = stmmac_xsk_wakeup(priv->dev, queue, XDP_WAKEUP_RX); if (err) @@ -72,10 +72,10 @@ static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue) need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv); if (need_update) { + napi_disable(&ch->rxtx_napi); stmmac_disable_rx_queue(priv, queue); 
stmmac_disable_tx_queue(priv, queue); synchronize_rcu(); - napi_disable(&ch->rxtx_napi); } xsk_pool_dma_unmap(pool, STMMAC_RX_DMA_ATTR); @@ -83,10 +83,10 @@ static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue) clear_bit(queue, priv->af_xdp_zc_qps); if (need_update) { - napi_enable(&ch->rx_napi); - napi_enable(&ch->tx_napi); stmmac_enable_rx_queue(priv, queue); stmmac_enable_tx_queue(priv, queue); + napi_enable(&ch->rx_napi); + napi_enable(&ch->tx_napi); } return 0; diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 1501e8906be4..a68a01d1b2b1 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -9722,7 +9722,6 @@ static int niu_pci_init_one(struct pci_dev *pdev, struct net_device *dev; struct niu *np; int err; - u64 dma_mask; niu_driver_version(); @@ -9777,18 +9776,11 @@ static int niu_pci_init_one(struct pci_dev *pdev, PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_RELAX_EN); - dma_mask = DMA_BIT_MASK(44); - err = pci_set_dma_mask(pdev, dma_mask); - if (!err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); + if (!err) dev->features |= NETIF_F_HIGHDMA; - err = pci_set_consistent_dma_mask(pdev, dma_mask); - if (err) { - dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n"); - goto err_out_release_parent; - } - } if (err) { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); goto err_out_release_parent; diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 087f0af56c50..66d4e024d11e 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -354,9 +354,10 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card) descr = card->rx_chain.head; do { if (descr->skb) { - pci_unmap_single(card->pdev, descr->hwdescr->buf_addr, + dma_unmap_single(&card->pdev->dev, + descr->hwdescr->buf_addr, SPIDER_NET_MAX_FRAME, - PCI_DMA_BIDIRECTIONAL); + DMA_BIDIRECTIONAL); dev_kfree_skb(descr->skb); descr->skb = NULL; } @@ -411,9 +412,9 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, if (offset) skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); /* iommu-map the skb */ - buf = pci_map_single(card->pdev, descr->skb->data, - SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(card->pdev, buf)) { + buf = dma_map_single(&card->pdev->dev, descr->skb->data, + SPIDER_NET_MAX_FRAME, DMA_FROM_DEVICE); + if (dma_mapping_error(&card->pdev->dev, buf)) { dev_kfree_skb_any(descr->skb); descr->skb = NULL; if (netif_msg_rx_err(card) && net_ratelimit()) @@ -653,8 +654,9 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, dma_addr_t buf; unsigned long flags; - buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(card->pdev, buf)) { + buf = dma_map_single(&card->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&card->pdev->dev, buf)) { if (netif_msg_tx_err(card) && net_ratelimit()) dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). 
" "Dropping packet\n", skb->data, skb->len); @@ -666,7 +668,8 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, descr = card->tx_chain.head; if (descr->next == chain->tail->prev) { spin_unlock_irqrestore(&chain->lock, flags); - pci_unmap_single(card->pdev, buf, skb->len, PCI_DMA_TODEVICE); + dma_unmap_single(&card->pdev->dev, buf, skb->len, + DMA_TO_DEVICE); return -ENOMEM; } hwdescr = descr->hwdescr; @@ -822,8 +825,8 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal) /* unmap the skb */ if (skb) { - pci_unmap_single(card->pdev, buf_addr, skb->len, - PCI_DMA_TODEVICE); + dma_unmap_single(&card->pdev->dev, buf_addr, skb->len, + DMA_TO_DEVICE); dev_consume_skb_any(skb); } } @@ -1165,8 +1168,8 @@ spider_net_decode_one_descr(struct spider_net_card *card) /* unmap descriptor */ hw_buf_addr = hwdescr->buf_addr; hwdescr->buf_addr = 0xffffffff; - pci_unmap_single(card->pdev, hw_buf_addr, - SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); + dma_unmap_single(&card->pdev->dev, hw_buf_addr, SPIDER_NET_MAX_FRAME, + DMA_FROM_DEVICE); if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) || (status == SPIDER_NET_DESCR_PROTECTION_ERROR) || diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig index 468ffe3d1707..0e878fa6e322 100644 --- a/drivers/net/ethernet/xscale/Kconfig +++ b/drivers/net/ethernet/xscale/Kconfig @@ -29,9 +29,9 @@ config IXP4XX_ETH on IXP4xx processor. config PTP_1588_CLOCK_IXP46X - tristate "Intel IXP46x as PTP clock" + bool "Intel IXP46x as PTP clock" depends on IXP4XX_ETH - depends on PTP_1588_CLOCK + depends on PTP_1588_CLOCK=y || PTP_1588_CLOCK=IXP4XX_ETH default y help This driver adds support for using the IXP46X as a PTP diff --git a/drivers/net/ethernet/xscale/Makefile b/drivers/net/ethernet/xscale/Makefile index 607f91b1e878..e935f2a2979f 100644 --- a/drivers/net/ethernet/xscale/Makefile +++ b/drivers/net/ethernet/xscale/Makefile @@ -3,5 +3,9 @@ # Makefile for the Intel XScale IXP device drivers. 
# +# Keep this link order to avoid deferred probing +ifdef CONFIG_PTP_1588_CLOCK_IXP46X +obj-$(CONFIG_IXP4XX_ETH) += ptp_ixp46x.o +endif + obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o -obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o diff --git a/drivers/net/ethernet/xscale/ixp46x_ts.h b/drivers/net/ethernet/xscale/ixp46x_ts.h index d792130e27b0..ee9b93ded20a 100644 --- a/drivers/net/ethernet/xscale/ixp46x_ts.h +++ b/drivers/net/ethernet/xscale/ixp46x_ts.h @@ -62,7 +62,16 @@ struct ixp46x_ts_regs { #define TX_SNAPSHOT_LOCKED (1<<0) #define RX_SNAPSHOT_LOCKED (1<<1) -/* The ptp_ixp46x module will set this variable */ -extern int ixp46x_phc_index; +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK_IXP46X) +int ixp46x_ptp_find(struct ixp46x_ts_regs *__iomem *regs, int *phc_index); +#else +static inline int ixp46x_ptp_find(struct ixp46x_ts_regs *__iomem *regs, int *phc_index) +{ + *regs = NULL; + *phc_index = -1; + + return -ENODEV; +} +#endif #endif diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index ff50305d6e13..931494cc1c39 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -37,7 +37,6 @@ #include <linux/module.h> #include <linux/soc/ixp4xx/npe.h> #include <linux/soc/ixp4xx/qmgr.h> -#include <mach/hardware.h> #include <linux/soc/ixp4xx/cpu.h> #include "ixp46x_ts.h" @@ -169,13 +168,15 @@ struct eth_regs { struct port { struct eth_regs __iomem *regs; + struct ixp46x_ts_regs __iomem *timesync_regs; + int phc_index; struct npe *npe; struct net_device *netdev; struct napi_struct napi; struct eth_plat_info *plat; buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS]; struct desc *desc_tab; /* coherent */ - u32 desc_tab_phys; + dma_addr_t desc_tab_phys; int id; /* logical port ID */ int speed, duplex; u8 firmware[4]; @@ -295,7 +296,7 @@ static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb) ch = PORT2CHANNEL(port); - regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; + regs = port->timesync_regs; val = __raw_readl(®s->channel[ch].ch_event); @@ -340,7 +341,7 @@ static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb) ch = PORT2CHANNEL(port); - regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; + regs = port->timesync_regs; /* * This really stinks, but we have to poll for the Tx time stamp. 
@@ -375,6 +376,7 @@ static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) struct hwtstamp_config cfg; struct ixp46x_ts_regs *regs; struct port *port = netdev_priv(netdev); + int ret; int ch; if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) @@ -383,8 +385,12 @@ static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) if (cfg.flags) /* reserved for future extensions */ return -EINVAL; + ret = ixp46x_ptp_find(&port->timesync_regs, &port->phc_index); + if (ret) + return ret; + ch = PORT2CHANNEL(port); - regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; + regs = port->timesync_regs; if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) return -ERANGE; @@ -850,14 +856,14 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev) bytes = len; mem = skb->data; #else - offset = (int)skb->data & 3; /* keep 32-bit alignment */ + offset = (uintptr_t)skb->data & 3; /* keep 32-bit alignment */ bytes = ALIGN(offset + len, 4); if (!(mem = kmalloc(bytes, GFP_ATOMIC))) { dev_kfree_skb(skb); dev->stats.tx_dropped++; return NETDEV_TX_OK; } - memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4); + memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4); #endif phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE); @@ -988,25 +994,27 @@ static void ixp4xx_get_drvinfo(struct net_device *dev, strlcpy(info->bus_info, "internal", sizeof(info->bus_info)); } -int ixp46x_phc_index = -1; -EXPORT_SYMBOL_GPL(ixp46x_phc_index); - static int ixp4xx_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { - if (!cpu_is_ixp46x()) { + struct port *port = netdev_priv(dev); + + if (port->phc_index < 0) + ixp46x_ptp_find(&port->timesync_regs, &port->phc_index); + + info->phc_index = port->phc_index; + + if (info->phc_index < 0) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE; - info->phc_index = -1; return 0; } info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->phc_index = ixp46x_phc_index; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); @@ -1481,6 +1489,7 @@ static int ixp4xx_eth_probe(struct platform_device *pdev) port = netdev_priv(ndev); port->netdev = ndev; port->id = plat->npe; + port->phc_index = -1; /* Get the port resource and remap */ port->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); diff --git a/drivers/net/ethernet/xscale/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c index a6fb88fd42f7..ecece21315c3 100644 --- a/drivers/net/ethernet/xscale/ptp_ixp46x.c +++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c @@ -5,14 +5,16 @@ * Copyright (C) 2010 OMICRON electronics GmbH */ #include <linux/device.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> #include <linux/err.h> -#include <linux/gpio.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/ptp_clock_kernel.h> +#include <linux/platform_device.h> #include <linux/soc/ixp4xx/cpu.h> #include <linux/module.h> #include <mach/ixp4xx-regs.h> @@ -21,10 +23,6 @@ #define DRIVER "ptp_ixp46x" #define N_EXT_TS 2 -#define MASTER_GPIO 8 -#define MASTER_IRQ 25 -#define SLAVE_GPIO 7 -#define SLAVE_IRQ 24 struct ixp_clock { struct ixp46x_ts_regs *regs; @@ -32,9 +30,11 @@ struct ixp_clock { struct ptp_clock_info caps; int exts0_enabled; int exts1_enabled; + int slave_irq; + int master_irq; }; 
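The ptp_ixp46x rework here converts the clock from a hardwired module_init() setup into a platform driver: the register base and the master/slave IRQs now arrive as platform resources described by the intel,ixp46x-ptp-timer binding instead of fixed GPIO and IRQ numbers. A minimal sketch of that probe-side pattern, with hypothetical foo_* names standing in for the driver specifics:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t foo_isr(int irq, void *priv)
{
        /* acknowledge the hardware event here */
        return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
        int irq, ret;

        /* index 0 = "master" per the binding's interrupt-names */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        /* devm-managed: released automatically on unbind */
        ret = devm_request_irq(&pdev->dev, irq, foo_isr, 0, "foo", NULL);
        if (ret)
                return ret;

        return 0;
}

The falling-edge trigger formerly configured with irq_set_irq_type() now comes from the IRQ_TYPE_EDGE_FALLING flags in the device tree interrupts property, so the driver no longer encodes board wiring.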
-DEFINE_SPINLOCK(register_lock); +static DEFINE_SPINLOCK(register_lock); /* * Register access functions @@ -243,53 +243,38 @@ static const struct ptp_clock_info ptp_ixp_caps = { static struct ixp_clock ixp_clock; -static int setup_interrupt(int gpio) +int ixp46x_ptp_find(struct ixp46x_ts_regs *__iomem *regs, int *phc_index) { - int irq; - int err; - - err = gpio_request(gpio, "ixp4-ptp"); - if (err) - return err; - - err = gpio_direction_input(gpio); - if (err) - return err; - - irq = gpio_to_irq(gpio); - if (irq < 0) - return irq; + *regs = ixp_clock.regs; + *phc_index = ptp_clock_index(ixp_clock.ptp_clock); - err = irq_set_irq_type(irq, IRQF_TRIGGER_FALLING); - if (err) { - pr_err("cannot set trigger type for irq %d\n", irq); - return err; - } - - err = request_irq(irq, isr, 0, DRIVER, &ixp_clock); - if (err) { - pr_err("request_irq failed for irq %d\n", irq); - return err; - } + if (!ixp_clock.ptp_clock) + return -EPROBE_DEFER; - return irq; + return 0; } +EXPORT_SYMBOL_GPL(ixp46x_ptp_find); -static void __exit ptp_ixp_exit(void) +/* Called from the registered devm action */ +static void ptp_ixp_unregister_action(void *d) { - free_irq(MASTER_IRQ, &ixp_clock); - free_irq(SLAVE_IRQ, &ixp_clock); - ixp46x_phc_index = -1; - ptp_clock_unregister(ixp_clock.ptp_clock); + struct ptp_clock *ptp_clock = d; + + ptp_clock_unregister(ptp_clock); + ixp_clock.ptp_clock = NULL; } -static int __init ptp_ixp_init(void) +static int ptp_ixp_probe(struct platform_device *pdev) { - if (!cpu_is_ixp46x()) - return -ENODEV; + struct device *dev = &pdev->dev; + int ret; - ixp_clock.regs = - (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; + ixp_clock.regs = devm_platform_ioremap_resource(pdev, 0); + ixp_clock.master_irq = platform_get_irq(pdev, 0); + ixp_clock.slave_irq = platform_get_irq(pdev, 1); + if (IS_ERR(ixp_clock.regs) || + !ixp_clock.master_irq || !ixp_clock.slave_irq) + return -ENXIO; ixp_clock.caps = ptp_ixp_caps; @@ -298,32 +283,51 @@ static int __init ptp_ixp_init(void) if (IS_ERR(ixp_clock.ptp_clock)) return PTR_ERR(ixp_clock.ptp_clock); - ixp46x_phc_index = ptp_clock_index(ixp_clock.ptp_clock); + ret = devm_add_action_or_reset(dev, ptp_ixp_unregister_action, + ixp_clock.ptp_clock); + if (ret) { + dev_err(dev, "failed to install clock removal handler\n"); + return ret; + } __raw_writel(DEFAULT_ADDEND, &ixp_clock.regs->addend); __raw_writel(1, &ixp_clock.regs->trgt_lo); __raw_writel(0, &ixp_clock.regs->trgt_hi); __raw_writel(TTIPEND, &ixp_clock.regs->event); - if (MASTER_IRQ != setup_interrupt(MASTER_GPIO)) { - pr_err("failed to setup gpio %d as irq\n", MASTER_GPIO); - goto no_master; - } - if (SLAVE_IRQ != setup_interrupt(SLAVE_GPIO)) { - pr_err("failed to setup gpio %d as irq\n", SLAVE_GPIO); - goto no_slave; - } + ret = devm_request_irq(dev, ixp_clock.master_irq, isr, + 0, DRIVER, &ixp_clock); + if (ret) + return dev_err_probe(dev, ret, + "request_irq failed for irq %d\n", + ixp_clock.master_irq); + + ret = devm_request_irq(dev, ixp_clock.slave_irq, isr, + 0, DRIVER, &ixp_clock); + if (ret) + return dev_err_probe(dev, ret, + "request_irq failed for irq %d\n", + ixp_clock.slave_irq); return 0; -no_slave: - free_irq(MASTER_IRQ, &ixp_clock); -no_master: - ptp_clock_unregister(ixp_clock.ptp_clock); - return -ENODEV; } -module_init(ptp_ixp_init); -module_exit(ptp_ixp_exit); +static const struct of_device_id ptp_ixp_match[] = { + { + .compatible = "intel,ixp46x-ptp-timer", + }, + { }, +}; + +static struct platform_driver ptp_ixp_driver = { + .driver = { + .name = "ptp-ixp46x", + 
.of_match_table = ptp_ixp_match, + .suppress_bind_attrs = true, + }, + .probe = ptp_ixp_probe, +}; +module_platform_driver(ptp_ixp_driver); MODULE_AUTHOR("Richard Cochran <[email protected]>"); MODULE_DESCRIPTION("PTP clock using the IXP46X timer"); diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c index f62e98fada1a..c5cb421f9890 100644 --- a/drivers/net/fddi/skfp/skfddi.c +++ b/drivers/net/fddi/skfp/skfddi.c @@ -1174,8 +1174,8 @@ static void send_queued_packets(struct s_smc *smc) txd = (struct s_smt_fp_txd *) HWM_GET_CURR_TXD(smc, queue); - dma_address = pci_map_single(&bp->pdev, skb->data, - skb->len, PCI_DMA_TODEVICE); + dma_address = dma_map_single(&(&bp->pdev)->dev, skb->data, + skb->len, DMA_TO_DEVICE); if (frame_status & LAN_TX) { txd->txd_os.skb = skb; // save skb txd->txd_os.dma_addr = dma_address; // save dma mapping @@ -1184,8 +1184,8 @@ static void send_queued_packets(struct s_smc *smc) frame_status | FIRST_FRAG | LAST_FRAG | EN_IRQ_EOF); if (!(frame_status & LAN_TX)) { // local only frame - pci_unmap_single(&bp->pdev, dma_address, - skb->len, PCI_DMA_TODEVICE); + dma_unmap_single(&(&bp->pdev)->dev, dma_address, + skb->len, DMA_TO_DEVICE); dev_kfree_skb_irq(skb); } spin_unlock_irqrestore(&bp->DriverLock, Flags); @@ -1467,8 +1467,9 @@ void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr, int flag) if (r->rxd_os.skb && r->rxd_os.dma_addr) { int MaxFrameSize = bp->MaxFrameSize; - pci_unmap_single(&bp->pdev, r->rxd_os.dma_addr, - MaxFrameSize, PCI_DMA_FROMDEVICE); + dma_unmap_single(&(&bp->pdev)->dev, + r->rxd_os.dma_addr, MaxFrameSize, + DMA_FROM_DEVICE); r->rxd_os.dma_addr = 0; } } @@ -1503,8 +1504,8 @@ void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd) txd->txd_os.skb = NULL; // release the DMA mapping - pci_unmap_single(&smc->os.pdev, txd->txd_os.dma_addr, - skb->len, PCI_DMA_TODEVICE); + dma_unmap_single(&(&smc->os.pdev)->dev, txd->txd_os.dma_addr, + skb->len, DMA_TO_DEVICE); txd->txd_os.dma_addr = 0; smc->os.MacStat.gen.tx_packets++; // Count transmitted packets. 
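The skfp conversion above is one instance of the tree-wide move from the legacy PCI DMA wrappers to the generic DMA API; the niu hunk earlier and the iwlegacy hunks later in this series follow the identical recipe. A hedged sketch of the substitution, with foo_map() as a made-up illustration:

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

static int foo_map(struct pci_dev *pdev, struct sk_buff *skb,
                   dma_addr_t *out)
{
        dma_addr_t addr;

        /* was: pci_map_single(pdev, ..., PCI_DMA_TODEVICE) */
        addr = dma_map_single(&pdev->dev, skb->data, skb->len,
                              DMA_TO_DEVICE);

        /* was: pci_dma_mapping_error(pdev, addr) */
        if (dma_mapping_error(&pdev->dev, addr))
                return -ENOMEM;

        *out = addr;
        return 0;
}

The mask setup collapses the same way: the pci_set_dma_mask()/pci_set_consistent_dma_mask() pair becomes a single dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(n)) call, as the niu hunk above shows.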
@@ -1707,10 +1708,9 @@ void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd, skb_reserve(skb, 3); skb_put(skb, MaxFrameSize); v_addr = skb->data; - b_addr = pci_map_single(&smc->os.pdev, - v_addr, - MaxFrameSize, - PCI_DMA_FROMDEVICE); + b_addr = dma_map_single(&(&smc->os.pdev)->dev, + v_addr, MaxFrameSize, + DMA_FROM_DEVICE); rxd->rxd_os.dma_addr = b_addr; } else { // no skb available, use local buffer @@ -1723,10 +1723,8 @@ void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd, // we use skb from old rxd rxd->rxd_os.skb = skb; v_addr = skb->data; - b_addr = pci_map_single(&smc->os.pdev, - v_addr, - MaxFrameSize, - PCI_DMA_FROMDEVICE); + b_addr = dma_map_single(&(&smc->os.pdev)->dev, v_addr, + MaxFrameSize, DMA_FROM_DEVICE); rxd->rxd_os.dma_addr = b_addr; } hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize, @@ -1778,10 +1776,8 @@ void mac_drv_fill_rxd(struct s_smc *smc) skb_reserve(skb, 3); skb_put(skb, MaxFrameSize); v_addr = skb->data; - b_addr = pci_map_single(&smc->os.pdev, - v_addr, - MaxFrameSize, - PCI_DMA_FROMDEVICE); + b_addr = dma_map_single(&(&smc->os.pdev)->dev, v_addr, + MaxFrameSize, DMA_FROM_DEVICE); rxd->rxd_os.dma_addr = b_addr; } else { // no skb available, use local buffer @@ -1838,8 +1834,9 @@ void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd, skfddi_priv *bp = &smc->os; int MaxFrameSize = bp->MaxFrameSize; - pci_unmap_single(&bp->pdev, rxd->rxd_os.dma_addr, - MaxFrameSize, PCI_DMA_FROMDEVICE); + dma_unmap_single(&(&bp->pdev)->dev, + rxd->rxd_os.dma_addr, MaxFrameSize, + DMA_FROM_DEVICE); dev_kfree_skb(skb); rxd->rxd_os.skb = NULL; diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c index 975f7f9bdf4c..d127eb6e9257 100644 --- a/drivers/net/mhi_net.c +++ b/drivers/net/mhi_net.c @@ -319,7 +319,7 @@ static int mhi_net_newlink(struct mhi_device *mhi_dev, struct net_device *ndev) u64_stats_init(&mhi_netdev->stats.tx_syncp); /* Start MHI channels */ - err = mhi_prepare_for_transfer(mhi_dev, 0); + err = mhi_prepare_for_transfer(mhi_dev); if (err) goto out_err; diff --git a/drivers/net/phy/mediatek-ge.c b/drivers/net/phy/mediatek-ge.c index 11ff335d6228..b7a5ae20edd5 100644 --- a/drivers/net/phy/mediatek-ge.c +++ b/drivers/net/phy/mediatek-ge.c @@ -81,6 +81,8 @@ static struct phy_driver mtk_gephy_driver[] = { */ .config_intr = genphy_no_config_intr, .handle_interrupt = genphy_handle_interrupt_no_ack, + .suspend = genphy_suspend, + .resume = genphy_resume, .read_page = mtk_gephy_read_page, .write_page = mtk_gephy_write_page, }, @@ -93,6 +95,8 @@ static struct phy_driver mtk_gephy_driver[] = { */ .config_intr = genphy_no_config_intr, .handle_interrupt = genphy_handle_interrupt_no_ack, + .suspend = genphy_suspend, + .resume = genphy_resume, .read_page = mtk_gephy_read_page, .write_page = mtk_gephy_write_page, }, diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h index e1994a246122..2a1e31defe71 100644 --- a/drivers/net/usb/asix.h +++ b/drivers/net/usb/asix.h @@ -184,6 +184,7 @@ struct asix_common_private { struct phy_device *phydev; u16 phy_addr; char phy_name[20]; + bool embd_phy; }; extern const struct driver_info ax88172a_info; diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index cb01897c7a5d..30821f6a6d7a 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -354,24 +354,23 @@ out: static int ax88772_hw_reset(struct usbnet *dev, int in_pm) { struct asix_data *data = (struct asix_data *)&dev->data; - int ret, embd_phy; + struct 
asix_common_private *priv = dev->driver_priv; u16 rx_ctl; + int ret; ret = asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_2 | AX_GPIO_GPO2EN, 5, in_pm); if (ret < 0) goto out; - embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0); - - ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, + ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, priv->embd_phy, 0, 0, NULL, in_pm); if (ret < 0) { netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret); goto out; } - if (embd_phy) { + if (priv->embd_phy) { ret = asix_sw_reset(dev, AX_SWRESET_IPPD, in_pm); if (ret < 0) goto out; @@ -449,17 +448,16 @@ out: static int ax88772a_hw_reset(struct usbnet *dev, int in_pm) { struct asix_data *data = (struct asix_data *)&dev->data; - int ret, embd_phy; + struct asix_common_private *priv = dev->driver_priv; u16 rx_ctl, phy14h, phy15h, phy16h; u8 chipcode = 0; + int ret; ret = asix_write_gpio(dev, AX_GPIO_RSE, 5, in_pm); if (ret < 0) goto out; - embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0); - - ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy | + ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, priv->embd_phy | AX_PHYSEL_SSEN, 0, 0, NULL, in_pm); if (ret < 0) { netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret); @@ -683,12 +681,6 @@ static int ax88772_init_phy(struct usbnet *dev) struct asix_common_private *priv = dev->driver_priv; int ret; - ret = asix_read_phy_addr(dev, true); - if (ret < 0) - return ret; - - priv->phy_addr = ret; - snprintf(priv->phy_name, sizeof(priv->phy_name), PHY_ID_FMT, priv->mdio->id, priv->phy_addr); @@ -715,6 +707,12 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) struct asix_common_private *priv; int ret, i; + priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + dev->driver_priv = priv; + usbnet_get_endpoints(dev, intf); /* Maybe the boot loader passed the MAC address via device tree */ @@ -750,6 +748,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */ dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */ + ret = asix_read_phy_addr(dev, true); + if (ret < 0) + return ret; + + priv->phy_addr = ret; + priv->embd_phy = ((priv->phy_addr & 0x1f) == 0x10); + asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0); chipcode &= AX_CHIPCODE_MASK; @@ -768,12 +773,6 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) dev->rx_urb_size = 2048; } - priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - dev->driver_priv = priv; - priv->presvd_phy_bmcr = 0; priv->presvd_phy_advertise = 0; if (chipcode == AX_AX88772_CHIPCODE) { @@ -812,6 +811,12 @@ static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf) asix_rx_fixup_common_free(dev->driver_priv); } +static void ax88178_unbind(struct usbnet *dev, struct usb_interface *intf) +{ + asix_rx_fixup_common_free(dev->driver_priv); + kfree(dev->driver_priv); +} + static const struct ethtool_ops ax88178_ethtool_ops = { .get_drvinfo = asix_get_drvinfo, .get_link = asix_get_link, @@ -1221,7 +1226,7 @@ static const struct driver_info ax88772b_info = { static const struct driver_info ax88178_info = { .description = "ASIX AX88178 USB 2.0 Ethernet", .bind = ax88178_bind, - .unbind = ax88772_unbind, + .unbind = ax88178_unbind, .status = asix_status, .link_reset = ax88178_link_reset, .reset = ax88178_reset, diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 
36dafcb3d04a..6a92a3fef75e 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -446,7 +446,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb) write_mii_word(pegasus, 0, 0x1b, &auxmode); } - return 0; + return ret; fail: netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__); return ret; @@ -835,7 +835,7 @@ static int pegasus_open(struct net_device *net) if (!pegasus->rx_skb) goto exit; - res = set_registers(pegasus, EthID, 6, net->dev_addr); + set_registers(pegasus, EthID, 6, net->dev_addr); usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, usb_rcvbulkpipe(pegasus->usb, 1), diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index b4ae2ac8a249..271d38c1d9f8 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -528,19 +528,20 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, * functions to perfectly solve these three problems at the same time. */ #define virtnet_xdp_get_sq(vi) ({ \ + int cpu = smp_processor_id(); \ struct netdev_queue *txq; \ typeof(vi) v = (vi); \ unsigned int qp; \ \ if (v->curr_queue_pairs > nr_cpu_ids) { \ qp = v->curr_queue_pairs - v->xdp_queue_pairs; \ - qp += smp_processor_id(); \ + qp += cpu; \ txq = netdev_get_tx_queue(v->dev, qp); \ __netif_tx_acquire(txq); \ } else { \ - qp = smp_processor_id() % v->curr_queue_pairs; \ + qp = cpu % v->curr_queue_pairs; \ txq = netdev_get_tx_queue(v->dev, qp); \ - __netif_tx_lock(txq, raw_smp_processor_id()); \ + __netif_tx_lock(txq, cpu); \ } \ v->sq + qp; \ }) diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index b137e7f34397..bd1ef6334997 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -2504,8 +2504,10 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx) goto free_data_skb; for (index = 0; index < num_pri_streams; index++) { - if (WARN_ON(!data_sync_bufs[index].skb)) + if (WARN_ON(!data_sync_bufs[index].skb)) { + ret = -ENOMEM; goto free_data_skb; + } ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev, data_sync_bufs[index]. 
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index b4885a700296..b0a4ca3559fd 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3351,7 +3351,8 @@ found: "Found block at %x: code=%d ref=%d length=%d major=%d minor=%d\n", cptr, code, reference, length, major, minor); if ((!AR_SREV_9485(ah) && length >= 1024) || - (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485)) { + (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485) || + (length > cptr)) { ath_dbg(common, EEPROM, "Skipping bad header\n"); cptr -= COMP_HDR_LEN; continue; diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 2ca3b86714a9..172081ffe477 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1621,7 +1621,6 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah) ath9k_hw_gpio_request_out(ah, i, NULL, AR_GPIO_OUTPUT_MUX_AS_OUTPUT); ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i))); - ath9k_hw_gpio_free(ah, i); } } @@ -2728,14 +2727,17 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, u32 gpio, u32 type) static void ath9k_hw_gpio_cfg_soc(struct ath_hw *ah, u32 gpio, bool out, const char *label) { + int err; + if (ah->caps.gpio_requested & BIT(gpio)) return; - /* may be requested by BSP, free anyway */ - gpio_free(gpio); - - if (gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label)) + err = gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label); + if (err) { + ath_err(ath9k_hw_common(ah), "request GPIO%d failed:%d\n", + gpio, err); return; + } ah->caps.gpio_requested |= BIT(gpio); } diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index d202f2128df2..ec913ec991f3 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -408,13 +408,14 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n", ch); - if (wcn->sw_scan_opchannel == ch) { + if (wcn->sw_scan_opchannel == ch && wcn->sw_scan_channel) { /* If channel is the initial operating channel, we may * want to receive/transmit regular data packets, then * simply stop the scan session and exit PS mode. */ wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, wcn->sw_scan_vif); + wcn->sw_scan_channel = 0; } else if (wcn->sw_scan) { /* A scan is ongoing, do not change the operating * channel, but start a scan session on the channel. 
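This wcn36xx change (continuing in the next hunk) fixes an unbalanced scan session: finish_scan could previously be issued even when no session had been opened. Tracking the session in the new sw_scan_channel field makes both sides of the handshake explicit; a condensed sketch of the resulting control flow, using the driver's own names:

/* sw_scan_channel != 0 means a scan session is open on that channel */
if (wcn->sw_scan_opchannel == ch && wcn->sw_scan_channel) {
        /* back on the operating channel: close the open session */
        wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, wcn->sw_scan_vif);
        wcn->sw_scan_channel = 0;
} else if (wcn->sw_scan) {
        /* hopping off-channel: open a session on the new channel */
        wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN, wcn->sw_scan_vif);
        wcn36xx_smd_start_scan(wcn, ch);
        wcn->sw_scan_channel = ch;
} else {
        wcn36xx_change_opchannel(wcn, ch);
}

As the sw_scan_start hunk below shows, sw_scan_channel is reset to 0 when a software scan begins, so every scan starts with no session open.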
@@ -422,6 +423,7 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN, wcn->sw_scan_vif); wcn36xx_smd_start_scan(wcn, ch); + wcn->sw_scan_channel = ch; } else { wcn36xx_change_opchannel(wcn, ch); } @@ -702,6 +704,7 @@ static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw, wcn->sw_scan = true; wcn->sw_scan_vif = vif; + wcn->sw_scan_channel = 0; if (vif_priv->sta_assoc) wcn->sw_scan_opchannel = WCN36XX_HW_CHANNEL(wcn); else @@ -1500,6 +1503,13 @@ static int wcn36xx_probe(struct platform_device *pdev) goto out_wq; } + wcn->nv_file = WLAN_NV_FILE; + ret = of_property_read_string(wcn->dev->parent->of_node, "firmware-name", &wcn->nv_file); + if (ret < 0 && ret != -EINVAL) { + wcn36xx_err("failed to read \"firmware-name\" property: %d\n", ret); + goto out_wq; + } + wcn->smd_channel = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", wcn36xx_smd_rsp_process, hw); if (IS_ERR(wcn->smd_channel)) { wcn36xx_err("failed to open WLAN_CTRL channel\n"); diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 0e3be17d8cea..57fa857b290b 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -504,10 +504,10 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn) u16 fm_offset = 0; if (!wcn->nv) { - ret = request_firmware(&wcn->nv, WLAN_NV_FILE, wcn->dev); + ret = request_firmware(&wcn->nv, wcn->nv_file, wcn->dev); if (ret) { wcn36xx_err("Failed to load nv file %s: %d\n", - WLAN_NV_FILE, ret); + wcn->nv_file, ret); goto out; } } diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c index 1b831157ede1..cab196bb38cd 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.c +++ b/drivers/net/wireless/ath/wcn36xx/txrx.c @@ -287,6 +287,10 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) status.rate_idx = 0; } + if (ieee80211_is_beacon(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control)) + status.boottime_ns = ktime_get_boottime_ns(); + memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); if (ieee80211_is_beacon(hdr->frame_control)) { diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 6121d8a5641a..add6e527e833 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -199,6 +199,7 @@ struct wcn36xx { struct device *dev; struct list_head vif_list; + const char *nv_file; const struct firmware *nv; u8 fw_revision; @@ -246,6 +247,7 @@ struct wcn36xx { struct cfg80211_scan_request *scan_req; bool sw_scan; u8 sw_scan_opchannel; + u8 sw_scan_channel; struct ieee80211_vif *sw_scan_vif; struct mutex scan_lock; bool scan_aborted; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index cedba56fc448..f7b96cd69242 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -1829,6 +1829,14 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE; } break; + case WLAN_AKM_SUITE_FT_OVER_SAE: + val = WPA3_AUTH_SAE_PSK | WPA2_AUTH_FT; + profile->is_ft = true; + if (sme->crypto.sae_pwd) { + brcmf_dbg(INFO, "using SAE offload\n"); + profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE; + } + break; default: bphy_err(drvr, "invalid cipher group (%d)\n", sme->crypto.cipher_group); diff --git 
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 5f6a418fbbb1..8b149996fc00 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -2076,7 +2076,7 @@ cleanup: err = brcmf_pcie_probe(pdev, NULL); if (err) - brcmf_err(bus, "probe after resume failed, err=%d\n", err); + __brcmf_err(NULL, __func__, "probe after resume failed, err=%d\n", err); return err; } diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index 6ff2674f8466..45abb25b65a9 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -571,20 +571,18 @@ il3945_tx_skb(struct il_priv *il, /* Physical address of this Tx command's header (not MAC header!), * within command buffer array. */ - txcmd_phys = - pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen, - PCI_DMA_TODEVICE); - if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys))) + txcmd_phys = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, firstlen, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&il->pci_dev->dev, txcmd_phys))) goto drop_unlock; /* Set up TFD's 2nd entry to point directly to remainder of skb, * if any (802.11 null frames have no payload). */ secondlen = skb->len - hdr_len; if (secondlen > 0) { - phys_addr = - pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen, - PCI_DMA_TODEVICE); - if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) + phys_addr = dma_map_single(&il->pci_dev->dev, skb->data + hdr_len, + secondlen, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr))) goto drop_unlock; } @@ -1015,11 +1013,11 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority) /* Get physical address of RB/SKB */ page_dma = - pci_map_page(il->pci_dev, page, 0, + dma_map_page(&il->pci_dev->dev, page, 0, PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); - if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) { + if (unlikely(dma_mapping_error(&il->pci_dev->dev, page_dma))) { __free_pages(page, il->hw_params.rx_page_order); break; } @@ -1028,9 +1026,9 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority) if (list_empty(&rxq->rx_used)) { spin_unlock_irqrestore(&rxq->lock, flags); - pci_unmap_page(il->pci_dev, page_dma, + dma_unmap_page(&il->pci_dev->dev, page_dma, PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); __free_pages(page, il->hw_params.rx_page_order); return; } @@ -1062,9 +1060,10 @@ il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq) /* In the reset function, these buffers may have been allocated * to an SKB, so we need to unmap and free potential storage */ if (rxq->pool[i].page != NULL) { - pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, + dma_unmap_page(&il->pci_dev->dev, + rxq->pool[i].page_dma, PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); __il_free_pages(il, rxq->pool[i].page); rxq->pool[i].page = NULL; } @@ -1111,9 +1110,10 @@ il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq) int i; for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { if (rxq->pool[i].page != NULL) { - pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, + dma_unmap_page(&il->pci_dev->dev, + rxq->pool[i].page_dma, PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); __il_free_pages(il, 
rxq->pool[i].page); rxq->pool[i].page = NULL; } @@ -1213,9 +1213,9 @@ il3945_rx_handle(struct il_priv *il) rxq->queue[i] = NULL; - pci_unmap_page(il->pci_dev, rxb->page_dma, + dma_unmap_page(&il->pci_dev->dev, rxb->page_dma, PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); pkt = rxb_addr(rxb); len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK; @@ -1260,11 +1260,11 @@ il3945_rx_handle(struct il_priv *il) spin_lock_irqsave(&rxq->lock, flags); if (rxb->page != NULL) { rxb->page_dma = - pci_map_page(il->pci_dev, rxb->page, 0, - PAGE_SIZE << il->hw_params. - rx_page_order, PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(il->pci_dev, - rxb->page_dma))) { + dma_map_page(&il->pci_dev->dev, rxb->page, 0, + PAGE_SIZE << il->hw_params.rx_page_order, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&il->pci_dev->dev, + rxb->page_dma))) { __il_free_pages(il, rxb->page); rxb->page = NULL; list_add_tail(&rxb->list, &rxq->rx_used); @@ -3616,9 +3616,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { IL_WARN("No suitable DMA available.\n"); goto out_pci_disable_device; diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c index 0597d828bee1..a773939b8c2a 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945.c +++ b/drivers/net/wireless/intel/iwlegacy/3945.c @@ -652,16 +652,16 @@ il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq) /* Unmap tx_cmd */ if (counter) - pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping), + dma_unmap_single(&dev->dev, + dma_unmap_addr(&txq->meta[idx], mapping), dma_unmap_len(&txq->meta[idx], len), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); /* unmap chunks if any */ for (i = 1; i < counter; i++) - pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr), - le32_to_cpu(tfd->tbs[i].len), - PCI_DMA_TODEVICE); + dma_unmap_single(&dev->dev, le32_to_cpu(tfd->tbs[i].addr), + le32_to_cpu(tfd->tbs[i].len), DMA_TO_DEVICE); /* free SKB */ if (txq->skbs) { diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 341d6a2bc690..0223532fd56a 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -94,9 +94,10 @@ il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq) /* In the reset function, these buffers may have been allocated * to an SKB, so we need to unmap and free potential storage */ if (rxq->pool[i].page != NULL) { - pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, + dma_unmap_page(&il->pci_dev->dev, + rxq->pool[i].page_dma, PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); __il_free_pages(il, rxq->pool[i].page); rxq->pool[i].page = NULL; } @@ -342,11 +343,10 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority) } /* Get physical address of the RB */ - page_dma = - pci_map_page(il->pci_dev, page, 0, - PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) { + page_dma = dma_map_page(&il->pci_dev->dev, page, 0, + PAGE_SIZE << il->hw_params.rx_page_order, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&il->pci_dev->dev, page_dma))) { __free_pages(page, il->hw_params.rx_page_order); 
break; } @@ -355,9 +355,9 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority) if (list_empty(&rxq->rx_used)) { spin_unlock_irqrestore(&rxq->lock, flags); - pci_unmap_page(il->pci_dev, page_dma, + dma_unmap_page(&il->pci_dev->dev, page_dma, PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); __free_pages(page, il->hw_params.rx_page_order); return; } @@ -409,9 +409,10 @@ il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq) int i; for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { if (rxq->pool[i].page != NULL) { - pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, + dma_unmap_page(&il->pci_dev->dev, + rxq->pool[i].page_dma, PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); __il_free_pages(il, rxq->pool[i].page); rxq->pool[i].page = NULL; } @@ -1815,20 +1816,18 @@ il4965_tx_skb(struct il_priv *il, /* Physical address of this Tx command's header (not MAC header!), * within command buffer array. */ - txcmd_phys = - pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen, - PCI_DMA_BIDIRECTIONAL); - if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys))) + txcmd_phys = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, firstlen, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(&il->pci_dev->dev, txcmd_phys))) goto drop_unlock; /* Set up TFD's 2nd entry to point directly to remainder of skb, * if any (802.11 null frames have no payload). */ secondlen = skb->len - hdr_len; if (secondlen > 0) { - phys_addr = - pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen, - PCI_DMA_TODEVICE); - if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) + phys_addr = dma_map_single(&il->pci_dev->dev, skb->data + hdr_len, + secondlen, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr))) goto drop_unlock; } @@ -1853,8 +1852,8 @@ il4965_tx_skb(struct il_priv *il, offsetof(struct il_tx_cmd, scratch); /* take back ownership of DMA buffer to enable update */ - pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen, - PCI_DMA_BIDIRECTIONAL); + dma_sync_single_for_cpu(&il->pci_dev->dev, txcmd_phys, firstlen, + DMA_BIDIRECTIONAL); tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys); @@ -1869,8 +1868,8 @@ il4965_tx_skb(struct il_priv *il, if (info->flags & IEEE80211_TX_CTL_AMPDU) il->ops->txq_update_byte_cnt_tbl(il, txq, le16_to_cpu(tx_cmd->len)); - pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen, - PCI_DMA_BIDIRECTIONAL); + dma_sync_single_for_device(&il->pci_dev->dev, txcmd_phys, firstlen, + DMA_BIDIRECTIONAL); /* Tell device the write idx *just past* this latest filled TFD */ q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd); @@ -3929,15 +3928,15 @@ il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq) /* Unmap tx_cmd */ if (num_tbs) - pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping), + dma_unmap_single(&dev->dev, + dma_unmap_addr(&txq->meta[idx], mapping), dma_unmap_len(&txq->meta[idx], len), - PCI_DMA_BIDIRECTIONAL); + DMA_BIDIRECTIONAL); /* Unmap chunks, if any. 
*/ for (i = 1; i < num_tbs; i++) - pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i), - il4965_tfd_tb_get_len(tfd, i), - PCI_DMA_TODEVICE); + dma_unmap_single(&dev->dev, il4965_tfd_tb_get_addr(tfd, i), + il4965_tfd_tb_get_len(tfd, i), DMA_TO_DEVICE); /* free SKB */ if (txq->skbs) { @@ -4243,9 +4242,9 @@ il4965_rx_handle(struct il_priv *il) rxq->queue[i] = NULL; - pci_unmap_page(il->pci_dev, rxb->page_dma, + dma_unmap_page(&il->pci_dev->dev, rxb->page_dma, PAGE_SIZE << il->hw_params.rx_page_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); pkt = rxb_addr(rxb); len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK; @@ -4290,12 +4289,12 @@ il4965_rx_handle(struct il_priv *il) spin_lock_irqsave(&rxq->lock, flags); if (rxb->page != NULL) { rxb->page_dma = - pci_map_page(il->pci_dev, rxb->page, 0, - PAGE_SIZE << il->hw_params. - rx_page_order, PCI_DMA_FROMDEVICE); + dma_map_page(&il->pci_dev->dev, rxb->page, 0, + PAGE_SIZE << il->hw_params.rx_page_order, + DMA_FROM_DEVICE); - if (unlikely(pci_dma_mapping_error(il->pci_dev, - rxb->page_dma))) { + if (unlikely(dma_mapping_error(&il->pci_dev->dev, + rxb->page_dma))) { __il_free_pages(il, rxb->page); rxb->page = NULL; list_add_tail(&rxb->list, &rxq->rx_used); @@ -6514,14 +6513,9 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36)); if (err) { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (!err) - err = - pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); /* both attempts failed: */ if (err) { IL_WARN("No suitable DMA available.\n"); diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index 219fed91cac5..683b632981ed 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -2819,10 +2819,10 @@ il_cmd_queue_unmap(struct il_priv *il) i = il_get_cmd_idx(q, q->read_ptr, 0); if (txq->meta[i].flags & CMD_MAPPED) { - pci_unmap_single(il->pci_dev, + dma_unmap_single(&il->pci_dev->dev, dma_unmap_addr(&txq->meta[i], mapping), dma_unmap_len(&txq->meta[i], len), - PCI_DMA_BIDIRECTIONAL); + DMA_BIDIRECTIONAL); txq->meta[i].flags = 0; } @@ -2831,10 +2831,10 @@ il_cmd_queue_unmap(struct il_priv *il) i = q->n_win; if (txq->meta[i].flags & CMD_MAPPED) { - pci_unmap_single(il->pci_dev, + dma_unmap_single(&il->pci_dev->dev, dma_unmap_addr(&txq->meta[i], mapping), dma_unmap_len(&txq->meta[i], len), - PCI_DMA_BIDIRECTIONAL); + DMA_BIDIRECTIONAL); txq->meta[i].flags = 0; } } @@ -3197,10 +3197,9 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd) } #endif - phys_addr = - pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size, - PCI_DMA_BIDIRECTIONAL); - if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) { + phys_addr = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, fix_size, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr))) { idx = -ENOMEM; goto out; } @@ -3298,8 +3297,8 @@ il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb) txq->time_stamp = jiffies; - pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping), - dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL); + dma_unmap_single(&il->pci_dev->dev, dma_unmap_addr(meta, mapping), + dma_unmap_len(meta, len), DMA_BIDIRECTIONAL); /* Input 
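Editor's note: the long run of iwlegacy hunks above is a mechanical migration from the deprecated pci_* DMA wrappers to the generic DMA API: pci_map_single()/pci_unmap_page() become dma_map_single()/dma_unmap_page() on &pdev->dev, the PCI_DMA_* constants become enum dma_data_direction values, the two-step mask setup collapses into dma_set_mask_and_coherent(), and CPU-side updates of a live mapping stay bracketed by sync calls as in the il4965_tx_skb() scratch-pointer update. A condensed, kernel-style sketch of the new pattern, illustrative only and not this driver's code:

```c
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_dma_setup(struct pci_dev *pdev)
{
	dma_addr_t addr;
	void *buf;
	int err;

	/* one call now sets both streaming and coherent masks;
	 * fall back to 32 bits if the wider mask is not usable */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* the generic API takes struct device *, not struct pci_dev * */
	addr = dma_map_single(&pdev->dev, buf, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, addr)) {
		kfree(buf);
		return -ENOMEM;
	}

	/* CPU writes to a mapped buffer are bracketed by syncs */
	dma_sync_single_for_cpu(&pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	memset(buf, 0, 16);
	dma_sync_single_for_device(&pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

	dma_unmap_single(&pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	kfree(buf);
	return 0;
}
```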
error checking is done when commands are added to queue. */ if (meta->flags & CMD_WANT_SKB) { diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 7f1faa9d97b4..52d1d391f4c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -9,7 +9,7 @@ #include "iwl-prph.h" /* Highest firmware API version supported */ -#define IWL_22000_UCODE_API_MAX 64 +#define IWL_22000_UCODE_API_MAX 65 /* Lowest firmware API version supported */ #define IWL_22000_UCODE_API_MIN 39 @@ -154,7 +154,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .apmg_not_supported = true, \ .trans.mq_rx_supported = true, \ .vht_mu_mimo_supported = true, \ - .mac_addr_from_csr = true, \ + .mac_addr_from_csr = 0x380, \ .ht_params = &iwl_22000_ht_params, \ .nvm_ver = IWL_22000_NVM_VERSION, \ .trans.use_tfh = true, \ @@ -215,6 +215,67 @@ static const struct iwl_ht_params iwl_22000_ht_params = { }, \ } +#define IWL_DEVICE_BZ_COMMON \ + .ucode_api_max = IWL_22000_UCODE_API_MAX, \ + .ucode_api_min = IWL_22000_UCODE_API_MIN, \ + .led_mode = IWL_LED_RF_STATE, \ + .nvm_hw_section_num = 10, \ + .non_shared_ant = ANT_B, \ + .dccm_offset = IWL_22000_DCCM_OFFSET, \ + .dccm_len = IWL_22000_DCCM_LEN, \ + .dccm2_offset = IWL_22000_DCCM2_OFFSET, \ + .dccm2_len = IWL_22000_DCCM2_LEN, \ + .smem_offset = IWL_22000_SMEM_OFFSET, \ + .smem_len = IWL_22000_SMEM_LEN, \ + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ + .apmg_not_supported = true, \ + .trans.mq_rx_supported = true, \ + .vht_mu_mimo_supported = true, \ + .mac_addr_from_csr = 0x30, \ + .ht_params = &iwl_22000_ht_params, \ + .nvm_ver = IWL_22000_NVM_VERSION, \ + .trans.use_tfh = true, \ + .trans.rf_id = true, \ + .trans.gen2 = true, \ + .nvm_type = IWL_NVM_EXT, \ + .dbgc_supported = true, \ + .min_umac_error_event_table = 0x400000, \ + .d3_debug_data_base_addr = 0x401000, \ + .d3_debug_data_length = 60 * 1024, \ + .mon_smem_regs = { \ + .write_ptr = { \ + .addr = LDBG_M2S_BUF_WPTR, \ + .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \ + }, \ + .cycle_cnt = { \ + .addr = LDBG_M2S_BUF_WRAP_CNT, \ + .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \ + }, \ + } + +#define IWL_DEVICE_BZ \ + IWL_DEVICE_BZ_COMMON, \ + .trans.umac_prph_offset = 0x300000, \ + .trans.device_family = IWL_DEVICE_FAMILY_BZ, \ + .trans.base_params = &iwl_ax210_base_params, \ + .min_txq_size = 128, \ + .gp2_reg_addr = 0xd02c68, \ + .min_256_ba_txq_size = 1024, \ + .mon_dram_regs = { \ + .write_ptr = { \ + .addr = DBGC_CUR_DBGBUF_STATUS, \ + .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \ + }, \ + .cycle_cnt = { \ + .addr = DBGC_DBGBUF_WRAP_AROUND, \ + .mask = 0xffffffff, \ + }, \ + .cur_frag = { \ + .addr = DBGC_CUR_DBGBUF_STATUS, \ + .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \ + }, \ + } + const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = { .mq_rx_supported = true, .use_tfh = true, @@ -373,7 +434,7 @@ const struct iwl_cfg_trans_params iwl_ma_trans_cfg = { }; const struct iwl_cfg_trans_params iwl_bz_trans_cfg = { - .device_family = IWL_DEVICE_FAMILY_AX210, + .device_family = IWL_DEVICE_FAMILY_BZ, .base_params = &iwl_ax210_base_params, .mq_rx_supported = true, .use_tfh = true, @@ -394,6 +455,7 @@ const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz"; const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz"; const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz"; const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz"; +const char iwl_bz_name[] = "Intel(R) TBD Bz device"; const 
char iwl_ax200_killer_1650w_name[] = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)"; @@ -763,28 +825,28 @@ const struct iwl_cfg iwl_cfg_quz_a0_hr_b0 = { const struct iwl_cfg iwl_cfg_bz_a0_hr_b0 = { .fw_name_pre = IWL_BZ_A_HR_B_FW_PRE, .uhb_supported = true, - IWL_DEVICE_AX210, + IWL_DEVICE_BZ, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_bz_a0_gf_a0 = { .fw_name_pre = IWL_BZ_A_GF_A_FW_PRE, .uhb_supported = true, - IWL_DEVICE_AX210, + IWL_DEVICE_BZ, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0 = { .fw_name_pre = IWL_BZ_A_GF4_A_FW_PRE, .uhb_supported = true, - IWL_DEVICE_AX210, + IWL_DEVICE_BZ, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; const struct iwl_cfg iwl_cfg_bz_a0_mr_a0 = { .fw_name_pre = IWL_BZ_A_MR_A_FW_PRE, .uhb_supported = true, - IWL_DEVICE_AX210, + IWL_DEVICE_BZ, .num_rbds = IWL_NUM_RBDS_AX210_HE, }; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index 871533beff30..7a7ca06d46c1 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -89,7 +89,7 @@ static const struct iwl_tt_params iwl9000_tt_params = { .apmg_not_supported = true, \ .num_rbds = 512, \ .vht_mu_mimo_supported = true, \ - .mac_addr_from_csr = true, \ + .mac_addr_from_csr = 0x380, \ .nvm_type = IWL_NVM_EXT, \ .dbgc_supported = true, \ .min_umac_error_event_table = 0x800000, \ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index c01523f64bfc..cc7b69fd14d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** * - * Copyright(c) 2003 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2014, 2018 - 2021 Intel Corporation. All rights reserved. 
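Editor's note: in the 22000.c and 9000.c hunks above, mac_addr_from_csr turns from a bool into the CSR base address of the OTP MAC-address words (0x380 for these families, 0x30 for the new Bz devices), so each family can point at its own register location. The register layout itself is not shown in the diff; the sketch below is a hypothetical illustration of assembling a MAC address from two 32-bit words, with toy values:

```c
#include <stdio.h>
#include <stdint.h>

/* hypothetical: a 6-byte MAC spread across two 32-bit register words */
static void mac_from_words(uint32_t w0, uint32_t w1, uint8_t mac[6])
{
	mac[0] = w0;       mac[1] = w0 >> 8;
	mac[2] = w0 >> 16; mac[3] = w0 >> 24;
	mac[4] = w1;       mac[5] = w1 >> 8;
}

int main(void)
{
	uint8_t mac[6];

	mac_from_words(0x33221100, 0x5544, mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
```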
* Copyright(c) 2015 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well @@ -1950,7 +1950,7 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand) } } -static void iwl_nic_error(struct iwl_op_mode *op_mode) +static void iwl_nic_error(struct iwl_op_mode *op_mode, bool sync) { struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c index 80475c7a6fba..3cd7b423c588 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c @@ -318,7 +318,7 @@ iwlagn_accumulative_statistics(struct iwl_priv *priv, (__le32 *)&priv->delta_stats._name, \ (__le32 *)&priv->max_delta_stats._name, \ (__le32 *)&priv->accum_stats._name, \ - sizeof(*_name)); + sizeof(*_name)) ACCUM(common); ACCUM(rx_non_phy); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 34933f133a0a..1efac0b2a94d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -264,7 +264,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, goto out_free; } - enabled = !!wifi_pkg->package.elements[0].integer.value; + enabled = !!wifi_pkg->package.elements[1].integer.value; if (!enabled) { *block_list_size = -1; @@ -273,15 +273,15 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, goto out_free; } - if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER || - wifi_pkg->package.elements[1].integer.value > + if (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER || + wifi_pkg->package.elements[2].integer.value > APCI_WTAS_BLACK_LIST_MAX) { IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n", wifi_pkg->package.elements[1].integer.value); ret = -EINVAL; goto out_free; } - *block_list_size = wifi_pkg->package.elements[1].integer.value; + *block_list_size = wifi_pkg->package.elements[2].integer.value; IWL_DEBUG_RADIO(fwrt, "TAS array size %d\n", *block_list_size); if (*block_list_size > APCI_WTAS_BLACK_LIST_MAX) { @@ -294,15 +294,15 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, for (i = 0; i < *block_list_size; i++) { u32 country; - if (wifi_pkg->package.elements[2 + i].type != + if (wifi_pkg->package.elements[3 + i].type != ACPI_TYPE_INTEGER) { IWL_DEBUG_RADIO(fwrt, - "TAS invalid array elem %d\n", 2 + i); + "TAS invalid array elem %d\n", 3 + i); ret = -EINVAL; goto out_free; } - country = wifi_pkg->package.elements[2 + i].integer.value; + country = wifi_pkg->package.elements[3 + i].integer.value; block_list_array[i] = cpu_to_le32(country); IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country); } @@ -412,20 +412,35 @@ IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv); static int iwl_sar_set_profile(union acpi_object *table, struct iwl_sar_profile *profile, - bool enabled) + bool enabled, u8 num_chains, u8 num_sub_bands) { - int i; - - profile->enabled = enabled; - - for (i = 0; i < ACPI_SAR_TABLE_SIZE; i++) { - if (table[i].type != ACPI_TYPE_INTEGER || - table[i].integer.value > U8_MAX) - return -EINVAL; + int i, j, idx = 0; - profile->table[i] = table[i].integer.value; + /* + * The table from ACPI is flat, but we store it in a + * structured array. 
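Editor's note: the iwl_acpi_get_tas() hunks above shift every read one element later: the enable flag moves from index 0 to 1, the block-list size from 1 to 2, and the country entries now start at index 3. The diff does not say what the new leading element holds (a table revision is a plausible guess, so treat that as an assumption). A standalone model of the shifted layout:

```c
#include <stdio.h>

#define BLOCK_LIST_MAX 16

static int parse_tas(const unsigned long long *elem, int n,
		     int *out, int *out_n)
{
	int i, size;

	if (n < 3 || !elem[1])		/* elem[1]: enabled flag */
		return -1;

	size = (int)elem[2];		/* elem[2]: number of entries */
	if (size > BLOCK_LIST_MAX || 3 + size > n)
		return -1;

	for (i = 0; i < size; i++)	/* entries start at elem[3] */
		out[i] = (int)elem[3 + i];
	*out_n = size;
	return 0;
}

int main(void)
{
	/* elem[0] is skipped by the parser, whatever it holds */
	unsigned long long pkg[] = { 0, 1, 2, 276, 643 };
	int list[BLOCK_LIST_MAX], n;

	if (!parse_tas(pkg, 5, list, &n))
		printf("block list: %d entries, first=%d\n", n, list[0]);
	return 0;
}
```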
+ */ + for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV2; i++) { + for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS_REV2; j++) { + /* if we don't have the values, use the default */ + if (i >= num_chains || j >= num_sub_bands) { + profile->chains[i].subbands[j] = 0; + } else { + if (table[idx].type != ACPI_TYPE_INTEGER || + table[idx].integer.value > U8_MAX) + return -EINVAL; + + profile->chains[i].subbands[j] = + table[idx].integer.value; + + idx++; + } + } } + /* Only if all values were valid can the profile be enabled */ + profile->enabled = enabled; + return 0; } @@ -433,10 +448,10 @@ static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt, __le16 *per_chain, u32 n_subbands, int prof_a, int prof_b) { - int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b }; - int i, j, idx; + int profs[ACPI_SAR_NUM_CHAINS_REV0] = { prof_a, prof_b }; + int i, j; - for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) { + for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV0; i++) { struct iwl_sar_profile *prof; /* don't allow SAR to be disabled (profile 0 means disable) */ @@ -467,11 +482,10 @@ static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt, i, profs[i]); IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i); for (j = 0; j < n_subbands; j++) { - idx = i * ACPI_SAR_NUM_SUB_BANDS + j; per_chain[i * n_subbands + j] = - cpu_to_le16(prof->table[idx]); + cpu_to_le16(prof->chains[i].subbands[j]); IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n", - j, prof->table[idx]); + j, prof->chains[i].subbands[j]); } } @@ -486,7 +500,7 @@ int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt, for (i = 0; i < n_tables; i++) { ret = iwl_sar_fill_table(fwrt, - &per_chain[i * n_subbands * ACPI_SAR_NUM_CHAIN_LIMITS], + &per_chain[i * n_subbands * ACPI_SAR_NUM_CHAINS_REV0], n_subbands, prof_a, prof_b); if (ret) break; @@ -501,28 +515,71 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) union acpi_object *wifi_pkg, *table, *data; bool enabled; int ret, tbl_rev; + u8 num_chains, num_sub_bands; data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDS_METHOD); if (IS_ERR(data)) return PTR_ERR(data); + /* start by trying to read revision 2 */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, - ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev); - if (IS_ERR(wifi_pkg)) { - ret = PTR_ERR(wifi_pkg); - goto out_free; + ACPI_WRDS_WIFI_DATA_SIZE_REV2, + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 2) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + num_chains = ACPI_SAR_NUM_CHAINS_REV2; + num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV2; + + goto read_table; } - if (tbl_rev != 0) { - ret = -EINVAL; - goto out_free; + /* then try revision 1 */ + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, + ACPI_WRDS_WIFI_DATA_SIZE_REV1, + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 1) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + num_chains = ACPI_SAR_NUM_CHAINS_REV1; + num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV1; + + goto read_table; + } + + /* then finally revision 0 */ + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, + ACPI_WRDS_WIFI_DATA_SIZE_REV0, + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 0) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + num_chains = ACPI_SAR_NUM_CHAINS_REV0; + num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV0; + + goto read_table; } + ret = PTR_ERR(wifi_pkg); + goto out_free; + +read_table: if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) { ret = -EINVAL; goto out_free; } + IWL_DEBUG_RADIO(fwrt, "Reading WRDS tbl_rev=%d\n", tbl_rev); + enabled = !!(wifi_pkg->package.elements[1].integer.value); /* position of the 
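Editor's note: iwl_sar_set_profile() above unpacks the flat ACPI byte table into the largest (revision 2) chains-by-sub-bands grid, zero-filling whatever an older table does not carry, so the rest of the driver only deals with one shape. The same logic as a self-contained model:

```c
#include <stdio.h>

#define CHAINS_REV2 4
#define SUBBANDS_REV2 11

static void unpack(const unsigned char *flat, int chains, int subbands,
		   unsigned char grid[CHAINS_REV2][SUBBANDS_REV2])
{
	int i, j, idx = 0;

	for (i = 0; i < CHAINS_REV2; i++)
		for (j = 0; j < SUBBANDS_REV2; j++)
			grid[i][j] = (i < chains && j < subbands)
				     ? flat[idx++] : 0;	/* default: 0 */
}

int main(void)
{
	/* rev 0 table: 2 chains x 5 sub-bands, flat */
	unsigned char rev0[2 * 5] = { 10, 11, 12, 13, 14,
				      20, 21, 22, 23, 24 };
	unsigned char grid[CHAINS_REV2][SUBBANDS_REV2];

	unpack(rev0, 2, 5, grid);
	printf("%u %u %u\n", grid[0][0], grid[0][5], grid[3][10]); /* 10 0 0 */
	return 0;
}
```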
actual table */ @@ -531,7 +588,8 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) /* The profile from WRDS is officially profile 1, but goes * into sar_profiles[0] (because we don't have a profile 0). */ - ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0], enabled); + ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0], enabled, + num_chains, num_sub_bands); out_free: kfree(data); return ret; @@ -544,23 +602,64 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) bool enabled; int i, n_profiles, tbl_rev, pos; int ret = 0; + u8 num_chains, num_sub_bands; data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD); if (IS_ERR(data)) return PTR_ERR(data); + /* start by trying to read revision 2 */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, - ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev); - if (IS_ERR(wifi_pkg)) { - ret = PTR_ERR(wifi_pkg); - goto out_free; + ACPI_EWRD_WIFI_DATA_SIZE_REV2, + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 2) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + num_chains = ACPI_SAR_NUM_CHAINS_REV2; + num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV2; + + goto read_table; } - if (tbl_rev != 0) { - ret = -EINVAL; - goto out_free; + /* then try revision 1 */ + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, + ACPI_EWRD_WIFI_DATA_SIZE_REV1, + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 1) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + num_chains = ACPI_SAR_NUM_CHAINS_REV1; + num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV1; + + goto read_table; + } + + /* then finally revision 0 */ + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, + ACPI_EWRD_WIFI_DATA_SIZE_REV0, + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 0) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + num_chains = ACPI_SAR_NUM_CHAINS_REV0; + num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV0; + + goto read_table; } + ret = PTR_ERR(wifi_pkg); + goto out_free; + +read_table: if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER || wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) { ret = -EINVAL; @@ -589,13 +688,13 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) * have profile 0). So in the array we start from 1. 
*/ ret = iwl_sar_set_profile(&wifi_pkg->package.elements[pos], - &fwrt->sar_profiles[i + 1], - enabled); + &fwrt->sar_profiles[i + 1], enabled, + num_chains, num_sub_bands); if (ret < 0) break; /* go to the next table */ - pos += ACPI_SAR_TABLE_SIZE; + pos += num_chains * num_sub_bands; } out_free: @@ -607,41 +706,93 @@ IWL_EXPORT_SYMBOL(iwl_sar_get_ewrd_table); int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt) { union acpi_object *wifi_pkg, *data; - int i, j, ret, tbl_rev; - int idx = 1; + int i, j, k, ret, tbl_rev; + int idx = 1; /* start from one to skip the domain */ + u8 num_bands; data = iwl_acpi_get_object(fwrt->dev, ACPI_WGDS_METHOD); if (IS_ERR(data)) return PTR_ERR(data); + /* start by trying to read revision 2 */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, - ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev); + ACPI_WGDS_WIFI_DATA_SIZE_REV2, + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 2) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } - if (IS_ERR(wifi_pkg)) { - ret = PTR_ERR(wifi_pkg); - goto out_free; + num_bands = ACPI_GEO_NUM_BANDS_REV2; + + goto read_table; } - if (tbl_rev > 1) { - ret = -EINVAL; - goto out_free; + /* then try revision 0 (which is the same as 1) */ + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, + ACPI_WGDS_WIFI_DATA_SIZE_REV0, + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 0 && tbl_rev != 1) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + num_bands = ACPI_GEO_NUM_BANDS_REV0; + + goto read_table; } + ret = PTR_ERR(wifi_pkg); + goto out_free; + +read_table: fwrt->geo_rev = tbl_rev; for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) { - for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) { + for (j = 0; j < ACPI_GEO_NUM_BANDS_REV2; j++) { union acpi_object *entry; - entry = &wifi_pkg->package.elements[idx++]; - if (entry->type != ACPI_TYPE_INTEGER || - entry->integer.value > U8_MAX) { - ret = -EINVAL; - goto out_free; + /* + * num_bands is either 2 or 3, if it's only 2 then + * fill the third band (6 GHz) with the values from + * 5 GHz (second band) + */ + if (j >= num_bands) { + fwrt->geo_profiles[i].bands[j].max = + fwrt->geo_profiles[i].bands[1].max; + } else { + entry = &wifi_pkg->package.elements[idx++]; + if (entry->type != ACPI_TYPE_INTEGER || + entry->integer.value > U8_MAX) { + ret = -EINVAL; + goto out_free; + } + + fwrt->geo_profiles[i].bands[j].max = + entry->integer.value; } - fwrt->geo_profiles[i].values[j] = entry->integer.value; + for (k = 0; k < ACPI_GEO_NUM_CHAINS; k++) { + /* same here as above */ + if (j >= num_bands) { + fwrt->geo_profiles[i].bands[j].chains[k] = + fwrt->geo_profiles[i].bands[1].chains[k]; + } else { + entry = &wifi_pkg->package.elements[idx++]; + if (entry->type != ACPI_TYPE_INTEGER || + entry->integer.value > U8_MAX) { + ret = -EINVAL; + goto out_free; + } + + fwrt->geo_profiles[i].bands[j].chains[k] = + entry->integer.value; + } + } } } + ret = 0; out_free: kfree(data); @@ -673,43 +824,26 @@ IWL_EXPORT_SYMBOL(iwl_sar_geo_support); int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, struct iwl_per_chain_offset *table, u32 n_bands) { - int ret, i, j; + int i, j; if (!iwl_sar_geo_support(fwrt)) return -EOPNOTSUPP; - ret = iwl_sar_get_wgds_table(fwrt); - if (ret < 0) { - IWL_DEBUG_RADIO(fwrt, - "Geo SAR BIOS table invalid or unavailable. 
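Editor's note: the WGDS reader above accepts revision 0/1 tables with two bands and fills the missing 6 GHz slot by cloning the 5 GHz values (band index 1), so consumers can always iterate over three bands. Modeled standalone:

```c
#include <stdio.h>

#define NUM_BANDS_REV2 3
#define NUM_CHAINS 2

struct band { unsigned char max, chains[NUM_CHAINS]; };

static void fill_bands(struct band *b, int num_bands)
{
	/* clone 5 GHz (index 1) into any band the table lacks */
	for (int j = num_bands; j < NUM_BANDS_REV2; j++)
		b[j] = b[1];
}

int main(void)
{
	struct band b[NUM_BANDS_REV2] = { { 30, { 1, 2 } },
					  { 24, { 3, 4 } } };

	fill_bands(b, 2);	/* rev 0/1: only 2 bands provided */
	printf("6GHz: max=%u chainA=%u chainB=%u\n",
	       b[2].max, b[2].chains[0], b[2].chains[1]);	/* 24 3 4 */
	return 0;
}
```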
(%d)\n", - ret); - /* we don't fail if the table is not available */ - return -ENOENT; - } - for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) { for (j = 0; j < n_bands; j++) { struct iwl_per_chain_offset *chain = &table[i * n_bands + j]; - u8 *value; - - if (j * ACPI_GEO_PER_CHAIN_SIZE >= - ARRAY_SIZE(fwrt->geo_profiles[0].values)) - /* - * Currently we only store lb an hb values, and - * don't have any special ones for uhb. So leave - * those empty for the time being - */ - break; - - value = &fwrt->geo_profiles[i].values[j * - ACPI_GEO_PER_CHAIN_SIZE]; - chain->max_tx_power = cpu_to_le16(value[0]); - chain->chain_a = value[1]; - chain->chain_b = value[2]; + + chain->max_tx_power = + cpu_to_le16(fwrt->geo_profiles[i].bands[j].max); + chain->chain_a = fwrt->geo_profiles[i].bands[j].chains[0]; + chain->chain_b = fwrt->geo_profiles[i].bands[j].chains[1]; IWL_DEBUG_RADIO(fwrt, "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n", - i, j, value[1], value[2], value[0]); + i, j, + fwrt->geo_profiles[i].bands[j].chains[0], + fwrt->geo_profiles[i].bands[j].chains[1], + fwrt->geo_profiles[i].bands[j].max); } } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index b858e998999c..16ed0995b51e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -26,21 +26,46 @@ #define ACPI_WIFI_DOMAIN (0x07) -#define ACPI_SAR_TABLE_SIZE 10 #define ACPI_SAR_PROFILE_NUM 4 -#define ACPI_GEO_TABLE_SIZE 6 #define ACPI_NUM_GEO_PROFILES 3 #define ACPI_GEO_PER_CHAIN_SIZE 3 -#define ACPI_SAR_NUM_CHAIN_LIMITS 2 -#define ACPI_SAR_NUM_SUB_BANDS 5 -#define ACPI_SAR_NUM_TABLES 1 +#define ACPI_SAR_NUM_CHAINS_REV0 2 +#define ACPI_SAR_NUM_CHAINS_REV1 2 +#define ACPI_SAR_NUM_CHAINS_REV2 4 +#define ACPI_SAR_NUM_SUB_BANDS_REV0 5 +#define ACPI_SAR_NUM_SUB_BANDS_REV1 11 +#define ACPI_SAR_NUM_SUB_BANDS_REV2 11 + +#define ACPI_WRDS_WIFI_DATA_SIZE_REV0 (ACPI_SAR_NUM_CHAINS_REV0 * \ + ACPI_SAR_NUM_SUB_BANDS_REV0 + 2) +#define ACPI_WRDS_WIFI_DATA_SIZE_REV1 (ACPI_SAR_NUM_CHAINS_REV1 * \ + ACPI_SAR_NUM_SUB_BANDS_REV1 + 2) +#define ACPI_WRDS_WIFI_DATA_SIZE_REV2 (ACPI_SAR_NUM_CHAINS_REV2 * \ + ACPI_SAR_NUM_SUB_BANDS_REV2 + 2) +#define ACPI_EWRD_WIFI_DATA_SIZE_REV0 ((ACPI_SAR_PROFILE_NUM - 1) * \ + ACPI_SAR_NUM_CHAINS_REV0 * \ + ACPI_SAR_NUM_SUB_BANDS_REV0 + 3) +#define ACPI_EWRD_WIFI_DATA_SIZE_REV1 ((ACPI_SAR_PROFILE_NUM - 1) * \ + ACPI_SAR_NUM_CHAINS_REV1 * \ + ACPI_SAR_NUM_SUB_BANDS_REV1 + 3) +#define ACPI_EWRD_WIFI_DATA_SIZE_REV2 ((ACPI_SAR_PROFILE_NUM - 1) * \ + ACPI_SAR_NUM_CHAINS_REV2 * \ + ACPI_SAR_NUM_SUB_BANDS_REV2 + 3) + +/* revision 0 and 1 are identical, except for the semantics in the FW */ +#define ACPI_GEO_NUM_BANDS_REV0 2 +#define ACPI_GEO_NUM_BANDS_REV2 3 +#define ACPI_GEO_NUM_CHAINS 2 + +#define ACPI_WGDS_WIFI_DATA_SIZE_REV0 (ACPI_NUM_GEO_PROFILES * \ + ACPI_GEO_NUM_BANDS_REV0 * \ + ACPI_GEO_PER_CHAIN_SIZE + 1) +#define ACPI_WGDS_WIFI_DATA_SIZE_REV2 (ACPI_NUM_GEO_PROFILES * \ + ACPI_GEO_NUM_BANDS_REV2 * \ + ACPI_GEO_PER_CHAIN_SIZE + 1) -#define ACPI_WRDS_WIFI_DATA_SIZE (ACPI_SAR_TABLE_SIZE + 2) -#define ACPI_EWRD_WIFI_DATA_SIZE ((ACPI_SAR_PROFILE_NUM - 1) * \ - ACPI_SAR_TABLE_SIZE + 3) -#define ACPI_WGDS_WIFI_DATA_SIZE 19 #define ACPI_WRDD_WIFI_DATA_SIZE 2 #define ACPI_SPLC_WIFI_DATA_SIZE 2 #define ACPI_ECKV_WIFI_DATA_SIZE 2 @@ -51,8 +76,6 @@ #define APCI_WTAS_BLACK_LIST_MAX 16 #define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX) -#define ACPI_WGDS_TABLE_SIZE 3 - #define 
ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \ IWL_NUM_SUB_BANDS_V1) + 2) #define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \ @@ -64,13 +87,28 @@ #define ACPI_PPAG_MIN_HB -16 #define ACPI_PPAG_MAX_HB 40 +/* + * The profile for revision 2 is a superset of revision 1, which is in + * turn a superset of revision 0. So we can store all revisions + * inside revision 2, which is what we represent here. + */ +struct iwl_sar_profile_chain { + u8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2]; +}; + struct iwl_sar_profile { bool enabled; - u8 table[ACPI_SAR_TABLE_SIZE]; + struct iwl_sar_profile_chain chains[ACPI_SAR_NUM_CHAINS_REV2]; +}; + +/* Same thing as with SAR, all revisions fit in revision 2 */ +struct iwl_geo_profile_band { + u8 max; + u8 chains[ACPI_GEO_NUM_CHAINS]; }; struct iwl_geo_profile { - u8 values[ACPI_GEO_TABLE_SIZE]; + struct iwl_geo_profile_band bands[ACPI_GEO_NUM_BANDS_REV2]; }; enum iwl_dsm_funcs_rev_0 { @@ -234,7 +272,7 @@ static inline int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) static inline int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt) { - return -ENOENT; + return 1; } static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h index 01580c9175f3..3e81e9369224 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h @@ -142,7 +142,7 @@ enum iwl_bt_mxbox_dw3 { "\t%s: %d%s", \ #_field, \ BT_MBOX_MSG(notif, _num, _field), \ - true ? "\n" : ", "); + true ? "\n" : ", ") enum iwl_bt_activity_grading { BT_OFF = 0, BT_ON_NO_CONNECTION = 1, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index ce060c3dfd7b..ee6b5844a871 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -550,7 +550,8 @@ enum iwl_legacy_cmds { WOWLAN_CONFIGURATION = 0xe1, /** - * @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd + * @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd_v4, + * &struct iwl_wowlan_rsc_tsc_params_cmd */ WOWLAN_TSC_RSC_PARAM = 0xe2, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h index b2e7ef3ddc88..3ec82cae3981 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h @@ -6,6 +6,7 @@ */ #ifndef __iwl_fw_api_d3_h__ #define __iwl_fw_api_d3_h__ +#include <iwl-trans.h> /** * enum iwl_d0i3_flags - d0i3 flags @@ -389,11 +390,14 @@ struct iwl_wowlan_config_cmd { u8 reserved; } __packed; /* WOWLAN_CONFIG_API_S_VER_5 */ +#define IWL_NUM_RSC 16 +#define WOWLAN_KEY_MAX_SIZE 32 +#define WOWLAN_GTK_KEYS_NUM 2 +#define WOWLAN_IGTK_KEYS_NUM 2 + /* * WOWLAN_TSC_RSC_PARAMS */ -#define IWL_NUM_RSC 16 - struct tkip_sc { __le16 iv16; __le16 pad; @@ -425,11 +429,19 @@ struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 { union iwl_all_tsc_rsc all_tsc_rsc; } __packed; /* ALL_TSC_RSC_API_S_VER_2 */ -struct iwl_wowlan_rsc_tsc_params_cmd { +struct iwl_wowlan_rsc_tsc_params_cmd_v4 { struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 params; __le32 sta_id; } __packed; /* ALL_TSC_RSC_API_S_VER_4 */ +struct iwl_wowlan_rsc_tsc_params_cmd { + __le64 ucast_rsc[IWL_MAX_TID_COUNT]; + __le64 mcast_rsc[WOWLAN_GTK_KEYS_NUM][IWL_MAX_TID_COUNT]; + __le32 sta_id; +#define IWL_MCAST_KEY_MAP_INVALID 0xff + u8 mcast_key_id_map[4]; +} __packed; /* 
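Editor's note: two of the hunks above, ACCUM() in dvm/rx.c earlier and BT_MBOX_PRINT() in coex.h, drop a trailing semicolon from statement-like macros. The semicolon belongs at the call site; baking it into the macro expands "MACRO();" into an extra empty statement, which among other things breaks if/else chains:

```c
#include <stdio.h>

#define BAD(x)  printf("%d\n", (x));	/* note the trailing ';' */
#define GOOD(x) printf("%d\n", (x))

int main(void)
{
	int v = 1;

	/* With BAD(v); here, the expansion would read
	 *   if (v) printf(...); ; else ...
	 * and the orphaned else is a compile error. */
	if (v)
		GOOD(v);
	else
		GOOD(-v);
	return 0;
}
```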
ALL_TSC_RSC_API_S_VER_5 */ + #define IWL_MIC_KEY_SIZE 8 struct iwl_mic_keys { u8 tx[IWL_MIC_KEY_SIZE]; @@ -541,10 +553,6 @@ struct iwl_wowlan_gtk_status_v1 { struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 rsc; } __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */ -#define WOWLAN_KEY_MAX_SIZE 32 -#define WOWLAN_GTK_KEYS_NUM 2 -#define WOWLAN_IGTK_KEYS_NUM 2 - /** * struct iwl_wowlan_gtk_status - GTK status * @key: GTK material diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h index 5a2d9a1f7e73..d8b5870d6e9a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h @@ -33,12 +33,11 @@ struct iwl_fw_ini_hcmd { * * @version: TLV version * @domain: domain of the TLV. One of &enum iwl_fw_ini_dbg_domain - * @data: TLV data */ struct iwl_fw_ini_header { __le32 version; __le32 domain; - u8 data[]; + /* followed by the data */ } __packed; /* FW_TLV_DEBUG_HEADER_S_VER_1 */ /** @@ -130,6 +129,7 @@ struct iwl_fw_ini_region_internal_buffer { * &IWL_FW_INI_REGION_PERIPHERY_PHY, &IWL_FW_INI_REGION_PERIPHERY_AUX, * &IWL_FW_INI_REGION_PAGING, &IWL_FW_INI_REGION_CSR, * &IWL_FW_INI_REGION_DRAM_IMR and &IWL_FW_INI_REGION_PCI_IOSF_CONFIG + * &IWL_FW_INI_REGION_DBGI_SRAM, &FW_TLV_DEBUG_REGION_TYPE_DBGI_SRAM, * @fifos: fifos configuration. Used by &IWL_FW_INI_REGION_TXF and * &IWL_FW_INI_REGION_RXF * @err_table: error table configuration. Used by @@ -249,7 +249,6 @@ struct iwl_fw_ini_hcmd_tlv { * @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration * @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration * @IWL_FW_INI_ALLOCATION_ID_DBGC3: allocation meant for DBGC3 configuration - * @IWL_FW_INI_ALLOCATION_ID_INTERNAL: allocation meant for Intreanl SMEM in D3 * @IWL_FW_INI_ALLOCATION_NUM: number of allocation ids */ enum iwl_fw_ini_allocation_id { @@ -257,7 +256,6 @@ enum iwl_fw_ini_allocation_id { IWL_FW_INI_ALLOCATION_ID_DBGC1, IWL_FW_INI_ALLOCATION_ID_DBGC2, IWL_FW_INI_ALLOCATION_ID_DBGC3, - IWL_FW_INI_ALLOCATION_ID_INTERNAL, IWL_FW_INI_ALLOCATION_NUM, }; /* FW_DEBUG_TLV_ALLOCATION_ID_E_VER_1 */ @@ -298,6 +296,7 @@ enum iwl_fw_ini_buffer_location { * @IWL_FW_INI_REGION_DRAM_IMR: IMR memory * @IWL_FW_INI_REGION_PCI_IOSF_CONFIG: PCI/IOSF config * @IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY: special device memory + * @IWL_FW_INI_REGION_DBGI_SRAM: periphery registers of DBGI SRAM * @IWL_FW_INI_REGION_NUM: number of region types */ enum iwl_fw_ini_region_type { @@ -319,6 +318,7 @@ enum iwl_fw_ini_region_type { IWL_FW_INI_REGION_DRAM_IMR, IWL_FW_INI_REGION_PCI_IOSF_CONFIG, IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY, + IWL_FW_INI_REGION_DBGI_SRAM, IWL_FW_INI_REGION_NUM }; /* FW_TLV_DEBUG_REGION_TYPE_API_E */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h index 0e38eb1cd75d..6bbb8b8c91cd 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #ifndef __iwl_fw_api_location_h__ #define __iwl_fw_api_location_h__ @@ -151,6 +151,10 @@ enum iwl_tof_mcsi_enable { * is valid * @IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS: NDP parameters are valid * @IWL_TOF_RESPONDER_CMD_VALID_LMR_FEEDBACK: LMR feedback support is valid + * 
@IWL_TOF_RESPONDER_CMD_VALID_SESSION_ID: session id flag is valid + * @IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR: the bss_color field is valid + * @IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR: the + * min_time_between_msr and max_time_between_msr fields are valid */ enum iwl_tof_responder_cmd_valid_field { IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO = BIT(0), @@ -169,6 +173,9 @@ enum iwl_tof_responder_cmd_valid_field { IWL_TOF_RESPONDER_CMD_VALID_NDP_SUPPORT = BIT(22), IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS = BIT(23), IWL_TOF_RESPONDER_CMD_VALID_LMR_FEEDBACK = BIT(24), + IWL_TOF_RESPONDER_CMD_VALID_SESSION_ID = BIT(25), + IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR = BIT(26), + IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR = BIT(27), }; /** @@ -186,6 +193,8 @@ enum iwl_tof_responder_cmd_valid_field { * @IWL_TOF_RESPONDER_FLAGS_NDP_SUPPORT: support NDP ranging * @IWL_TOF_RESPONDER_FLAGS_LMR_FEEDBACK: request for LMR feedback if the * initiator supports it + * @IWL_TOF_RESPONDER_FLAGS_SESSION_ID: send the session id in the initial FTM + * frame. */ enum iwl_tof_responder_cfg_flags { IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT = BIT(0), @@ -200,6 +209,7 @@ enum iwl_tof_responder_cfg_flags { IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT = RATE_MCS_ANT_ABC_MSK, IWL_TOF_RESPONDER_FLAGS_NDP_SUPPORT = BIT(24), IWL_TOF_RESPONDER_FLAGS_LMR_FEEDBACK = BIT(25), + IWL_TOF_RESPONDER_FLAGS_SESSION_ID = BIT(27), }; /** @@ -297,13 +307,13 @@ struct iwl_tof_responder_config_cmd_v7 { * @r2i_ndp_params: parameters for R2I NDP. * bits 0 - 2: max number of LTF repetitions * bits 3 - 5: max number of spatial streams (supported values are < 2) - * bits 6 - 7: max number of total LTFs - * (&enum ieee80211_range_params_max_total_ltf) + * bits 6 - 7: max number of total LTFs see + * &enum ieee80211_range_params_max_total_ltf * @i2r_ndp_params: parameters for I2R NDP. * bits 0 - 2: max number of LTF repetitions * bits 3 - 5: max number of spatial streams - * bits 6 - 7: max number of total LTFs - * (&enum ieee80211_range_params_max_total_ltf) + * bits 6 - 7: max number of total LTFs see + * &enum ieee80211_range_params_max_total_ltf */ struct iwl_tof_responder_config_cmd_v8 { __le32 cmd_valid_fields; @@ -322,6 +332,58 @@ struct iwl_tof_responder_config_cmd_v8 { u8 i2r_ndp_params; } __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_8 */ +/** + * struct iwl_tof_responder_config_cmd_v9 - ToF AP mode (for debug) + * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field + * @responder_cfg_flags: &iwl_tof_responder_cfg_flags + * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. + * bits 4 - 7: &enum iwl_location_bw. + * @bss_color: current AP bss_color + * @channel_num: current AP Channel + * @ctrl_ch_position: coding of the control channel position relative to + * the center frequency, see iwl_mvm_get_ctrl_pos() + * @sta_id: index of the AP STA when in AP mode + * @reserved1: reserved + * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug + * purposes, simulating station movement by adding various values + * to this field + * @common_calib: XVT: common calibration value + * @specific_calib: XVT: specific calibration value + * @bssid: Current AP BSSID + * @r2i_ndp_params: parameters for R2I NDP. + * bits 0 - 2: max number of LTF repetitions + * bits 3 - 5: max number of spatial streams (supported values are < 2) + * bits 6 - 7: max number of total LTFs see + * &enum ieee80211_range_params_max_total_ltf + * @i2r_ndp_params: parameters for I2R NDP. 
+ * bits 0 - 2: max number of LTF repetitions + * bits 3 - 5: max number of spatial streams + * bits 6 - 7: max number of total LTFs see + * &enum ieee80211_range_params_max_total_ltf + * @min_time_between_msr: for non trigger based NDP ranging, minimum time + * between measurements in milliseconds. + * @max_time_between_msr: for non trigger based NDP ranging, maximum time + * between measurements in milliseconds. + */ +struct iwl_tof_responder_config_cmd_v9 { + __le32 cmd_valid_fields; + __le32 responder_cfg_flags; + u8 format_bw; + u8 bss_color; + u8 channel_num; + u8 ctrl_ch_position; + u8 sta_id; + u8 reserved1; + __le16 toa_offset; + __le16 common_calib; + __le16 specific_calib; + u8 bssid[ETH_ALEN]; + u8 r2i_ndp_params; + u8 i2r_ndp_params; + __le16 min_time_between_msr; + __le16 max_time_between_msr; +} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_8 */ + #define IWL_LCI_CIVIC_IE_MAX_SIZE 400 /** @@ -489,6 +551,10 @@ struct iwl_tof_range_req_ap_entry_v2 { * instead of fw internal values. * @IWL_INITIATOR_AP_FLAGS_PMF: request to protect the negotiation and LMR * frames with protected management frames. + * @IWL_INITIATOR_AP_FLAGS_TERMINATE_ON_LMR_FEEDBACK: terminate the session if + * the responder asked for LMR feedback although the initiator did not set + * the LMR feedback bit in the FTM request. If not set, the initiator will + * continue with the session and will provide the LMR feedback. */ enum iwl_initiator_ap_flags { IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1), @@ -504,6 +570,7 @@ enum iwl_initiator_ap_flags { IWL_INITIATOR_AP_FLAGS_LMR_FEEDBACK = BIT(12), IWL_INITIATOR_AP_FLAGS_USE_CALIB = BIT(13), IWL_INITIATOR_AP_FLAGS_PMF = BIT(14), + IWL_INITIATOR_AP_FLAGS_TERMINATE_ON_LMR_FEEDBACK = BIT(15), }; /** @@ -795,6 +862,90 @@ struct iwl_tof_range_req_ap_entry_v8 { } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_8 */ /** + * struct iwl_tof_range_req_ap_entry_v9 - AP configuration parameters + * @initiator_ap_flags: see &enum iwl_initiator_ap_flags. + * @channel_num: AP Channel number + * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. + * bits 4 - 7: &enum iwl_location_bw. + * @ctrl_ch_position: Coding of the control channel position relative to the + * center frequency, see iwl_mvm_get_ctrl_pos(). + * @ftmr_max_retries: Max number of retries to send the FTMR in case of no + * reply from the AP. + * @bssid: AP's BSSID + * @burst_period: For EDCA based ranging: Recommended value to be sent to the + * AP. Measurement periodicity In units of 100ms. ignored if + * num_of_bursts_exp = 0. + * For non trigger based NDP ranging, the maximum time between + * measurements in units of milliseconds. + * @samples_per_burst: the number of FTMs pairs in single Burst (1-31); + * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of + * the number of measurement iterations (min 2^0 = 1, max 2^14) + * @sta_id: the station id of the AP. Only relevant when associated to the AP, + * otherwise should be set to &IWL_MVM_INVALID_STA. + * @cipher: pairwise cipher suite for secured measurement. + * &enum iwl_location_cipher. + * @hltk: HLTK to be used for secured 11az measurement + * @tk: TK to be used for secured 11az measurement + * @calib: An array of calibration values per FTM rx bandwidth. + * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the + * calibration value that corresponds to the rx bandwidth of the FTM + * frame. + * @beacon_interval: beacon interval of the AP in TUs. Only required if + * &IWL_INITIATOR_AP_FLAGS_TB is set. 
+ * @bss_color: the BSS color of the responder. Only valid if + * &IWL_INITIATOR_AP_FLAGS_TB or &IWL_INITIATOR_AP_FLAGS_NON_TB is set. + * @rx_pn: the next expected PN for protected management frames Rx. LE byte + * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id + * is set to &IWL_MVM_INVALID_STA. + * @tx_pn: the next PN to use for protected management frames Tx. LE byte + * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id + * is set to &IWL_MVM_INVALID_STA. + * @r2i_ndp_params: parameters for R2I NDP ranging negotiation. + * bits 0 - 2: max LTF repetitions + * bits 3 - 5: max number of spatial streams + * bits 6 - 7: reserved + * @i2r_ndp_params: parameters for I2R NDP ranging negotiation. + * bits 0 - 2: max LTF repetitions + * bits 3 - 5: max number of spatial streams (supported values are < 2) + * bits 6 - 7: reserved + * @r2i_max_total_ltf: R2I Max Total LTFs for NDP ranging negotiation. + * One of &enum ieee80211_range_params_max_total_ltf. + * @i2r_max_total_ltf: I2R Max Total LTFs for NDP ranging negotiation. + * One of &enum ieee80211_range_params_max_total_ltf. + * @bss_color: the BSS color of the responder. Only valid if + * &IWL_INITIATOR_AP_FLAGS_NON_TB or &IWL_INITIATOR_AP_FLAGS_TB is set. + * @band: 0 for 5.2 GHz, 1 for 2.4 GHz, 2 for 6GHz + * @min_time_between_msr: For non trigger based NDP ranging, the minimum time + * between measurements in units of milliseconds + */ +struct iwl_tof_range_req_ap_entry_v9 { + __le32 initiator_ap_flags; + u8 channel_num; + u8 format_bw; + u8 ctrl_ch_position; + u8 ftmr_max_retries; + u8 bssid[ETH_ALEN]; + __le16 burst_period; + u8 samples_per_burst; + u8 num_of_bursts; + u8 sta_id; + u8 cipher; + u8 hltk[HLTK_11AZ_LEN]; + u8 tk[TK_11AZ_LEN]; + __le16 calib[IWL_TOF_BW_NUM]; + u16 beacon_interval; + u8 rx_pn[IEEE80211_CCMP_PN_LEN]; + u8 tx_pn[IEEE80211_CCMP_PN_LEN]; + u8 r2i_ndp_params; + u8 i2r_ndp_params; + u8 r2i_max_total_ltf; + u8 i2r_max_total_ltf; + u8 bss_color; + u8 band; + __le16 min_time_between_msr; +} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_9 */ + +/** * enum iwl_tof_response_mode * @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as * possible (not supported for this release) @@ -1043,6 +1194,34 @@ struct iwl_tof_range_req_cmd_v12 { struct iwl_tof_range_req_ap_entry_v8 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_12 */ +/** + * struct iwl_tof_range_req_cmd_v13 - start measurement cmd + * @initiator_flags: see flags @ iwl_tof_initiator_flags + * @request_id: A Token incremented per request. The same Token will be + * sent back in the range response + * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @range_req_bssid: ranging request BSSID + * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. + * Bits set to 1 shall be randomized by the UMAC + * @macaddr_template: MAC address template to use for non-randomized bits + * @req_timeout_ms: Requested timeout of the response in units of milliseconds. + * This is the session time for completing the measurement. + * @tsf_mac_id: report the measurement start time for each ap in terms of the + * TSF of this mac id. 0xff to disable TSF reporting. + * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v9. 
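Editor's note: these firmware command structs, the v9 entry above included, pin their wire format with __packed plus explicit little-endian field types such as __le16. A userspace analogue of what cpu_to_le16() and the packing together guarantee, using glibc's <endian.h>:

```c
#include <stdio.h>
#include <stdint.h>
#include <endian.h>

struct __attribute__((packed)) toy_cmd {
	uint8_t channel_num;
	uint16_t burst_period;	/* stored little-endian on the wire */
};

int main(void)
{
	struct toy_cmd cmd = { 36, htole16(200) };
	const uint8_t *raw = (const uint8_t *)&cmd;

	/* packed: exactly 3 bytes on every host, same byte order */
	printf("sizeof=%zu bytes: %02x %02x %02x\n",
	       sizeof(cmd), raw[0], raw[1], raw[2]);	/* 3: 24 c8 00 */
	return 0;
}
```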
+ */ +struct iwl_tof_range_req_cmd_v13 { + __le32 initiator_flags; + u8 request_id; + u8 num_of_ap; + u8 range_req_bssid[ETH_ALEN]; + u8 macaddr_mask[ETH_ALEN]; + u8 macaddr_template[ETH_ALEN]; + __le32 req_timeout_ms; + __le32 tsf_mac_id; + struct iwl_tof_range_req_ap_entry_v9 ap[IWL_MVM_TOF_MAX_APS]; +} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */ + /* * enum iwl_tof_range_request_status - status of the sent request * @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESSFUL - FW successfully received the diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h index 93084bbad534..7be7715b431d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_mac_h__ @@ -137,12 +137,14 @@ struct iwl_mac_data_ibss { * early termination detection. * @FLEXIBLE_TWT_SUPPORTED: AP supports flexible TWT schedule * @PROTECTED_TWT_SUPPORTED: AP supports protected TWT frames (with 11w) + * @BROADCAST_TWT_SUPPORTED: AP and STA support broadcast TWT */ enum iwl_mac_data_policy { TWT_SUPPORTED = BIT(0), MORE_DATA_ACK_SUPPORTED = BIT(1), FLEXIBLE_TWT_SUPPORTED = BIT(2), PROTECTED_TWT_SUPPORTED = BIT(3), + BROADCAST_TWT_SUPPORTED = BIT(4), }; /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h index f06214d418aa..5204aa94e72a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h @@ -3,6 +3,7 @@ * Copyright (C) 2012-2014 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH + * Copyright (C) 2021 Intel Corporation */ #ifndef __iwl_fw_api_offload_h__ #define __iwl_fw_api_offload_h__ @@ -20,7 +21,7 @@ enum iwl_prot_offload_subcmd_ids { #define MAX_STORED_BEACON_SIZE 600 /** - * struct iwl_stored_beacon_notif - Stored beacon notification + * struct iwl_stored_beacon_notif_common - Stored beacon notif common fields * * @system_time: system time on air rise * @tsf: TSF on air rise @@ -29,9 +30,8 @@ enum iwl_prot_offload_subcmd_ids { * @channel: channel this beacon was received on * @rates: rate in ucode internal format * @byte_count: frame's byte count - * @data: beacon data, length in @byte_count */ -struct iwl_stored_beacon_notif { +struct iwl_stored_beacon_notif_common { __le32 system_time; __le64 tsf; __le32 beacon_timestamp; @@ -39,7 +39,32 @@ struct iwl_stored_beacon_notif { __le16 channel; __le32 rates; __le32 byte_count; +} __packed; + +/** + * struct iwl_stored_beacon_notif - Stored beacon notification + * + * @common: fields common for all versions + * @data: beacon data, length in @byte_count + */ +struct iwl_stored_beacon_notif_v2 { + struct iwl_stored_beacon_notif_common common; u8 data[MAX_STORED_BEACON_SIZE]; } __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */ +/** + * struct iwl_stored_beacon_notif_v3 - Stored beacon notification + * + * @common: fields common for all versions + * @sta_id: station for which the beacon was received + * @reserved: reserved for alignment + * @data: beacon data, length in @byte_count + */ +struct iwl_stored_beacon_notif_v3 { + struct iwl_stored_beacon_notif_common common; + u8 sta_id; + u8 reserved[3]; + u8 
data[MAX_STORED_BEACON_SIZE]; +} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_3 */ + #endif /* __iwl_fw_api_offload_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index b2605aefc290..8b200379f7c2 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -874,7 +874,7 @@ struct iwl_scan_probe_params_v3 { u8 reserved; struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; __le32 short_ssid[SCAN_SHORT_SSID_MAX_SIZE]; - u8 bssid_array[ETH_ALEN][SCAN_BSSID_MAX_SIZE]; + u8 bssid_array[SCAN_BSSID_MAX_SIZE][ETH_ALEN]; } __packed; /* SCAN_PROBE_PARAMS_API_S_VER_3 */ /** @@ -894,7 +894,7 @@ struct iwl_scan_probe_params_v4 { __le16 reserved; struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; __le32 short_ssid[SCAN_SHORT_SSID_MAX_SIZE]; - u8 bssid_array[ETH_ALEN][SCAN_BSSID_MAX_SIZE]; + u8 bssid_array[SCAN_BSSID_MAX_SIZE][ETH_ALEN]; } __packed; /* SCAN_PROBE_PARAMS_API_S_VER_4 */ #define SCAN_MAX_NUM_CHANS_V3 67 diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h index 12b2f2c48387..f1a3e14880e7 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -384,13 +384,17 @@ struct iwl_mvm_add_sta_key_cmd_v1 { * @rx_mic_key: TKIP RX unicast or multicast key * @tx_mic_key: TKIP TX key * @transmit_seq_cnt: TSC, transmit packet number + * + * Note: This is used for both v2 and v3, the difference being + * in the way the common.rx_secur_seq_cnt is used, in v2 that's + * the strange hole format, in v3 it's just a u64. 
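Editor's note: the scan.h fix above swaps the bssid_array dimensions to [SCAN_BSSID_MAX_SIZE][ETH_ALEN]. With the old order, entry i was not a contiguous MAC address: row i was SCAN_BSSID_MAX_SIZE bytes wide and held byte i of every BSSID, so anything copied into bssid_array[i] landed interleaved. The corrected layout, demonstrated:

```c
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define SCAN_BSSID_MAX_SIZE 16

int main(void)
{
	unsigned char bssids[SCAN_BSSID_MAX_SIZE][ETH_ALEN];
	const unsigned char mac[ETH_ALEN] = { 0x00, 0x11, 0x22,
					      0x33, 0x44, 0x55 };

	/* entry 1 is a contiguous 6-byte MAC, as the firmware expects */
	memcpy(bssids[1], mac, ETH_ALEN);
	printf("entry 1 starts at offset %zu (expected %d)\n",
	       (size_t)(bssids[1] - bssids[0]), ETH_ALEN);

	/* with [ETH_ALEN][SCAN_BSSID_MAX_SIZE], that stride would have
	 * been 16, and bssids[1] would hold byte 1 of every address */
	return 0;
}
```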
*/ struct iwl_mvm_add_sta_key_cmd { struct iwl_mvm_add_sta_key_common common; __le64 rx_mic_key; __le64 tx_mic_key; __le64 transmit_seq_cnt; -} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2 */ +} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2, ADD_MODIFY_STA_KEY_API_S_VER_3 */ /** * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index df7c55e06f54..6dcafd0a3d4b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -1517,6 +1517,37 @@ iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt, return sizeof(*range) + le32_to_cpu(range->range_data_size); } +static int +iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data, + void *range_ptr, int idx) +{ + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + struct iwl_fw_ini_error_dump_range *range = range_ptr; + __le32 *val = range->data; + u32 prph_data; + int i; + + if (!iwl_trans_grab_nic_access(fwrt->trans)) + return -EBUSY; + + range->range_data_size = reg->dev_addr.size; + iwl_write_prph_no_grab(fwrt->trans, DBGI_SRAM_TARGET_ACCESS_CFG, + DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK); + for (i = 0; i < (le32_to_cpu(reg->dev_addr.size) / 4); i++) { + prph_data = iwl_read_prph(fwrt->trans, (i % 2) ? + DBGI_SRAM_TARGET_ACCESS_RDATA_MSB : + DBGI_SRAM_TARGET_ACCESS_RDATA_LSB); + if (prph_data == 0x5a5a5a5a) { + iwl_trans_release_nic_access(fwrt->trans); + return -EBUSY; + } + *val++ = cpu_to_le32(prph_data); + } + iwl_trans_release_nic_access(fwrt->trans); + return sizeof(*range) + le32_to_cpu(range->range_data_size); +} + static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, int idx) @@ -1547,7 +1578,7 @@ iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt, dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER); - return dump->ranges; + return dump->data; } /** @@ -1611,7 +1642,7 @@ iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, data->header.version = cpu_to_le32(IWL_INI_DUMP_VER); - return data->ranges; + return data->data; } static void * @@ -1647,7 +1678,7 @@ iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt, dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER); dump->version = reg->err_table.version; - return dump->ranges; + return dump->data; } static void * @@ -1662,7 +1693,7 @@ iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt, dump->type = reg->special_mem.type; dump->version = reg->special_mem.version; - return dump->ranges; + return dump->data; } static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt, @@ -2189,6 +2220,12 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = { .fill_mem_hdr = iwl_dump_ini_special_mem_fill_header, .fill_range = iwl_dump_ini_special_mem_iter, }, + [IWL_FW_INI_REGION_DBGI_SRAM] = { + .get_num_of_ranges = iwl_dump_ini_mem_ranges, + .get_size = iwl_dump_ini_mem_get_size, + .fill_mem_hdr = iwl_dump_ini_mem_fill_header, + .fill_range = iwl_dump_ini_dbgi_sram_iter, + }, }; static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, @@ -2321,7 +2358,7 @@ static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, return; if (dump_data->monitor_only) - dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR; + dump_mask &= BIT(IWL_FW_ERROR_DUMP_FW_MONITOR); fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask); file_len = 
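Editor's note: in iwl_fw_error_dump() above, the monitor-only filter now masks with BIT(IWL_FW_ERROR_DUMP_FW_MONITOR). The enum is a dump-section index while dump_mask is a bitmap of sections, so masking with the raw index kept an unrelated bit. Reproduced with toy enum values (the real values are not shown in the diff):

```c
#include <stdio.h>

#define BIT(n) (1u << (n))

enum { DUMP_CSR = 1, DUMP_FW_MONITOR = 2, DUMP_MEM = 3 };

int main(void)
{
	unsigned int mask = BIT(DUMP_CSR) | BIT(DUMP_FW_MONITOR) |
			    BIT(DUMP_MEM);

	/* 0x2, which is BIT(DUMP_CSR): the wrong section survives */
	printf("wrong: %#x\n", mask & DUMP_FW_MONITOR);
	/* 0x4, the monitor bit itself */
	printf("right: %#x\n", mask & BIT(DUMP_FW_MONITOR));
	return 0;
}
```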
le32_to_cpu(dump_file->file_len); @@ -2530,51 +2567,6 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect); -int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, - struct iwl_fwrt_dump_data *dump_data) -{ - struct iwl_fw_ini_trigger_tlv *trig = dump_data->trig; - enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point); - u32 occur, delay; - unsigned long idx; - - if (!iwl_fw_ini_trigger_on(fwrt, trig)) { - IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n", - tp_id); - return -EINVAL; - } - - delay = le32_to_cpu(trig->dump_delay); - occur = le32_to_cpu(trig->occurrences); - if (!occur) - return 0; - - trig->occurrences = cpu_to_le32(--occur); - - /* Check there is an available worker. - * ffz return value is undefined if no zero exists, - * so check against ~0UL first. - */ - if (fwrt->dump.active_wks == ~0UL) - return -EBUSY; - - idx = ffz(fwrt->dump.active_wks); - - if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM || - test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks)) - return -EBUSY; - - fwrt->dump.wks[idx].dump_data = *dump_data; - - IWL_WARN(fwrt, - "WRT: Collecting data: ini trigger %d fired (delay=%dms).\n", - tp_id, (u32)(delay / USEC_PER_MSEC)); - - schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay)); - - return 0; -} - int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trigger, const char *fmt, ...) @@ -2703,6 +2695,58 @@ out: clear_bit(wk_idx, &fwrt->dump.active_wks); } +int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, + struct iwl_fwrt_dump_data *dump_data, + bool sync) +{ + struct iwl_fw_ini_trigger_tlv *trig = dump_data->trig; + enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point); + u32 occur, delay; + unsigned long idx; + + if (!iwl_fw_ini_trigger_on(fwrt, trig)) { + IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n", + tp_id); + return -EINVAL; + } + + delay = le32_to_cpu(trig->dump_delay); + occur = le32_to_cpu(trig->occurrences); + if (!occur) + return 0; + + trig->occurrences = cpu_to_le32(--occur); + + /* Check there is an available worker. + * ffz return value is undefined if no zero exists, + * so check against ~0UL first. 
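The slot-claiming pattern in the relocated iwl_fw_dbg_ini_collect() above is subtle enough to deserve a standalone sketch. This userspace version (MAX_WKS and the helper names are made up for illustration) mirrors the ~0UL guard and the bounds check; note the driver uses test_and_set_bit() where this sketch sets the bit non-atomically:

#include <stdio.h>

#define MAX_WKS 5	/* stands in for IWL_FW_RUNTIME_DUMP_WK_NUM */

/* ffz(): index of the first zero bit; undefined when no zero exists */
static unsigned long ffz_sketch(unsigned long x)
{
	return (unsigned long)__builtin_ctzl(~x);
}

static long claim_slot(unsigned long *active)
{
	unsigned long idx;

	if (*active == ~0UL)	/* all ones: ffz() would be undefined */
		return -1;

	idx = ffz_sketch(*active);
	if (idx >= MAX_WKS)	/* only MAX_WKS slots really exist */
		return -1;

	/* the driver uses test_and_set_bit() here to close the race */
	*active |= 1UL << idx;
	return (long)idx;
}

int main(void)
{
	unsigned long active = 0;
	long a = claim_slot(&active);
	long b = claim_slot(&active);

	printf("claimed %ld then %ld\n", a, b);
	return 0;
}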
+ */ + if (fwrt->dump.active_wks == ~0UL) + return -EBUSY; + + idx = ffz(fwrt->dump.active_wks); + + if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM || + test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks)) + return -EBUSY; + + fwrt->dump.wks[idx].dump_data = *dump_data; + + if (sync) + delay = 0; + + IWL_WARN(fwrt, + "WRT: Collecting data: ini trigger %d fired (delay=%dms).\n", + tp_id, (u32)(delay / USEC_PER_MSEC)); + + schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay)); + + if (sync) + iwl_fw_dbg_collect_sync(fwrt, idx); + + return 0; +} + void iwl_fw_error_dump_wk(struct work_struct *work) { struct iwl_fwrt_wk_data *wks = diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index c0e84ef84f5d..8c3c890066b0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -46,7 +46,8 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig_type); int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, - struct iwl_fwrt_dump_data *dump_data); + struct iwl_fwrt_dump_data *dump_data, + bool sync); int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig, const char *str, size_t len, struct iwl_fw_dbg_trigger_tlv *trigger); @@ -284,7 +285,7 @@ static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans, trans->dbg.umac_error_event_table = umac_error_event_table; } -static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt) +static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt, bool sync) { enum iwl_fw_ini_time_point tp_id; @@ -300,7 +301,7 @@ static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt) tp_id = IWL_FW_INI_TIME_POINT_FW_ASSERT; } - iwl_dbg_tlv_time_point(fwrt, tp_id, NULL); + _iwl_dbg_tlv_time_point(fwrt, tp_id, NULL, sync); } void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index 9fffac903b93..521ca2bb0e92 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2014, 2018-2020 Intel Corporation + * Copyright (C) 2014, 2018-2021 Intel Corporation * Copyright (C) 2014-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -305,11 +305,12 @@ struct iwl_fw_ini_error_dump_header { /** * struct iwl_fw_ini_error_dump - ini region dump * @header: the header of this region - * @ranges: the memory ranges of this region + * @data: data of memory ranges in this region, + * see &struct iwl_fw_ini_error_dump_range */ struct iwl_fw_ini_error_dump { struct iwl_fw_ini_error_dump_header header; - struct iwl_fw_ini_error_dump_range ranges[]; + u8 data[]; } __packed; /* This bit is used to differentiate between lmac and umac rxf */ @@ -399,12 +400,13 @@ struct iwl_fw_ini_dump_info { * struct iwl_fw_ini_err_table_dump - ini error table dump * @header: header of the region * @version: error table version - * @ranges: the memory ranges of this this region + * @data: data of memory ranges in this region, + * see &struct iwl_fw_ini_error_dump_range */ struct iwl_fw_ini_err_table_dump { struct iwl_fw_ini_error_dump_header header; __le32 version; - struct iwl_fw_ini_error_dump_range ranges[]; + u8 data[]; 
} __packed; /** @@ -427,14 +429,15 @@ struct iwl_fw_error_dump_rb { * @write_ptr: write pointer position in the buffer * @cycle_cnt: cycles count * @cur_frag: current fragment in use - * @ranges: the memory ranges of this this region + * @data: data of memory ranges in this region, + * see &struct iwl_fw_ini_error_dump_range */ struct iwl_fw_ini_monitor_dump { struct iwl_fw_ini_error_dump_header header; __le32 write_ptr; __le32 cycle_cnt; __le32 cur_frag; - struct iwl_fw_ini_error_dump_range ranges[]; + u8 data[]; } __packed; /** @@ -442,13 +445,14 @@ struct iwl_fw_ini_monitor_dump { * @header: header of the region * @type: type of special memory * @version: struct special memory version - * @ranges: the memory ranges of this this region + * @data: data of memory ranges in this region, + * see &struct iwl_fw_ini_error_dump_range */ struct iwl_fw_ini_special_device_memory { struct iwl_fw_ini_error_dump_header header; __le16 type; __le16 version; - struct iwl_fw_ini_error_dump_range ranges[]; + u8 data[]; } __packed; /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 9a8c7b7a0816..6c8e9f3a6af2 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -414,6 +414,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_PROTECTED_TWT = (__force iwl_ucode_tlv_capa_t)56, IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE = (__force iwl_ucode_tlv_capa_t)57, IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)58, + IWL_UCODE_TLV_CAPA_HIDDEN_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)59, IWL_UCODE_TLV_CAPA_BROADCAST_TWT = (__force iwl_ucode_tlv_capa_t)60, /* set 2 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c index b4b1f75b9c2a..314ed90c23dd 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c @@ -24,7 +24,7 @@ static bool iwl_pnvm_complete_fn(struct iwl_notif_wait_data *notif_wait, struct iwl_pnvm_init_complete_ntfy *pnvm_ntf = (void *)pkt->data; IWL_DEBUG_FW(trans, - "PNVM complete notification received with status %d\n", + "PNVM complete notification received with status 0x%0x\n", le32_to_cpu(pnvm_ntf->status)); return true; @@ -230,19 +230,10 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data, static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len) { const struct firmware *pnvm; - char pnvm_name[64]; + char pnvm_name[MAX_PNVM_NAME]; int ret; - /* - * The prefix unfortunately includes a hyphen at the end, so - * don't add the dot here... - */ - snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm", - trans->cfg->fw_name_pre); - - /* ...but replace the hyphen with the dot here. 
*/ - if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name)) - pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.'; + iwl_pnvm_get_fs_name(trans, pnvm_name, sizeof(pnvm_name)); ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev); if (ret) { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h index 61d3d4e0b7d9..203c367dd4de 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h @@ -12,7 +12,27 @@ #define MVM_UCODE_PNVM_TIMEOUT (HZ / 4) +#define MAX_PNVM_NAME 64 + int iwl_pnvm_load(struct iwl_trans *trans, struct iwl_notif_wait_data *notif_wait); +static inline +void iwl_pnvm_get_fs_name(struct iwl_trans *trans, + u8 *pnvm_name, size_t max_len) +{ + int pre_len; + + /* + * The prefix unfortunately includes a hyphen at the end, so + * don't add the dot here... + */ + snprintf(pnvm_name, max_len, "%spnvm", trans->cfg->fw_name_pre); + + /* ...but replace the hyphen with the dot here. */ + pre_len = strlen(trans->cfg->fw_name_pre); + if (pre_len < max_len && pre_len > 0) + pnvm_name[pre_len - 1] = '.'; +} + #endif /* __IWL_PNVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index bf6ee56d4d96..7eb534df5331 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -33,6 +33,7 @@ enum iwl_device_family { IWL_DEVICE_FAMILY_9000, IWL_DEVICE_FAMILY_22000, IWL_DEVICE_FAMILY_AX210, + IWL_DEVICE_FAMILY_BZ, }; /* @@ -321,7 +322,7 @@ struct iwl_fw_mon_regs { * @host_interrupt_operation_mode: device needs host interrupt operation * mode set * @nvm_hw_section_num: the ID of the HW NVM section - * @mac_addr_from_csr: read HW address from CSR registers + * @mac_addr_from_csr: read HW address from CSR registers at this offset * @features: hw features, any combination of feature_passlist * @pwr_tx_backoffs: translation table between power limits and backoffs * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response @@ -343,6 +344,8 @@ struct iwl_fw_mon_regs { * supports 256 BA aggregation * @num_rbds: number of receive buffer descriptors to use * (only used for multi-queue capable devices) + * @mac_addr_csr_base: CSR base register for MAC address access, if not set + * assume 0x380 * * We enable the driver to be backward compatible wrt. hardware features. 
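The iwl_pnvm_get_fs_name() helper above encodes a slightly odd convention: the firmware-name prefix already ends in a hyphen, which must become a dot before the "pnvm" suffix. A minimal userspace sketch, assuming a plausible prefix string purely for illustration:

#include <stdio.h>
#include <string.h>

#define MAX_PNVM_NAME 64

/* fw_name_pre conventionally ends with a hyphen */
static void pnvm_name(char *out, size_t max_len, const char *fw_name_pre)
{
	size_t pre_len = strlen(fw_name_pre);

	snprintf(out, max_len, "%spnvm", fw_name_pre);
	/* turn the trailing hyphen into a dot before the suffix */
	if (pre_len > 0 && pre_len < max_len)
		out[pre_len - 1] = '.';
}

int main(void)
{
	char name[MAX_PNVM_NAME];

	pnvm_name(name, sizeof(name), "iwlwifi-ty-a0-gf-a0-");
	puts(name);	/* prints: iwlwifi-ty-a0-gf-a0.pnvm */
	return 0;
}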
* API differences in uCode shouldn't be handled here but through TLVs @@ -378,7 +381,7 @@ struct iwl_cfg { internal_wimax_coex:1, host_interrupt_operation_mode:1, high_temp:1, - mac_addr_from_csr:1, + mac_addr_from_csr:10, lp_xtal_workaround:1, disable_dummy_notification:1, apmg_not_supported:1, @@ -512,6 +515,7 @@ extern const char iwl_ax211_name[]; extern const char iwl_ax221_name[]; extern const char iwl_ax231_name[]; extern const char iwl_ax411_name[]; +extern const char iwl_bz_name[]; #if IS_ENABLED(CONFIG_IWLDVM) extern const struct iwl_cfg iwl5300_agn_cfg; extern const struct iwl_cfg iwl5100_agn_cfg; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 47e5a17c0f48..cf796403c45c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -104,6 +104,10 @@ /* GIO Chicken Bits (PCI Express bus link power management) */ #define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100) +/* Doorbell NMI (since Bz) */ +#define CSR_DOORBELL_VECTOR (CSR_BASE + 0x130) +#define CSR_DOORBELL_VECTOR_NMI BIT(1) + /* host chicken bits */ #define CSR_HOST_CHICKEN (CSR_BASE + 0x204) #define CSR_HOST_CHICKEN_PM_IDLE_SRC_DIS_SB_PME BIT(19) @@ -266,6 +270,14 @@ #define CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN (0x04000000) #define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) +/* From Bz we use these instead during init/reset flow */ +#define CSR_GP_CNTRL_REG_FLAG_MAC_INIT BIT(6) +#define CSR_GP_CNTRL_REG_FLAG_ROM_START BIT(7) +#define CSR_GP_CNTRL_REG_FLAG_MAC_STATUS BIT(20) +#define CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ BIT(21) +#define CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS BIT(28) +#define CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ BIT(29) +#define CSR_GP_CNTRL_REG_FLAG_SW_RESET BIT(31) /* HW REV */ #define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0) @@ -604,10 +616,10 @@ enum msix_hw_int_causes { * HW address related registers * *****************************************************************************/ -#define CSR_ADDR_BASE (0x380) -#define CSR_MAC_ADDR0_OTP (CSR_ADDR_BASE) -#define CSR_MAC_ADDR1_OTP (CSR_ADDR_BASE + 4) -#define CSR_MAC_ADDR0_STRAP (CSR_ADDR_BASE + 8) -#define CSR_MAC_ADDR1_STRAP (CSR_ADDR_BASE + 0xC) +#define CSR_ADDR_BASE(trans) ((trans)->cfg->mac_addr_from_csr) +#define CSR_MAC_ADDR0_OTP(trans) (CSR_ADDR_BASE(trans) + 0x00) +#define CSR_MAC_ADDR1_OTP(trans) (CSR_ADDR_BASE(trans) + 0x04) +#define CSR_MAC_ADDR0_STRAP(trans) (CSR_ADDR_BASE(trans) + 0x08) +#define CSR_MAC_ADDR1_STRAP(trans) (CSR_ADDR_BASE(trans) + 0x0c) #endif /* !__iwl_csr_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 0ddd255a8cc1..125479b5c0d6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -131,8 +131,7 @@ static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans, goto err; if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH && - alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1 && - alloc_id != IWL_FW_INI_ALLOCATION_ID_INTERNAL) + alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) goto err; trans->dbg.fw_mon_cfg[alloc_id] = *alloc; @@ -435,13 +434,16 @@ static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data, void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans) { const struct firmware *fw; + const char *yoyo_bin = "iwl-debug-yoyo.bin"; int res; if (!iwlwifi_mod_params.enable_ini || trans->trans_cfg->device_family <= 
IWL_DEVICE_FAMILY_9000) return; - res = firmware_request_nowarn(&fw, "iwl-debug-yoyo.bin", dev); + res = firmware_request_nowarn(&fw, yoyo_bin, dev); + IWL_DEBUG_FW(trans, "%s %s\n", res ? "didn't load" : "loaded", yoyo_bin); + if (res) return; @@ -621,6 +623,7 @@ static int iwl_dbg_tlv_apply_buffer(struct iwl_fw_runtime *fwrt, .id = WIDE_ID(DEBUG_GROUP, BUFFER_ALLOCATION), .data[0] = &data, .len[0] = sizeof(data), + .flags = CMD_SEND_IN_RFKILL, }; int ret, j; @@ -683,7 +686,7 @@ static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t) }; int ret; - ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data); + ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data, false); if (!ret || ret == -EBUSY) { u32 occur = le32_to_cpu(dump_data.trig->occurrences); u32 collect_interval = le32_to_cpu(dump_data.trig->data[0]); @@ -927,7 +930,7 @@ static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt, } static int -iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, +iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync, struct list_head *active_trig_list, union iwl_dbg_tlv_tp_data *tp_data, bool (*data_check)(struct iwl_fw_runtime *fwrt, @@ -946,7 +949,7 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, int ret, i; if (!num_data) { - ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data); + ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync); if (ret) return ret; } @@ -955,7 +958,7 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, if (!data_check || data_check(fwrt, &dump_data, tp_data, le32_to_cpu(dump_data.trig->data[i]))) { - ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data); + ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync); if (ret) return ret; @@ -1043,9 +1046,10 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt) } } -void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, - enum iwl_fw_ini_time_point tp_id, - union iwl_dbg_tlv_tp_data *tp_data) +void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, + enum iwl_fw_ini_time_point tp_id, + union iwl_dbg_tlv_tp_data *tp_data, + bool sync) { struct list_head *hcmd_list, *trig_list; @@ -1060,12 +1064,12 @@ void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, switch (tp_id) { case IWL_FW_INI_TIME_POINT_EARLY: iwl_dbg_tlv_init_cfg(fwrt); - iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL); + iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); break; case IWL_FW_INI_TIME_POINT_AFTER_ALIVE: iwl_dbg_tlv_apply_buffers(fwrt); iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); - iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL); + iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); break; case IWL_FW_INI_TIME_POINT_PERIODIC: iwl_dbg_tlv_set_periodic_trigs(fwrt); @@ -1075,13 +1079,13 @@ void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, case IWL_FW_INI_TIME_POINT_MISSED_BEACONS: case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION: iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); - iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, + iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, iwl_dbg_tlv_check_fw_pkt); break; default: iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); - iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL); + iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); break; } } -IWL_EXPORT_SYMBOL(iwl_dbg_tlv_time_point); +IWL_EXPORT_SYMBOL(_iwl_dbg_tlv_time_point); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h index 92c720527946..c12b1fd3f479 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h 
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #ifndef __iwl_dbg_tlv_h__ #define __iwl_dbg_tlv_h__ @@ -48,9 +48,25 @@ void iwl_dbg_tlv_free(struct iwl_trans *trans); void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv, bool ext); void iwl_dbg_tlv_init(struct iwl_trans *trans); -void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, - enum iwl_fw_ini_time_point tp_id, - union iwl_dbg_tlv_tp_data *tp_data); +void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, + enum iwl_fw_ini_time_point tp_id, + union iwl_dbg_tlv_tp_data *tp_data, + bool sync); + +static inline void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, + enum iwl_fw_ini_time_point tp_id, + union iwl_dbg_tlv_tp_data *tp_data) +{ + _iwl_dbg_tlv_time_point(fwrt, tp_id, tp_data, false); +} + +static inline void iwl_dbg_tlv_time_point_sync(struct iwl_fw_runtime *fwrt, + enum iwl_fw_ini_time_point tp_id, + union iwl_dbg_tlv_tp_data *tp_data) +{ + _iwl_dbg_tlv_time_point(fwrt, tp_id, tp_data, true); +} + void iwl_dbg_tlv_del_timers(struct iwl_trans *trans); #endif /* __iwl_dbg_tlv_h__*/ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 977dce686bdb..77124b8b235e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -78,7 +78,7 @@ enum { }; /* Protects the table contents, i.e. the ops pointer & drv list */ -static struct mutex iwlwifi_opmode_table_mtx; +static DEFINE_MUTEX(iwlwifi_opmode_table_mtx); static struct iwlwifi_opmode_table { const char *name; /* name: iwldvm, iwlmvm, etc */ const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */ @@ -1754,8 +1754,6 @@ static int __init iwl_drv_init(void) { int i, err; - mutex_init(&iwlwifi_opmode_table_mtx); - for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c index 33d42e08d5b8..2517c4ae07ab 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2003-2014, 2018-2020 Intel Corporation + * Copyright (C) 2003-2014, 2018-2021 Intel Corporation * Copyright (C) 2015-2016 Intel Deutschland GmbH */ #include <linux/delay.h> @@ -213,9 +213,12 @@ void iwl_force_nmi(struct iwl_trans *trans) else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER, UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER); - else + else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, UREG_DOORBELL_TO_ISR6_NMI_BIT); + else + iwl_write32(trans, CSR_DOORBELL_VECTOR, + CSR_DOORBELL_VECTOR_NMI); } IWL_EXPORT_SYMBOL(iwl_force_nmi); @@ -398,6 +401,7 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf) int iwl_finish_nic_init(struct iwl_trans *trans, const struct iwl_cfg_trans_params *cfg_trans) { + u32 poll_ready; int err; if (cfg_trans->bisr_workaround) { @@ -409,7 +413,16 @@ int iwl_finish_nic_init(struct iwl_trans *trans, * Set "initialization complete" bit to move adapter from * D0U* --> D0A* (powered-up active) state. 
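The iwl-dbg-tlv.h hunk above is a common refactor shape: the exported function grows a sync parameter under a leading-underscore name, and two static inline wrappers keep every existing call site source-compatible. A minimal userspace sketch of that shape (names simplified):

#include <stdbool.h>
#include <stdio.h>

static void _time_point(int tp_id, bool sync)
{
	printf("time point %d (%s)\n", tp_id, sync ? "sync" : "async");
}

/* thin wrappers keep existing callers unchanged */
static inline void time_point(int tp_id)
{
	_time_point(tp_id, false);
}

static inline void time_point_sync(int tp_id)
{
	_time_point(tp_id, true);
}

int main(void)
{
	time_point(1);		/* old behavior, untouched call sites */
	time_point_sync(2);	/* new synchronous variant */
	return 0;
}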
*/ - iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_BZ) { + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + CSR_GP_CNTRL_REG_FLAG_MAC_INIT); + poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; + } else { + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY; + } if (cfg_trans->device_family == IWL_DEVICE_FAMILY_8000) udelay(2); @@ -419,10 +432,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans, * device-internal resources is supported, e.g. iwl_write_prph() * and accesses to uCode SRAM. */ - err = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - 25000); + err = iwl_poll_bit(trans, CSR_GP_CNTRL, poll_ready, poll_ready, 25000); if (err < 0) IWL_DEBUG_INFO(trans, "Failed to wake NIC\n"); @@ -468,5 +478,5 @@ void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr, if (interrupts_enabled) iwl_trans_interrupts(trans, true); - iwl_trans_fw_error(trans); + iwl_trans_fw_error(trans, false); } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 850648ebd61c..475f951d4b1e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -549,7 +549,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP, .mac_cap_info[3] = - IEEE80211_HE_MAC_CAP3_OMI_CONTROL, + IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU | IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39, @@ -568,7 +569,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD, .phy_cap_info[2] = - IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US, + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ, .phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM | IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 | @@ -595,6 +597,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB | IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED, + .phy_cap_info[10] = + IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF, }, /* * Set default Tx/Rx HE MCS NSS Support field. 
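The iwl_finish_nic_init() change above now chooses which bit to poll by device family but keeps the same poll-with-timeout contract. Roughly that contract, in a self-contained sketch (register reads are simulated; the real iwl_poll_bit() reads a CSR and returns a negative errno on timeout):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define FLAG_MAC_CLOCK_READY (1u << 0)
#define FLAG_MAC_STATUS      (1u << 20)

static uint32_t fake_reg;

static uint32_t read_reg(void)
{
	return fake_reg;
}

/* poll until (reg & mask) == (expect & mask); elapsed us, or -1 on timeout */
static int poll_bit(uint32_t mask, uint32_t expect, int timeout_us)
{
	int t = 0;

	do {
		if ((read_reg() & mask) == (expect & mask))
			return t;
		usleep(10);
		t += 10;
	} while (t < timeout_us);

	return -1;
}

int main(void)
{
	/* Bz parts poll MAC_STATUS; older parts poll MAC_CLOCK_READY */
	fake_reg = FLAG_MAC_STATUS;
	printf("ready after %d us\n",
	       poll_bit(FLAG_MAC_STATUS, FLAG_MAC_STATUS, 25000));
	return 0;
}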
@@ -634,6 +638,7 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD, .phy_cap_info[2] = + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US, .phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM | @@ -742,6 +747,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; if ((tx_chains & rx_chains) == ANT_AB) { + iftype_data->he_cap.he_cap_elem.phy_cap_info[2] |= + IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ; iftype_data->he_cap.he_cap_elem.phy_cap_info[5] |= IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 | IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2; @@ -958,8 +965,10 @@ static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest) static void iwl_set_hw_address_from_csr(struct iwl_trans *trans, struct iwl_nvm_data *data) { - __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP)); - __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP)); + __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, + CSR_MAC_ADDR0_STRAP(trans))); + __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, + CSR_MAC_ADDR1_STRAP(trans))); iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); /* @@ -969,8 +978,8 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans, if (is_valid_ether_addr(data->hw_addr)) return; - mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP)); - mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP)); + mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP(trans))); + mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP(trans))); iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); } @@ -1373,6 +1382,25 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, reg_query_regdb_wmm(regd->alpha2, center_freq, rule); } + /* + * Certain firmware versions might report no valid channels + * if booted in RF-kill, i.e. not all calibrations etc. are + * running. We'll get out of this situation later when the + * rfkill is removed and we update the regdomain again, but + * since cfg80211 doesn't accept an empty regdomain, add a + * dummy (unusable) rule here in this case so we can init. + */ + if (!valid_rules) { + valid_rules = 1; + rule = ®d->reg_rules[valid_rules - 1]; + rule->freq_range.start_freq_khz = MHZ_TO_KHZ(2412); + rule->freq_range.end_freq_khz = MHZ_TO_KHZ(2413); + rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(1); + rule->power_rule.max_antenna_gain = DBI_TO_MBI(6); + rule->power_rule.max_eirp = + DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER); + } + regd->n_reg_rules = valid_rules; /* diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h index cf9c64090014..af5f9b210f22 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h @@ -78,7 +78,7 @@ struct iwl_cfg; * there are Tx packets pending in the transport layer. * Must be atomic * @nic_error: error notification. Must be atomic and must be called with BH - * disabled. + * disabled, unless the sync parameter is true. * @cmd_queue_full: Called when the command queue gets full. Must be atomic and * called with BH disabled. * @nic_config: configure NIC, called before firmware is started. 
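The nvm-parse hunk above handles an RF-kill corner case by synthesizing one deliberately unusable rule rather than handing cfg80211 an empty regdomain. The shape of that fallback, as a standalone sketch with simplified struct fields (the real rule also carries antenna-gain and EIRP limits):

#include <stdio.h>

#define MHZ_TO_KHZ(f) ((f) * 1000)

struct reg_rule {
	int start_khz, end_khz, max_bw_khz;
};

int main(void)
{
	int valid_rules = 0;	/* firmware reported no usable channels */
	struct reg_rule rules[1];

	if (!valid_rules) {
		/*
		 * cfg80211 rejects an empty regdomain, so install one
		 * placeholder rule: a 1 MHz-wide, 1 MHz-max-bandwidth
		 * band that no channel can actually use
		 */
		rules[0] = (struct reg_rule){
			.start_khz = MHZ_TO_KHZ(2412),
			.end_khz = MHZ_TO_KHZ(2413),
			.max_bw_khz = MHZ_TO_KHZ(1),
		};
		valid_rules = 1;
	}

	printf("n_reg_rules = %d, rule 0: %d-%d kHz\n",
	       valid_rules, rules[0].start_khz, rules[0].end_khz);
	return 0;
}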
@@ -102,7 +102,7 @@ struct iwl_op_mode_ops { void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue); bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state); void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb); - void (*nic_error)(struct iwl_op_mode *op_mode); + void (*nic_error)(struct iwl_op_mode *op_mode, bool sync); void (*cmd_queue_full)(struct iwl_op_mode *op_mode); void (*nic_config)(struct iwl_op_mode *op_mode); void (*wimax_active)(struct iwl_op_mode *op_mode); @@ -181,9 +181,9 @@ static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode, op_mode->ops->free_skb(op_mode, skb); } -static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode) +static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode, bool sync) { - op_mode->ops->nic_error(op_mode); + op_mode->ops->nic_error(op_mode, sync); } static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 9a9e714bf9af..d0a7d58336a9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -348,6 +348,13 @@ #define RFIC_REG_RD 0xAD0470 #define WFPM_CTRL_REG 0xA03030 #define WFPM_GP2 0xA030B4 + +/* DBGI SRAM Register details */ +#define DBGI_SRAM_TARGET_ACCESS_CFG 0x00A2E14C +#define DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK 0x10000 +#define DBGI_SRAM_TARGET_ACCESS_RDATA_LSB 0x00A2E154 +#define DBGI_SRAM_TARGET_ACCESS_RDATA_MSB 0x00A2E158 + enum { ENABLE_WFPM = BIT(31), WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 0199d7a5a648..8f0ff540f439 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -887,7 +887,7 @@ struct iwl_trans_txqs { bool bc_table_dword; u8 page_offs; u8 dev_cmd_offs; - struct __percpu iwl_tso_hdr_page * tso_hdr_page; + struct iwl_tso_hdr_page __percpu *tso_hdr_page; struct { u8 fifo; @@ -1385,14 +1385,14 @@ iwl_trans_release_nic_access(struct iwl_trans *trans) __release(nic_access); } -static inline void iwl_trans_fw_error(struct iwl_trans *trans) +static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync) { if (WARN_ON_ONCE(!trans->op_mode)) return; /* prevent double restarts due to the same erroneous FW */ if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) { - iwl_op_mode_nic_error(trans->op_mode); + iwl_op_mode_nic_error(trans->op_mode, sync); trans->state = IWL_TRANS_NO_FW; } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 1343f25f1090..9d0d01f27d92 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2013-2015 Intel Mobile Communications GmbH - * Copyright (C) 2013-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2014, 2018-2021 Intel Corporation * Copyright (C) 2015 Intel Deutschland GmbH */ #ifndef __MVM_CONSTANTS_H @@ -93,6 +93,7 @@ #define IWL_MVM_ENABLE_EBS 1 #define IWL_MVM_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE #define IWL_MVM_FTM_INITIATOR_DYNACK true +#define IWL_MVM_FTM_LMR_FEEDBACK_TERMINATE false #define IWL_MVM_FTM_R2I_MAX_REP 7 #define IWL_MVM_FTM_I2R_MAX_REP 7 #define IWL_MVM_FTM_R2I_MAX_STS 1 @@ -102,6 +103,8 @@ #define 
IWL_MVM_FTM_INITIATOR_SECURE_LTF false #define IWL_MVM_FTM_RESP_NDP_SUPPORT true #define IWL_MVM_FTM_RESP_LMR_FEEDBACK_SUPPORT true +#define IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 5 +#define IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000 #define IWL_MVM_D3_DEBUG false #define IWL_MVM_USE_TWT true #define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 6a259d867d90..0e97d5e6c644 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -101,11 +101,8 @@ static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key, return ret; } -struct wowlan_key_data { - struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc; - struct iwl_wowlan_tkip_params_cmd *tkip; - struct iwl_wowlan_kek_kck_material_cmd_v4 *kek_kck_cmd; - bool error, use_rsc_tsc, use_tkip, configure_keys; +struct wowlan_key_reprogram_data { + bool error; int wep_key_idx; }; @@ -117,15 +114,8 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct wowlan_key_data *data = _data; - struct aes_sc *aes_sc, *aes_tx_sc = NULL; - struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL; - struct iwl_p1k_cache *rx_p1ks; - u8 *rx_mic_key; - struct ieee80211_key_seq seq; - u32 cur_rx_iv32 = 0; - u16 p1k[IWL_P1K_SIZE]; - int ret, i; + struct wowlan_key_reprogram_data *data = _data; + int ret; switch (key->cipher) { case WLAN_CIPHER_SUITE_WEP40: @@ -162,18 +152,14 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, wkc.wep_key.key_offset = data->wep_key_idx; } - if (data->configure_keys) { - mutex_lock(&mvm->mutex); - ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, - sizeof(wkc), &wkc); - data->error = ret != 0; - - mvm->ptk_ivlen = key->iv_len; - mvm->ptk_icvlen = key->icv_len; - mvm->gtk_ivlen = key->iv_len; - mvm->gtk_icvlen = key->icv_len; - mutex_unlock(&mvm->mutex); - } + mutex_lock(&mvm->mutex); + ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, sizeof(wkc), &wkc); + data->error = ret != 0; + + mvm->ptk_ivlen = key->iv_len; + mvm->ptk_icvlen = key->icv_len; + mvm->gtk_ivlen = key->iv_len; + mvm->gtk_icvlen = key->icv_len; /* don't upload key again */ return; @@ -183,10 +169,8 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, return; case WLAN_CIPHER_SUITE_BIP_GMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: - data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP); return; case WLAN_CIPHER_SUITE_AES_CMAC: - data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM); /* * Ignore CMAC keys -- the WoWLAN firmware doesn't support them * but we also shouldn't abort suspend due to that. It does have @@ -196,6 +180,58 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, */ return; case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + break; + } + + mutex_lock(&mvm->mutex); + /* + * The D3 firmware hardcodes the key offset 0 as the key it + * uses to transmit packets to the AP, i.e. the PTK. + */ + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { + mvm->ptk_ivlen = key->iv_len; + mvm->ptk_icvlen = key->icv_len; + ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0); + } else { + /* + * firmware only supports TSC/RSC for a single key, + * so if there are multiple keep overwriting them + * with new ones -- this relies on mac80211 doing + * list_add_tail(). 
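The comment above captures the whole D3 key-slot convention; condensed into a sketch (the function name is made up for illustration):

#include <stdbool.h>
#include <stdio.h>

/*
 * D3 firmware hardcodes slot 0 for the key used to transmit to the
 * AP (the PTK); group keys share slot 1, the most recently iterated
 * GTK winning because mac80211 appends new keys with list_add_tail().
 */
static int pick_d3_key_offset(bool pairwise)
{
	return pairwise ? 0 : 1;
}

int main(void)
{
	printf("PTK -> slot %d, GTK -> slot %d\n",
	       pick_d3_key_offset(true), pick_d3_key_offset(false));
	return 0;
}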
+ */ + mvm->gtk_ivlen = key->iv_len; + mvm->gtk_icvlen = key->icv_len; + ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1); + } + mutex_unlock(&mvm->mutex); + data->error = ret != 0; +} + +struct wowlan_key_rsc_tsc_data { + struct iwl_wowlan_rsc_tsc_params_cmd_v4 *rsc_tsc; + bool have_rsc_tsc; +}; + +static void iwl_mvm_wowlan_get_rsc_tsc_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct wowlan_key_rsc_tsc_data *data = _data; + struct aes_sc *aes_sc; + struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL; + struct ieee80211_key_seq seq; + int i; + + switch (key->cipher) { + default: + break; + case WLAN_CIPHER_SUITE_TKIP: if (sta) { u64 pn64; @@ -204,28 +240,12 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, tkip_tx_sc = &data->rsc_tsc->params.all_tsc_rsc.tkip.tsc; - rx_p1ks = data->tkip->rx_uni; - pn64 = atomic64_read(&key->tx_pn); tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64)); tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64)); - - ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64), - p1k); - iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k); - - memcpy(data->tkip->mic_keys.tx, - &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], - IWL_MIC_KEY_SIZE); - - rx_mic_key = data->tkip->mic_keys.rx_unicast; } else { tkip_sc = data->rsc_tsc->params.all_tsc_rsc.tkip.multicast_rsc; - rx_p1ks = data->tkip->rx_multi; - rx_mic_key = data->tkip->mic_keys.rx_mcast; - data->kek_kck_cmd->gtk_cipher = - cpu_to_le32(STA_KEY_FLG_TKIP); } /* @@ -237,29 +257,15 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, ieee80211_get_key_rx_seq(key, i, &seq); tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16); tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32); - /* wrapping isn't allowed, AP must rekey */ - if (seq.tkip.iv32 > cur_rx_iv32) - cur_rx_iv32 = seq.tkip.iv32; } - ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, - cur_rx_iv32, p1k); - iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k); - ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, - cur_rx_iv32 + 1, p1k); - iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k); - - memcpy(rx_mic_key, - &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], - IWL_MIC_KEY_SIZE); - - data->use_tkip = true; - data->use_rsc_tsc = true; + data->have_rsc_tsc = true; break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (sta) { + struct aes_sc *aes_tx_sc; u64 pn64; aes_sc = @@ -272,10 +278,6 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, } else { aes_sc = data->rsc_tsc->params.all_tsc_rsc.aes.multicast_rsc; - data->kek_kck_cmd->gtk_cipher = - key->cipher == WLAN_CIPHER_SUITE_CCMP ? 
- cpu_to_le32(STA_KEY_FLG_CCM) : - cpu_to_le32(STA_KEY_FLG_GCMP); } /* @@ -320,35 +322,301 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, ((u64)pn[0] << 40)); } } - data->use_rsc_tsc = true; + data->have_rsc_tsc = true; break; } +} - IWL_DEBUG_WOWLAN(mvm, "GTK cipher %d\n", data->kek_kck_cmd->gtk_cipher); +struct wowlan_key_rsc_v5_data { + struct iwl_wowlan_rsc_tsc_params_cmd *rsc; + bool have_rsc; + int gtks; + int gtk_ids[4]; +}; - if (data->configure_keys) { - mutex_lock(&mvm->mutex); +static void iwl_mvm_wowlan_get_rsc_v5_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct wowlan_key_rsc_v5_data *data = _data; + struct ieee80211_key_seq seq; + __le64 *rsc; + int i; + + /* only for ciphers that can be PTK/GTK */ + switch (key->cipher) { + default: + return; + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + break; + } + + if (sta) { + rsc = data->rsc->ucast_rsc; + } else { + if (WARN_ON(data->gtks > ARRAY_SIZE(data->gtk_ids))) + return; + data->gtk_ids[data->gtks] = key->keyidx; + rsc = data->rsc->mcast_rsc[data->gtks % 2]; + if (WARN_ON(key->keyidx > + ARRAY_SIZE(data->rsc->mcast_key_id_map))) + return; + data->rsc->mcast_key_id_map[key->keyidx] = data->gtks % 2; + if (data->gtks >= 2) { + int prev = data->gtks - 2; + int prev_idx = data->gtk_ids[prev]; + + data->rsc->mcast_key_id_map[prev_idx] = + IWL_MCAST_KEY_MAP_INVALID; + } + data->gtks++; + } + + switch (key->cipher) { + default: + WARN_ON(1); + break; + case WLAN_CIPHER_SUITE_TKIP: + + /* + * For non-QoS this relies on the fact that both the uCode and + * mac80211 use TID 0 (as they need to to avoid replay attacks) + * for checking the IV in the frames. + */ + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + ieee80211_get_key_rx_seq(key, i, &seq); + + rsc[i] = cpu_to_le64(((u64)seq.tkip.iv32 << 16) | + seq.tkip.iv16); + } + + data->have_rsc = true; + break; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: /* - * The D3 firmware hardcodes the key offset 0 as the key it - * uses to transmit packets to the AP, i.e. the PTK. + * For non-QoS this relies on the fact that both the uCode and + * mac80211/our RX code use TID 0 for checking the PN. */ - if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { - mvm->ptk_ivlen = key->iv_len; - mvm->ptk_icvlen = key->icv_len; - ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0); + if (sta) { + struct iwl_mvm_sta *mvmsta; + struct iwl_mvm_key_pn *ptk_pn; + const u8 *pn; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + rcu_read_lock(); + ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]); + if (WARN_ON(!ptk_pn)) { + rcu_read_unlock(); + break; + } + + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i, + mvm->trans->num_rx_queues); + rsc[i] = cpu_to_le64((u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } + + rcu_read_unlock(); } else { - /* - * firmware only supports TSC/RSC for a single key, - * so if there are multiple keep overwriting them - * with new ones -- this relies on mac80211 doing - * list_add_tail(). 
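The new v5 RSC layout above multiplexes several GTK key IDs onto two firmware slots, evicting the oldest mapping as new keys arrive. A runnable sketch of that bookkeeping (array sizes and the invalid marker are illustrative):

#include <stdio.h>

#define MAP_INVALID 0xff	/* stands in for IWL_MCAST_KEY_MAP_INVALID */

int main(void)
{
	unsigned char key_id_map[4] = { MAP_INVALID, MAP_INVALID,
					MAP_INVALID, MAP_INVALID };
	int gtk_ids[4], gtks = 0;
	int incoming[3] = { 1, 2, 3 };	/* GTK key indices, oldest first */
	int i, k;

	for (i = 0; i < 3; i++) {
		int keyidx = incoming[i];

		gtk_ids[gtks] = keyidx;
		key_id_map[keyidx] = gtks % 2;	/* only two firmware slots */
		if (gtks >= 2)	/* evict the slot's previous owner */
			key_id_map[gtk_ids[gtks - 2]] = MAP_INVALID;
		gtks++;
	}

	for (k = 0; k < 4; k++)
		printf("key %d -> %s\n", k,
		       key_id_map[k] == MAP_INVALID ? "invalid" :
		       key_id_map[k] ? "slot 1" : "slot 0");
	return 0;
}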
- */ - mvm->gtk_ivlen = key->iv_len; - mvm->gtk_icvlen = key->icv_len; - ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1); + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + u8 *pn = seq.ccmp.pn; + + ieee80211_get_key_rx_seq(key, i, &seq); + rsc[i] = cpu_to_le64((u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } } - mutex_unlock(&mvm->mutex); - data->error = ret != 0; + data->have_rsc = true; + break; + } +} + +static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, + WOWLAN_TSC_RSC_PARAM, + IWL_FW_CMD_VER_UNKNOWN); + int ret; + + if (ver == 5) { + struct wowlan_key_rsc_v5_data data = {}; + int i; + + data.rsc = kmalloc(sizeof(*data.rsc), GFP_KERNEL); + if (!data.rsc) + return -ENOMEM; + + memset(data.rsc, 0xff, sizeof(*data.rsc)); + + for (i = 0; i < ARRAY_SIZE(data.rsc->mcast_key_id_map); i++) + data.rsc->mcast_key_id_map[i] = + IWL_MCAST_KEY_MAP_INVALID; + data.rsc->sta_id = cpu_to_le32(mvmvif->ap_sta_id); + + ieee80211_iter_keys(mvm->hw, vif, + iwl_mvm_wowlan_get_rsc_v5_data, + &data); + + if (data.have_rsc) + ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM, + CMD_ASYNC, sizeof(*data.rsc), + data.rsc); + else + ret = 0; + kfree(data.rsc); + } else if (ver == 4 || ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN) { + struct wowlan_key_rsc_tsc_data data = {}; + int size; + + data.rsc_tsc = kzalloc(sizeof(*data.rsc_tsc), GFP_KERNEL); + if (!data.rsc_tsc) + return -ENOMEM; + + if (ver == 4) { + size = sizeof(*data.rsc_tsc); + data.rsc_tsc->sta_id = cpu_to_le32(mvmvif->ap_sta_id); + } else { + /* ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN */ + size = sizeof(data.rsc_tsc->params); + } + + ieee80211_iter_keys(mvm->hw, vif, + iwl_mvm_wowlan_get_rsc_tsc_data, + &data); + + if (data.have_rsc_tsc) + ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM, + CMD_ASYNC, size, + data.rsc_tsc); + else + ret = 0; + kfree(data.rsc_tsc); + } else { + ret = 0; + WARN_ON_ONCE(1); + } + + return ret; +} + +struct wowlan_key_tkip_data { + struct iwl_wowlan_tkip_params_cmd tkip; + bool have_tkip_keys; +}; + +static void iwl_mvm_wowlan_get_tkip_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct wowlan_key_tkip_data *data = _data; + struct iwl_p1k_cache *rx_p1ks; + u8 *rx_mic_key; + struct ieee80211_key_seq seq; + u32 cur_rx_iv32 = 0; + u16 p1k[IWL_P1K_SIZE]; + int i; + + switch (key->cipher) { + default: + break; + case WLAN_CIPHER_SUITE_TKIP: + if (sta) { + u64 pn64; + + rx_p1ks = data->tkip.rx_uni; + + pn64 = atomic64_read(&key->tx_pn); + + ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64), + p1k); + iwl_mvm_convert_p1k(p1k, data->tkip.tx.p1k); + + memcpy(data->tkip.mic_keys.tx, + &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], + IWL_MIC_KEY_SIZE); + + rx_mic_key = data->tkip.mic_keys.rx_unicast; + } else { + rx_p1ks = data->tkip.rx_multi; + rx_mic_key = data->tkip.mic_keys.rx_mcast; + } + + for (i = 0; i < IWL_NUM_RSC; i++) { + /* wrapping isn't allowed, AP must rekey */ + if (seq.tkip.iv32 > cur_rx_iv32) + cur_rx_iv32 = seq.tkip.iv32; + } + + ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, + cur_rx_iv32, p1k); + iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k); + ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, + cur_rx_iv32 + 1, p1k); + iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k); + + 
memcpy(rx_mic_key, + &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], + IWL_MIC_KEY_SIZE); + + data->have_tkip_keys = true; + break; + } +} + +struct wowlan_key_gtk_type_iter { + struct iwl_wowlan_kek_kck_material_cmd_v4 *kek_kck_cmd; +}; + +static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct wowlan_key_gtk_type_iter *data = _data; + + switch (key->cipher) { + default: + return; + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP); + return; + case WLAN_CIPHER_SUITE_AES_CMAC: + data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM); + return; + case WLAN_CIPHER_SUITE_CCMP: + if (!sta) + data->kek_kck_cmd->gtk_cipher = + cpu_to_le32(STA_KEY_FLG_CCM); + break; + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + if (!sta) + data->kek_kck_cmd->gtk_cipher = + cpu_to_le32(STA_KEY_FLG_GCMP); + break; } } @@ -713,109 +981,81 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, } static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 cmd_flags) + struct ieee80211_vif *vif) { - struct iwl_wowlan_kek_kck_material_cmd_v4 kek_kck_cmd = {}; - struct iwl_wowlan_kek_kck_material_cmd_v4 *_kek_kck_cmd = &kek_kck_cmd; - struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; bool unified = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); - struct wowlan_key_data key_data = { - .configure_keys = !unified, - .use_rsc_tsc = false, - .tkip = &tkip_cmd, - .use_tkip = false, - .kek_kck_cmd = _kek_kck_cmd, - }; + struct wowlan_key_reprogram_data key_data = {}; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; u8 cmd_ver; size_t cmd_size; - key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL); - if (!key_data.rsc_tsc) - return -ENOMEM; - - /* - * if we have to configure keys, call ieee80211_iter_keys(), - * as we need non-atomic context in order to take the - * required locks. - */ - /* - * Note that currently we don't propagate cmd_flags - * to the iterator. In case of key_data.configure_keys, - * all the configured commands are SYNC, and - * iwl_mvm_wowlan_program_keys() will take care of - * locking/unlocking mvm->mutex. - */ - ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_program_keys, - &key_data); + if (!unified) { + /* + * if we have to configure keys, call ieee80211_iter_keys(), + * as we need non-atomic context in order to take the + * required locks. + */ + /* + * Note that currently we don't use CMD_ASYNC in the iterator. + * In case of key_data.configure_keys, all the configured + * commands are SYNC, and iwl_mvm_wowlan_program_keys() will + * take care of locking/unlocking mvm->mutex. 
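iwl_mvm_wowlan_config_rsc_tsc() above picks a payload size from the firmware's advertised command version; stripped of the driver context, the dispatch reduces to this sketch (the sizes passed in main are placeholders):

#include <stdio.h>

#define VER_UNKNOWN 99	/* stands in for IWL_FW_CMD_VER_UNKNOWN */

/* older firmware takes only the params body; v4 adds sta_id; v5 is a
 * different struct entirely and is handled by a separate path above */
static long rsc_tsc_cmd_size(int ver, long full_size, long params_size)
{
	switch (ver) {
	case 4:
		return full_size;
	case 2:
	case VER_UNKNOWN:
		return params_size;
	default:
		return -1;	/* unexpected version: warn, send nothing */
	}
}

int main(void)
{
	printf("v4: %ld bytes, v2: %ld bytes\n",
	       rsc_tsc_cmd_size(4, 1024, 960),
	       rsc_tsc_cmd_size(2, 1024, 960));
	return 0;
}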
+ */ + ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_program_keys, + &key_data); - if (key_data.error) { - ret = -EIO; - goto out; + if (key_data.error) + return -EIO; } - if (key_data.use_rsc_tsc) { - int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, - WOWLAN_TSC_RSC_PARAM, - IWL_FW_CMD_VER_UNKNOWN); - int size; - - if (ver == 4) { - size = sizeof(*key_data.rsc_tsc); - key_data.rsc_tsc->sta_id = - cpu_to_le32(mvmvif->ap_sta_id); - - } else if (ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN) { - size = sizeof(key_data.rsc_tsc->params); - } else { - ret = 0; - WARN_ON_ONCE(1); - goto out; - } - - ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM, - cmd_flags, - size, - key_data.rsc_tsc); - - if (ret) - goto out; - } + ret = iwl_mvm_wowlan_config_rsc_tsc(mvm, vif); + if (ret) + return ret; - if (key_data.use_tkip && - !fw_has_api(&mvm->fw->ucode_capa, + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) { int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, WOWLAN_TKIP_PARAM, IWL_FW_CMD_VER_UNKNOWN); + struct wowlan_key_tkip_data tkip_data = {}; int size; if (ver == 2) { - size = sizeof(tkip_cmd); - key_data.tkip->sta_id = + size = sizeof(tkip_data.tkip); + tkip_data.tkip.sta_id = cpu_to_le32(mvmvif->ap_sta_id); } else if (ver == 1 || ver == IWL_FW_CMD_VER_UNKNOWN) { size = sizeof(struct iwl_wowlan_tkip_params_cmd_ver_1); } else { - ret = -EINVAL; WARN_ON_ONCE(1); - goto out; + return -EINVAL; } - /* send relevant data according to CMD version */ - ret = iwl_mvm_send_cmd_pdu(mvm, - WOWLAN_TKIP_PARAM, - cmd_flags, size, - &tkip_cmd); - if (ret) - goto out; + ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_get_tkip_data, + &tkip_data); + + if (tkip_data.have_tkip_keys) { + /* send relevant data according to CMD version */ + ret = iwl_mvm_send_cmd_pdu(mvm, + WOWLAN_TKIP_PARAM, + CMD_ASYNC, size, + &tkip_data.tkip); + if (ret) + return ret; + } } /* configure rekey data only if offloaded rekey is supported (d3) */ if (mvmvif->rekey_data.valid) { + struct iwl_wowlan_kek_kck_material_cmd_v4 kek_kck_cmd = {}; + struct iwl_wowlan_kek_kck_material_cmd_v4 *_kek_kck_cmd = + &kek_kck_cmd; + struct wowlan_key_gtk_type_iter gtk_type_data = { + .kek_kck_cmd = _kek_kck_cmd, + }; + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, WOWLAN_KEK_KCK_MATERIAL, @@ -824,6 +1064,9 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, cmd_ver != IWL_FW_CMD_VER_UNKNOWN)) return -EINVAL; + ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_gtk_type_iter, + >k_type_data); + memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck, mvmvif->rekey_data.kck_len); kek_kck_cmd.kck_len = cpu_to_le16(mvmvif->rekey_data.kck_len); @@ -851,17 +1094,13 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n", mvmvif->rekey_data.akm); - ret = iwl_mvm_send_cmd_pdu(mvm, - WOWLAN_KEK_KCK_MATERIAL, cmd_flags, - cmd_size, - _kek_kck_cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_KEK_KCK_MATERIAL, + CMD_ASYNC, cmd_size, _kek_kck_cmd); if (ret) - goto out; + return ret; } - ret = 0; -out: - kfree(key_data.rsc_tsc); - return ret; + + return 0; } static int @@ -893,7 +1132,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm, * that isn't really a problem though. 
*/ mutex_unlock(&mvm->mutex); - ret = iwl_mvm_wowlan_config_key_params(mvm, vif, CMD_ASYNC); + ret = iwl_mvm_wowlan_config_key_params(mvm, vif); mutex_lock(&mvm->mutex); if (ret) return ret; @@ -1694,9 +1933,12 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) status->gtk[0] = v7->gtk[0]; status->igtk[0] = v7->igtk[0]; - } else if (notif_ver == 9 || notif_ver == 10) { + } else if (notif_ver == 9 || notif_ver == 10 || notif_ver == 11) { struct iwl_wowlan_status_v9 *v9 = (void *)cmd.resp_pkt->data; + /* these three command versions have same layout and size, the + * difference is only in a few not used (reserved) fields. + */ status = iwl_mvm_parse_wowlan_status_common_v9(mvm, cmd.resp_pkt->data, len); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 95f883aba148..5dc39fbb74d6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -305,7 +305,6 @@ static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file, int pos = 0; int bufsz = sizeof(buf); int tbl_idx; - u8 *value; if (!iwl_mvm_firmware_running(mvm)) return -EIO; @@ -321,16 +320,18 @@ static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file, pos = scnprintf(buf, bufsz, "SAR geographic profile disabled\n"); } else { - value = &mvm->fwrt.geo_profiles[tbl_idx - 1].values[0]; - pos += scnprintf(buf + pos, bufsz - pos, "Use geographic profile %d\n", tbl_idx); pos += scnprintf(buf + pos, bufsz - pos, "2.4GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n", - value[1], value[2], value[0]); + mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[0], + mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[1], + mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].max); pos += scnprintf(buf + pos, bufsz - pos, "5.2GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n", - value[4], value[5], value[3]); + mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[0], + mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[1], + mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].max); } mutex_unlock(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index 59cef0d89a6d..03e5bf5cb909 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -754,6 +754,33 @@ iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm, target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF; } +static int +iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request_peer *peer, + struct iwl_tof_range_req_ap_entry_v8 *target) +{ + u32 flags; + int ret = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target); + + if (ret) + return ret; + + iwl_mvm_ftm_set_ndp_params(mvm, target); + + /* + * If secure LTF is turned off, replace the flag with PMF only + */ + flags = le32_to_cpu(target->initiator_ap_flags); + if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) && + !IWL_MVM_FTM_INITIATOR_SECURE_LTF) { + flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED; + flags |= IWL_INITIATOR_AP_FLAGS_PMF; + target->initiator_ap_flags = cpu_to_le32(flags); + } + + return 0; +} + static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) @@ -773,24 +800,53 @@ static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm, for (i = 0; i < cmd.num_of_ap; i++) { 
struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i]; - u32 flags; - err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target); + err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, target); if (err) return err; + } - iwl_mvm_ftm_set_ndp_params(mvm, target); - - /* - * If secure LTF is turned off, replace the flag with PMF only - */ - flags = le32_to_cpu(target->initiator_ap_flags); - if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) && - !IWL_MVM_FTM_INITIATOR_SECURE_LTF) { - flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED; - flags |= IWL_INITIATOR_AP_FLAGS_PMF; - target->initiator_ap_flags = cpu_to_le32(flags); + return iwl_mvm_ftm_send_cmd(mvm, &hcmd); +} + +static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *req) +{ + struct iwl_tof_range_req_cmd_v13 cmd; + struct iwl_host_cmd hcmd = { + .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), + .dataflags[0] = IWL_HCMD_DFL_DUP, + .data[0] = &cmd, + .len[0] = sizeof(cmd), + }; + u8 i; + int err; + + iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req); + + for (i = 0; i < cmd.num_of_ap; i++) { + struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; + struct iwl_tof_range_req_ap_entry_v9 *target = &cmd.ap[i]; + + err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, (void *)target); + if (err) + return err; + + if (peer->ftm.trigger_based || peer->ftm.non_trigger_based) + target->bss_color = peer->ftm.bss_color; + + if (peer->ftm.non_trigger_based) { + target->min_time_between_msr = + cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR); + target->burst_period = + cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR); + } else { + target->min_time_between_msr = cpu_to_le16(0); } + + target->band = + iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band); } return iwl_mvm_ftm_send_cmd(mvm, &hcmd); @@ -814,6 +870,9 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_FW_CMD_VER_UNKNOWN); switch (cmd_ver) { + case 13: + err = iwl_mvm_ftm_start_v13(mvm, vif, req); + break; case 12: err = iwl_mvm_ftm_start_v12(mvm, vif, req); break; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c index 5a249ea97eb2..eba5433c2626 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #include <net/cfg80211.h> #include <linux/etherdevice.h> @@ -77,7 +77,7 @@ static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef, static void iwl_mvm_ftm_responder_set_ndp(struct iwl_mvm *mvm, - struct iwl_tof_responder_config_cmd_v8 *cmd) + struct iwl_tof_responder_config_cmd_v9 *cmd) { /* Up to 2 R2I STS are allowed on the responder */ u32 r2i_max_sts = IWL_MVM_FTM_R2I_MAX_STS < 2 ? @@ -104,7 +104,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, * field interpretation is different), so the same struct can be use * for all cases. 
*/ - struct iwl_tof_responder_config_cmd_v8 cmd = { + struct iwl_tof_responder_config_cmd_v9 cmd = { .channel_num = chandef->chan->hw_value, .cmd_valid_fields = cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO | @@ -115,10 +115,27 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP, TOF_RESPONDER_CONFIG_CMD, 6); int err; + int cmd_size; lockdep_assert_held(&mvm->mutex); -if (cmd_ver == 8) + /* Use a default of bss_color=1 for now */ + if (cmd_ver == 9) { + cmd.cmd_valid_fields |= + cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR | + IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR); + cmd.bss_color = 1; + cmd.min_time_between_msr = + cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR); + cmd.max_time_between_msr = + cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR); + cmd_size = sizeof(struct iwl_tof_responder_config_cmd_v9); + } else { + /* All versions up to version 8 have the same size */ + cmd_size = sizeof(struct iwl_tof_responder_config_cmd_v8); + } + + if (cmd_ver >= 8) iwl_mvm_ftm_responder_set_ndp(mvm, &cmd); if (cmd_ver >= 7) @@ -137,7 +154,7 @@ if (cmd_ver == 8) return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RESPONDER_CONFIG_CMD, LOCATION_GROUP, 0), - 0, sizeof(cmd), &cmd); + 0, cmd_size, &cmd); } static int diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 38fd5886af2d..74404c96063b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -743,7 +743,8 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) /* all structs have the same common part, add it */ len += sizeof(cmd.common); - ret = iwl_sar_select_profile(&mvm->fwrt, per_chain, ACPI_SAR_NUM_TABLES, + ret = iwl_sar_select_profile(&mvm->fwrt, per_chain, + IWL_NUM_CHAIN_TABLES, n_subbands, prof_a, prof_b); /* return on error or if the profile is disabled (positive number) */ @@ -1057,16 +1058,7 @@ static const struct dmi_system_id dmi_ppag_approved_list[] = { static int iwl_mvm_ppag_init(struct iwl_mvm *mvm) { - int ret; - - ret = iwl_mvm_get_ppag_table(mvm); - if (ret < 0) { - IWL_DEBUG_RADIO(mvm, - "PPAG BIOS table invalid or unavailable. (%d)\n", - ret); - return 0; - } - + /* no need to read the table, done in INIT stage */ if (!dmi_check_system(dmi_ppag_approved_list)) { IWL_DEBUG_RADIO(mvm, "System vendor '%s' is not in the approved list, disabling PPAG.\n", @@ -1191,12 +1183,65 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) ret); } } + +void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm) +{ + int ret; + + /* read PPAG table */ + ret = iwl_mvm_get_ppag_table(mvm); + if (ret < 0) { + IWL_DEBUG_RADIO(mvm, + "PPAG BIOS table invalid or unavailable. (%d)\n", + ret); + } + + /* read SAR tables */ + ret = iwl_sar_get_wrds_table(&mvm->fwrt); + if (ret < 0) { + IWL_DEBUG_RADIO(mvm, + "WRDS SAR BIOS table invalid or unavailable. (%d)\n", + ret); + /* + * If not available, don't fail and don't bother with EWRD and + * WGDS */ + + if (!iwl_sar_get_wgds_table(&mvm->fwrt)) { + /* + * If basic SAR is not available, we check for WGDS, + * which should *not* be available either. If it is + * available, issue an error, because we can't use SAR + * Geo without basic SAR. 
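/*
 * [Editorial sketch] The ftm-responder.c hunk earlier in this diff
 * keeps one stack buffer sized for the newest command layout but
 * sends a per-version length, so older firmware only ever sees the
 * prefix it understands. The shape of that pattern, with hypothetical
 * v8/v9 structs (command id and field names invented for
 * illustration):
 */
struct demo_cmd_v8 {
	__le32 common;
};

struct demo_cmd_v9 {
	__le32 common;
	u8 bss_color;			/* new in v9 */
	__le16 min_time_between_msr;	/* new in v9 */
} __packed;

static int demo_send_versioned(struct iwl_mvm *mvm, u32 id, u8 cmd_ver)
{
	struct demo_cmd_v9 cmd = {};	/* largest supported layout */
	int cmd_size = cmd_ver == 9 ? sizeof(struct demo_cmd_v9)
				    : sizeof(struct demo_cmd_v8);

	/* fill the common (v8) fields, then the v9 extras if cmd_ver == 9 */
	return iwl_mvm_send_cmd_pdu(mvm, id, 0, cmd_size, &cmd);
}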
+ */ + IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n"); + } + + } else { + ret = iwl_sar_get_ewrd_table(&mvm->fwrt); + /* if EWRD is not available, we can still use + * WRDS, so don't fail */ + if (ret < 0) + IWL_DEBUG_RADIO(mvm, + "EWRD SAR BIOS table invalid or unavailable. (%d)\n", + ret); + + /* read geo SAR table */ + if (iwl_sar_geo_support(&mvm->fwrt)) { + ret = iwl_sar_get_wgds_table(&mvm->fwrt); + if (ret < 0) + IWL_DEBUG_RADIO(mvm, + "Geo SAR BIOS table invalid or unavailable. (%d)\n", + ret); + /* we don't fail if the table is not available */ + } + } +} #else /* CONFIG_ACPI */ inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) { - return -ENOENT; + return 1; } inline int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) @@ -1231,6 +1276,10 @@ static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm) { return DSM_VALUE_RFI_DISABLE; } + +void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm) +{ +} #endif /* CONFIG_ACPI */ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags) @@ -1286,27 +1335,6 @@ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags) static int iwl_mvm_sar_init(struct iwl_mvm *mvm) { - int ret; - - ret = iwl_sar_get_wrds_table(&mvm->fwrt); - if (ret < 0) { - IWL_DEBUG_RADIO(mvm, - "WRDS SAR BIOS table invalid or unavailable. (%d)\n", - ret); - /* - * If not available, don't fail and don't bother with EWRD. - * Return 1 to tell that we can't use WGDS either. - */ - return 1; - } - - ret = iwl_sar_get_ewrd_table(&mvm->fwrt); - /* if EWRD is not available, we can still use WRDS, so don't fail */ - if (ret < 0) - IWL_DEBUG_RADIO(mvm, - "EWRD SAR BIOS table invalid or unavailable. (%d)\n", - ret); - return iwl_mvm_sar_select_profile(mvm, 1, 1); } @@ -1542,19 +1570,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; ret = iwl_mvm_sar_init(mvm); - if (ret == 0) { + if (ret == 0) ret = iwl_mvm_sar_geo_init(mvm); - } else if (ret == -ENOENT && !iwl_sar_get_wgds_table(&mvm->fwrt)) { - /* - * If basic SAR is not available, we check for WGDS, - * which should *not* be available either. If it is - * available, issue an error, because we can't use SAR - * Geo without basic SAR. 
- */ - IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n"); - } - - if (ret < 0) + else if (ret < 0) goto error; iwl_mvm_tas_init(mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index fd5e08961651..fd352b2624a6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -647,12 +647,14 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) { cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX); - if (vif->bss_conf.twt_requester && IWL_MVM_USE_TWT) { + if (vif->bss_conf.twt_requester && IWL_MVM_USE_TWT) ctxt_sta->data_policy |= cpu_to_le32(TWT_SUPPORTED); - if (vif->bss_conf.twt_protected) - ctxt_sta->data_policy |= - cpu_to_le32(PROTECTED_TWT_SUPPORTED); - } + if (vif->bss_conf.twt_protected) + ctxt_sta->data_policy |= + cpu_to_le32(PROTECTED_TWT_SUPPORTED); + if (vif->bss_conf.twt_broadcast) + ctxt_sta->data_policy |= + cpu_to_le32(BROADCAST_TWT_SUPPORTED); } @@ -1005,8 +1007,10 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, return -ENOMEM; #ifdef CONFIG_IWLWIFI_DEBUGFS - if (mvm->beacon_inject_active) + if (mvm->beacon_inject_active) { + dev_kfree_skb(beacon); return -EBUSY; + } #endif ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon); @@ -1427,14 +1431,34 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, { struct iwl_rx_packet *pkt = rxb_addr(rxb); unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); - struct iwl_stored_beacon_notif *sb = (void *)pkt->data; + struct iwl_stored_beacon_notif_common *sb = (void *)pkt->data; struct ieee80211_rx_status rx_status; struct sk_buff *skb; + u8 *data; u32 size = le32_to_cpu(sb->byte_count); + int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PROT_OFFLOAD_GROUP, + STORED_BEACON_NTF, 0); - if (size == 0 || pkt_len < struct_size(sb, data, size)) + if (size == 0) return; + /* handle per-version differences */ + if (ver <= 2) { + struct iwl_stored_beacon_notif_v2 *sb_v2 = (void *)pkt->data; + + if (pkt_len < struct_size(sb_v2, data, size)) + return; + + data = sb_v2->data; + } else { + struct iwl_stored_beacon_notif_v3 *sb_v3 = (void *)pkt->data; + + if (pkt_len < struct_size(sb_v3, data, size)) + return; + + data = sb_v3->data; + } + skb = alloc_skb(size, GFP_ATOMIC); if (!skb) { IWL_ERR(mvm, "alloc_skb failed\n"); @@ -1455,7 +1479,7 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, rx_status.band); /* copy the data */ - skb_put_data(skb, sb->data, size); + skb_put_data(skb, data, size); memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); /* pass it as regular rx to mac80211 */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 70ebecb73c24..3a4585222d6d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -390,7 +390,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) if (mvm->trans->max_skb_frags) hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; - hw->queues = IEEE80211_MAX_QUEUES; + hw->queues = IEEE80211_NUM_ACS; hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; 
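/*
 * [Editorial note, sketch] hw->queues drops from IEEE80211_MAX_QUEUES
 * (16) to IEEE80211_NUM_ACS (4) in the mac80211.c hunk above:
 * presumably, with mac80211's software TXQs the hardware queues only
 * serve as per-AC flow-control handles, so four suffice and the
 * AC-to-queue mapping is the identity:
 */
static u16 ac_to_hw_queue(enum ieee80211_ac_numbers ac)
{
	/* IEEE80211_AC_VO = 0, _VI = 1, _BE = 2, _BK = 3 in mac80211 */
	return (u16)ac;
}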
hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC | IEEE80211_RADIOTAP_MCS_HAVE_STBC; @@ -762,11 +762,11 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) goto drop; - /* treat non-bufferable MMPDUs on AP interfaces as broadcast */ - if ((info->control.vif->type == NL80211_IFTYPE_AP || - info->control.vif->type == NL80211_IFTYPE_ADHOC) && - ieee80211_is_mgmt(hdr->frame_control) && - !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) + /* + * bufferable MMPDUs or MMPDUs on STA interfaces come via TXQs + * so we treat the others as broadcast + */ + if (ieee80211_is_mgmt(hdr->frame_control)) sta = NULL; /* If there is no sta, and it's not offchannel - send through AP */ @@ -2440,6 +2440,9 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, IWL_DEBUG_MAC80211(mvm, "arp filter changed\n"); iwl_mvm_configure_bcast_filter(mvm); } + + if (changes & BSS_CHANGED_BANDWIDTH) + iwl_mvm_apply_fw_smps_request(vif); } static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, @@ -2987,16 +2990,20 @@ static void iwl_mvm_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy, void *_data) { struct iwl_mvm_he_obss_narrow_bw_ru_data *data = _data; + const struct cfg80211_bss_ies *ies; const struct element *elem; - elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, bss->ies->data, - bss->ies->len); + rcu_read_lock(); + ies = rcu_dereference(bss->ies); + elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data, + ies->len); if (!elem || elem->datalen < 10 || !(elem->data[10] & WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) { data->tolerated = false; } + rcu_read_unlock(); } static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw, @@ -5035,22 +5042,14 @@ static void iwl_mvm_event_mlme_callback_ini(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_mlme_event *mlme) { - if (mlme->data == ASSOC_EVENT && (mlme->status == MLME_DENIED || - mlme->status == MLME_TIMEOUT)) { + if ((mlme->data == ASSOC_EVENT || mlme->data == AUTH_EVENT) && + (mlme->status == MLME_DENIED || mlme->status == MLME_TIMEOUT)) { iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_ASSOC_FAILED, NULL); return; } - if (mlme->data == AUTH_EVENT && (mlme->status == MLME_DENIED || - mlme->status == MLME_TIMEOUT)) { - iwl_dbg_tlv_time_point(&mvm->fwrt, - IWL_FW_INI_TIME_POINT_EAPOL_FAILED, - NULL); - return; - } - if (mlme->data == DEAUTH_RX_EVENT || mlme->data == DEAUTH_TX_EVENT) { iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_DEASSOC, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index b50942f28bb7..f877d86b038e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -431,8 +431,6 @@ struct iwl_mvm_vif { static inline struct iwl_mvm_vif * iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif) { - if (!vif) - return NULL; return (void *)vif->drv_priv; } @@ -2045,6 +2043,7 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b); int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm); int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm); +void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm); #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 
7fb4e618f76e..da705fcaf0fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2019 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -416,7 +416,7 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, struct iwl_rx_packet *pkt; struct iwl_host_cmd cmd = { .id = MCC_UPDATE_CMD, - .flags = CMD_WANT_SKB, + .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, .data = { &mcc_update_cmd }, }; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 20e8d343a950..6f60018feed1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -78,7 +78,6 @@ module_exit(iwl_mvm_exit); static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - struct iwl_trans_debug *dbg = &mvm->trans->dbg; u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash; u32 reg_val = 0; u32 phy_config = iwl_mvm_get_phy_config(mvm); @@ -115,10 +114,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI; - if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt) || - (iwl_trans_dbg_ini_valid(mvm->trans) && - dbg->fw_mon_cfg[IWL_FW_INI_ALLOCATION_ID_INTERNAL].buf_location) - ) + if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt)) reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG; iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG, @@ -214,11 +210,14 @@ void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; + enum ieee80211_smps_mode mode = IEEE80211_SMPS_AUTOMATIC; - iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW, - mvm->fw_static_smps_request ? 
- IEEE80211_SMPS_STATIC : - IEEE80211_SMPS_AUTOMATIC); + if (mvm->fw_static_smps_request && + vif->bss_conf.chandef.width == NL80211_CHAN_WIDTH_160 && + vif->bss_conf.he_support) + mode = IEEE80211_SMPS_STATIC; + + iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW, mode); } static void iwl_mvm_intf_dual_chain_req(void *data, u8 *mac, @@ -374,7 +373,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { struct iwl_mfu_assert_dump_notif), RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF, iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC, - struct iwl_stored_beacon_notif), + struct iwl_stored_beacon_notif_v2), RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF, iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC, struct iwl_mu_group_mgmt_notif), @@ -693,11 +692,16 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm) if (ret && ret != -ERFKILL) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); + if (!ret && iwl_mvm_is_lar_supported(mvm)) { + mvm->hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; + ret = iwl_mvm_init_mcc(mvm); + } if (!iwlmvm_mod_params.init_dbg || !ret) iwl_mvm_stop_device(mvm); mutex_unlock(&mvm->mutex); + rtnl_unlock(); if (ret < 0) IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); @@ -772,6 +776,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm, dbgfs_dir); + iwl_mvm_get_acpi_tables(mvm); + mvm->init_status = 0; if (iwl_mvm_has_new_rx_api(mvm)) { @@ -792,10 +798,26 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0; - mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; - mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE; - mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; - mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; + if (iwl_mvm_has_new_tx_api(mvm)) { + /* + * If we have the new TX/queue allocation API initialize them + * all to invalid numbers. We'll rewrite the ones that we need + * later, but that doesn't happen for all of them all of the + * time (e.g. P2P Device is optional), and if a dynamic queue + * ends up getting number 2 (IWL_MVM_DQA_P2P_DEVICE_QUEUE) then + * iwl_mvm_is_static_queue() erroneously returns true, and we + * might have things getting stuck. + */ + mvm->aux_queue = IWL_MVM_INVALID_QUEUE; + mvm->snif_queue = IWL_MVM_INVALID_QUEUE; + mvm->probe_queue = IWL_MVM_INVALID_QUEUE; + mvm->p2p_dev_queue = IWL_MVM_INVALID_QUEUE; + } else { + mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; + mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE; + mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; + mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; + } mvm->sf_state = SF_UNINIT; if (iwl_mvm_has_unified_ucode(mvm)) @@ -1400,7 +1422,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) * can't recover this since we're already half suspended. 
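/*
 * [Editorial sketch] Why the IWL_MVM_INVALID_QUEUE pre-init in the
 * ops.c hunk above matters: the driver decides whether a queue number
 * is "static" by comparing it against these fields (simplified here;
 * assumed to have roughly this shape, per the comment in the hunk):
 */
static bool is_static_queue(struct iwl_mvm *mvm, int queue)
{
	return queue == mvm->aux_queue || queue == mvm->probe_queue ||
	       queue == mvm->p2p_dev_queue || queue == mvm->snif_queue;
}
/*
 * Previously p2p_dev_queue stayed at IWL_MVM_DQA_P2P_DEVICE_QUEUE (2)
 * even when never actually allocated, so a dynamically allocated
 * queue that happened to get id 2 matched here and could get stuck;
 * IWL_MVM_INVALID_QUEUE can never collide with a real queue id.
 */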
*/ if (!mvm->fw_restart && fw_error) { - iwl_fw_error_collect(&mvm->fwrt); + iwl_fw_error_collect(&mvm->fwrt, false); } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_reprobe *reprobe; @@ -1451,7 +1473,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) } } - iwl_fw_error_collect(&mvm->fwrt); + iwl_fw_error_collect(&mvm->fwrt, false); if (fw_error && mvm->fw_restart > 0) mvm->fw_restart--; @@ -1459,13 +1481,31 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) } } -static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) +static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) iwl_mvm_dump_nic_error_log(mvm); + if (sync) { + iwl_fw_error_collect(&mvm->fwrt, true); + /* + * Currently, the only case for sync=true is during + * shutdown, so just stop in this case. If/when that + * changes, we need to be a bit smarter here. + */ + return; + } + + /* + * If the firmware crashes while we're already considering it + * to be dead then don't ask for a restart, that cannot do + * anything useful anyway. + */ + if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status)) + return; + iwl_mvm_nic_restart(mvm, true); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c index 0b818067067c..44344216a1a9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rfi.c @@ -11,7 +11,7 @@ * DDR needs frequency in units of 16.666MHz, so provide FW with the * frequency values in the adjusted format. */ -const static struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = { +static const struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = { /* LPDDR4 */ /* frequency 3733MHz */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index c0babb8d5b5c..c12f303cf652 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -69,8 +69,8 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, /* if we are here - this for sure is either CCMP or GCMP */ if (IS_ERR_OR_NULL(sta)) { - IWL_ERR(mvm, - "expected hw-decrypted unicast frame for station\n"); + IWL_DEBUG_DROP(mvm, + "expected hw-decrypted unicast frame for station\n"); return -1; } @@ -279,7 +279,6 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, { struct iwl_mvm_sta *mvmsta; struct iwl_mvm_vif *mvmvif; - u8 fwkeyid = u32_get_bits(status, IWL_RX_MPDU_STATUS_KEY); u8 keyid; struct ieee80211_key_conf *key; u32 len = le16_to_cpu(desc->mpdu_len); @@ -299,6 +298,10 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, if (!ieee80211_is_beacon(hdr->frame_control)) return 0; + /* key mismatch - will also report !MIC_OK but we shouldn't count it */ + if (!(status & IWL_RX_MPDU_STATUS_KEY_VALID)) + return -1; + /* good cases */ if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK && !(status & IWL_RX_MPDU_STATUS_REPLAY_ERROR))) @@ -309,26 +312,36 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, mvmsta = iwl_mvm_sta_from_mac80211(sta); - /* what? 
*/ - if (fwkeyid != 6 && fwkeyid != 7) - return -1; - mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - key = rcu_dereference(mvmvif->bcn_prot.keys[fwkeyid - 6]); - if (!key) - return -1; + /* + * both keys will have the same cipher and MIC length, use + * whichever one is available + */ + key = rcu_dereference(mvmvif->bcn_prot.keys[0]); + if (!key) { + key = rcu_dereference(mvmvif->bcn_prot.keys[1]); + if (!key) + return -1; + } if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2) return -1; - /* - * See if the key ID matches - if not this may be due to a - * switch and the firmware may erroneously report !MIC_OK. - */ + /* get the real key ID */ keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2]; - if (keyid != fwkeyid) - return -1; + /* and if that's the other key, look it up */ + if (keyid != key->keyidx) { + /* + * shouldn't happen since firmware checked, but be safe + * in case the MIC length is wrong too, for example + */ + if (keyid != 6 && keyid != 7) + return -1; + key = rcu_dereference(mvmvif->bcn_prot.keys[keyid - 6]); + if (!key) + return -1; + } /* Report status to mac80211 */ if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 0368b7101222..d78e436fa8b5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1648,7 +1648,7 @@ iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm, struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[i]; u32 n_aps_flag = iwl_mvm_scan_ch_n_aps_flag(vif_type, - cfg->v2.channel_num); + channels[i]->hw_value); cfg->flags = cpu_to_le32(flags | n_aps_flag); cfg->v2.channel_num = channels[i]->hw_value; @@ -1661,22 +1661,32 @@ iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm, } static int -iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm_scan_params *params, - __le32 *cmd_short_ssid, u8 *cmd_bssid, - u8 *scan_ssid_num, u8 *bssid_num) +iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm, + struct iwl_mvm_scan_params *params, + struct iwl_scan_probe_params_v4 *pp) { int j, idex_s = 0, idex_b = 0; struct cfg80211_scan_6ghz_params *scan_6ghz_params = params->scan_6ghz_params; + bool hidden_supported = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_HIDDEN_6GHZ_SCAN); - if (!params->n_6ghz_params) { - for (j = 0; j < params->n_ssids; j++) { - cmd_short_ssid[idex_s++] = - cpu_to_le32(~crc32_le(~0, params->ssids[j].ssid, - params->ssids[j].ssid_len)); - (*scan_ssid_num)++; + for (j = 0; j < params->n_ssids && idex_s < SCAN_SHORT_SSID_MAX_SIZE; + j++) { + if (!params->ssids[j].ssid_len) + continue; + + pp->short_ssid[idex_s] = + cpu_to_le32(~crc32_le(~0, params->ssids[j].ssid, + params->ssids[j].ssid_len)); + + if (hidden_supported) { + pp->direct_scan[idex_s].id = WLAN_EID_SSID; + pp->direct_scan[idex_s].len = params->ssids[j].ssid_len; + memcpy(pp->direct_scan[idex_s].ssid, params->ssids[j].ssid, + params->ssids[j].ssid_len); } - return 0; + idex_s++; } /* @@ -1693,40 +1703,40 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm_scan_params *params, /* First, try to place the short SSID */ if (scan_6ghz_params[j].short_ssid_valid) { for (k = 0; k < idex_s; k++) { - if (cmd_short_ssid[k] == + if (pp->short_ssid[k] == cpu_to_le32(scan_6ghz_params[j].short_ssid)) break; } if (k == idex_s && idex_s < SCAN_SHORT_SSID_MAX_SIZE) { - cmd_short_ssid[idex_s++] = + pp->short_ssid[idex_s++] = cpu_to_le32(scan_6ghz_params[j].short_ssid); - (*scan_ssid_num)++; } } /* try to 
place BSSID for the same entry */ for (k = 0; k < idex_b; k++) { - if (!memcmp(&cmd_bssid[ETH_ALEN * k], + if (!memcmp(&pp->bssid_array[k], scan_6ghz_params[j].bssid, ETH_ALEN)) break; } if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE) { - memcpy(&cmd_bssid[ETH_ALEN * idex_b++], + memcpy(&pp->bssid_array[idex_b++], scan_6ghz_params[j].bssid, ETH_ALEN); - (*bssid_num)++; } } + + pp->short_ssid_num = idex_s; + pp->bssid_num = idex_b; return 0; } /* TODO: this function can be merged with iwl_mvm_scan_umac_fill_ch_p_v6 */ static void iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params, - u32 n_channels, __le32 *cmd_short_ssid, - u8 *cmd_bssid, u8 scan_ssid_num, - u8 bssid_num, + u32 n_channels, + struct iwl_scan_probe_params_v4 *pp, struct iwl_scan_channel_params_v6 *cp, enum nl80211_iftype vif_type) { @@ -1741,7 +1751,7 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params, u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0; u8 j, k, s_max = 0, b_max = 0, n_used_bssid_entries; - bool force_passive, found = false, + bool force_passive, found = false, allow_passive = true, unsolicited_probe_on_chan = false, psc_no_listen = false; cfg->v1.channel_num = params->channels[i]->hw_value; @@ -1766,9 +1776,9 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params, scan_6ghz_params[j].unsolicited_probe; psc_no_listen |= scan_6ghz_params[j].psc_no_listen; - for (k = 0; k < scan_ssid_num; k++) { + for (k = 0; k < pp->short_ssid_num; k++) { if (!scan_6ghz_params[j].unsolicited_probe && - le32_to_cpu(cmd_short_ssid[k]) == + le32_to_cpu(pp->short_ssid[k]) == scan_6ghz_params[j].short_ssid) { /* Relevant short SSID bit set */ if (s_ssid_bitmap & BIT(k)) { @@ -1778,7 +1788,10 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params, /* * Use short SSID only to create a new - * iteration during channel dwell. + * iteration during channel dwell or in + * case that the short SSID has a + * matching SSID, i.e., scan for hidden + * APs. 
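/*
 * [Editorial sketch] The "short SSID" matched and filled into the
 * probe params above is the 802.11ax 6 GHz shorthand for an SSID: the
 * bitwise NOT of a little-endian CRC-32 seeded with ~0 over the SSID
 * bytes, exactly as computed in the hunk. As a standalone helper:
 */
#include <linux/crc32.h>
#include <linux/types.h>

static __le32 demo_short_ssid(const u8 *ssid, size_t ssid_len)
{
	return cpu_to_le32(~crc32_le(~0, ssid, ssid_len));
}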
*/ if (n_used_bssid_entries >= 3) { s_ssid_bitmap |= BIT(k); @@ -1786,6 +1799,12 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params, n_used_bssid_entries -= 3; found = true; break; + } else if (pp->direct_scan[k].len) { + s_ssid_bitmap |= BIT(k); + s_max++; + found = true; + allow_passive = false; + break; } } } @@ -1793,8 +1812,8 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params, if (found) continue; - for (k = 0; k < bssid_num; k++) { - if (!memcmp(&cmd_bssid[ETH_ALEN * k], + for (k = 0; k < pp->bssid_num; k++) { + if (!memcmp(&pp->bssid_array[k], scan_6ghz_params[j].bssid, ETH_ALEN)) { if (!(bssid_bitmap & BIT(k))) { @@ -1849,7 +1868,7 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params, force_passive |= (unsolicited_probe_on_chan && (s_max > 1 || b_max > 3)); } - if (force_passive || + if ((allow_passive && force_passive) || (!flags && !cfg80211_channel_is_psc(params->channels[i]))) flags |= IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE; @@ -2368,32 +2387,28 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (ret) return ret; - iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params, - &bitmap_ssid); if (!params->scan_6ghz) { + iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params, + &bitmap_ssid); iwl_mvm_scan_umac_fill_ch_p_v6(mvm, params, vif, - &scan_p->channel_params, bitmap_ssid); + &scan_p->channel_params, bitmap_ssid); return 0; + } else { + pb->preq = params->preq; } + cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif); cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY; cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS; - ret = iwl_mvm_umac_scan_fill_6g_chan_list(params, pb->short_ssid, - pb->bssid_array[0], - &pb->short_ssid_num, - &pb->bssid_num); + ret = iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb); if (ret) return ret; iwl_mvm_umac_scan_cfg_channels_v6_6g(params, params->n_channels, - pb->short_ssid, - pb->bssid_array[0], - pb->short_ssid_num, - pb->bssid_num, cp, - vif->type); + pb, cp, vif->type); cp->count = params->n_channels; if (!params->n_ssids || (params->n_ssids == 1 && !params->ssids[0].ssid_len)) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 9c45a64c5009..a64874c05ced 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -316,8 +316,9 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, } static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int queue, u8 tid, u8 flags) + u16 *queueptr, u8 tid, u8 flags) { + int queue = *queueptr; struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_DISABLE_QUEUE, @@ -326,6 +327,7 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (iwl_mvm_has_new_tx_api(mvm)) { iwl_trans_txq_free(mvm->trans, queue); + *queueptr = IWL_MVM_INVALID_QUEUE; return 0; } @@ -487,6 +489,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, u8 sta_id, tid; unsigned long disable_agg_tids = 0; bool same_sta; + u16 queue_tmp = queue; int ret; lockdep_assert_held(&mvm->mutex); @@ -509,7 +512,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, false); - ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0); + ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid, 0); if (ret) { 
IWL_ERR(mvm, "Failed to free inactive queue %d (ret=%d)\n", @@ -1184,6 +1187,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); int queue = -1; + u16 queue_tmp; unsigned long disable_agg_tids = 0; enum iwl_mvm_agg_state queue_state; bool shared_queue = false, inc_ssn; @@ -1332,7 +1336,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, return 0; out_err: - iwl_mvm_disable_txq(mvm, sta, queue, tid, 0); + queue_tmp = queue; + iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid, 0); return ret; } @@ -1779,7 +1784,7 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE) continue; - iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i, + iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i, 0); mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; } @@ -1987,7 +1992,7 @@ static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx, ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor); if (ret) { if (!iwl_mvm_has_new_tx_api(mvm)) - iwl_mvm_disable_txq(mvm, NULL, *queue, + iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0); return ret; } @@ -2060,7 +2065,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA)) return -EINVAL; - iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0); + iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT, 0); ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); @@ -2077,7 +2082,7 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm) if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA)) return -EINVAL; - iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0); + iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT, 0); ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); @@ -2173,7 +2178,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int queue; + u16 *queueptr, queue; lockdep_assert_held(&mvm->mutex); @@ -2182,10 +2187,10 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, switch (vif->type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: - queue = mvm->probe_queue; + queueptr = &mvm->probe_queue; break; case NL80211_IFTYPE_P2P_DEVICE: - queue = mvm->p2p_dev_queue; + queueptr = &mvm->p2p_dev_queue; break; default: WARN(1, "Can't free bcast queue on vif type %d\n", @@ -2193,7 +2198,8 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, return; } - iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0); + queue = *queueptr; + iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT, 0); if (iwl_mvm_has_new_tx_api(mvm)) return; @@ -2428,7 +2434,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true); - iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0); + iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0, 0); ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); if (ret) @@ -3190,6 +3196,20 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm, return NULL; } +static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len) +{ + int i; + + for (i = len 
- 1; i >= 0; i--) { + if (pn1[i] > pn2[i]) + return 1; + if (pn1[i] < pn2[i]) + return -1; + } + + return 0; +} + static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, u32 sta_id, struct ieee80211_key_conf *key, bool mcast, @@ -3208,6 +3228,9 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, int i, size; bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TKIP_MIC_KEYS); + int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, + ADD_STA_KEY, + new_api ? 2 : 1); if (sta_id == IWL_MVM_INVALID_STA) return -EINVAL; @@ -3220,7 +3243,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP); - if (new_api) { + if (api_ver >= 2) { memcpy((void *)&u.cmd.tx_mic_key, &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], IWL_MIC_KEY_SIZE); @@ -3241,7 +3264,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, case WLAN_CIPHER_SUITE_CCMP: key_flags |= cpu_to_le16(STA_KEY_FLG_CCM); memcpy(u.cmd.common.key, key->key, key->keylen); - if (new_api) + if (api_ver >= 2) pn = atomic64_read(&key->tx_pn); break; case WLAN_CIPHER_SUITE_WEP104: @@ -3257,7 +3280,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, case WLAN_CIPHER_SUITE_GCMP: key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP); memcpy(u.cmd.common.key, key->key, key->keylen); - if (new_api) + if (api_ver >= 2) pn = atomic64_read(&key->tx_pn); break; default: @@ -3274,7 +3297,46 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, u.cmd.common.key_flags = key_flags; u.cmd.common.sta_id = sta_id; - if (new_api) { + if (key->cipher == WLAN_CIPHER_SUITE_TKIP) + i = 0; + else + i = -1; + + for (; i < IEEE80211_NUM_TIDS; i++) { + struct ieee80211_key_seq seq = {}; + u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn; + int rx_pn_len = 8; + /* there's a hole at 2/3 in FW format depending on version */ + int hole = api_ver >= 3 ? 
0 : 2; + + ieee80211_get_key_rx_seq(key, i, &seq); + + if (key->cipher == WLAN_CIPHER_SUITE_TKIP) { + rx_pn[0] = seq.tkip.iv16; + rx_pn[1] = seq.tkip.iv16 >> 8; + rx_pn[2 + hole] = seq.tkip.iv32; + rx_pn[3 + hole] = seq.tkip.iv32 >> 8; + rx_pn[4 + hole] = seq.tkip.iv32 >> 16; + rx_pn[5 + hole] = seq.tkip.iv32 >> 24; + } else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) { + rx_pn = seq.hw.seq; + rx_pn_len = seq.hw.seq_len; + } else { + rx_pn[0] = seq.ccmp.pn[0]; + rx_pn[1] = seq.ccmp.pn[1]; + rx_pn[2 + hole] = seq.ccmp.pn[2]; + rx_pn[3 + hole] = seq.ccmp.pn[3]; + rx_pn[4 + hole] = seq.ccmp.pn[4]; + rx_pn[5 + hole] = seq.ccmp.pn[5]; + } + + if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt, + rx_pn_len) > 0) + memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn, + rx_pn_len); + } + + if (api_ver >= 2) { u.cmd.transmit_seq_cnt = cpu_to_le64(pn); size = sizeof(u.cmd); } else { @@ -3411,7 +3473,6 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, u8 key_offset, bool mcast) { - int ret; const u8 *addr; struct ieee80211_key_seq seq; u16 p1k[5]; @@ -3433,30 +3494,19 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, return -EINVAL; } - switch (keyconf->cipher) { - case WLAN_CIPHER_SUITE_TKIP: + if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) { addr = iwl_mvm_get_mac_addr(mvm, vif, sta); /* get phase 1 key from mac80211 */ ieee80211_get_key_rx_seq(keyconf, 0, &seq); ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); - ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, - seq.tkip.iv32, p1k, 0, key_offset, - mfp); - break; - case WLAN_CIPHER_SUITE_CCMP: - case WLAN_CIPHER_SUITE_WEP40: - case WLAN_CIPHER_SUITE_WEP104: - case WLAN_CIPHER_SUITE_GCMP: - case WLAN_CIPHER_SUITE_GCMP_256: - ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, - 0, NULL, 0, key_offset, mfp); - break; - default: - ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, - 0, NULL, 0, key_offset, mfp); + + return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, + seq.tkip.iv32, p1k, 0, key_offset, + mfp); } - return ret; + return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, + 0, NULL, 0, key_offset, mfp); } int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index d3307a11fcac..25af88a3edce 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -168,6 +168,16 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, rcu_read_unlock(); } + if (vif->bss_conf.assoc) { + /* + * When not associated, this will be called from + * iwl_mvm_event_mlme_callback_ini() + */ + iwl_dbg_tlv_time_point(&mvm->fwrt, + IWL_FW_INI_TIME_POINT_ASSOC_FAILED, + NULL); + } + iwl_mvm_connection_loss(mvm, vif, errmsg); return true; } @@ -246,6 +256,18 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm, } } +static void iwl_mvm_p2p_roc_finished(struct iwl_mvm *mvm) +{ + /* + * If the IWL_MVM_STATUS_NEED_FLUSH_P2P is already set, then the + * roc_done_wk is already scheduled or running, so don't schedule it + * again to avoid a race where the roc_done_wk clears this bit after + * it is set here, affecting the next run of the roc_done_wk. + */ + if (!test_and_set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) + iwl_mvm_roc_finished(mvm); +} + /* * Handles a FW notification for an event that is known to the driver. 
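/*
 * [Editorial sketch] On iwl_mvm_pn_cmp() from the sta.c hunk above:
 * the PN byte arrays are compared from the highest index down, i.e.
 * the byte at index len - 1 is treated as most significant. Restated
 * with a worked example:
 */
static int demo_pn_cmp(const u8 *pn1, const u8 *pn2, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {	/* MSB sits at the end */
		if (pn1[i] != pn2[i])
			return pn1[i] > pn2[i] ? 1 : -1;
	}
	return 0;	/* equal */
}
/*
 * e.g. demo_pn_cmp((u8 []){0x01, 0x00}, (u8 []){0x00, 0x01}, 2) == -1
 * since 1 < 256, so the per-TID loop above keeps the largest RX PN in
 * rx_secur_seq_cnt.
 */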
* @@ -297,8 +319,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, switch (te_data->vif->type) { case NL80211_IFTYPE_P2P_DEVICE: ieee80211_remain_on_channel_expired(mvm->hw); - set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status); - iwl_mvm_roc_finished(mvm); + iwl_mvm_p2p_roc_finished(mvm); break; case NL80211_IFTYPE_STATION: /* @@ -674,8 +695,7 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, /* Session protection is still ongoing. Cancel it */ iwl_mvm_cancel_session_protection(mvm, mvmvif, id); if (iftype == NL80211_IFTYPE_P2P_DEVICE) { - set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status); - iwl_mvm_roc_finished(mvm); + iwl_mvm_p2p_roc_finished(mvm); } } return false; @@ -842,8 +862,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, /* End TE, notify mac80211 */ mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID; ieee80211_remain_on_channel_expired(mvm->hw); - set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status); - iwl_mvm_roc_finished(mvm); + iwl_mvm_p2p_roc_finished(mvm); } else if (le32_to_cpu(notif->start)) { if (WARN_ON(mvmvif->time_event_data.id != le32_to_cpu(notif->conf_id))) @@ -1004,14 +1023,13 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { iwl_mvm_cancel_session_protection(mvm, mvmvif, mvmvif->time_event_data.id); - set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status); + iwl_mvm_p2p_roc_finished(mvm); } else { iwl_mvm_remove_aux_roc_te(mvm, mvmvif, &mvmvif->time_event_data); + iwl_mvm_roc_finished(mvm); } - iwl_mvm_roc_finished(mvm); - return; } @@ -1025,12 +1043,11 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { iwl_mvm_remove_time_event(mvm, mvmvif, te_data); - set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status); + iwl_mvm_p2p_roc_finished(mvm); } else { iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data); + iwl_mvm_roc_finished(mvm); } - - iwl_mvm_roc_finished(mvm); } void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 0b8a0cd3b652..8dc1b8eecb86 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1093,22 +1093,22 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_bz_a0_hr_b0, iwl_ax201_name), + iwl_cfg_bz_a0_hr_b0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_bz_a0_gf_a0, iwl_ax211_name), + iwl_cfg_bz_a0_gf_a0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, - iwl_cfg_bz_a0_gf4_a0, iwl_ax211_name), + iwl_cfg_bz_a0_gf4_a0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_bz_a0_mr_a0, iwl_ax211_name), + iwl_cfg_bz_a0_mr_a0, iwl_bz_name), /* SoF with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index cc550f6ef957..a43e56c7689f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ 
b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -42,6 +42,7 @@ struct iwl_host_cmd; * struct iwl_rx_mem_buffer * @page_dma: bus address of rxb page * @page: driver's pointer to the rxb page + * @list: list entry for the membuffer * @invalid: rxb is in driver ownership - not owned by HW * @vid: index of this rxb in the global table * @offset: indicates which offset of the page (in bytes) @@ -50,10 +51,10 @@ struct iwl_host_cmd; struct iwl_rx_mem_buffer { dma_addr_t page_dma; struct page *page; - u16 vid; - bool invalid; struct list_head list; u32 offset; + u16 vid; + bool invalid; }; /** @@ -253,6 +254,13 @@ struct cont_rec { }; #endif +enum iwl_pcie_fw_reset_state { + FW_RESET_IDLE, + FW_RESET_REQUESTED, + FW_RESET_OK, + FW_RESET_ERROR, +}; + /** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data @@ -404,7 +412,7 @@ struct iwl_trans_pcie { dma_addr_t base_rb_stts_dma; bool fw_reset_handshake; - bool fw_reset_done; + enum iwl_pcie_fw_reset_state fw_reset_state; wait_queue_head_t fw_reset_waitq; char rf_name[32]; @@ -670,19 +678,19 @@ static inline const char *queue_name(struct device *dev, IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0; if (i == 0) - return DRV_NAME ": shared IRQ"; + return DRV_NAME ":shared_IRQ"; return devm_kasprintf(dev, GFP_KERNEL, - DRV_NAME ": queue %d", i + vec); + DRV_NAME ":queue_%d", i + vec); } if (i == 0) - return DRV_NAME ": default queue"; + return DRV_NAME ":default_queue"; if (i == trans_p->alloc_vecs - 1) - return DRV_NAME ": exception"; + return DRV_NAME ":exception"; return devm_kasprintf(dev, GFP_KERNEL, - DRV_NAME ": queue %d", i); + DRV_NAME ":queue_%d", i); } static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 4f6f4b2720f0..8e45eb38304b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -487,6 +487,9 @@ void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; + if (!trans_pcie->rx_pool) + return; + for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) { if (!trans_pcie->rx_pool[i].page) continue; @@ -1062,7 +1065,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) INIT_LIST_HEAD(&rba->rbd_empty); spin_unlock_bh(&rba->lock); - /* free all first - we might be reconfigured for a different size */ + /* free all first - we overwrite everything here */ iwl_pcie_free_rbs_pool(trans); for (i = 0; i < RX_QUEUE_SIZE; i++) @@ -1653,7 +1656,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) /* The STATUS_FW_ERROR bit is set in this function. This must happen * before we wake up the command caller, to ensure a proper cleanup. */ - iwl_trans_fw_error(trans); + iwl_trans_fw_error(trans, false); clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); wake_up(&trans->wait_command_queue); @@ -2225,7 +2228,13 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) "Microcode SW error detected. 
Restarting 0x%X.\n", inta_fh); isr_stats->sw++; - iwl_pcie_irq_handle_error(trans); + /* during FW reset flow report errors from there */ + if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) { + trans_pcie->fw_reset_state = FW_RESET_ERROR; + wake_up(&trans_pcie->fw_reset_waitq); + } else { + iwl_pcie_irq_handle_error(trans); + } } /* After checking FH register check HW register */ @@ -2293,7 +2302,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) { IWL_DEBUG_ISR(trans, "Reset flow completed\n"); - trans_pcie->fw_reset_done = true; + trans_pcie->fw_reset_state = FW_RESET_OK; wake_up(&trans_pcie->fw_reset_waitq); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index a34009357227..bf0c32a74ca4 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -87,7 +87,12 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave) * Clear "initialization complete" bit to move adapter from * D0A* (powered-up Active) --> D0U* (Uninitialized) state. */ - iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_INIT); + else + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_INIT_DONE); } static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans) @@ -95,7 +100,7 @@ static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; - trans_pcie->fw_reset_done = false; + trans_pcie->fw_reset_state = FW_RESET_REQUESTED; if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER, @@ -106,10 +111,15 @@ static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans) /* wait 200ms */ ret = wait_event_timeout(trans_pcie->fw_reset_waitq, - trans_pcie->fw_reset_done, FW_RESET_TIMEOUT); - if (!ret) + trans_pcie->fw_reset_state != FW_RESET_REQUESTED, + FW_RESET_TIMEOUT); + if (!ret || trans_pcie->fw_reset_state == FW_RESET_ERROR) { IWL_INFO(trans, "firmware didn't ACK the reset - continue anyway\n"); + iwl_trans_fw_error(trans, true); + } + + trans_pcie->fw_reset_state = FW_RESET_IDLE; } void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) @@ -121,9 +131,21 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) if (trans_pcie->is_down) return; - if (trans_pcie->fw_reset_handshake && - trans->state >= IWL_TRANS_FW_STARTED) - iwl_trans_pcie_fw_reset_handshake(trans); + if (trans->state >= IWL_TRANS_FW_STARTED) { + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ); + iwl_poll_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, + CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, + 5000); + msleep(100); + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_SW_RESET); + } else if (trans_pcie->fw_reset_handshake) { + iwl_trans_pcie_fw_reset_handshake(trans); + } + } trans_pcie->is_down = true; @@ -154,9 +176,17 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) iwl_pcie_ctxt_info_free(trans); /* Make sure (redundant) we've released our request to stay awake */ - iwl_clear_bit(trans, CSR_GP_CNTRL, - 
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); + else + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_SW_RESET); + } /* Stop the device, and put it in low power state */ iwl_pcie_gen2_apm_stop(trans, false); @@ -436,7 +466,10 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, iwl_pcie_set_ltr(trans); - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_ROM_START); + else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1); else iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index bee6b4574226..f252680f18e8 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -449,11 +449,23 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans) int ret; /* stop device's busmaster DMA activity */ - iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); - ret = iwl_poll_bit(trans, CSR_RESET, - CSR_RESET_REG_FLAG_MASTER_DISABLED, - CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ); + + ret = iwl_poll_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, + CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, + 100); + } else { + iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); + + ret = iwl_poll_bit(trans, CSR_RESET, + CSR_RESET_REG_FLAG_MASTER_DISABLED, + CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); + } + if (ret < 0) IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); @@ -1866,6 +1878,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + /* free all first - we might be reconfigured for a different size */ + iwl_pcie_free_rbs_pool(trans); + trans->txqs.cmd.q_id = trans_cfg->cmd_queue; trans->txqs.cmd.fifo = trans_cfg->cmd_fifo; trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout; @@ -1992,15 +2007,24 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) { int ret; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ; + u32 mask = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP; + u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN; spin_lock(&trans_pcie->reg_lock); if (trans_pcie->cmd_hold_nic_awake) goto out; + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ; + mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; + poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; + } + /* this bit wakes up the NIC */ - __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) udelay(2); @@ -2024,10 +2048,7 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) * 5000 series and later 
(including 1000 series) have non-volatile SRAM, * and do not save/restore SRAM when power cycling. */ - ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, - (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | - CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); + ret = iwl_poll_bit(trans, CSR_GP_CNTRL, poll, mask, 15000); if (unlikely(ret < 0)) { u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL); @@ -2947,8 +2968,8 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb = rxq->queue[i]; struct iwl_fw_error_dump_rb *rb; - dma_unmap_page(trans->dev, rxb->page_dma, max_len, - DMA_FROM_DEVICE); + dma_sync_single_for_cpu(trans->dev, rxb->page_dma, + max_len, DMA_FROM_DEVICE); rb_len += sizeof(**data) + sizeof(*rb) + max_len; @@ -2957,10 +2978,6 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, rb = (void *)(*data)->data; rb->index = cpu_to_le32(i); memcpy(rb->data, page_address(rxb->page), max_len); - /* remap the page for the free benefit */ - rxb->page_dma = dma_map_page(trans->dev, rxb->page, - rxb->offset, max_len, - DMA_FROM_DEVICE); *data = iwl_fw_error_next_data(*data); } @@ -3489,15 +3506,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, pci_set_master(pdev); addr_size = trans->txqs.tfd.addr_size; - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size)); - if (!ret) - ret = pci_set_consistent_dma_mask(pdev, - DMA_BIT_MASK(addr_size)); + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size)); if (ret) { - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (!ret) - ret = pci_set_consistent_dma_mask(pdev, - DMA_BIT_MASK(32)); + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); /* both attempts failed: */ if (ret) { dev_err(&pdev->dev, "No suitable DMA available\n"); diff --git a/drivers/net/wireless/marvell/mwifiex/Makefile b/drivers/net/wireless/marvell/mwifiex/Makefile index 162d557b78af..2bd00f40958e 100644 --- a/drivers/net/wireless/marvell/mwifiex/Makefile +++ b/drivers/net/wireless/marvell/mwifiex/Makefile @@ -49,6 +49,7 @@ mwifiex_sdio-y += sdio.o obj-$(CONFIG_MWIFIEX_SDIO) += mwifiex_sdio.o mwifiex_pcie-y += pcie.o +mwifiex_pcie-y += pcie_quirks.o obj-$(CONFIG_MWIFIEX_PCIE) += mwifiex_pcie.o mwifiex_usb-y += usb.o diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 46517515ba72..c6ccce426b49 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -27,6 +27,7 @@ #include "wmm.h" #include "11n.h" #include "pcie.h" +#include "pcie_quirks.h" #define PCIE_VERSION "1.0" #define DRV_NAME "Marvell mwifiex PCIe" @@ -410,6 +411,9 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev, return ret; } + /* check quirks */ + mwifiex_initialize_quirks(card); + if (mwifiex_add_card(card, &card->fw_done, &pcie_ops, MWIFIEX_PCIE, &pdev->dev)) { pr_err("%s failed\n", __func__); @@ -524,6 +528,13 @@ static void mwifiex_pcie_reset_prepare(struct pci_dev *pdev) mwifiex_shutdown_sw(adapter); clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags); clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags); + + /* On MS Surface gen4+ devices FLR isn't effective to recover from + * hangups, so we power-cycle the card instead. 
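/*
 * [Editorial sketch] The power-cycle that
 * mwifiex_pcie_reset_d3cold_quirk() (added in pcie_quirks.c further
 * down in this diff) performs is a D3cold round-trip, applied to both
 * the card and its upstream bridge; condensed, with error handling
 * elided:
 */
#include <linux/pci.h>

static void demo_power_cycle(struct pci_dev *pdev)
{
	pci_save_state(pdev);
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3cold);	/* power off */

	pci_set_power_state(pdev, PCI_D0);	/* power back on */
	pci_enable_device(pdev);	/* return value checked in the real code */
	pci_restore_state(pdev);
}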
+ */ + if (card->quirks & QUIRK_FW_RST_D3COLD) + mwifiex_pcie_reset_d3cold_quirk(pdev); + mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); card->pci_reset_ongoing = true; diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h index 5ed613d65709..981e330c77d7 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.h +++ b/drivers/net/wireless/marvell/mwifiex/pcie.h @@ -244,6 +244,7 @@ struct pcie_service_card { unsigned long work_flags; bool pci_reset_ongoing; + unsigned long quirks; }; static inline int diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c new file mode 100644 index 000000000000..0234cf3c2974 --- /dev/null +++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.c @@ -0,0 +1,161 @@ +/* + * NXP Wireless LAN device driver: PCIE and platform specific quirks + * + * This software file (the "File") is distributed by NXP + * under the terms of the GNU General Public License Version 2, June 1991 + * (the "License"). You may use, redistribute and/or modify this File in + * accordance with the terms and conditions of the License, a copy of which + * is available by writing to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the + * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. + * + * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE + * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE + * ARE EXPRESSLY DISCLAIMED. The License provides additional details about + * this warranty disclaimer. + */ + +#include <linux/dmi.h> + +#include "pcie_quirks.h" + +/* quirk table based on DMI matching */ +static const struct dmi_system_id mwifiex_quirk_table[] = { + { + .ident = "Surface Pro 4", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 4"), + }, + .driver_data = (void *)QUIRK_FW_RST_D3COLD, + }, + { + .ident = "Surface Pro 5", + .matches = { + /* match for SKU here due to generic product name "Surface Pro" */ + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1796"), + }, + .driver_data = (void *)QUIRK_FW_RST_D3COLD, + }, + { + .ident = "Surface Pro 5 (LTE)", + .matches = { + /* match for SKU here due to generic product name "Surface Pro" */ + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1807"), + }, + .driver_data = (void *)QUIRK_FW_RST_D3COLD, + }, + { + .ident = "Surface Pro 6", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 6"), + }, + .driver_data = (void *)QUIRK_FW_RST_D3COLD, + }, + { + .ident = "Surface Book 1", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book"), + }, + .driver_data = (void *)QUIRK_FW_RST_D3COLD, + }, + { + .ident = "Surface Book 2", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 2"), + }, + .driver_data = (void *)QUIRK_FW_RST_D3COLD, + }, + { + .ident = "Surface Laptop 1", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop"), + }, + .driver_data = (void *)QUIRK_FW_RST_D3COLD, + }, + { + .ident = "Surface Laptop 2", + .matches = { + 
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop 2"), + }, + .driver_data = (void *)QUIRK_FW_RST_D3COLD, + }, + {} +}; + +void mwifiex_initialize_quirks(struct pcie_service_card *card) +{ + struct pci_dev *pdev = card->dev; + const struct dmi_system_id *dmi_id; + + dmi_id = dmi_first_match(mwifiex_quirk_table); + if (dmi_id) + card->quirks = (uintptr_t)dmi_id->driver_data; + + if (!card->quirks) + dev_info(&pdev->dev, "no quirks enabled\n"); + if (card->quirks & QUIRK_FW_RST_D3COLD) + dev_info(&pdev->dev, "quirk reset_d3cold enabled\n"); +} + +static void mwifiex_pcie_set_power_d3cold(struct pci_dev *pdev) +{ + dev_info(&pdev->dev, "putting into D3cold...\n"); + + pci_save_state(pdev); + if (pci_is_enabled(pdev)) + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3cold); +} + +static int mwifiex_pcie_set_power_d0(struct pci_dev *pdev) +{ + int ret; + + dev_info(&pdev->dev, "putting into D0...\n"); + + pci_set_power_state(pdev, PCI_D0); + ret = pci_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "pci_enable_device failed\n"); + return ret; + } + pci_restore_state(pdev); + + return 0; +} + +int mwifiex_pcie_reset_d3cold_quirk(struct pci_dev *pdev) +{ + struct pci_dev *parent_pdev = pci_upstream_bridge(pdev); + int ret; + + /* Power-cycle (put into D3cold then D0) */ + dev_info(&pdev->dev, "Using reset_d3cold quirk to perform FW reset\n"); + + /* We also need to power-cycle the wifi card's upstream bridge, because + * on some devices (e.g. Surface Book 1) the OS for some reason + * can't know the real power state of the bridge. + * When we tried to power-cycle only the wifi card, the reset failed + * with the following dmesg log: + * "Cannot transition to power state D0 for parent in D3hot". + */ + mwifiex_pcie_set_power_d3cold(pdev); + mwifiex_pcie_set_power_d3cold(parent_pdev); + + ret = mwifiex_pcie_set_power_d0(parent_pdev); + if (ret) + return ret; + ret = mwifiex_pcie_set_power_d0(pdev); + if (ret) + return ret; + + return 0; +} diff --git a/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h new file mode 100644 index 000000000000..8ec4176d698f --- /dev/null +++ b/drivers/net/wireless/marvell/mwifiex/pcie_quirks.h @@ -0,0 +1,23 @@ +/* + * NXP Wireless LAN device driver: PCIE and platform specific quirks + * + * This software file (the "File") is distributed by NXP + * under the terms of the GNU General Public License Version 2, June 1991 + * (the "License"). You may use, redistribute and/or modify this File in + * accordance with the terms and conditions of the License, a copy of which + * is available by writing to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the + * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. + * + * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE + * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE + * ARE EXPRESSLY DISCLAIMED. The License provides additional details about + * this warranty disclaimer.
+ */ + +#include "pcie.h" + +#define QUIRK_FW_RST_D3COLD BIT(0) + +void mwifiex_initialize_quirks(struct pcie_service_card *card); +int mwifiex_pcie_reset_d3cold_quirk(struct pci_dev *pdev); diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c index 8b180c29d682..dd481dc0b5ce 100644 --- a/drivers/net/wireless/microchip/wilc1000/spi.c +++ b/drivers/net/wireless/microchip/wilc1000/spi.c @@ -39,6 +39,7 @@ MODULE_PARM_DESC(enable_crc16, #define WILC_SPI_RSP_HDR_EXTRA_DATA 8 struct wilc_spi { + bool isinit; /* true if SPI protocol has been configured */ bool probing_crc; /* true if we're probing chip's CRC config */ bool crc7_enabled; /* true if crc7 is currently enabled */ bool crc16_enabled; /* true if crc16 is currently enabled */ @@ -908,15 +909,15 @@ static int wilc_spi_init(struct wilc *wilc, bool resume) struct wilc_spi *spi_priv = wilc->bus_data; u32 reg; u32 chipid; - static int isinit; int ret, i; - if (isinit) { + if (spi_priv->isinit) { + /* Confirm we can read chipid register without error: */ ret = wilc_spi_read_reg(wilc, WILC_CHIPID, &chipid); - if (ret) - dev_err(&spi->dev, "Fail cmd read chip id...\n"); + if (ret == 0) + return 0; - return ret; + dev_err(&spi->dev, "Fail cmd read chip id...\n"); } /* @@ -974,7 +975,7 @@ static int wilc_spi_init(struct wilc *wilc, bool resume) spi_priv->probing_crc = false; /* - * make sure can read back chip id correctly + * make sure can read chip id without protocol error */ ret = wilc_spi_read_reg(wilc, WILC_CHIPID, &chipid); if (ret) { @@ -982,7 +983,7 @@ static int wilc_spi_init(struct wilc *wilc, bool resume) return ret; } - isinit = 1; + spi_priv->isinit = true; return 0; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c index 8ae69d914312..9b83c710c9b8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c @@ -896,7 +896,7 @@ static void _rtl92d_ccxpower_index_check(struct ieee80211_hw *hw, static u8 _rtl92c_phy_get_rightchnlplace(u8 chnl) { - u8 place; + u8 place = chnl; if (chnl > 14) { for (place = 14; place < sizeof(channel5g); place++) { @@ -1363,7 +1363,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel) u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl) { - u8 place = chnl; + u8 place; if (chnl > 14) { for (place = 14; place < sizeof(channel_all); place++) { diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index cfb9f1ea30a0..a7a6ebfaa203 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -1729,6 +1729,15 @@ static const struct dmi_system_id rtw88_pci_quirks[] = { }, .driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_ASPM), }, + { + .callback = disable_pci_caps, + .ident = "HP HP Pavilion Laptop 14-ce0xxx", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Laptop 14-ce0xxx"), + }, + .driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_ASPM), + }, {} }; diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c index 24a417ea2ae7..bf22fd948276 100644 --- a/drivers/net/wireless/rsi/rsi_91x_debugfs.c +++ b/drivers/net/wireless/rsi/rsi_91x_debugfs.c @@ -117,7 +117,7 @@ static int rsi_stats_read(struct seq_file *seq, void *data) { struct rsi_common *common = seq->private; - unsigned char fsm_state[][32] = { + static const unsigned char 
fsm_state[][32] = { "FSM_FW_NOT_LOADED", "FSM_CARD_NOT_READY", "FSM_COMMON_DEV_PARAMS_SENT", diff --git a/drivers/net/wwan/mhi_wwan_ctrl.c b/drivers/net/wwan/mhi_wwan_ctrl.c index d0a98f34c54d..e4d0f696687f 100644 --- a/drivers/net/wwan/mhi_wwan_ctrl.c +++ b/drivers/net/wwan/mhi_wwan_ctrl.c @@ -110,7 +110,7 @@ static int mhi_wwan_ctrl_start(struct wwan_port *port) int ret; /* Start mhi device's channel(s) */ - ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev, 0); + ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev); if (ret) return ret; diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c index 377529bbf124..71bf9b4f769f 100644 --- a/drivers/net/wwan/mhi_wwan_mbim.c +++ b/drivers/net/wwan/mhi_wwan_mbim.c @@ -609,7 +609,7 @@ static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id INIT_DELAYED_WORK(&mbim->rx_refill, mhi_net_rx_refill_work); /* Start MHI channels */ - err = mhi_prepare_for_transfer(mhi_dev, 0); + err = mhi_prepare_for_transfer(mhi_dev); if (err) return err; diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c index 8fa7771085eb..8edf761a6b2a 100644 --- a/drivers/nfc/microread/mei.c +++ b/drivers/nfc/microread/mei.c @@ -10,7 +10,6 @@ #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/nfc.h> -#include <net/nfc/hci.h> #include <net/nfc/llc.h> #include "../mei_phy.h" diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c index 9d83ccebd434..bb4d029bb888 100644 --- a/drivers/nfc/microread/microread.c +++ b/drivers/nfc/microread/microread.c @@ -15,7 +15,6 @@ #include <linux/nfc.h> #include <net/nfc/nfc.h> #include <net/nfc/hci.h> -#include <net/nfc/llc.h> #include "microread.h" diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c index c38b228006fd..ceef81d93ac9 100644 --- a/drivers/nfc/nfcmrvl/i2c.c +++ b/drivers/nfc/nfcmrvl/i2c.c @@ -8,12 +8,9 @@ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/i2c.h> -#include <linux/pm_runtime.h> #include <linux/nfc.h> -#include <linux/gpio.h> #include <linux/delay.h> #include <linux/of_irq.h> -#include <linux/of_gpio.h> #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> #include "nfcmrvl.h" diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c index b182ab2e03c0..5b833a9a83f8 100644 --- a/drivers/nfc/nfcmrvl/spi.c +++ b/drivers/nfc/nfcmrvl/spi.c @@ -7,11 +7,8 @@ #include <linux/module.h> #include <linux/interrupt.h> -#include <linux/pm_runtime.h> #include <linux/nfc.h> -#include <linux/gpio.h> #include <linux/of_irq.h> -#include <linux/of_gpio.h> #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> #include <linux/spi/spi.h> diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c index 092f03b80a78..32a61a185142 100644 --- a/drivers/nfc/pn544/pn544.c +++ b/drivers/nfc/pn544/pn544.c @@ -13,7 +13,6 @@ #include <linux/nfc.h> #include <net/nfc/hci.h> -#include <net/nfc/llc.h> #include "pn544.h" diff --git a/drivers/nfc/st-nci/core.c b/drivers/nfc/st-nci/core.c index 72bb51efdf9c..a367136d4330 100644 --- a/drivers/nfc/st-nci/core.c +++ b/drivers/nfc/st-nci/core.c @@ -9,8 +9,6 @@ #include <linux/nfc.h> #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> -#include <linux/gpio.h> -#include <linux/delay.h> #include "st-nci.h" diff --git a/drivers/nfc/st21nfca/core.c b/drivers/nfc/st21nfca/core.c index 5e6c99fcfd27..161caf2675cf 100644 --- a/drivers/nfc/st21nfca/core.c +++ b/drivers/nfc/st21nfca/core.c @@ -8,7 +8,6 @@ #include <linux/module.h> #include <linux/nfc.h> 
#include <net/nfc/hci.h> -#include <net/nfc/llc.h> #include "st21nfca.h" diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c index 1b44a37a71aa..279d88128b2e 100644 --- a/drivers/nfc/st21nfca/i2c.c +++ b/drivers/nfc/st21nfca/i2c.c @@ -18,8 +18,6 @@ #include <linux/nfc.h> #include <linux/firmware.h> -#include <asm/unaligned.h> - #include <net/nfc/hci.h> #include <net/nfc/llc.h> #include <net/nfc/nfc.h> diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c index 993818742570..d16cf3ff644e 100644 --- a/drivers/nfc/st95hf/core.c +++ b/drivers/nfc/st95hf/core.c @@ -16,7 +16,6 @@ #include <linux/nfc.h> #include <linux/of_gpio.h> #include <linux/of.h> -#include <linux/of_irq.h> #include <linux/property.h> #include <linux/regulator/consumer.h> #include <linux/wait.h> diff --git a/drivers/opp/core.c b/drivers/opp/core.c index b335c077f215..5543c54dacc5 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -1856,9 +1856,6 @@ void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) if (unlikely(!opp_table)) return; - /* Make sure there are no concurrent readers while updating opp_table */ - WARN_ON(!list_empty(&opp_table->opp_list)); - kfree(opp_table->supported_hw); opp_table->supported_hw = NULL; opp_table->supported_hw_count = 0; @@ -1944,9 +1941,6 @@ void dev_pm_opp_put_prop_name(struct opp_table *opp_table) if (unlikely(!opp_table)) return; - /* Make sure there are no concurrent readers while updating opp_table */ - WARN_ON(!list_empty(&opp_table->opp_list)); - kfree(opp_table->prop_name); opp_table->prop_name = NULL; @@ -2056,9 +2050,6 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table) if (!opp_table->regulators) goto put_opp_table; - /* Make sure there are no concurrent readers while updating opp_table */ - WARN_ON(!list_empty(&opp_table->opp_list)); - if (opp_table->enabled) { for (i = opp_table->regulator_count - 1; i >= 0; i--) regulator_disable(opp_table->regulators[i]); @@ -2178,9 +2169,6 @@ void dev_pm_opp_put_clkname(struct opp_table *opp_table) if (unlikely(!opp_table)) return; - /* Make sure there are no concurrent readers while updating opp_table */ - WARN_ON(!list_empty(&opp_table->opp_list)); - clk_put(opp_table->clk); opp_table->clk = ERR_PTR(-EINVAL); @@ -2279,9 +2267,6 @@ void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) if (unlikely(!opp_table)) return; - /* Make sure there are no concurrent readers while updating opp_table */ - WARN_ON(!list_empty(&opp_table->opp_list)); - opp_table->set_opp = NULL; mutex_lock(&opp_table->lock); diff --git a/drivers/opp/of.c b/drivers/opp/of.c index d298e38aaf7e..67f2e0710e79 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -964,8 +964,9 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) } } - /* There should be one of more OPP defined */ - if (WARN_ON(!count)) { + /* There should be one or more OPPs defined */ + if (!count) { + dev_err(dev, "%s: no supported OPPs", __func__); ret = -ENOENT; goto remove_static_opp; } diff --git a/drivers/pci/controller/pci-ixp4xx.c b/drivers/pci/controller/pci-ixp4xx.c index 896a45b24236..654ac4a82beb 100644 --- a/drivers/pci/controller/pci-ixp4xx.c +++ b/drivers/pci/controller/pci-ixp4xx.c @@ -145,7 +145,7 @@ static int ixp4xx_pci_check_master_abort(struct ixp4xx_pci *p) return 0; } -static int ixp4xx_pci_read(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data) +static int ixp4xx_pci_read_indirect(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data) { ixp4xx_writel(p, IXP4XX_PCI_NP_AD, addr); @@ 
-170,7 +170,7 @@ static int ixp4xx_pci_read(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 *data) return ixp4xx_pci_check_master_abort(p); } -static int ixp4xx_pci_write(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 data) +static int ixp4xx_pci_write_indirect(struct ixp4xx_pci *p, u32 addr, u32 cmd, u32 data) { ixp4xx_writel(p, IXP4XX_PCI_NP_AD, addr); @@ -308,7 +308,7 @@ static int ixp4xx_pci_read_config(struct pci_bus *bus, unsigned int devfn, dev_dbg(p->dev, "read_config from %d size %d dev %d:%d:%d address: %08x cmd: %08x\n", where, size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), addr, cmd); - ret = ixp4xx_pci_read(p, addr, cmd, &val); + ret = ixp4xx_pci_read_indirect(p, addr, cmd, &val); if (ret) return PCIBIOS_DEVICE_NOT_FOUND; @@ -356,7 +356,7 @@ static int ixp4xx_pci_write_config(struct pci_bus *bus, unsigned int devfn, dev_dbg(p->dev, "write_config_byte %#x to %d size %d dev %d:%d:%d addr: %08x cmd %08x\n", value, where, size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), addr, cmd); - ret = ixp4xx_pci_write(p, addr, cmd, val); + ret = ixp4xx_pci_write_indirect(p, addr, cmd, val); if (ret) return PCIBIOS_DEVICE_NOT_FOUND; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 5d63df7c1820..7bbf2673c7f2 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -978,7 +978,7 @@ void pci_create_legacy_files(struct pci_bus *b) b->legacy_mem->size = 1024*1024; b->legacy_mem->attr.mode = 0600; b->legacy_mem->mmap = pci_mmap_legacy_mem; - b->legacy_io->mapping = iomem_get_mapping(); + b->legacy_mem->mapping = iomem_get_mapping(); pci_adjust_legacy_attr(b, pci_mmap_mem); error = device_create_bin_file(&b->dev, b->legacy_mem); if (error) diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 6d74386eadc2..ab3de1551b50 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1900,6 +1900,7 @@ static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev) } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1639, quirk_ryzen_xhci_d3hot); #ifdef CONFIG_X86_IO_APIC static int dmi_disable_ioapicreroute(const struct dmi_system_id *d) diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c index 810f25a47632..6781488cfc58 100644 --- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c +++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c @@ -29,22 +29,16 @@ #define COMPHY_FW_MODE_SATA 0x1 #define COMPHY_FW_MODE_SGMII 0x2 -#define COMPHY_FW_MODE_HS_SGMII 0x3 +#define COMPHY_FW_MODE_2500BASEX 0x3 #define COMPHY_FW_MODE_USB3H 0x4 #define COMPHY_FW_MODE_USB3D 0x5 #define COMPHY_FW_MODE_PCIE 0x6 -#define COMPHY_FW_MODE_RXAUI 0x7 -#define COMPHY_FW_MODE_XFI 0x8 -#define COMPHY_FW_MODE_SFI 0x9 #define COMPHY_FW_MODE_USB3 0xa #define COMPHY_FW_SPEED_1_25G 0 /* SGMII 1G */ #define COMPHY_FW_SPEED_2_5G 1 -#define COMPHY_FW_SPEED_3_125G 2 /* SGMII 2.5G */ +#define COMPHY_FW_SPEED_3_125G 2 /* 2500BASE-X */ #define COMPHY_FW_SPEED_5G 3 -#define COMPHY_FW_SPEED_5_15625G 4 /* XFI 5G */ -#define COMPHY_FW_SPEED_6G 5 -#define COMPHY_FW_SPEED_10_3125G 6 /* XFI 10G */ #define COMPHY_FW_SPEED_MAX 0x3F #define COMPHY_FW_MODE(mode) ((mode) << 12) @@ -84,14 +78,14 @@ static const struct mvebu_a3700_comphy_conf mvebu_a3700_comphy_modes[] = { MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_SGMII, 1, COMPHY_FW_MODE_SGMII), MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_2500BASEX, 1, - 
COMPHY_FW_MODE_HS_SGMII), + COMPHY_FW_MODE_2500BASEX), /* lane 1 */ MVEBU_A3700_COMPHY_CONF_GEN(1, PHY_MODE_PCIE, 0, COMPHY_FW_MODE_PCIE), MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_SGMII, 0, COMPHY_FW_MODE_SGMII), MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_2500BASEX, 0, - COMPHY_FW_MODE_HS_SGMII), + COMPHY_FW_MODE_2500BASEX), /* lane 2 */ MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_SATA, 0, COMPHY_FW_MODE_SATA), @@ -205,7 +199,7 @@ static int mvebu_a3700_comphy_power_on(struct phy *phy) COMPHY_FW_SPEED_1_25G); break; case PHY_INTERFACE_MODE_2500BASEX: - dev_dbg(lane->dev, "set lane %d to HS SGMII mode\n", + dev_dbg(lane->dev, "set lane %d to 2500BASEX mode\n", lane->id); fw_param = COMPHY_FW_NET(fw_mode, lane->port, COMPHY_FW_SPEED_3_125G); diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c index 53ad127b100f..bbd6f2ad6f24 100644 --- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c +++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c @@ -167,7 +167,7 @@ #define COMPHY_FW_MODE_SATA 0x1 #define COMPHY_FW_MODE_SGMII 0x2 /* SGMII 1G */ -#define COMPHY_FW_MODE_HS_SGMII 0x3 /* SGMII 2.5G */ +#define COMPHY_FW_MODE_2500BASEX 0x3 /* 2500BASE-X */ #define COMPHY_FW_MODE_USB3H 0x4 #define COMPHY_FW_MODE_USB3D 0x5 #define COMPHY_FW_MODE_PCIE 0x6 @@ -207,7 +207,7 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = { /* lane 0 */ GEN_CONF(0, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE), ETH_CONF(0, 1, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII), - ETH_CONF(0, 1, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII), + ETH_CONF(0, 1, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_2500BASEX), GEN_CONF(0, 1, PHY_MODE_SATA, COMPHY_FW_MODE_SATA), /* lane 1 */ GEN_CONF(1, 0, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H), @@ -215,10 +215,10 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = { GEN_CONF(1, 0, PHY_MODE_SATA, COMPHY_FW_MODE_SATA), GEN_CONF(1, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE), ETH_CONF(1, 2, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII), - ETH_CONF(1, 2, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII), + ETH_CONF(1, 2, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_2500BASEX), /* lane 2 */ ETH_CONF(2, 0, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII), - ETH_CONF(2, 0, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII), + ETH_CONF(2, 0, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_2500BASEX), ETH_CONF(2, 0, PHY_INTERFACE_MODE_RXAUI, 0x1, COMPHY_FW_MODE_RXAUI), ETH_CONF(2, 0, PHY_INTERFACE_MODE_10GBASER, 0x1, COMPHY_FW_MODE_XFI), GEN_CONF(2, 0, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H), @@ -227,26 +227,26 @@ static const struct mvebu_comphy_conf mvebu_comphy_cp110_modes[] = { /* lane 3 */ GEN_CONF(3, 0, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE), ETH_CONF(3, 1, PHY_INTERFACE_MODE_SGMII, 0x2, COMPHY_FW_MODE_SGMII), - ETH_CONF(3, 1, PHY_INTERFACE_MODE_2500BASEX, 0x2, COMPHY_FW_MODE_HS_SGMII), + ETH_CONF(3, 1, PHY_INTERFACE_MODE_2500BASEX, 0x2, COMPHY_FW_MODE_2500BASEX), ETH_CONF(3, 1, PHY_INTERFACE_MODE_RXAUI, 0x1, COMPHY_FW_MODE_RXAUI), GEN_CONF(3, 1, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H), GEN_CONF(3, 1, PHY_MODE_SATA, COMPHY_FW_MODE_SATA), /* lane 4 */ ETH_CONF(4, 0, PHY_INTERFACE_MODE_SGMII, 0x2, COMPHY_FW_MODE_SGMII), - ETH_CONF(4, 0, PHY_INTERFACE_MODE_2500BASEX, 0x2, COMPHY_FW_MODE_HS_SGMII), + ETH_CONF(4, 0, PHY_INTERFACE_MODE_2500BASEX, 0x2, COMPHY_FW_MODE_2500BASEX), ETH_CONF(4, 0, PHY_INTERFACE_MODE_10GBASER, 0x2, COMPHY_FW_MODE_XFI), 
ETH_CONF(4, 0, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI), GEN_CONF(4, 0, PHY_MODE_USB_DEVICE_SS, COMPHY_FW_MODE_USB3D), GEN_CONF(4, 1, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H), GEN_CONF(4, 1, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE), ETH_CONF(4, 1, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII), - ETH_CONF(4, 1, PHY_INTERFACE_MODE_2500BASEX, -1, COMPHY_FW_MODE_HS_SGMII), + ETH_CONF(4, 1, PHY_INTERFACE_MODE_2500BASEX, -1, COMPHY_FW_MODE_2500BASEX), ETH_CONF(4, 1, PHY_INTERFACE_MODE_10GBASER, -1, COMPHY_FW_MODE_XFI), /* lane 5 */ ETH_CONF(5, 1, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI), GEN_CONF(5, 1, PHY_MODE_SATA, COMPHY_FW_MODE_SATA), ETH_CONF(5, 2, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII), - ETH_CONF(5, 2, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_HS_SGMII), + ETH_CONF(5, 2, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_2500BASEX), GEN_CONF(5, 2, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE), }; diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig index 32660dc11354..f02bedf41264 100644 --- a/drivers/ptp/Kconfig +++ b/drivers/ptp/Kconfig @@ -171,16 +171,10 @@ config PTP_1588_CLOCK_OCP tristate "OpenCompute TimeCard as PTP clock" depends on PTP_1588_CLOCK depends on HAS_IOMEM && PCI - depends on SPI && I2C && MTD + depends on I2C && MTD + depends on SERIAL_8250 depends on !S390 - imply SPI_MEM - imply SPI_XILINX - imply MTD_SPI_NOR - imply I2C_XILINX - select SERIAL_8250 select NET_DEVLINK - - default n help This driver adds support for an OpenCompute time card. diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c index f2b5d347d227..e5ae26227bdb 100644 --- a/drivers/slimbus/messaging.c +++ b/drivers/slimbus/messaging.c @@ -66,7 +66,7 @@ int slim_alloc_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn) int ret = 0; spin_lock_irqsave(&ctrl->txn_lock, flags); - ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 0, + ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 1, SLIM_MAX_TIDS, GFP_ATOMIC); if (ret < 0) { spin_unlock_irqrestore(&ctrl->txn_lock, flags); @@ -131,7 +131,8 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn) goto slim_xfer_err; } } - + /* Initialize tid to invalid value */ + txn->tid = 0; need_tid = slim_tid_txn(txn->mt, txn->mc); if (need_tid) { @@ -163,7 +164,7 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn) txn->mt, txn->mc, txn->la, ret); slim_xfer_err: - if (!clk_pause_msg && (!need_tid || ret == -ETIMEDOUT)) { + if (!clk_pause_msg && (txn->tid == 0 || ret == -ETIMEDOUT)) { /* * remove runtime-pm vote if this was TX only, or * if there was error during this transaction diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c index c054e83ab636..7040293c2ee8 100644 --- a/drivers/slimbus/qcom-ngd-ctrl.c +++ b/drivers/slimbus/qcom-ngd-ctrl.c @@ -618,7 +618,7 @@ static void qcom_slim_ngd_rx(struct qcom_slim_ngd_ctrl *ctrl, u8 *buf) (mc == SLIM_USR_MC_GENERIC_ACK && mt == SLIM_MSG_MT_SRC_REFERRED_USER)) { slim_msg_response(&ctrl->ctrl, &buf[4], buf[3], len - 4); - pm_runtime_mark_last_busy(ctrl->dev); + pm_runtime_mark_last_busy(ctrl->ctrl.dev); } } @@ -1080,7 +1080,8 @@ static void qcom_slim_ngd_setup(struct qcom_slim_ngd_ctrl *ctrl) { u32 cfg = readl_relaxed(ctrl->ngd->base); - if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN) + if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN || + ctrl->state == QCOM_SLIM_NGD_CTRL_ASLEEP) qcom_slim_ngd_init_dma(ctrl); /* By default enable message queues */ @@ -1131,6 +1132,7 @@ static int 
qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl) dev_info(ctrl->dev, "Subsys restart: ADSP active framer\n"); return 0; } + qcom_slim_ngd_setup(ctrl); return 0; } @@ -1257,13 +1259,14 @@ static int qcom_slim_ngd_enable(struct qcom_slim_ngd_ctrl *ctrl, bool enable) } /* controller state should be in sync with framework state */ complete(&ctrl->qmi.qmi_comp); - if (!pm_runtime_enabled(ctrl->dev) || - !pm_runtime_suspended(ctrl->dev)) - qcom_slim_ngd_runtime_resume(ctrl->dev); + if (!pm_runtime_enabled(ctrl->ctrl.dev) || + !pm_runtime_suspended(ctrl->ctrl.dev)) + qcom_slim_ngd_runtime_resume(ctrl->ctrl.dev); else - pm_runtime_resume(ctrl->dev); - pm_runtime_mark_last_busy(ctrl->dev); - pm_runtime_put(ctrl->dev); + pm_runtime_resume(ctrl->ctrl.dev); + + pm_runtime_mark_last_busy(ctrl->ctrl.dev); + pm_runtime_put(ctrl->ctrl.dev); ret = slim_register_controller(&ctrl->ctrl); if (ret) { @@ -1389,7 +1392,7 @@ static int qcom_slim_ngd_ssr_pdr_notify(struct qcom_slim_ngd_ctrl *ctrl, /* Make sure the last dma xfer is finished */ mutex_lock(&ctrl->tx_lock); if (ctrl->state != QCOM_SLIM_NGD_CTRL_DOWN) { - pm_runtime_get_noresume(ctrl->dev); + pm_runtime_get_noresume(ctrl->ctrl.dev); ctrl->state = QCOM_SLIM_NGD_CTRL_DOWN; qcom_slim_ngd_down(ctrl); qcom_slim_ngd_exit_dma(ctrl); @@ -1617,6 +1620,7 @@ static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev) struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev); int ret = 0; + qcom_slim_ngd_exit_dma(ctrl); if (!ctrl->qmi.handle) return 0; diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c index 3f711c1a0996..bbae3d39c7be 100644 --- a/drivers/soc/fsl/qe/qe_ic.c +++ b/drivers/soc/fsl/qe/qe_ic.c @@ -23,6 +23,7 @@ #include <linux/signal.h> #include <linux/device.h> #include <linux/spinlock.h> +#include <linux/platform_device.h> #include <asm/irq.h> #include <asm/io.h> #include <soc/fsl/qe/qe.h> @@ -53,8 +54,8 @@ struct qe_ic { struct irq_chip hc_irq; /* VIRQ numbers of QE high/low irqs */ - unsigned int virq_high; - unsigned int virq_low; + int virq_high; + int virq_low; }; /* @@ -404,42 +405,40 @@ static void qe_ic_cascade_muxed_mpic(struct irq_desc *desc) chip->irq_eoi(&desc->irq_data); } -static void __init qe_ic_init(struct device_node *node) +static int qe_ic_init(struct platform_device *pdev) { + struct device *dev = &pdev->dev; void (*low_handler)(struct irq_desc *desc); void (*high_handler)(struct irq_desc *desc); struct qe_ic *qe_ic; - struct resource res; - u32 ret; + struct resource *res; + struct device_node *node = pdev->dev.of_node; - ret = of_address_to_resource(node, 0, &res); - if (ret) - return; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(dev, "no memory resource defined\n"); + return -ENODEV; + } - qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL); + qe_ic = devm_kzalloc(dev, sizeof(*qe_ic), GFP_KERNEL); if (qe_ic == NULL) - return; + return -ENOMEM; - qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS, - &qe_ic_host_ops, qe_ic); - if (qe_ic->irqhost == NULL) { - kfree(qe_ic); - return; + qe_ic->regs = devm_ioremap(dev, res->start, resource_size(res)); + if (qe_ic->regs == NULL) { + dev_err(dev, "failed to ioremap() registers\n"); + return -ENODEV; } - qe_ic->regs = ioremap(res.start, resource_size(&res)); - qe_ic->hc_irq = qe_ic_irq_chip; - qe_ic->virq_high = irq_of_parse_and_map(node, 0); - qe_ic->virq_low = irq_of_parse_and_map(node, 1); + qe_ic->virq_high = platform_get_irq(pdev, 0); + qe_ic->virq_low = platform_get_irq(pdev, 1); - if 
(!qe_ic->virq_low) { - printk(KERN_ERR "Failed to map QE_IC low IRQ\n"); - kfree(qe_ic); - return; - } - if (qe_ic->virq_high != qe_ic->virq_low) { + if (qe_ic->virq_low <= 0) + return -ENODEV; + + if (qe_ic->virq_high > 0 && qe_ic->virq_high != qe_ic->virq_low) { low_handler = qe_ic_cascade_low; high_handler = qe_ic_cascade_high; } else { @@ -447,29 +446,42 @@ static void __init qe_ic_init(struct device_node *node) high_handler = NULL; } + qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS, + &qe_ic_host_ops, qe_ic); + if (qe_ic->irqhost == NULL) { + dev_err(dev, "failed to add irq domain\n"); + return -ENODEV; + } + qe_ic_write(qe_ic->regs, QEIC_CICR, 0); irq_set_handler_data(qe_ic->virq_low, qe_ic); irq_set_chained_handler(qe_ic->virq_low, low_handler); - if (qe_ic->virq_high && qe_ic->virq_high != qe_ic->virq_low) { + if (high_handler) { irq_set_handler_data(qe_ic->virq_high, qe_ic); irq_set_chained_handler(qe_ic->virq_high, high_handler); } + return 0; } +static const struct of_device_id qe_ic_ids[] = { + { .compatible = "fsl,qe-ic"}, + { .type = "qeic"}, + {}, +}; -static int __init qe_ic_of_init(void) +static struct platform_driver qe_ic_driver = { - struct device_node *np; + .driver = { + .name = "qe-ic", + .of_match_table = qe_ic_ids, + }, + .probe = qe_ic_init, +}; - np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic"); - if (!np) { - np = of_find_node_by_type(NULL, "qeic"); - if (!np) - return -ENODEV; - } - qe_ic_init(np); - of_node_put(np); +static int __init qe_ic_of_init(void) +{ + platform_driver_register(&qe_ic_driver); return 0; } subsys_initcall(qe_ic_of_init); diff --git a/drivers/staging/media/av7110/av7110.h b/drivers/staging/media/av7110/av7110.h index b8e8fc8ddbe9..809d938ae166 100644 --- a/drivers/staging/media/av7110/av7110.h +++ b/drivers/staging/media/av7110/av7110.h @@ -9,12 +9,11 @@ #include <linux/input.h> #include <linux/time.h> -#include "video.h" -#include "audio.h" -#include "osd.h" - +#include <linux/dvb/video.h> +#include <linux/dvb/audio.h> #include <linux/dvb/dmx.h> #include <linux/dvb/ca.h> +#include <linux/dvb/osd.h> #include <linux/dvb/net.h> #include <linux/mutex.h> diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index b9bb63d749ec..f4079b5cb26d 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -1737,6 +1737,10 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev, return rlen; } +static void tcpm_pd_handle_msg(struct tcpm_port *port, + enum pd_msg_request message, + enum tcpm_ams ams); + static void tcpm_handle_vdm_request(struct tcpm_port *port, const __le32 *payload, int cnt) { @@ -1764,11 +1768,11 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port, port->vdm_state = VDM_STATE_DONE; } - if (PD_VDO_SVDM(p[0])) { + if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) { rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action); } else { if (port->negotiated_rev >= PD_REV30) - tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP); + tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS); } /* @@ -2471,10 +2475,7 @@ static void tcpm_pd_data_request(struct tcpm_port *port, NONE_AMS); break; case PD_DATA_VENDOR_DEF: - if (tcpm_vdm_ams(port) || port->nr_snk_vdo) - tcpm_handle_vdm_request(port, msg->payload, cnt); - else if (port->negotiated_rev > PD_REV20) - tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS); + tcpm_handle_vdm_request(port, msg->payload, cnt); break; case PD_DATA_BIST: port->bist_request = 
le32_to_cpu(msg->payload[0]); diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 6414bd5741b8..3a249ee7e144 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -643,8 +643,6 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len) !vhost_vq_avail_empty(vq->dev, vq); } -#define SKB_FRAG_PAGE_ORDER get_order(32768) - static bool vhost_net_page_frag_refill(struct vhost_net *net, unsigned int sz, struct page_frag *pfrag, gfp_t gfp) { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 06f9f167222b..bd5689fa290e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -629,7 +629,7 @@ again: * inode has not been flagged as nocompress. This flag can * change at any time if we discover bad compression ratios. */ - if (nr_pages > 1 && inode_need_compress(BTRFS_I(inode), start, end)) { + if (inode_need_compress(BTRFS_I(inode), start, end)) { WARN_ON(pages); pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); if (!pages) { diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 2a2900903f8c..39db97f149b9 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1743,7 +1743,11 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask, struct ceph_cap_flush *ceph_alloc_cap_flush(void) { - return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL); + struct ceph_cap_flush *cf; + + cf = kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL); + cf->is_capsnap = false; + return cf; } void ceph_free_cap_flush(struct ceph_cap_flush *cf) @@ -1778,7 +1782,7 @@ static bool __detach_cap_flush_from_mdsc(struct ceph_mds_client *mdsc, prev->wake = true; wake = false; } - list_del(&cf->g_list); + list_del_init(&cf->g_list); return wake; } @@ -1793,7 +1797,7 @@ static bool __detach_cap_flush_from_ci(struct ceph_inode_info *ci, prev->wake = true; wake = false; } - list_del(&cf->i_list); + list_del_init(&cf->i_list); return wake; } @@ -2352,7 +2356,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc, ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH; list_for_each_entry_reverse(cf, &ci->i_cap_flush_list, i_list) { - if (!cf->caps) { + if (cf->is_capsnap) { last_snap_flush = cf->tid; break; } @@ -2371,7 +2375,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc, first_tid = cf->tid + 1; - if (cf->caps) { + if (!cf->is_capsnap) { struct cap_msg_args arg; dout("kick_flushing_caps %p cap %p tid %llu %s\n", @@ -3516,7 +3520,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid, cleaned = cf->caps; /* Is this a capsnap? 
*/ - if (cf->caps == 0) + if (cf->is_capsnap) continue; if (cf->tid <= flush_tid) { @@ -3589,8 +3593,9 @@ out: while (!list_empty(&to_remove)) { cf = list_first_entry(&to_remove, struct ceph_cap_flush, i_list); - list_del(&cf->i_list); - ceph_free_cap_flush(cf); + list_del_init(&cf->i_list); + if (!cf->is_capsnap) + ceph_free_cap_flush(cf); } if (wake_ci) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index afdc20213876..0b69aec23e5c 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1616,7 +1616,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, spin_lock(&mdsc->cap_dirty_lock); list_for_each_entry(cf, &to_remove, i_list) - list_del(&cf->g_list); + list_del_init(&cf->g_list); if (!list_empty(&ci->i_dirty_item)) { pr_warn_ratelimited( @@ -1668,8 +1668,9 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, struct ceph_cap_flush *cf; cf = list_first_entry(&to_remove, struct ceph_cap_flush, i_list); - list_del(&cf->i_list); - ceph_free_cap_flush(cf); + list_del_init(&cf->i_list); + if (!cf->is_capsnap) + ceph_free_cap_flush(cf); } wake_up_all(&ci->i_cap_wq); diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c index abd9af7727ad..3c444b9cb17b 100644 --- a/fs/ceph/mdsmap.c +++ b/fs/ceph/mdsmap.c @@ -394,9 +394,11 @@ void ceph_mdsmap_destroy(struct ceph_mdsmap *m) { int i; - for (i = 0; i < m->possible_max_rank; i++) - kfree(m->m_info[i].export_targets); - kfree(m->m_info); + if (m->m_info) { + for (i = 0; i < m->possible_max_rank; i++) + kfree(m->m_info[i].export_targets); + kfree(m->m_info); + } kfree(m->m_data_pg_pools); kfree(m); } diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 4c6bd1042c94..15105f9da3fd 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -487,6 +487,9 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci) pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode); return; } + capsnap->cap_flush.is_capsnap = true; + INIT_LIST_HEAD(&capsnap->cap_flush.i_list); + INIT_LIST_HEAD(&capsnap->cap_flush.g_list); spin_lock(&ci->i_ceph_lock); used = __ceph_caps_used(ci); diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 9215a2f4535c..b1a363641beb 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -182,8 +182,9 @@ struct ceph_cap { struct ceph_cap_flush { u64 tid; - int caps; /* 0 means capsnap */ + int caps; bool wake; /* wake up flush waiters when finish ? 
*/ + bool is_capsnap; /* true means capsnap */ struct list_head g_list; // global struct list_head i_list; // per inode }; diff --git a/fs/io_uring.c b/fs/io_uring.c index 04c6d059ea94..a2e20a6fbfed 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2477,8 +2477,10 @@ static void io_fallback_req_func(struct work_struct *work) struct llist_node *node = llist_del_all(&ctx->fallback_llist); struct io_kiocb *req, *tmp; + percpu_ref_get(&ctx->refs); llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node) req->io_task_work.func(req); + percpu_ref_put(&ctx->refs); } static void __io_complete_rw(struct io_kiocb *req, long res, long res2, @@ -9370,9 +9372,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, if (ctx->flags & IORING_SETUP_SQPOLL) { io_cqring_overflow_flush(ctx, false); - ret = -EOWNERDEAD; - if (unlikely(ctx->sq_data->thread == NULL)) + if (unlikely(ctx->sq_data->thread == NULL)) { + ret = -EOWNERDEAD; goto out; + } if (flags & IORING_ENTER_SQ_WAKEUP) wake_up(&ctx->sq_data->wait); if (flags & IORING_ENTER_SQ_WAIT) { @@ -9840,10 +9843,11 @@ static int io_register_personality(struct io_ring_ctx *ctx) ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds, XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL); - if (!ret) - return id; - put_cred(creds); - return ret; + if (ret < 0) { + put_cred(creds); + return ret; + } + return id; } static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg, diff --git a/fs/namespace.c b/fs/namespace.c index f79d9471cb76..97adcb5ab5d5 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1716,8 +1716,12 @@ static inline bool may_mount(void) } #ifdef CONFIG_MANDATORY_FILE_LOCKING -static inline bool may_mandlock(void) +static bool may_mandlock(void) { + pr_warn_once("======================================================\n" + "WARNING: the mand mount option is being deprecated and\n" + " will be removed in v5.15!\n" + "======================================================\n"); return capable(CAP_SYS_ADMIN); } #else diff --git a/fs/pipe.c b/fs/pipe.c index 678dee2a8228..6d4342bad9f1 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -363,10 +363,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to) * _very_ unlikely case that the pipe was full, but we got * no data. */ - if (unlikely(was_full)) { + if (unlikely(was_full)) wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM); - kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); - } + kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); /* * But because we didn't read anything, at this point we can @@ -385,12 +384,11 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to) wake_next_reader = false; __pipe_unlock(pipe); - if (was_full) { + if (was_full) wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM); - kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); - } if (wake_next_reader) wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM); + kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); if (ret > 0) file_accessed(filp); return ret; @@ -565,10 +563,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from) * become empty while we dropped the lock. 
*/ __pipe_unlock(pipe); - if (was_empty) { + if (was_empty) wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM); - kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); - } + kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe)); __pipe_lock(pipe); was_empty = pipe_empty(pipe->head, pipe->tail); @@ -591,10 +588,9 @@ out: * Epoll nonsensically wants a wakeup whether the pipe * was already empty or not. */ - if (was_empty || pipe->poll_usage) { + if (was_empty || pipe->poll_usage) wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM); - kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); - } + kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); if (wake_next_writer) wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM); if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) { diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index a6730072d13a..694264503119 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1088,6 +1088,48 @@ struct ieee80211_ext { } u; } __packed __aligned(2); +#define IEEE80211_TWT_CONTROL_NDP BIT(0) +#define IEEE80211_TWT_CONTROL_RESP_MODE BIT(1) +#define IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST BIT(3) +#define IEEE80211_TWT_CONTROL_RX_DISABLED BIT(4) +#define IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT BIT(5) + +#define IEEE80211_TWT_REQTYPE_REQUEST BIT(0) +#define IEEE80211_TWT_REQTYPE_SETUP_CMD GENMASK(3, 1) +#define IEEE80211_TWT_REQTYPE_TRIGGER BIT(4) +#define IEEE80211_TWT_REQTYPE_IMPLICIT BIT(5) +#define IEEE80211_TWT_REQTYPE_FLOWTYPE BIT(6) +#define IEEE80211_TWT_REQTYPE_FLOWID GENMASK(9, 7) +#define IEEE80211_TWT_REQTYPE_WAKE_INT_EXP GENMASK(14, 10) +#define IEEE80211_TWT_REQTYPE_PROTECTION BIT(15) + +enum ieee80211_twt_setup_cmd { + TWT_SETUP_CMD_REQUEST, + TWT_SETUP_CMD_SUGGEST, + TWT_SETUP_CMD_DEMAND, + TWT_SETUP_CMD_GROUPING, + TWT_SETUP_CMD_ACCEPT, + TWT_SETUP_CMD_ALTERNATE, + TWT_SETUP_CMD_DICTATE, + TWT_SETUP_CMD_REJECT, +}; + +struct ieee80211_twt_params { + __le16 req_type; + __le64 twt; + u8 min_twt_dur; + __le16 mantissa; + u8 channel; +} __packed; + +struct ieee80211_twt_setup { + u8 dialog_token; + u8 element_id; + u8 length; + u8 control; + u8 params[]; +} __packed; + struct ieee80211_mgmt { __le16 frame_control; __le16 duration; @@ -1252,6 +1294,10 @@ struct ieee80211_mgmt { __le16 toa_error; u8 variable[0]; } __packed ftm; + struct { + u8 action_code; + u8 variable[]; + } __packed s1g; } u; } __packed action; } u; @@ -2266,6 +2312,9 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) #define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000 #define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000 +#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0 +#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1 + /** * ieee80211_he_6ghz_oper - HE 6 GHz operation Information field * @primary: primary channel @@ -2282,6 +2331,7 @@ struct ieee80211_he_6ghz_oper { #define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ 2 #define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ 3 #define IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON 0x4 +#define IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO 0x38 u8 control; u8 ccfs0; u8 ccfs1; @@ -2289,6 +2339,44 @@ } __packed; /* + * In "9.4.2.161 Transmit Power Envelope element" of "IEEE Std 802.11ax-2021", + * it shows four types in "Table 9-275a-Maximum Transmit Power Interpretation + * subfield encoding", and two categories for each type in "Table E-12-Regulatory + * Info subfield encoding in the United States". + * So there can be at most 8 Transmit Power Envelope elements in total. + */ +#define IEEE80211_TPE_MAX_IE_COUNT 8 +/* + * In "Table 9-277—Meaning of Maximum Transmit Power Count subfield" + * of "IEEE Std 802.11ax™‐2021", the max power level is 8. + */ +#define IEEE80211_MAX_NUM_PWR_LEVEL 8 + +#define IEEE80211_TPE_MAX_POWER_COUNT 8 + +/* transmit power interpretation type of transmit power envelope element */ +enum ieee80211_tx_power_intrpt_type { + IEEE80211_TPE_LOCAL_EIRP, + IEEE80211_TPE_LOCAL_EIRP_PSD, + IEEE80211_TPE_REG_CLIENT_EIRP, + IEEE80211_TPE_REG_CLIENT_EIRP_PSD, +}; + +/** + * struct ieee80211_tx_pwr_env + * + * This structure represents the "Transmit Power Envelope element" + */ +struct ieee80211_tx_pwr_env { + u8 tx_power_info; + s8 tx_power[IEEE80211_TPE_MAX_POWER_COUNT]; +} __packed; + +#define IEEE80211_TX_PWR_ENV_INFO_COUNT 0x7 +#define IEEE80211_TX_PWR_ENV_INFO_INTERPRET 0x38 +#define IEEE80211_TX_PWR_ENV_INFO_CATEGORY 0xC0 + +/* * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size * @he_oper_ie: byte data of the He Operations IE, stating from the byte * after the ext ID byte. It is assumed that he_oper_ie has at least @@ -2869,7 +2957,7 @@ enum ieee80211_eid { WLAN_EID_VHT_OPERATION = 192, WLAN_EID_EXTENDED_BSS_LOAD = 193, WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194, - WLAN_EID_VHT_TX_POWER_ENVELOPE = 195, + WLAN_EID_TX_POWER_ENVELOPE = 195, WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196, WLAN_EID_AID = 197, WLAN_EID_QUIET_CHANNEL = 198, @@ -2881,6 +2969,7 @@ enum ieee80211_eid { WLAN_EID_AID_RESPONSE = 211, WLAN_EID_S1G_BCN_COMPAT = 213, WLAN_EID_S1G_SHORT_BCN_INTERVAL = 214, + WLAN_EID_S1G_TWT = 216, WLAN_EID_S1G_CAPABILITIES = 217, WLAN_EID_VENDOR_SPECIFIC = 221, WLAN_EID_QOS_PARAMETER = 222, @@ -2950,6 +3039,7 @@ enum ieee80211_category { WLAN_CATEGORY_FST = 18, WLAN_CATEGORY_UNPROT_DMG = 20, WLAN_CATEGORY_VHT = 21, + WLAN_CATEGORY_S1G = 22, WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126, WLAN_CATEGORY_VENDOR_SPECIFIC = 127, }; @@ -3023,6 +3113,20 @@ enum ieee80211_key_len { WLAN_KEY_LEN_BIP_GMAC_256 = 32, }; +enum ieee80211_s1g_actioncode { + WLAN_S1G_AID_SWITCH_REQUEST, + WLAN_S1G_AID_SWITCH_RESPONSE, + WLAN_S1G_SYNC_CONTROL, + WLAN_S1G_STA_INFO_ANNOUNCE, + WLAN_S1G_EDCA_PARAM_SET, + WLAN_S1G_EL_OPERATION, + WLAN_S1G_TWT_SETUP, + WLAN_S1G_TWT_TEARDOWN, + WLAN_S1G_SECT_GROUP_ID_LIST, + WLAN_S1G_SECT_ID_FEEDBACK, + WLAN_S1G_TWT_INFORMATION = 11, +}; + #define IEEE80211_WEP_IV_LEN 4 #define IEEE80211_WEP_ICV_LEN 4 #define IEEE80211_CCMP_HDR_LEN 8 diff --git a/include/linux/kfence.h b/include/linux/kfence.h index a70d1ea03532..3fe6dd8a18c1 100644 --- a/include/linux/kfence.h +++ b/include/linux/kfence.h @@ -51,10 +51,11 @@ extern atomic_t kfence_allocation_gate; static __always_inline bool is_kfence_address(const void *addr) { /* - * The non-NULL check is required in case the __kfence_pool pointer was - * never initialized; keep it in the slow-path after the range-check. + * The __kfence_pool != NULL check is required to deal with the case + * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in + * the slow-path after the range-check!
*/ - return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && addr); + return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool); } /** diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index f0ee30881ca9..20151c4f1e0e 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -612,12 +612,15 @@ static inline bool mem_cgroup_disabled(void) return !cgroup_subsys_enabled(memory_cgrp_subsys); } -static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, - struct mem_cgroup *memcg, - bool in_low_reclaim) +static inline void mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, + unsigned long *min, + unsigned long *low) { + *min = *low = 0; + if (mem_cgroup_disabled()) - return 0; + return; /* * There is no reclaim protection applied to a targeted reclaim. @@ -653,13 +656,10 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, * */ if (root == memcg) - return 0; - - if (in_low_reclaim) - return READ_ONCE(memcg->memory.emin); + return; - return max(READ_ONCE(memcg->memory.emin), - READ_ONCE(memcg->memory.elow)); + *min = READ_ONCE(memcg->memory.emin); + *low = READ_ONCE(memcg->memory.elow); } void mem_cgroup_calculate_protection(struct mem_cgroup *root, @@ -1147,11 +1147,12 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm, { } -static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, - struct mem_cgroup *memcg, - bool in_low_reclaim) +static inline void mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, + unsigned long *min, + unsigned long *low) { - return 0; + *min = *low = 0; } static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root, diff --git a/include/linux/mhi.h b/include/linux/mhi.h index c493a80cb453..beb918328eef 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -721,13 +721,8 @@ void mhi_device_put(struct mhi_device *mhi_dev); * host and device execution environments match and * channels are in a DISABLED state. * @mhi_dev: Device associated with the channels - * @flags: MHI channel flags */ -int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, - unsigned int flags); - -/* Automatically allocate and queue inbound buffers */ -#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0) +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev); /** * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer. 
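To illustrate the mhi_prepare_for_transfer() change above: after this series the function takes only the mhi_device, and the MHI_CH_INBOUND_ALLOC_BUFS flag is gone, so clients queue their own inbound buffers. A minimal sketch of a hypothetical client probe under the new API follows (the driver name and buffer handling are illustrative, not part of this series):

    #include <linux/mhi.h>
    #include <linux/module.h>

    /* Hypothetical MHI client probe: start the device's UL/DL channels.
     * mhi_prepare_for_transfer() no longer takes a flags argument, so any
     * inbound buffers must be allocated by the client itself and handed
     * to the device, e.g. with mhi_queue_buf().
     */
    static int example_mhi_probe(struct mhi_device *mhi_dev,
                                 const struct mhi_device_id *id)
    {
        int ret;

        ret = mhi_prepare_for_transfer(mhi_dev);
        if (ret)
            return ret;

        /* ... allocate and queue rx buffers here as needed ... */
        return 0;
    }
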
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 0d5a2691e7e9..f9b53acb4e02 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -7,7 +7,7 @@ #include <linux/types.h> #include <linux/spinlock.h> #include <linux/pci.h> -#include <linux/gpio.h> +#include <linux/gpio/driver.h> #include <linux/mod_devicetable.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> diff --git a/include/linux/ssb/ssb_driver_extif.h b/include/linux/ssb/ssb_driver_extif.h index 3f8bc973d67d..19253bfacd1a 100644 --- a/include/linux/ssb/ssb_driver_extif.h +++ b/include/linux/ssb/ssb_driver_extif.h @@ -197,7 +197,7 @@ struct ssb_extif { static inline bool ssb_extif_available(struct ssb_extif *extif) { - return 0; + return false; } static inline diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 42235c178b06..653e7d0f65cb 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -210,6 +210,8 @@ struct inet6_dev { unsigned long tstamp; /* ipv6InterfaceTable update timestamp */ struct rcu_head rcu; + + unsigned int ra_mtu; }; static inline void ipv6_eth_mc_map(const struct in6_addr *addr, char *buf) diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 15b7fbe6b15c..c412dde4d67d 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -267,7 +267,7 @@ static inline bool fib6_check_expired(const struct fib6_info *f6i) return false; } -/* Function to safely get fn->sernum for passed in rt +/* Function to safely get fn->fn_sernum for passed in rt * and store result in passed in cookie. * Return true if we can get cookie safely * Return false if not @@ -282,7 +282,7 @@ static inline bool fib6_get_cookie_safe(const struct fib6_info *f6i, if (fn) { *cookie = fn->fn_sernum; - /* pairs with smp_wmb() in fib6_update_sernum_upto_root() */ + /* pairs with smp_wmb() in __fib6_update_sernum_upto_root() */ smp_rmb(); status = true; } diff --git a/include/net/mac80211.h b/include/net/mac80211.h index a23e6734d26b..af0fc13cea34 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -3926,6 +3926,13 @@ struct ieee80211_prep_tx_info { * @set_sar_specs: Update the SAR (TX power) settings. * @sta_set_decap_offload: Called to notify the driver when a station is allowed * to use rx decapsulation offload + * @add_twt_setup: Update the hw with the TWT agreement parameters received from + * the peer. This callback allows the hw to check if the requested parameters + * are supported and if there is enough room for a new agreement. + * The hw is expected to set the agreement result in the req_type field of + * the twt structure. + * @twt_teardown_request: Update the hw with the TWT teardown request received + * from the peer.
*/ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, @@ -4249,6 +4256,11 @@ struct ieee80211_ops { void (*sta_set_decap_offload)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool enabled); + void (*add_twt_setup)(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct ieee80211_twt_setup *twt); + void (*twt_teardown_request)(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u8 flowid); }; /** diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index 1f4e1816fd36..947733a639a6 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -65,6 +65,13 @@ struct netns_xfrm { u32 sysctl_aevent_rseqth; int sysctl_larval_drop; u32 sysctl_acq_expires; + + u8 policy_default; +#define XFRM_POL_DEFAULT_IN 1 +#define XFRM_POL_DEFAULT_OUT 2 +#define XFRM_POL_DEFAULT_FWD 4 +#define XFRM_POL_DEFAULT_MASK 7 + #ifdef CONFIG_SYSCTL struct ctl_table_header *sysctl_hdr; #endif diff --git a/include/net/sock.h b/include/net/sock.h index 95b25777b53e..66a9a90f9558 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2717,6 +2717,7 @@ extern int sysctl_optmem_max; extern __u32 sysctl_wmem_default; extern __u32 sysctl_rmem_default; +#define SKB_FRAG_PAGE_ORDER get_order(32768) DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key); static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index cbff7c2a9724..2308210793a0 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1075,6 +1075,22 @@ xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, un } #ifdef CONFIG_XFRM +static inline bool +xfrm_default_allow(struct net *net, int dir) +{ + u8 def = net->xfrm.policy_default; + + switch (dir) { + case XFRM_POLICY_IN: + return def & XFRM_POL_DEFAULT_IN ? false : true; + case XFRM_POLICY_OUT: + return def & XFRM_POL_DEFAULT_OUT ? false : true; + case XFRM_POLICY_FWD: + return def & XFRM_POL_DEFAULT_FWD ? 
false : true; + } + return false; +} + int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family); @@ -1088,9 +1104,13 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir, if (sk && sk->sk_policy[XFRM_POLICY_IN]) return __xfrm_policy_check(sk, ndir, skb, family); - return (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) || - (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) || - __xfrm_policy_check(sk, ndir, skb, family); + if (xfrm_default_allow(net, dir)) + return (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) || + (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) || + __xfrm_policy_check(sk, ndir, skb, family); + else + return (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) || + __xfrm_policy_check(sk, ndir, skb, family); } static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family) @@ -1142,9 +1162,13 @@ static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family) { struct net *net = dev_net(skb->dev); - return !net->xfrm.policy_count[XFRM_POLICY_OUT] || - (skb_dst(skb)->flags & DST_NOXFRM) || - __xfrm_route_forward(skb, family); + if (xfrm_default_allow(net, XFRM_POLICY_FWD)) + return !net->xfrm.policy_count[XFRM_POLICY_OUT] || + (skb_dst(skb)->flags & DST_NOXFRM) || + __xfrm_route_forward(skb, family); + else + return (skb_dst(skb)->flags & DST_NOXFRM) || + __xfrm_route_forward(skb, family); } static inline int xfrm4_route_forward(struct sk_buff *skb) diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 390270e00a1d..f160484afc5c 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -48,7 +48,9 @@ {(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \ {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \ {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\ - {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"}\ + {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"},\ + {(unsigned long)__GFP_ZEROTAGS, "__GFP_ZEROTAGS"}, \ + {(unsigned long)__GFP_SKIP_KASAN_POISON,"__GFP_SKIP_KASAN_POISON"}\ #define show_gfp_flags(flags) \ (flags) ? 
__print_flags(flags, "|", \ diff --git a/drivers/staging/media/av7110/audio.h b/include/uapi/linux/dvb/audio.h index 2f869da69171..2f869da69171 100644 --- a/drivers/staging/media/av7110/audio.h +++ b/include/uapi/linux/dvb/audio.h diff --git a/drivers/staging/media/av7110/osd.h b/include/uapi/linux/dvb/osd.h index 858997c74043..858997c74043 100644 --- a/drivers/staging/media/av7110/osd.h +++ b/include/uapi/linux/dvb/osd.h diff --git a/drivers/staging/media/av7110/video.h b/include/uapi/linux/dvb/video.h index 179f1ec60af6..179f1ec60af6 100644 --- a/drivers/staging/media/av7110/video.h +++ b/include/uapi/linux/dvb/video.h diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 8aad65b69054..eebd3894fe89 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -417,6 +417,7 @@ enum { IFLA_INET6_ICMP6STATS, /* statistics (icmpv6) */ IFLA_INET6_TOKEN, /* device token */ IFLA_INET6_ADDR_GEN_MODE, /* implicit address generator mode */ + IFLA_INET6_RA_MTU, /* mtu carried in the RA message */ __IFLA_INET6_MAX }; diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index ffc6a5391bb7..b96c1ea7166d 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -213,6 +213,11 @@ enum { XFRM_MSG_GETSPDINFO, #define XFRM_MSG_GETSPDINFO XFRM_MSG_GETSPDINFO + XFRM_MSG_SETDEFAULT, +#define XFRM_MSG_SETDEFAULT XFRM_MSG_SETDEFAULT + XFRM_MSG_GETDEFAULT, +#define XFRM_MSG_GETDEFAULT XFRM_MSG_GETDEFAULT + XFRM_MSG_MAPPING, #define XFRM_MSG_MAPPING XFRM_MSG_MAPPING __XFRM_MSG_MAX @@ -508,6 +513,12 @@ struct xfrm_user_offload { #define XFRM_OFFLOAD_IPV6 1 #define XFRM_OFFLOAD_INBOUND 2 +struct xfrm_userpolicy_default { +#define XFRM_USERPOLICY_DIRMASK_MAX (sizeof(__u8) * 8) + __u8 dirmask; + __u8 action; +}; + #ifndef __KERNEL__ /* backwards compatibility for userspace */ #define XFRMGRP_ACQUIRE 1 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index e5f2b23bb7c9..9134aedfdb7d 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5302,8 +5302,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, case BPF_MAP_TYPE_RINGBUF: if (func_id != BPF_FUNC_ringbuf_output && func_id != BPF_FUNC_ringbuf_reserve && - func_id != BPF_FUNC_ringbuf_submit && - func_id != BPF_FUNC_ringbuf_discard && func_id != BPF_FUNC_ringbuf_query) goto error; break; @@ -5412,6 +5410,12 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) goto error; break; + case BPF_FUNC_ringbuf_output: + case BPF_FUNC_ringbuf_reserve: + case BPF_FUNC_ringbuf_query: + if (map->map_type != BPF_MAP_TYPE_RINGBUF) + goto error; + break; case BPF_FUNC_get_stackid: if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) goto error; diff --git a/kernel/cred.c b/kernel/cred.c index e6fd2b3fc31f..f784e08c2fbd 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -286,13 +286,13 @@ struct cred *prepare_creds(void) new->security = NULL; #endif - if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0) - goto error; - new->ucounts = get_ucounts(new->ucounts); if (!new->ucounts) goto error; + if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0) + goto error; + validate_creds(new); return new; @@ -753,13 +753,13 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) #ifdef CONFIG_SECURITY new->security = NULL; #endif - if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0) - goto error; - new->ucounts = get_ucounts(new->ucounts); if (!new->ucounts) goto error; + if 
(security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0) + goto error; + put_cred(old); validate_creds(new); return new; diff --git a/kernel/fork.c b/kernel/fork.c index e8b41e212110..c97e85245dfc 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -828,10 +828,10 @@ void __init fork_init(void) for (i = 0; i < MAX_PER_NAMESPACE_UCOUNTS; i++) init_user_ns.ucount_max[i] = max_threads/2; - set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, task_rlimit(&init_task, RLIMIT_NPROC)); - set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, task_rlimit(&init_task, RLIMIT_MSGQUEUE)); - set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, task_rlimit(&init_task, RLIMIT_SIGPENDING)); - set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, task_rlimit(&init_task, RLIMIT_MEMLOCK)); + set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, RLIM_INFINITY); + set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, RLIM_INFINITY); + set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY); + set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, RLIM_INFINITY); #ifdef CONFIG_VMAP_STACK cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache", diff --git a/mm/hugetlb.c b/mm/hugetlb.c index dfc940d5221d..8ea35ba6699f 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2476,7 +2476,7 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, if (!rc) { /* * This indicates there is an entry in the reserve map - * added by alloc_huge_page. We know it was added + * not added by alloc_huge_page. We know it was added * before the alloc_huge_page call, otherwise * HPageRestoreReserve would be set on the page. * Remove the entry so that a subsequent allocation @@ -4660,7 +4660,9 @@ retry_avoidcopy: spin_unlock(ptl); mmu_notifier_invalidate_range_end(&range); out_release_all: - restore_reserve_on_error(h, vma, haddr, new_page); + /* No restore in case of successful pagetable update (Break COW) */ + if (new_page != old_page) + restore_reserve_on_error(h, vma, haddr, new_page); put_page(new_page); out_release_old: put_page(old_page); @@ -4776,7 +4778,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, pte_t new_pte; spinlock_t *ptl; unsigned long haddr = address & huge_page_mask(h); - bool new_page = false; + bool new_page, new_pagecache_page = false; /* * Currently, we are forced to kill the process in the event the @@ -4799,6 +4801,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, goto out; retry: + new_page = false; page = find_lock_page(mapping, idx); if (!page) { /* Check for page in userfault range */ @@ -4842,6 +4845,7 @@ retry: goto retry; goto out; } + new_pagecache_page = true; } else { lock_page(page); if (unlikely(anon_vma_prepare(vma))) { @@ -4926,7 +4930,9 @@ backout: spin_unlock(ptl); backout_unlocked: unlock_page(page); - restore_reserve_on_error(h, vma, haddr, page); + /* restore reserve for newly allocated pages not in page cache */ + if (new_page && !new_pagecache_page) + restore_reserve_on_error(h, vma, haddr, page); put_page(page); goto out; } @@ -5135,6 +5141,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, int ret = -ENOMEM; struct page *page; int writable; + bool new_pagecache_page = false; if (is_continue) { ret = -EFAULT; @@ -5228,6 +5235,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, ret = huge_add_to_page_cache(page, mapping, idx); if (ret) goto out_release_nounlock; + new_pagecache_page = true; } ptl = huge_pte_lockptr(h, dst_mm, dst_pte); @@ -5291,7 +5299,8 @@ 
out_release_unlock: if (vm_shared || is_continue) unlock_page(page); out_release_nounlock: - restore_reserve_on_error(h, dst_vma, dst_addr, page); + if (!new_pagecache_page) + restore_reserve_on_error(h, dst_vma, dst_addr, page); put_page(page); goto out; } diff --git a/mm/memory-failure.c b/mm/memory-failure.c index eefd823deb67..470400cc7513 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1146,7 +1146,7 @@ static int __get_hwpoison_page(struct page *page) * unexpected races caused by taking a page refcount. */ if (!HWPoisonHandlable(head)) - return 0; + return -EBUSY; if (PageTransHuge(head)) { /* @@ -1199,9 +1199,15 @@ try_again: } goto out; } else if (ret == -EBUSY) { - /* We raced with freeing huge page to buddy, retry. */ - if (pass++ < 3) + /* + * We raced with (possibly temporary) unhandlable + * page, retry. + */ + if (pass++ < 3) { + shake_page(p, 1); goto try_again; + } + ret = -EIO; goto out; } } diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 8cb75b26ea4f..86c3af79e874 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1731,6 +1731,7 @@ failed_removal_isolated: undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); memory_notify(MEM_CANCEL_OFFLINE, &arg); failed_removal_pcplists_disabled: + lru_cache_enable(); zone_pcp_enable(zone); failed_removal: pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n", diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 856b175c15a4..eeb3a9cb36bb 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3453,19 +3453,10 @@ void free_unref_page_list(struct list_head *list) * comment in free_unref_page. */ migratetype = get_pcppage_migratetype(page); - if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { - if (unlikely(is_migrate_isolate(migratetype))) { - list_del(&page->lru); - free_one_page(page_zone(page), page, pfn, 0, - migratetype, FPI_NONE); - continue; - } - - /* - * Non-isolated types over MIGRATE_PCPTYPES get added - * to the MIGRATE_MOVABLE pcp list. - */ - set_pcppage_migratetype(page, MIGRATE_MOVABLE); + if (unlikely(is_migrate_isolate(migratetype))) { + list_del(&page->lru); + free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE); + continue; } set_page_private(page, pfn); @@ -3475,7 +3466,15 @@ void free_unref_page_list(struct list_head *list) list_for_each_entry_safe(page, next, list, lru) { pfn = page_private(page); set_page_private(page, 0); + + /* + * Non-isolated types over MIGRATE_PCPTYPES get added + * to the MIGRATE_MOVABLE pcp list. + */ migratetype = get_pcppage_migratetype(page); + if (unlikely(migratetype >= MIGRATE_PCPTYPES)) + migratetype = MIGRATE_MOVABLE; + trace_mm_page_free_batched(page); free_unref_page_commit(page, pfn, migratetype, 0); diff --git a/mm/shmem.c b/mm/shmem.c index 70d9ce294bb4..dacda7463d54 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1696,8 +1696,7 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index, struct address_space *mapping = inode->i_mapping; struct shmem_inode_info *info = SHMEM_I(inode); struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL; - struct swap_info_struct *si; - struct page *page = NULL; + struct page *page; swp_entry_t swap; int error; @@ -1705,12 +1704,6 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index, swap = radix_to_swp_entry(*pagep); *pagep = NULL; - /* Prevent swapoff from happening to us. */ - si = get_swap_device(swap); - if (!si) { - error = EINVAL; - goto failed; - } /* Look it up and read it in.. 
*/ page = lookup_swap_cache(swap, NULL, 0); if (!page) { @@ -1772,8 +1765,6 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index, swap_free(swap); *pagep = page; - if (si) - put_swap_device(si); return 0; failed: if (!shmem_confirm_swap(mapping, index, swap)) @@ -1784,9 +1775,6 @@ unlock: put_page(page); } - if (si) - put_swap_device(si); - return error; } diff --git a/mm/swap_state.c b/mm/swap_state.c index c56aa9ac050d..bc7cee6b2ec5 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -628,13 +628,6 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, if (!mask) goto skip; - /* Test swap type to make sure the dereference is safe */ - if (likely(si->flags & (SWP_BLKDEV | SWP_FS_OPS))) { - struct inode *inode = si->swap_file->f_mapping->host; - if (inode_read_congested(inode)) - goto skip; - } - do_poll = false; /* Read a page_cluster sized and aligned cluster around offset. */ start_offset = offset & ~mask; diff --git a/mm/vmscan.c b/mm/vmscan.c index 4620df62f0ff..eeae2f6bc532 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -100,9 +100,12 @@ struct scan_control { unsigned int may_swap:1; /* - * Cgroups are not reclaimed below their configured memory.low, - * unless we threaten to OOM. If any cgroups are skipped due to - * memory.low and nothing was reclaimed, go back for memory.low. + * Cgroup memory below memory.low is protected as long as we + * don't threaten to OOM. If any cgroup is reclaimed at + * reduced force or passed over entirely due to its memory.low + * setting (memcg_low_skipped), and nothing is reclaimed as a + * result, then go back for one more cycle that reclaims the protected + * memory (memcg_low_reclaim) to avert OOM. */ unsigned int memcg_low_reclaim:1; unsigned int memcg_low_skipped:1; @@ -2537,15 +2540,14 @@ out: for_each_evictable_lru(lru) { int file = is_file_lru(lru); unsigned long lruvec_size; + unsigned long low, min; unsigned long scan; - unsigned long protection; lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); - protection = mem_cgroup_protection(sc->target_mem_cgroup, - memcg, - sc->memcg_low_reclaim); + mem_cgroup_protection(sc->target_mem_cgroup, memcg, + &min, &low); - if (protection) { + if (min || low) { /* * Scale a cgroup's reclaim pressure by proportioning * its current usage to its memory.low or memory.min @@ -2576,6 +2578,15 @@ out: * hard protection. 
*/ unsigned long cgroup_size = mem_cgroup_size(memcg); + unsigned long protection; + + /* memory.low scaling, make sure we retry before OOM */ + if (!sc->memcg_low_reclaim && low > min) { + protection = low; + sc->memcg_low_skipped = 1; + } else { + protection = min; + } /* Avoid TOCTOU with earlier protection check */ cgroup_size = max(cgroup_size, protection); @@ -4413,11 +4424,13 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in .may_swap = 1, .reclaim_idx = gfp_zone(gfp_mask), }; + unsigned long pflags; trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order, sc.gfp_mask); cond_resched(); + psi_memstall_enter(&pflags); fs_reclaim_acquire(sc.gfp_mask); /* * We need to be able to allocate from the reserves for RECLAIM_UNMAP @@ -4442,6 +4455,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in current->flags &= ~PF_SWAPWRITE; memalloc_noreclaim_restore(noreclaim_flag); fs_reclaim_release(sc.gfp_mask); + psi_memstall_leave(&pflags); trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 2dcf1c084b20..972c8cb303a5 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2605,6 +2605,7 @@ static int do_setlink(const struct sk_buff *skb, return err; if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) { + const char *pat = ifname && ifname[0] ? ifname : NULL; struct net *net; int new_ifindex; @@ -2620,7 +2621,7 @@ static int do_setlink(const struct sk_buff *skb, else new_ifindex = 0; - err = __dev_change_net_namespace(dev, net, ifname, new_ifindex); + err = __dev_change_net_namespace(dev, net, pat, new_ifindex); put_net(net); if (err) goto errout; diff --git a/net/core/sock.c b/net/core/sock.c index 950f1e70dbf5..62627e868e03 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2574,7 +2574,6 @@ static void sk_leave_memory_pressure(struct sock *sk) } } -#define SKB_FRAG_PAGE_ORDER get_order(32768) DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key); /** diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 099259fc826a..7fbd0b532f52 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -465,14 +465,16 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) if (!doi_def) return; - switch (doi_def->type) { - case CIPSO_V4_MAP_TRANS: - kfree(doi_def->map.std->lvl.cipso); - kfree(doi_def->map.std->lvl.local); - kfree(doi_def->map.std->cat.cipso); - kfree(doi_def->map.std->cat.local); - kfree(doi_def->map.std); - break; + if (doi_def->map.std) { + switch (doi_def->type) { + case CIPSO_V4_MAP_TRANS: + kfree(doi_def->map.std->lvl.cipso); + kfree(doi_def->map.std->lvl.local); + kfree(doi_def->map.std->cat.cipso); + kfree(doi_def->map.std->cat.local); + kfree(doi_def->map.std); + break; + } } kfree(doi_def); } diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index a09e36c4a413..851f542928a3 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -97,7 +97,6 @@ static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead, static void esp_ssg_unref(struct xfrm_state *x, void *tmp) { - struct esp_output_extra *extra = esp_tmp_extra(tmp); struct crypto_aead *aead = x->data; int extralen = 0; u8 *iv; @@ -105,9 +104,8 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp) struct scatterlist *sg; if (x->props.flags & XFRM_STATE_ESN) - extralen += sizeof(*extra); + extralen += sizeof(struct esp_output_extra); - extra = esp_tmp_extra(tmp); iv = esp_tmp_iv(aead, tmp, extralen); req = esp_tmp_req(aead, iv); diff 
--git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 6ebf05859acb..177d26d8fb9c 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -473,6 +473,8 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, static int gre_handle_offloads(struct sk_buff *skb, bool csum) { + if (csum && skb_checksum_start(skb) < skb->data) + return -EINVAL; return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE); } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index b181773d7ad3..1e3b18797070 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -601,14 +601,14 @@ static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash) return oldest; } -static inline u32 fnhe_hashfun(__be32 daddr) +static u32 fnhe_hashfun(__be32 daddr) { - static u32 fnhe_hashrnd __read_mostly; - u32 hval; + static siphash_key_t fnhe_hash_key __read_mostly; + u64 hval; - net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd)); - hval = jhash_1word((__force u32)daddr, fnhe_hashrnd); - return hash_32(hval, FNHE_HASH_SHIFT); + net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key)); + hval = siphash_1u32((__force u32)daddr, &fnhe_hash_key); + return hash_64(hval, FNHE_HASH_SHIFT); } static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f931def6302e..e8b48df73c85 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3338,6 +3338,7 @@ int tcp_set_window_clamp(struct sock *sk, int val) } else { tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ? SOCK_MIN_RCVBUF / 2 : val; + tp->rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp); } return 0; } diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 8381288a0d6e..17756f3ed33b 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -394,6 +394,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY; ndev->cnf.mtu6 = dev->mtu; + ndev->ra_mtu = 0; ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl); if (!ndev->nd_parms) { kfree(ndev); @@ -3849,6 +3850,7 @@ restart: } idev->tstamp = jiffies; + idev->ra_mtu = 0; /* Last: Shot the device (if unregistered) */ if (unregister) { @@ -5543,6 +5545,7 @@ static inline size_t inet6_ifla6_size(void) + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */ + nla_total_size(sizeof(struct in6_addr)) /* IFLA_INET6_TOKEN */ + nla_total_size(1) /* IFLA_INET6_ADDR_GEN_MODE */ + + nla_total_size(4) /* IFLA_INET6_RA_MTU */ + 0; } @@ -5651,6 +5654,10 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev, if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode)) goto nla_put_failure; + if (idev->ra_mtu && + nla_put_u32(skb, IFLA_INET6_RA_MTU, idev->ra_mtu)) + goto nla_put_failure; + return 0; nla_put_failure: @@ -5767,6 +5774,9 @@ update_lft: static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = { [IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 }, [IFLA_INET6_TOKEN] = { .len = sizeof(struct in6_addr) }, + [IFLA_INET6_RA_MTU] = { .type = NLA_REJECT, + .reject_message = + "IFLA_INET6_RA_MTU can not be set" }, }; static int check_addr_gen_mode(int mode) diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index a8f118e469b7..1bec5b22f80d 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -1341,7 +1341,7 @@ static void __fib6_update_sernum_upto_root(struct fib6_info *rt, struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node, lockdep_is_held(&rt->fib6_table->tb6_lock)); - /* paired with 
smp_rmb() in rt6_get_cookie_safe() */ + /* paired with smp_rmb() in fib6_get_cookie_safe() */ smp_wmb(); while (fn) { fn->fn_sernum = sernum; diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 3ad201d372d8..7baf41d160f5 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -629,6 +629,8 @@ drop: static int gre_handle_offloads(struct sk_buff *skb, bool csum) { + if (csum && skb_checksum_start(skb) < skb->data) + return -EINVAL; return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE); } diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index c467c6419893..4b098521a44c 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1391,12 +1391,6 @@ skip_defrtr: } } - /* - * Send a notify if RA changed managed/otherconf flags or timer settings - */ - if (send_ifinfo_notify) - inet6_ifinfo_notify(RTM_NEWLINK, in6_dev); - skip_linkparms: /* @@ -1496,6 +1490,11 @@ skip_routeinfo: memcpy(&n, ((u8 *)(ndopts.nd_opts_mtu+1))+2, sizeof(mtu)); mtu = ntohl(n); + if (in6_dev->ra_mtu != mtu) { + in6_dev->ra_mtu = mtu; + send_ifinfo_notify = true; + } + if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) { ND_PRINTK(2, warn, "RA: invalid mtu: %d\n", mtu); } else if (in6_dev->cnf.mtu6 != mtu) { @@ -1519,6 +1518,12 @@ skip_routeinfo: ND_PRINTK(2, warn, "RA: invalid RA options\n"); } out: + /* Send a notify if RA changed managed/otherconf flags or + * timer settings or ra_mtu value + */ + if (send_ifinfo_notify) + inet6_ifinfo_notify(RTM_NEWLINK, in6_dev); + fib6_info_release(rt); if (neigh) neigh_release(neigh); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 6cf4bb89ca69..f34137d5bf85 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -41,6 +41,7 @@ #include <linux/nsproxy.h> #include <linux/slab.h> #include <linux/jhash.h> +#include <linux/siphash.h> #include <net/net_namespace.h> #include <net/snmp.h> #include <net/ipv6.h> @@ -1484,17 +1485,24 @@ static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket) static u32 rt6_exception_hash(const struct in6_addr *dst, const struct in6_addr *src) { - static u32 seed __read_mostly; - u32 val; + static siphash_key_t rt6_exception_key __read_mostly; + struct { + struct in6_addr dst; + struct in6_addr src; + } __aligned(SIPHASH_ALIGNMENT) combined = { + .dst = *dst, + }; + u64 val; - net_get_random_once(&seed, sizeof(seed)); - val = jhash2((const u32 *)dst, sizeof(*dst)/sizeof(u32), seed); + net_get_random_once(&rt6_exception_key, sizeof(rt6_exception_key)); #ifdef CONFIG_IPV6_SUBTREES if (src) - val = jhash2((const u32 *)src, sizeof(*src)/sizeof(u32), val); + combined.src = *src; #endif - return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT); + val = siphash(&combined, sizeof(combined), &rt6_exception_key); + + return hash_64(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT); } /* Helper function to find the cached rt in the hash table diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index bcb7cc06db3d..cd3731cbf6c6 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -1447,4 +1447,40 @@ static inline void drv_sta_set_decap_offload(struct ieee80211_local *local, trace_drv_return_void(local); } +static inline void drv_add_twt_setup(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + struct ieee80211_twt_setup *twt) +{ + struct ieee80211_twt_params *twt_agrt; + + might_sleep(); + + if (!check_sdata_in_driver(sdata)) + return; + + twt_agrt = (void *)twt->params; + + trace_drv_add_twt_setup(local, sta, twt, twt_agrt); + 
local->ops->add_twt_setup(&local->hw, sta, twt); + trace_drv_return_void(local); +} + +static inline void drv_twt_teardown_request(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta *sta, + u8 flowid) +{ + might_sleep(); + if (!check_sdata_in_driver(sdata)) + return; + + if (!local->ops->twt_teardown_request) + return; + + trace_drv_twt_teardown_request(local, sta, flowid); + local->ops->twt_teardown_request(&local->hw, sta, flowid); + trace_drv_return_void(local); +} + #endif /* __MAC80211_DRIVER_OPS */ diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index e8945c20688a..159af6c3ffb0 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -946,6 +946,7 @@ struct ieee80211_sub_if_data { struct work_struct work; struct sk_buff_head skb_queue; + struct sk_buff_head status_queue; u8 needed_rx_chains; enum ieee80211_smps_mode smps_mode; @@ -1533,6 +1534,7 @@ struct ieee802_11_elems { const struct ieee80211_he_spr *he_spr; const struct ieee80211_mu_edca_param_set *mu_edca_param_set; const struct ieee80211_he_6ghz_capa *he_6ghz_capa; + const struct ieee80211_tx_pwr_env *tx_pwr_env[IEEE80211_TPE_MAX_IE_COUNT]; const u8 *uora_element; const u8 *mesh_id; const u8 *peering; @@ -1583,6 +1585,8 @@ struct ieee802_11_elems { u8 perr_len; u8 country_elem_len; u8 bssid_index_len; + u8 tx_pwr_env_len[IEEE80211_TPE_MAX_IE_COUNT]; + u8 tx_pwr_env_num; /* whether a parse error occurred while retrieving these elements */ bool parse_error; @@ -2080,6 +2084,11 @@ ieee80211_he_op_ie_to_bss_conf(struct ieee80211_vif *vif, /* S1G */ void ieee80211_s1g_sta_rate_init(struct sta_info *sta); +bool ieee80211_s1g_is_twt_setup(struct sk_buff *skb); +void ieee80211_s1g_rx_twt_action(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); +void ieee80211_s1g_status_twt_action(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb); /* Spectrum management */ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 769f8f585c06..62c95597704b 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -552,6 +552,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do */ ieee80211_free_keys(sdata, true); skb_queue_purge(&sdata->skb_queue); + skb_queue_purge(&sdata->status_queue); } spin_lock_irqsave(&local->queue_stop_reason_lock, flags); @@ -984,6 +985,7 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local) } skb_queue_head_init(&sdata->skb_queue); + skb_queue_head_init(&sdata->status_queue); INIT_WORK(&sdata->work, ieee80211_iface_work); return 0; @@ -1382,6 +1384,16 @@ static void ieee80211_iface_process_skb(struct ieee80211_local *local, WARN_ON(1); break; } + } else if (ieee80211_is_action(mgmt->frame_control) && + mgmt->u.action.category == WLAN_CATEGORY_S1G) { + switch (mgmt->u.action.u.s1g.action_code) { + case WLAN_S1G_TWT_TEARDOWN: + case WLAN_S1G_TWT_SETUP: + ieee80211_s1g_rx_twt_action(sdata, skb); + break; + default: + break; + } } else if (ieee80211_is_ext(mgmt->frame_control)) { if (sdata->vif.type == NL80211_IFTYPE_STATION) ieee80211_sta_rx_queued_ext(sdata, skb); @@ -1437,6 +1449,24 @@ static void ieee80211_iface_process_skb(struct ieee80211_local *local, } } +static void ieee80211_iface_process_status(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (void *)skb->data; + + if (ieee80211_is_action(mgmt->frame_control) && + mgmt->u.action.category == 
WLAN_CATEGORY_S1G) { + switch (mgmt->u.action.u.s1g.action_code) { + case WLAN_S1G_TWT_TEARDOWN: + case WLAN_S1G_TWT_SETUP: + ieee80211_s1g_status_twt_action(sdata, skb); + break; + default: + break; + } + } +} + static void ieee80211_iface_work(struct work_struct *work) { struct ieee80211_sub_if_data *sdata = @@ -1466,6 +1496,16 @@ static void ieee80211_iface_work(struct work_struct *work) kcov_remote_stop(); } + /* process status queue */ + while ((skb = skb_dequeue(&sdata->status_queue))) { + kcov_remote_start_common(skb_get_kcov_handle(skb)); + + ieee80211_iface_process_status(sdata, skb); + kfree_skb(skb); + + kcov_remote_stop(); + } + /* then other type-dependent work */ switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: @@ -1529,6 +1569,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, } skb_queue_head_init(&sdata->skb_queue); + skb_queue_head_init(&sdata->status_queue); INIT_WORK(&sdata->work, ieee80211_iface_work); INIT_WORK(&sdata->recalc_smps, ieee80211_recalc_smps_work); INIT_WORK(&sdata->csa_finalize_work, ieee80211_csa_finalize_work); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 33c56eab07fc..99ed68f7dc36 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3212,6 +3212,68 @@ ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) return RX_CONTINUE; } +static bool +ieee80211_process_rx_twt_action(struct ieee80211_rx_data *rx) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)rx->skb->data; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); + struct ieee80211_sub_if_data *sdata = rx->sdata; + const struct ieee80211_sta_he_cap *hecap; + struct ieee80211_supported_band *sband; + + /* TWT actions are only supported in AP for the moment */ + if (sdata->vif.type != NL80211_IFTYPE_AP) + return false; + + if (!rx->local->ops->add_twt_setup) + return false; + + sband = rx->local->hw.wiphy->bands[status->band]; + hecap = ieee80211_get_he_iftype_cap(sband, + ieee80211_vif_type_p2p(&sdata->vif)); + if (!hecap) + return false; + + if (!(hecap->he_cap_elem.mac_cap_info[0] & + IEEE80211_HE_MAC_CAP0_TWT_RES)) + return false; + + if (!rx->sta) + return false; + + switch (mgmt->u.action.u.s1g.action_code) { + case WLAN_S1G_TWT_SETUP: { + struct ieee80211_twt_setup *twt; + + if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE + + 1 + /* action code */ + sizeof(struct ieee80211_twt_setup) + + 2 /* TWT req_type agrt */) + break; + + twt = (void *)mgmt->u.action.u.s1g.variable; + if (twt->element_id != WLAN_EID_S1G_TWT) + break; + + if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE + + 4 + /* action code + token + tlv */ + twt->length) + break; + + return true; /* queue the frame */ + } + case WLAN_S1G_TWT_TEARDOWN: + if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE + 2) + break; + + return true; /* queue the frame */ + default: + break; + } + + return false; +} + static ieee80211_rx_result debug_noinline ieee80211_rx_h_action(struct ieee80211_rx_data *rx) { @@ -3491,6 +3553,17 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) !mesh_path_sel_is_hwmp(sdata)) break; goto queue; + case WLAN_CATEGORY_S1G: + switch (mgmt->u.action.u.s1g.action_code) { + case WLAN_S1G_TWT_SETUP: + case WLAN_S1G_TWT_TEARDOWN: + if (ieee80211_process_rx_twt_action(rx)) + goto queue; + break; + default: + break; + } + break; } return RX_CONTINUE; diff --git a/net/mac80211/s1g.c b/net/mac80211/s1g.c index c33f332b049a..7e35ab5b6166 100644 --- a/net/mac80211/s1g.c +++ b/net/mac80211/s1g.c @@ -6,6 +6,7 @@ #include <linux/ieee80211.h> #include <net/mac80211.h> 
#include "ieee80211_i.h" +#include "driver-ops.h" void ieee80211_s1g_sta_rate_init(struct sta_info *sta) { @@ -14,3 +15,182 @@ void ieee80211_s1g_sta_rate_init(struct sta_info *sta) sta->rx_stats.last_rate = STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_S1G); } + +bool ieee80211_s1g_is_twt_setup(struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + + if (likely(!ieee80211_is_action(mgmt->frame_control))) + return false; + + if (likely(mgmt->u.action.category != WLAN_CATEGORY_S1G)) + return false; + + return mgmt->u.action.u.s1g.action_code == WLAN_S1G_TWT_SETUP; +} + +static void +ieee80211_s1g_send_twt_setup(struct ieee80211_sub_if_data *sdata, const u8 *da, + const u8 *bssid, struct ieee80211_twt_setup *twt) +{ + int len = IEEE80211_MIN_ACTION_SIZE + 4 + twt->length; + struct ieee80211_local *local = sdata->local; + struct ieee80211_mgmt *mgmt; + struct sk_buff *skb; + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + len); + if (!skb) + return; + + skb_reserve(skb, local->hw.extra_tx_headroom); + mgmt = skb_put_zero(skb, len); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + memcpy(mgmt->da, da, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + memcpy(mgmt->bssid, bssid, ETH_ALEN); + + mgmt->u.action.category = WLAN_CATEGORY_S1G; + mgmt->u.action.u.s1g.action_code = WLAN_S1G_TWT_SETUP; + memcpy(mgmt->u.action.u.s1g.variable, twt, 3 + twt->length); + + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT | + IEEE80211_TX_INTFL_MLME_CONN_TX | + IEEE80211_TX_CTL_REQ_TX_STATUS; + ieee80211_tx_skb(sdata, skb); +} + +static void +ieee80211_s1g_send_twt_teardown(struct ieee80211_sub_if_data *sdata, + const u8 *da, const u8 *bssid, u8 flowid) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_mgmt *mgmt; + struct sk_buff *skb; + u8 *id; + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + + IEEE80211_MIN_ACTION_SIZE + 2); + if (!skb) + return; + + skb_reserve(skb, local->hw.extra_tx_headroom); + mgmt = skb_put_zero(skb, IEEE80211_MIN_ACTION_SIZE + 2); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + memcpy(mgmt->da, da, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + memcpy(mgmt->bssid, bssid, ETH_ALEN); + + mgmt->u.action.category = WLAN_CATEGORY_S1G; + mgmt->u.action.u.s1g.action_code = WLAN_S1G_TWT_TEARDOWN; + id = (u8 *)mgmt->u.action.u.s1g.variable; + *id = flowid; + + IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT | + IEEE80211_TX_CTL_REQ_TX_STATUS; + ieee80211_tx_skb(sdata, skb); +} + +static void +ieee80211_s1g_rx_twt_setup(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (void *)skb->data; + struct ieee80211_twt_setup *twt = (void *)mgmt->u.action.u.s1g.variable; + struct ieee80211_twt_params *twt_agrt = (void *)twt->params; + + twt_agrt->req_type &= cpu_to_le16(~IEEE80211_TWT_REQTYPE_REQUEST); + + /* broadcast TWT not supported yet */ + if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST) { + le16p_replace_bits(&twt_agrt->req_type, + TWT_SETUP_CMD_REJECT, + IEEE80211_TWT_REQTYPE_SETUP_CMD); + goto out; + } + + drv_add_twt_setup(sdata->local, sdata, &sta->sta, twt); +out: + ieee80211_s1g_send_twt_setup(sdata, mgmt->sa, sdata->vif.addr, twt); +} + +static void +ieee80211_s1g_rx_twt_teardown(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt 
*)skb->data; + + drv_twt_teardown_request(sdata->local, sdata, &sta->sta, + mgmt->u.action.u.s1g.variable[0]); +} + +static void +ieee80211_s1g_tx_twt_setup_fail(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + struct ieee80211_twt_setup *twt = (void *)mgmt->u.action.u.s1g.variable; + struct ieee80211_twt_params *twt_agrt = (void *)twt->params; + u8 flowid = le16_get_bits(twt_agrt->req_type, + IEEE80211_TWT_REQTYPE_FLOWID); + + drv_twt_teardown_request(sdata->local, sdata, &sta->sta, flowid); + + ieee80211_s1g_send_twt_teardown(sdata, mgmt->sa, sdata->vif.addr, + flowid); +} + +void ieee80211_s1g_rx_twt_action(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + + mutex_lock(&local->sta_mtx); + + sta = sta_info_get_bss(sdata, mgmt->sa); + if (!sta) + goto out; + + switch (mgmt->u.action.u.s1g.action_code) { + case WLAN_S1G_TWT_SETUP: + ieee80211_s1g_rx_twt_setup(sdata, sta, skb); + break; + case WLAN_S1G_TWT_TEARDOWN: + ieee80211_s1g_rx_twt_teardown(sdata, sta, skb); + break; + default: + break; + } + +out: + mutex_unlock(&local->sta_mtx); +} + +void ieee80211_s1g_status_twt_action(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + struct ieee80211_local *local = sdata->local; + struct sta_info *sta; + + mutex_lock(&local->sta_mtx); + + sta = sta_info_get_bss(sdata, mgmt->da); + if (!sta) + goto out; + + switch (mgmt->u.action.u.s1g.action_code) { + case WLAN_S1G_TWT_SETUP: + /* process failed twt setup frames */ + ieee80211_s1g_tx_twt_setup_fail(sdata, sta, skb); + break; + default: + break; + } + +out: + mutex_unlock(&local->sta_mtx); +} diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 1f295e5721ef..f6f63a0b1b72 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -705,13 +705,26 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local, /* Check to see if packet is a TDLS teardown packet */ if (ieee80211_is_data(hdr->frame_control) && (ieee80211_get_tdls_action(skb, hdr_size) == - WLAN_TDLS_TEARDOWN)) + WLAN_TDLS_TEARDOWN)) { ieee80211_tdls_td_tx_handle(local, sdata, skb, info->flags); - else + } else if (ieee80211_s1g_is_twt_setup(skb)) { + if (!acked) { + struct sk_buff *qskb; + + qskb = skb_clone(skb, GFP_ATOMIC); + if (qskb) { + skb_queue_tail(&sdata->status_queue, + qskb); + ieee80211_queue_work(&local->hw, + &sdata->work); + } + } + } else { ieee80211_mgd_conn_tx_status(sdata, hdr->frame_control, acked); + } } rcu_read_unlock(); diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index f6ef15366938..9e8381bef7ed 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -2825,6 +2825,73 @@ DEFINE_EVENT(sta_flag_evt, drv_sta_set_decap_offload, TP_ARGS(local, sdata, sta, enabled) ); +TRACE_EVENT(drv_add_twt_setup, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sta *sta, + struct ieee80211_twt_setup *twt, + struct ieee80211_twt_params *twt_agrt), + + TP_ARGS(local, sta, twt, twt_agrt), + + TP_STRUCT__entry( + LOCAL_ENTRY + STA_ENTRY + __field(u8, dialog_token) + __field(u8, control) + __field(__le16, req_type) + __field(__le64, twt) + __field(u8, duration) + __field(__le16, mantissa) + __field(u8, channel) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + STA_ASSIGN; + __entry->dialog_token = 
twt->dialog_token; + __entry->control = twt->control; + __entry->req_type = twt_agrt->req_type; + __entry->twt = twt_agrt->twt; + __entry->duration = twt_agrt->min_twt_dur; + __entry->mantissa = twt_agrt->mantissa; + __entry->channel = twt_agrt->channel; + ), + + TP_printk( + LOCAL_PR_FMT STA_PR_FMT + " token:%d control:0x%02x req_type:0x%04x" + " twt:%llu duration:%d mantissa:%d channel:%d", + LOCAL_PR_ARG, STA_PR_ARG, __entry->dialog_token, + __entry->control, le16_to_cpu(__entry->req_type), + le64_to_cpu(__entry->twt), __entry->duration, + le16_to_cpu(__entry->mantissa), __entry->channel + ) +); + +TRACE_EVENT(drv_twt_teardown_request, + TP_PROTO(struct ieee80211_local *local, + struct ieee80211_sta *sta, u8 flowid), + + TP_ARGS(local, sta, flowid), + + TP_STRUCT__entry( + LOCAL_ENTRY + STA_ENTRY + __field(u8, flowid) + ), + + TP_fast_assign( + LOCAL_ASSIGN; + STA_ASSIGN; + __entry->flowid = flowid; + ), + + TP_printk( + LOCAL_PR_FMT STA_PR_FMT " flowid:%d", + LOCAL_PR_ARG, STA_PR_ARG, __entry->flowid + ) +); + #endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */ #undef TRACE_INCLUDE_PATH diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 05e96212b104..49cb96d25169 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1336,6 +1336,18 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, elems->rsnx = pos; elems->rsnx_len = elen; break; + case WLAN_EID_TX_POWER_ENVELOPE: + if (elen < 1 || + elen > sizeof(struct ieee80211_tx_pwr_env)) + break; + + if (elems->tx_pwr_env_num >= ARRAY_SIZE(elems->tx_pwr_env)) + break; + + elems->tx_pwr_env[elems->tx_pwr_env_num] = (void *)pos; + elems->tx_pwr_env_len[elems->tx_pwr_env_num] = elen; + elems->tx_pwr_env_num++; + break; case WLAN_EID_EXTENSION: ieee80211_parse_extension_element(calc_crc ? &crc : NULL, diff --git a/net/mptcp/options.c b/net/mptcp/options.c index bec3ed82e253..c41273cefc51 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -81,12 +81,11 @@ static void mptcp_parse_option(const struct sk_buff *skb, * is if both hosts in their SYNs set A=0." */ if (flags & MPTCP_CAP_CHECKSUM_REQD) - mp_opt->csum_reqd = 1; + mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD; - if (flags & MPTCP_CAP_DENY_JOIN_ID0) - mp_opt->deny_join_id0 = 1; + mp_opt->deny_join_id0 = !!(flags & MPTCP_CAP_DENY_JOIN_ID0); - mp_opt->mp_capable = 1; + mp_opt->suboptions |= OPTIONS_MPTCP_MPC; if (opsize >= TCPOLEN_MPTCP_MPC_SYNACK) { mp_opt->sndr_key = get_unaligned_be64(ptr); ptr += 8; @@ -101,7 +100,7 @@ static void mptcp_parse_option(const struct sk_buff *skb, * equivalent to those in a DSS option and can be used * interchangeably." 
*/ - mp_opt->dss = 1; + mp_opt->suboptions |= OPTION_MPTCP_DSS; mp_opt->use_map = 1; mp_opt->mpc_map = 1; mp_opt->data_len = get_unaligned_be16(ptr); @@ -109,7 +108,7 @@ static void mptcp_parse_option(const struct sk_buff *skb, } if (opsize == TCPOLEN_MPTCP_MPC_ACK_DATA_CSUM) { mp_opt->csum = (__force __sum16)get_unaligned_be16(ptr); - mp_opt->csum_reqd = 1; + mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD; ptr += 2; } pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d csum=%u", @@ -118,7 +117,7 @@ static void mptcp_parse_option(const struct sk_buff *skb, break; case MPTCPOPT_MP_JOIN: - mp_opt->mp_join = 1; + mp_opt->suboptions |= OPTIONS_MPTCP_MPJ; if (opsize == TCPOLEN_MPTCP_MPJ_SYN) { mp_opt->backup = *ptr++ & MPTCPOPT_BACKUP; mp_opt->join_id = *ptr++; @@ -144,7 +143,7 @@ static void mptcp_parse_option(const struct sk_buff *skb, memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN); pr_debug("MP_JOIN hmac"); } else { - mp_opt->mp_join = 0; + mp_opt->suboptions &= ~OPTIONS_MPTCP_MPJ; } break; @@ -192,8 +191,7 @@ static void mptcp_parse_option(const struct sk_buff *skb, opsize != expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM) break; - mp_opt->dss = 1; - + mp_opt->suboptions |= OPTION_MPTCP_DSS; if (mp_opt->use_ack) { if (mp_opt->ack64) { mp_opt->data_ack = get_unaligned_be64(ptr); @@ -222,14 +220,15 @@ static void mptcp_parse_option(const struct sk_buff *skb, ptr += 2; if (opsize == expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM) { - mp_opt->csum_reqd = 1; + mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD; mp_opt->csum = (__force __sum16)get_unaligned_be16(ptr); ptr += 2; } pr_debug("data_seq=%llu subflow_seq=%u data_len=%u csum=%d:%u", mp_opt->data_seq, mp_opt->subflow_seq, - mp_opt->data_len, mp_opt->csum_reqd, mp_opt->csum); + mp_opt->data_len, !!(mp_opt->suboptions & OPTION_MPTCP_CSUMREQD), + mp_opt->csum); } break; @@ -260,8 +259,10 @@ static void mptcp_parse_option(const struct sk_buff *skb, break; } - mp_opt->add_addr = 1; + mp_opt->suboptions |= OPTION_MPTCP_ADD_ADDR; mp_opt->addr.id = *ptr++; + mp_opt->addr.port = 0; + mp_opt->ahmac = 0; if (mp_opt->addr.family == AF_INET) { memcpy((u8 *)&mp_opt->addr.addr.s_addr, (u8 *)ptr, 4); ptr += 4; @@ -298,7 +299,7 @@ static void mptcp_parse_option(const struct sk_buff *skb, ptr++; - mp_opt->rm_addr = 1; + mp_opt->suboptions |= OPTION_MPTCP_RM_ADDR; mp_opt->rm_list.nr = opsize - TCPOLEN_MPTCP_RM_ADDR_BASE; for (i = 0; i < mp_opt->rm_list.nr; i++) mp_opt->rm_list.ids[i] = *ptr++; @@ -309,7 +310,7 @@ static void mptcp_parse_option(const struct sk_buff *skb, if (opsize != TCPOLEN_MPTCP_PRIO) break; - mp_opt->mp_prio = 1; + mp_opt->suboptions |= OPTION_MPTCP_PRIO; mp_opt->backup = *ptr++ & MPTCP_PRIO_BKUP; pr_debug("MP_PRIO: prio=%d", mp_opt->backup); break; @@ -321,7 +322,7 @@ static void mptcp_parse_option(const struct sk_buff *skb, ptr += 2; mp_opt->rcvr_key = get_unaligned_be64(ptr); ptr += 8; - mp_opt->fastclose = 1; + mp_opt->suboptions |= OPTION_MPTCP_FASTCLOSE; break; case MPTCPOPT_RST: @@ -330,7 +331,8 @@ static void mptcp_parse_option(const struct sk_buff *skb, if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) break; - mp_opt->reset = 1; + + mp_opt->suboptions |= OPTION_MPTCP_RST; flags = *ptr++; mp_opt->reset_transient = flags & MPTCP_RST_TRANSIENT; mp_opt->reset_reason = *ptr; @@ -341,7 +343,7 @@ static void mptcp_parse_option(const struct sk_buff *skb, break; ptr += 2; - mp_opt->mp_fail = 1; + mp_opt->suboptions |= OPTION_MPTCP_FAIL; mp_opt->fail_seq = get_unaligned_be64(ptr); pr_debug("MP_FAIL: data_seq=%llu", 
mp_opt->fail_seq); break; @@ -355,26 +357,12 @@ void mptcp_get_options(const struct sock *sk, const struct sk_buff *skb, struct mptcp_options_received *mp_opt) { - struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); - struct mptcp_sock *msk = mptcp_sk(subflow->conn); const struct tcphdr *th = tcp_hdr(skb); const unsigned char *ptr; int length; /* initialize option status */ - mp_opt->mp_capable = 0; - mp_opt->mp_join = 0; - mp_opt->add_addr = 0; - mp_opt->ahmac = 0; - mp_opt->fastclose = 0; - mp_opt->addr.port = 0; - mp_opt->rm_addr = 0; - mp_opt->dss = 0; - mp_opt->mp_prio = 0; - mp_opt->reset = 0; - mp_opt->csum_reqd = READ_ONCE(msk->csum_enabled); - mp_opt->deny_join_id0 = 0; - mp_opt->mp_fail = 0; + mp_opt->suboptions = 0; length = (th->doff * 4) - sizeof(struct tcphdr); ptr = (const unsigned char *)(th + 1); @@ -928,7 +916,7 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk, */ if (TCP_SKB_CB(skb)->seq == subflow->ssn_offset + 1 && TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq && - subflow->mp_join && mp_opt->mp_join && + subflow->mp_join && (mp_opt->suboptions & OPTIONS_MPTCP_MPJ) && READ_ONCE(msk->pm.server_side)) tcp_send_ack(ssk); goto fully_established; @@ -945,8 +933,8 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk, return subflow->mp_capable; } - if ((mp_opt->dss && mp_opt->use_ack) || - (mp_opt->add_addr && !mp_opt->echo)) { + if (((mp_opt->suboptions & OPTION_MPTCP_DSS) && mp_opt->use_ack) || + ((mp_opt->suboptions & OPTION_MPTCP_ADD_ADDR) && !mp_opt->echo)) { /* subflows are fully established as soon as we get any * additional ack, including ADD_ADDR. */ @@ -959,7 +947,7 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk, * then fallback to TCP. Fallback scenarios requires a reset for * MP_JOIN subflows. 
*/ - if (!mp_opt->mp_capable) { + if (!(mp_opt->suboptions & OPTIONS_MPTCP_MPC)) { if (subflow->mp_join) goto reset; subflow->mp_capable = 0; @@ -1123,53 +1111,50 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) if (!check_fully_established(msk, sk, subflow, skb, &mp_opt)) return sk->sk_state != TCP_CLOSE; - if (mp_opt.fastclose && - msk->local_key == mp_opt.rcvr_key) { - WRITE_ONCE(msk->rcv_fastclose, true); - mptcp_schedule_work((struct sock *)msk); - } - - if (mp_opt.add_addr && add_addr_hmac_valid(msk, &mp_opt)) { - if (!mp_opt.echo) { - mptcp_pm_add_addr_received(msk, &mp_opt.addr); - MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDR); - } else { - mptcp_pm_add_addr_echoed(msk, &mp_opt.addr); - mptcp_pm_del_add_timer(msk, &mp_opt.addr, true); - MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADD); + if (unlikely(mp_opt.suboptions != OPTION_MPTCP_DSS)) { + if ((mp_opt.suboptions & OPTION_MPTCP_FASTCLOSE) && + msk->local_key == mp_opt.rcvr_key) { + WRITE_ONCE(msk->rcv_fastclose, true); + mptcp_schedule_work((struct sock *)msk); } - if (mp_opt.addr.port) - MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_PORTADD); + if ((mp_opt.suboptions & OPTION_MPTCP_ADD_ADDR) && + add_addr_hmac_valid(msk, &mp_opt)) { + if (!mp_opt.echo) { + mptcp_pm_add_addr_received(msk, &mp_opt.addr); + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDR); + } else { + mptcp_pm_add_addr_echoed(msk, &mp_opt.addr); + mptcp_pm_del_add_timer(msk, &mp_opt.addr, true); + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADD); + } - mp_opt.add_addr = 0; - } + if (mp_opt.addr.port) + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_PORTADD); + } - if (mp_opt.rm_addr) { - mptcp_pm_rm_addr_received(msk, &mp_opt.rm_list); - mp_opt.rm_addr = 0; - } + if (mp_opt.suboptions & OPTION_MPTCP_RM_ADDR) + mptcp_pm_rm_addr_received(msk, &mp_opt.rm_list); - if (mp_opt.mp_prio) { - mptcp_pm_mp_prio_received(sk, mp_opt.backup); - MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPPRIORX); - mp_opt.mp_prio = 0; - } + if (mp_opt.suboptions & OPTION_MPTCP_PRIO) { + mptcp_pm_mp_prio_received(sk, mp_opt.backup); + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPPRIORX); + } - if (mp_opt.mp_fail) { - mptcp_pm_mp_fail_received(sk, mp_opt.fail_seq); - MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILRX); - mp_opt.mp_fail = 0; - } + if (mp_opt.suboptions & OPTION_MPTCP_FAIL) { + mptcp_pm_mp_fail_received(sk, mp_opt.fail_seq); + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILRX); + } - if (mp_opt.reset) { - subflow->reset_seen = 1; - subflow->reset_reason = mp_opt.reset_reason; - subflow->reset_transient = mp_opt.reset_transient; - } + if (mp_opt.suboptions & OPTION_MPTCP_RST) { + subflow->reset_seen = 1; + subflow->reset_reason = mp_opt.reset_reason; + subflow->reset_transient = mp_opt.reset_transient; + } - if (!mp_opt.dss) - return true; + if (!(mp_opt.suboptions & OPTION_MPTCP_DSS)) + return true; + } /* we can't wait for recvmsg() to update the ack_seq, otherwise * monodirectional flows will stuck @@ -1197,7 +1182,7 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) memset(mpext, 0, sizeof(*mpext)); - if (mp_opt.use_map) { + if (likely(mp_opt.use_map)) { if (mp_opt.mpc_map) { /* this is an MP_CAPABLE carrying MPTCP data * we know this map the first chunk of data @@ -1217,7 +1202,7 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) } mpext->data_len = mp_opt.data_len; mpext->use_map = 1; - mpext->csum_reqd = mp_opt.csum_reqd; + mpext->csum_reqd = !!(mp_opt.suboptions & OPTION_MPTCP_CSUMREQD); if (mpext->csum_reqd) mpext->csum = mp_opt.csum; 
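The net/mptcp changes above collapse thirteen single-bit fields in struct mptcp_options_received into one u16 suboptions bitmask. A minimal userspace sketch of that pattern follows; the BIT positions mirror net/mptcp/protocol.h, while the struct, helper, and main() are illustrative only:

/* C sketch (not kernel code): consolidating per-option flags in a bitmask. */
#include <stdio.h>

#define BIT(n) (1u << (n))

/* bit positions as in net/mptcp/protocol.h */
#define OPTION_MPTCP_MPC_SYN	BIT(0)
#define OPTION_MPTCP_MPC_SYNACK	BIT(1)
#define OPTION_MPTCP_MPC_ACK	BIT(2)
#define OPTION_MPTCP_DSS	BIT(11)
#define OPTION_MPTCP_CSUMREQD	BIT(13)

/* any MP_CAPABLE variant, as in OPTIONS_MPTCP_MPC */
#define OPTIONS_MPTCP_MPC	(OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK | \
				 OPTION_MPTCP_MPC_ACK)

struct options_received {
	unsigned short suboptions;	/* stands in for mp_opt->suboptions */
};

int main(void)
{
	struct options_received mp_opt = { .suboptions = 0 }; /* one-shot init */

	/* the parser ORs a bit in for each recognized option */
	mp_opt.suboptions |= OPTION_MPTCP_MPC_ACK | OPTION_MPTCP_DSS;

	/* related subtypes are tested together through the mask */
	if (mp_opt.suboptions & OPTIONS_MPTCP_MPC)
		printf("MP_CAPABLE seen\n");

	/* the common "nothing but DSS" case is a single comparison */
	if (mp_opt.suboptions != OPTION_MPTCP_DSS)
		printf("extra suboptions need the slow path\n");

	return 0;
}

The payoff is visible in mptcp_get_options(), where a dozen per-field resets shrink to mp_opt->suboptions = 0, and in mptcp_incoming_options(), where the pure-DSS fast path becomes one equality test.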
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 22214a58d892..ade648c3512b 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1515,15 +1515,19 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags) mptcp_flush_join_list(msk); ssk = mptcp_subflow_get_send(msk); - /* try to keep the subflow socket lock across - * consecutive xmit on the same socket + /* First check. If the ssk has changed since + * the last round, release prev_ssk */ if (ssk != prev_ssk && prev_ssk) mptcp_push_release(sk, prev_ssk, &info); if (!ssk) goto out; - if (ssk != prev_ssk || !prev_ssk) + /* Need to lock the new subflow only if different + * from the previous one, otherwise we are still + * holding the relevant lock + */ + if (ssk != prev_ssk) lock_sock(ssk); /* keep it simple and always provide a new skb for the @@ -2832,7 +2836,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk, msk->token = subflow_req->token; msk->subflow = NULL; WRITE_ONCE(msk->fully_established, false); - if (mp_opt->csum_reqd) + if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD) WRITE_ONCE(msk->csum_enabled, true); msk->write_seq = subflow_req->idsn + 1; @@ -2841,7 +2845,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk, msk->wnd_end = msk->snd_nxt + req->rsk_rcv_wnd; msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq; - if (mp_opt->mp_capable) { + if (mp_opt->suboptions & OPTIONS_MPTCP_MPC) { msk->can_ack = true; msk->remote_key = mp_opt->sndr_key; mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq); diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 57a50b1194a9..d7aba1c4dc48 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -29,6 +29,13 @@ #define OPTION_MPTCP_DSS BIT(11) #define OPTION_MPTCP_FAIL BIT(12) +#define OPTION_MPTCP_CSUMREQD BIT(13) + +#define OPTIONS_MPTCP_MPC (OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_SYNACK | \ + OPTION_MPTCP_MPC_ACK) +#define OPTIONS_MPTCP_MPJ (OPTION_MPTCP_MPJ_SYN | OPTION_MPTCP_MPJ_SYNACK | \ + OPTION_MPTCP_MPJ_ACK) + /* MPTCP option subtypes */ #define MPTCPOPT_MP_CAPABLE 0 #define MPTCPOPT_MP_JOIN 1 @@ -132,36 +139,27 @@ struct mptcp_options_received { u32 subflow_seq; u16 data_len; __sum16 csum; - u16 mp_capable : 1, - mp_join : 1, - fastclose : 1, - reset : 1, - dss : 1, - add_addr : 1, - rm_addr : 1, - mp_prio : 1, - mp_fail : 1, - echo : 1, - csum_reqd : 1, - backup : 1, - deny_join_id0 : 1; + u16 suboptions; u32 token; u32 nonce; - u64 thmac; - u8 hmac[MPTCPOPT_HMAC_LEN]; - u8 join_id; - u8 use_map:1, + u16 use_map:1, dsn64:1, data_fin:1, use_ack:1, ack64:1, mpc_map:1, + reset_reason:4, + reset_transient:1, + echo:1, + backup:1, + deny_join_id0:1, __unused:2; + u8 join_id; + u64 thmac; + u8 hmac[MPTCPOPT_HMAC_LEN]; struct mptcp_addr_info addr; struct mptcp_rm_list rm_list; u64 ahmac; - u8 reset_reason:4; - u8 reset_transient:1; u64 fail_seq; }; diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 54b7ffc21861..1de7ce883c37 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -141,6 +141,7 @@ static int subflow_check_req(struct request_sock *req, struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener); struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); struct mptcp_options_received mp_opt; + bool opt_mp_capable, opt_mp_join; pr_debug("subflow_req=%p, listener=%p", subflow_req, listener); @@ -154,16 +155,18 @@ static int subflow_check_req(struct request_sock *req, mptcp_get_options(sk_listener, skb, &mp_opt); - if (mp_opt.mp_capable) { + opt_mp_capable = !!(mp_opt.suboptions
& OPTIONS_MPTCP_MPC); + opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ); + if (opt_mp_capable) { SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE); - if (mp_opt.mp_join) + if (opt_mp_join) return 0; - } else if (mp_opt.mp_join) { + } else if (opt_mp_join) { SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX); } - if (mp_opt.mp_capable && listener->request_mptcp) { + if (opt_mp_capable && listener->request_mptcp) { int err, retries = MPTCP_TOKEN_MAX_RETRIES; subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq; @@ -194,7 +197,7 @@ again: else SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT); - } else if (mp_opt.mp_join && listener->request_mptcp) { + } else if (opt_mp_join && listener->request_mptcp) { subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq; subflow_req->mp_join = 1; subflow_req->backup = mp_opt.backup; @@ -243,15 +246,18 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req, struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener); struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); struct mptcp_options_received mp_opt; + bool opt_mp_capable, opt_mp_join; int err; subflow_init_req(req, sk_listener); mptcp_get_options(sk_listener, skb, &mp_opt); - if (mp_opt.mp_capable && mp_opt.mp_join) + opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC); + opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ); + if (opt_mp_capable && opt_mp_join) return -EINVAL; - if (mp_opt.mp_capable && listener->request_mptcp) { + if (opt_mp_capable && listener->request_mptcp) { if (mp_opt.sndr_key == 0) return -EINVAL; @@ -262,7 +268,7 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req, subflow_req->mp_capable = 1; subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1; - } else if (mp_opt.mp_join && listener->request_mptcp) { + } else if (opt_mp_join && listener->request_mptcp) { if (!mptcp_token_join_cookie_init_state(subflow_req, skb)) return -EINVAL; @@ -394,7 +400,6 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) subflow->icsk_af_ops->sk_rx_dst_set(sk, skb); - /* be sure no special action on any packet other than syn-ack */ if (subflow->conn_finished) return; @@ -407,7 +412,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) mptcp_get_options(sk, skb, &mp_opt); if (subflow->request_mptcp) { - if (!mp_opt.mp_capable) { + if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) { MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEFALLBACK); mptcp_do_fallback(sk); @@ -415,7 +420,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) goto fallback; } - if (mp_opt.csum_reqd) + if (mp_opt.suboptions & OPTION_MPTCP_CSUMREQD) WRITE_ONCE(mptcp_sk(parent)->csum_enabled, true); if (mp_opt.deny_join_id0) WRITE_ONCE(mptcp_sk(parent)->pm.remote_deny_join_id0, true); @@ -430,7 +435,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) } else if (subflow->request_join) { u8 hmac[SHA256_DIGEST_SIZE]; - if (!mp_opt.mp_join) { + if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ)) { subflow->reset_reason = MPTCP_RST_EMPTCP; goto do_reset; } @@ -636,10 +641,10 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn); - /* After child creation we must look for 'mp_capable' even when options + /* After child creation we must look for MPC even when options * are not parsed */ - mp_opt.mp_capable = 0; + mp_opt.suboptions = 0; /* hopefully temporary handling for 
MP_JOIN+syncookie */ subflow_req = mptcp_subflow_rsk(req); @@ -659,7 +664,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, * options. */ mptcp_get_options(sk, skb, &mp_opt); - if (!mp_opt.mp_capable) { + if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) { fallback = true; goto create_child; } @@ -669,7 +674,8 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, fallback = true; } else if (subflow_req->mp_join) { mptcp_get_options(sk, skb, &mp_opt); - if (!mp_opt.mp_join || !subflow_hmac_valid(req, &mp_opt) || + if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ) || + !subflow_hmac_valid(req, &mp_opt) || !mptcp_can_accept_new_subflow(subflow_req->msk)) { SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC); fallback = true; @@ -726,7 +732,7 @@ create_child: /* with OoO packets we can reach here without ingress * mpc option */ - if (mp_opt.mp_capable) + if (mp_opt.suboptions & OPTIONS_MPTCP_MPC) mptcp_subflow_fully_established(ctx, &mp_opt); } else if (ctx->mp_join) { struct mptcp_sock *owner; diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c index 1dc955ca57d3..fa611678af05 100644 --- a/net/qrtr/mhi.c +++ b/net/qrtr/mhi.c @@ -15,7 +15,6 @@ struct qrtr_mhi_dev { struct qrtr_endpoint ep; struct mhi_device *mhi_dev; struct device *dev; - struct completion ready; }; /* From MHI to QRTR */ @@ -51,10 +50,6 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb) struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep); int rc; - rc = wait_for_completion_interruptible(&qdev->ready); - if (rc) - goto free_skb; - if (skb->sk) sock_hold(skb->sk); @@ -84,7 +79,7 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev, int rc; /* start channels */ - rc = mhi_prepare_for_transfer(mhi_dev, 0); + rc = mhi_prepare_for_transfer(mhi_dev); if (rc) return rc; @@ -101,15 +96,6 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev, if (rc) return rc; - /* start channels */ - rc = mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS); - if (rc) { - qrtr_endpoint_unregister(&qdev->ep); - dev_set_drvdata(&mhi_dev->dev, NULL); - return rc; - } - - complete_all(&qdev->ready); dev_dbg(qdev->dev, "Qualcomm MHI QRTR driver probed\n"); return 0; diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 6c61b7b1838f..b8508e35d20e 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -493,7 +493,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len) goto err; } - if (len != ALIGN(size, 4) + hdrlen) + if (!size || len != ALIGN(size, 4) + hdrlen) goto err; if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA && diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c index 925924fab1ab..1f857ffd1ac2 100644 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@ -660,6 +660,13 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt, sch_tree_lock(sch); q->nbands = nbands; + for (i = nstrict; i < q->nstrict; i++) { + INIT_LIST_HEAD(&q->classes[i].alist); + if (q->classes[i].qdisc->q.qlen) { + list_add_tail(&q->classes[i].alist, &q->active); + q->classes[i].deficit = quanta[i]; + } + } q->nstrict = nstrict; memcpy(q->prio2band, priomap, sizeof(priomap)); diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index d66a8e44a1ae..dbb41821b1b8 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -835,7 +835,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt) rqstp->rq_stime = ktime_get(); rqstp->rq_reserved = serv->sv_max_mesg; atomic_add(rqstp->rq_reserved, 
&xprt->xpt_reserved); - } + } else + svc_xprt_received(xprt); out: trace_svc_handle_xprt(xprt, len); return len; diff --git a/net/wireless/reg.c b/net/wireless/reg.c index c2d0ff7f089f..df87c7f3a049 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -171,9 +171,11 @@ enum nl80211_dfs_regions reg_get_dfs_region(struct wiphy *wiphy) { const struct ieee80211_regdomain *regd = NULL; const struct ieee80211_regdomain *wiphy_regd = NULL; + enum nl80211_dfs_regions dfs_region; rcu_read_lock(); regd = get_cfg80211_regdom(); + dfs_region = regd->dfs_region; if (!wiphy) goto out; @@ -182,6 +184,11 @@ enum nl80211_dfs_regions reg_get_dfs_region(struct wiphy *wiphy) if (!wiphy_regd) goto out; + if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) { + dfs_region = wiphy_regd->dfs_region; + goto out; + } + if (wiphy_regd->dfs_region == regd->dfs_region) goto out; @@ -193,7 +200,7 @@ enum nl80211_dfs_regions reg_get_dfs_region(struct wiphy *wiphy) out: rcu_read_unlock(); - return regd->dfs_region; + return dfs_region; } static void rcu_free_regdom(const struct ieee80211_regdomain *r) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 7f881f5a5897..37d17a79617c 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -3157,6 +3157,11 @@ ok: return dst; nopol: + if (!(dst_orig->dev->flags & IFF_LOOPBACK) && + !xfrm_default_allow(net, dir)) { + err = -EPERM; + goto error; + } if (!(flags & XFRM_LOOKUP_ICMP)) { dst = dst_orig; goto ok; @@ -3545,6 +3550,11 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, } if (!pol) { + if (!xfrm_default_allow(net, dir)) { + XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS); + return 0; + } + if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) { xfrm_secpath_reject(xerr_idx, skb, &fl); XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS); @@ -3599,6 +3609,12 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, tpp[ti++] = &pols[pi]->xfrm_vec[i]; } xfrm_nr = ti; + + if (!xfrm_default_allow(net, dir) && !xfrm_nr) { + XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES); + goto reject; + } + if (npols > 1) { xfrm_tmpl_sort(stp, tpp, xfrm_nr, family); tpp = stp; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 7aff641c717d..03b66d154b2b 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1961,6 +1961,59 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, return skb; } +static int xfrm_set_default(struct sk_buff *skb, struct nlmsghdr *nlh, + struct nlattr **attrs) +{ + struct net *net = sock_net(skb->sk); + struct xfrm_userpolicy_default *up = nlmsg_data(nlh); + u8 dirmask; + u8 old_default = net->xfrm.policy_default; + + if (up->dirmask >= XFRM_USERPOLICY_DIRMASK_MAX) + return -EINVAL; + + dirmask = (1 << up->dirmask) & XFRM_POL_DEFAULT_MASK; + + net->xfrm.policy_default = (old_default & (0xff ^ dirmask)) + | (up->action << up->dirmask); + + rt_genid_bump_all(net); + + return 0; +} + +static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh, + struct nlattr **attrs) +{ + struct sk_buff *r_skb; + struct nlmsghdr *r_nlh; + struct net *net = sock_net(skb->sk); + struct xfrm_userpolicy_default *r_up, *up; + int len = NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_default)); + u32 portid = NETLINK_CB(skb).portid; + u32 seq = nlh->nlmsg_seq; + + up = nlmsg_data(nlh); + + r_skb = nlmsg_new(len, GFP_ATOMIC); + if (!r_skb) + return -ENOMEM; + + r_nlh = nlmsg_put(r_skb, portid, seq, XFRM_MSG_GETDEFAULT, sizeof(*r_up), 0); + if (!r_nlh) { + kfree_skb(r_skb); + return 
-EMSGSIZE; + } + + r_up = nlmsg_data(r_nlh); + + r_up->action = ((net->xfrm.policy_default & (1 << up->dirmask)) >> up->dirmask); + r_up->dirmask = up->dirmask; + nlmsg_end(r_skb, r_nlh); + + return nlmsg_unicast(net->xfrm.nlsk, r_skb, portid); +} + static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs) { @@ -2664,6 +2717,8 @@ const int xfrm_msg_min[XFRM_NR_MSGTYPES] = { [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32), [XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = sizeof(u32), [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32), + [XFRM_MSG_SETDEFAULT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default), + [XFRM_MSG_GETDEFAULT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default), }; EXPORT_SYMBOL_GPL(xfrm_msg_min); @@ -2743,6 +2798,8 @@ static const struct xfrm_link { .nla_pol = xfrma_spd_policy, .nla_max = XFRMA_SPD_MAX }, [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo }, + [XFRM_MSG_SETDEFAULT - XFRM_MSG_BASE] = { .doit = xfrm_set_default }, + [XFRM_MSG_GETDEFAULT - XFRM_MSG_BASE] = { .doit = xfrm_get_default }, }; static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, diff --git a/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh b/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh index 30a610b541ad..99ec0688b044 100755 --- a/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh +++ b/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh @@ -89,14 +89,21 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do pg_set $dev "burst $BURST" done +# Run if user hits control-c +function print_result() { + # Print results + for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do + dev=${DEV}@${thread} + echo "Device: $dev" + cat /proc/net/pktgen/$dev | grep -A2 "Result:" + done +} +# trap keyboard interrupt (Ctrl-C) +trap true SIGINT + # start_run echo "Running... ctrl^C to stop" >&2 pg_ctrl "start" echo "Done" >&2 -# Print results -for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do - dev=${DEV}@${thread} - echo "Device: $dev" - cat /proc/net/pktgen/$dev | grep -A2 "Result:" -done +print_result diff --git a/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh b/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh index a6195bd77532..04b0dd0c36d6 100755 --- a/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh +++ b/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh @@ -69,14 +69,21 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do pg_set $dev "xmit_mode queue_xmit" done +# Run if user hits control-c +function print_result { + # Print results + for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do + dev=${DEV}@${thread} + echo "Device: $dev" + cat /proc/net/pktgen/$dev | grep -A2 "Result:" + done +} +# trap keyboard interrupt (Ctrl-C) +trap true SIGINT + # start_run echo "Running... 
ctrl^C to stop" >&2 pg_ctrl "start" echo "Done" >&2 -# Print results -for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do - dev=${DEV}@${thread} - echo "Device: $dev" - cat /proc/net/pktgen/$dev | grep -A2 "Result:" -done +print_result diff --git a/samples/pktgen/pktgen_sample01_simple.sh b/samples/pktgen/pktgen_sample01_simple.sh index 246cfe02bb82..09a92ea963f9 100755 --- a/samples/pktgen/pktgen_sample01_simple.sh +++ b/samples/pktgen/pktgen_sample01_simple.sh @@ -79,15 +79,22 @@ pg_set $DEV "flag UDPSRC_RND" pg_set $DEV "udp_src_min $UDP_SRC_MIN" pg_set $DEV "udp_src_max $UDP_SRC_MAX" +# Run if user hits control-c +function print_result() { + # Print results + echo "Result device: $DEV" + cat /proc/net/pktgen/$DEV +} +# trap keyboard interrupt (Ctrl-C) +trap true SIGINT + if [ -z "$APPEND" ]; then # start_run echo "Running... ctrl^C to stop" >&2 pg_ctrl "start" echo "Done" >&2 - # Print results - echo "Result device: $DEV" - cat /proc/net/pktgen/$DEV + print_result else echo "Append mode: config done. Do more or use 'pg_ctrl start' to run" fi
\ No newline at end of file diff --git a/samples/pktgen/pktgen_sample02_multiqueue.sh b/samples/pktgen/pktgen_sample02_multiqueue.sh index c6af3d9d5171..7fa41c84c32f 100755 --- a/samples/pktgen/pktgen_sample02_multiqueue.sh +++ b/samples/pktgen/pktgen_sample02_multiqueue.sh @@ -83,18 +83,25 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do pg_set $dev "udp_src_max $UDP_SRC_MAX" done -if [ -z "$APPEND" ]; then - # start_run - echo "Running... ctrl^C to stop" >&2 - pg_ctrl "start" - echo "Done" >&2 - +# Run if user hits control-c +function print_result() { # Print results for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do dev=${DEV}@${thread} echo "Device: $dev" cat /proc/net/pktgen/$dev | grep -A2 "Result:" done +} +# trap keyboard interrupt (Ctrl-C) +trap true SIGINT + +if [ -z "$APPEND" ]; then + # start_run + echo "Running... ctrl^C to stop" >&2 + pg_ctrl "start" + echo "Done" >&2 + + print_result else echo "Append mode: config done. Do more or use 'pg_ctrl start' to run" fi diff --git a/samples/pktgen/pktgen_sample03_burst_single_flow.sh b/samples/pktgen/pktgen_sample03_burst_single_flow.sh index ab87de440277..8bf2fdffba16 100755 --- a/samples/pktgen/pktgen_sample03_burst_single_flow.sh +++ b/samples/pktgen/pktgen_sample03_burst_single_flow.sh @@ -85,7 +85,7 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do done # Run if user hits control-c -function control_c() { +function print_result() { # Print results for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do dev=${DEV}@${thread} @@ -94,11 +94,13 @@ function control_c() { done } # trap keyboard interrupt (Ctrl-C) -trap control_c SIGINT +trap true SIGINT if [ -z "$APPEND" ]; then echo "Running... ctrl^C to stop" >&2 pg_ctrl "start" + + print_result else echo "Append mode: config done. Do more or use 'pg_ctrl start' to run" fi diff --git a/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh b/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh index 7c27923083a6..264cc5db9c49 100755 --- a/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh +++ b/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh @@ -100,12 +100,8 @@ for ((i = 0; i < $THREADS; i++)); do pg_set $dev "udp_src_max $UDP_SRC_MAX" done -# start_run -if [ -z "$APPEND" ]; then - echo "Running... ctrl^C to stop" >&2 - pg_ctrl "start" - echo "Done" >&2 - +# Run if user hits control-c +function print_result() { # Print results for ((i = 0; i < $THREADS; i++)); do thread=${cpu_array[$((i+F_THREAD))]} @@ -113,6 +109,17 @@ if [ -z "$APPEND" ]; then echo "Device: $dev" cat /proc/net/pktgen/$dev | grep -A2 "Result:" done +} +# trap keyboard interrupt (Ctrl-C) +trap true SIGINT + +# start_run +if [ -z "$APPEND" ]; then + echo "Running... ctrl^C to stop" >&2 + pg_ctrl "start" + echo "Done" >&2 + + print_result else echo "Append mode: config done. 
Do more or use 'pg_ctrl start' to run" fi diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 96f32eaa24df..7ad689f991e7 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6658,6 +6658,7 @@ enum { ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, ALC623_FIXUP_LENOVO_THINKSTATION_P340, ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, + ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST, }; static const struct hda_fixup alc269_fixups[] = { @@ -8242,6 +8243,12 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC255_FIXUP_XIAOMI_HEADSET_MIC }, + [ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc269_fixup_limit_int_mic_boost, + .chained = true, + .chain_id = ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF, + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -8438,8 +8445,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), - SND_PCI_QUIRK(0x103c, 0x8862, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), - SND_PCI_QUIRK(0x103c, 0x8863, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x8862, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x103c, 0x8863, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c index 5db2f4865bbb..905c7965f653 100644 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c @@ -127,7 +127,7 @@ static void sst_fill_alloc_params(struct snd_pcm_substream *substream, snd_pcm_uframes_t period_size; ssize_t periodbytes; ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream); - u32 buffer_addr = substream->runtime->dma_addr; + u32 buffer_addr = virt_to_phys(substream->runtime->dma_area); channels = substream->runtime->channels; period_size = substream->runtime->period_size; diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index eb15f319aa57..b3610fdd1fee 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -230,6 +230,7 @@ enum { IFLA_INET6_ICMP6STATS, /* statistics (icmpv6) */ IFLA_INET6_TOKEN, /* device token */ IFLA_INET6_ADDR_GEN_MODE, /* implicit address generator mode */ + IFLA_INET6_RA_MTU, /* mtu carried in the RA message */ __IFLA_INET6_MAX }; diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 4f9f73e7a299..378c0aac5a1a 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -26,6 +26,7 @@ TEST_PROGS += unicast_extensions.sh TEST_PROGS += udpgro_fwd.sh TEST_PROGS += veth.sh TEST_PROGS += ioam6.sh +TEST_PROGS += gro.sh TEST_PROGS_EXTENDED := in_netns.sh TEST_GEN_FILES = socket nettest TEST_GEN_FILES += psock_fanout 
psock_tpacket msg_zerocopy reuseport_addr_any diff --git a/tools/testing/selftests/net/gro.sh b/tools/testing/selftests/net/gro.sh index 794d2bf36dd7..342ad27f631b 100755 --- a/tools/testing/selftests/net/gro.sh +++ b/tools/testing/selftests/net/gro.sh @@ -1,45 +1,14 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 -source setup_loopback.sh readonly SERVER_MAC="aa:00:00:00:00:02" readonly CLIENT_MAC="aa:00:00:00:00:01" readonly TESTS=("data" "ack" "flags" "tcp" "ip" "large") readonly PROTOS=("ipv4" "ipv6") -dev="eth0" +dev="" test="all" proto="ipv4" -setup_interrupt() { - # Use timer on host to trigger the network stack - # Also disable device interrupt to not depend on NIC interrupt - # Reduce test flakiness caused by unexpected interrupts - echo 100000 >"${FLUSH_PATH}" - echo 50 >"${IRQ_PATH}" -} - -setup_ns() { - # Set up server_ns namespace and client_ns namespace - setup_macvlan_ns "${dev}" server_ns server "${SERVER_MAC}" - setup_macvlan_ns "${dev}" client_ns client "${CLIENT_MAC}" -} - -cleanup_ns() { - cleanup_macvlan_ns server_ns server client_ns client -} - -setup() { - setup_loopback_environment "${dev}" - setup_interrupt -} - -cleanup() { - cleanup_loopback "${dev}" - - echo "${FLUSH_TIMEOUT}" >"${FLUSH_PATH}" - echo "${HARD_IRQS}" >"${IRQ_PATH}" -} - run_test() { local server_pid=0 local exit_code=0 @@ -115,10 +84,12 @@ while getopts "i:t:p:" opt; do esac done -readonly FLUSH_PATH="/sys/class/net/${dev}/gro_flush_timeout" -readonly IRQ_PATH="/sys/class/net/${dev}/napi_defer_hard_irqs" -readonly FLUSH_TIMEOUT="$(< ${FLUSH_PATH})" -readonly HARD_IRQS="$(< ${IRQ_PATH})" +if [ -n "$dev" ]; then + source setup_loopback.sh +else + source setup_veth.sh +fi + setup trap cleanup EXIT if [[ "${test}" == "all" ]]; then diff --git a/tools/testing/selftests/net/setup_loopback.sh b/tools/testing/selftests/net/setup_loopback.sh index 0a8ad97b07ea..e57bbfbc5208 100755 --- a/tools/testing/selftests/net/setup_loopback.sh +++ b/tools/testing/selftests/net/setup_loopback.sh @@ -1,5 +1,11 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 + +readonly FLUSH_PATH="/sys/class/net/${dev}/gro_flush_timeout" +readonly IRQ_PATH="/sys/class/net/${dev}/napi_defer_hard_irqs" +readonly FLUSH_TIMEOUT="$(< ${FLUSH_PATH})" +readonly HARD_IRQS="$(< ${IRQ_PATH})" + netdev_check_for_carrier() { local -r dev="$1" @@ -18,7 +24,7 @@ netdev_check_for_carrier() { # Assumes that there is no existing ipvlan device on the physical device setup_loopback_environment() { - local dev="$1" + local dev="$1" # Fail hard if cannot turn on loopback mode for current NIC ethtool -K "${dev}" loopback on || exit 1 @@ -80,3 +86,33 @@ cleanup_loopback(){ exit 1 fi } + +setup_interrupt() { + # Use timer on host to trigger the network stack + # Also disable device interrupt to not depend on NIC interrupt + # Reduce test flakiness caused by unexpected interrupts + echo 100000 >"${FLUSH_PATH}" + echo 50 >"${IRQ_PATH}" +} + +setup_ns() { + # Set up server_ns namespace and client_ns namespace + setup_macvlan_ns "${dev}" server_ns server "${SERVER_MAC}" + setup_macvlan_ns "${dev}" client_ns client "${CLIENT_MAC}" +} + +cleanup_ns() { + cleanup_macvlan_ns server_ns server client_ns client +} + +setup() { + setup_loopback_environment "${dev}" + setup_interrupt +} + +cleanup() { + cleanup_loopback "${dev}" + + echo "${FLUSH_TIMEOUT}" >"${FLUSH_PATH}" + echo "${HARD_IRQS}" >"${IRQ_PATH}" +} diff --git a/tools/testing/selftests/net/setup_veth.sh b/tools/testing/selftests/net/setup_veth.sh new file mode 100644 index 000000000000..1003ddf7b3b2 --- 
/dev/null +++ b/tools/testing/selftests/net/setup_veth.sh @@ -0,0 +1,41 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +setup_veth_ns() { + local -r link_dev="$1" + local -r ns_name="$2" + local -r ns_dev="$3" + local -r ns_mac="$4" + + [[ -e /var/run/netns/"${ns_name}" ]] || ip netns add "${ns_name}" + echo 100000 > "/sys/class/net/${ns_dev}/gro_flush_timeout" + ip link set dev "${ns_dev}" netns "${ns_name}" mtu 65535 + ip -netns "${ns_name}" link set dev "${ns_dev}" up + + ip netns exec "${ns_name}" ethtool -K "${ns_dev}" gro on tso off +} + +setup_ns() { + # Set up server_ns namespace and client_ns namespace + ip link add name server type veth peer name client + + setup_veth_ns "${dev}" server_ns server "${SERVER_MAC}" + setup_veth_ns "${dev}" client_ns client "${CLIENT_MAC}" +} + +cleanup_ns() { + local ns_name + + for ns_name in client_ns server_ns; do + [[ -e /var/run/netns/"${ns_name}" ]] && ip netns del "${ns_name}" + done +} + +setup() { + # no global init setup step needed + : +} + +cleanup() { + cleanup_ns +}
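
Note on the MPTCP subflow hunks above: the option parser no longer exposes one boolean per option (mp_capable, mp_join); it fills a single suboptions bitmask that callers test against masks such as OPTIONS_MPTCP_MPC and OPTIONS_MPTCP_MPJ, so one test can cover several wire variants of the same suboption. A minimal standalone sketch of that pattern; the flag values are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Illustrative flags; the kernel's OPTIONS_MPTCP_MPC/MPJ masks each
 * group several suboption variants behind one test. */
#define OPT_MPC	(1u << 0)
#define OPT_MPJ	(1u << 1)

struct parsed_opts {
	uint32_t suboptions;	/* one bit per suboption seen */
};

static void parse(struct parsed_opts *o, int saw_mpc, int saw_mpj)
{
	o->suboptions = 0;
	if (saw_mpc)
		o->suboptions |= OPT_MPC;
	if (saw_mpj)
		o->suboptions |= OPT_MPJ;
}

int main(void)
{
	struct parsed_opts o;

	parse(&o, 1, 0);
	/* Mirrors the new kernel test: !(mp_opt.suboptions & OPTIONS_MPTCP_MPC) */
	if (!(o.suboptions & OPT_MPC))
		puts("no MP_CAPABLE: fall back to plain TCP");
	else
		puts("MP_CAPABLE present");
	return 0;
}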
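
Note on the qrtr_endpoint_post() hunk: the sanity check now also rejects a zero payload size, not just totals that disagree with the 4-byte-aligned payload plus header. The same check in self-contained form, with ALIGN written out in the shape of the kernel's round-up macro:

#include <stddef.h>
#include <stdio.h>

/* Round x up to the next multiple of a (a must be a power of two),
 * same shape as the kernel's ALIGN(). */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

static int payload_ok(size_t len, size_t hdrlen, size_t size)
{
	/* Reject empty payloads and totals that don't equal the
	 * aligned payload plus header, as in the hunk. */
	if (!size || len != ALIGN(size, 4) + hdrlen)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       payload_ok(20, 12, 8),	/* ok: 12 + ALIGN(8, 4) == 20 */
	       payload_ok(12, 12, 0),	/* rejected: zero size */
	       payload_ok(19, 12, 8));	/* rejected: bad total */
	return 0;
}

The middle case is the one the patch closes: without the !size clause, len == ALIGN(0, 4) + hdrlen holds and an empty payload slips through.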
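
Note on the ets_qdisc_change() hunk: bands indexed nstrict..q->nstrict-1 were strict-priority bands and are being demoted to DRR bands. Strict bands never use their active-list linkage, so it must be initialized now, and a band that is already backlogged must join the active rotation with a fresh deficit of one quantum; otherwise dequeue would skip it or walk an uninitialized list. A compact sketch with a hand-rolled list standing in for the kernel's list_head:

#include <stdio.h>

/* Minimal circular doubly linked active list, standing in for the
 * kernel's list_head machinery in sch_ets. */
struct band {
	struct band *prev, *next;	/* alist linkage */
	int qlen;			/* backlog */
	int deficit;			/* DRR deficit counter */
};

static struct band active = { &active, &active, 0, 0 };

static void list_init(struct band *b) { b->prev = b->next = b; }

static void list_add_tail(struct band *b)
{
	b->prev = active.prev;
	b->next = &active;
	active.prev->next = b;
	active.prev = b;
}

/* Mirrors the hunk: give the demoted band valid list state, and put
 * backlogged bands on the active list with deficit = quantum. */
static void demote_strict(struct band *b, int quantum)
{
	list_init(b);
	if (b->qlen) {
		list_add_tail(b);
		b->deficit = quantum;
	}
}

int main(void)
{
	struct band b = { NULL, NULL, 3, 0 };	/* 3 packets queued */

	demote_strict(&b, 1500);
	printf("on active list: %d, deficit: %d\n",
	       active.next == &b, b.deficit);
	return 0;
}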
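
Note on the reg_get_dfs_region() hunk: the old code returned regd->dfs_region after rcu_read_unlock(), dereferencing an RCU-protected pointer outside the read-side critical section; the fix snapshots the scalar into a local while the lock is held (and lets REGULATORY_WIPHY_SELF_MANAGED wiphys report their own regdomain's region). A hedged userspace sketch of the snapshot-before-unlock pattern, with a pthread rwlock standing in for RCU since real RCU is kernel-side:

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the RCU-protected regdomain: a pointer a writer may
 * replace and free; the rdlock plays the role of rcu_read_lock(). */
struct regdomain {
	int dfs_region;
};

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static struct regdomain *current_regd;

static int get_dfs_region(void)
{
	int dfs_region;

	pthread_rwlock_rdlock(&lock);
	/* Snapshot the field while the pointer is guaranteed live... */
	dfs_region = current_regd->dfs_region;
	pthread_rwlock_unlock(&lock);

	/* ...so nothing touches current_regd after the unlock, which
	 * is exactly the dereference the reg.c hunk removes. */
	return dfs_region;
}

int main(void)
{
	struct regdomain rd = { .dfs_region = 1 };

	current_regd = &rd;
	printf("dfs_region=%d\n", get_dfs_region());
	return 0;
}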
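
Note on the new xfrm_set_default()/xfrm_get_default() handlers: the per-direction default action is packed into net->xfrm.policy_default, one bit per direction, and the handlers do plain bit surgery on it (the UAPI field is named dirmask but, as the 1 << up->dirmask shifts show, it carries a direction index). The same arithmetic standalone; the 3-bit mask width, modeled on XFRM_POL_DEFAULT_MASK, and the action encoding (1 = block) are assumptions, not taken from this diff:

#include <stdint.h>
#include <stdio.h>

/* Directions as in the xfrm UAPI: in = 0, out = 1, fwd = 2. */
enum { DIR_IN, DIR_OUT, DIR_FWD, DIR_MAX };

/* Assumed width: one bit per direction, low 3 bits. */
#define POL_DEFAULT_MASK ((1u << DIR_MAX) - 1)

/* Mirrors xfrm_set_default: clear the direction's bit, then OR in the
 * new one-bit action (assumed: 0 = allow, 1 = block). */
static uint8_t set_default(uint8_t old, unsigned int dir, unsigned int action)
{
	uint8_t bit = (1u << dir) & POL_DEFAULT_MASK;

	return (old & (0xff ^ bit)) | ((action & 1u) << dir);
}

/* Mirrors the read-back built into the XFRM_MSG_GETDEFAULT reply. */
static unsigned int get_default(uint8_t cur, unsigned int dir)
{
	return (cur & (1u << dir)) >> dir;
}

int main(void)
{
	uint8_t pd = 0;

	pd = set_default(pd, DIR_OUT, 1);
	printf("out=%u in=%u\n",
	       get_default(pd, DIR_OUT), get_default(pd, DIR_IN));
	return 0;
}

Keeping all three directions in one byte is what makes the update a single masked store; the rt_genid_bump_all() call after it then invalidates cached routes so the new default is consulted.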