582 files changed, 12480 insertions, 5725 deletions
@@ -322,6 +322,8 @@ TripleX Chung <[email protected]> <[email protected]> Tsuneo Yoshioka <[email protected]> Tycho Andersen <[email protected]> <[email protected]> Uwe Kleine-König <[email protected]> +Uwe Kleine-König <[email protected]> +Uwe Kleine-König <[email protected]> Uwe Kleine-König <[email protected]> Uwe Kleine-König <[email protected]> Valdis Kletnieks <[email protected]> @@ -740,6 +740,11 @@ S: (ask for current address) S: Portland, Oregon S: USA +N: Jason Cooper +D: ARM/Marvell SOC co-maintainer +D: irqchip co-maintainer +D: MVEBU PCI DRIVER co-maintainer + N: Robin Cornelius D: Ralink rt2x00 WLAN driver diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt index 0b61a90f1592..e08c5a9d53da 100644 --- a/Documentation/devicetree/bindings/net/macb.txt +++ b/Documentation/devicetree/bindings/net/macb.txt @@ -16,6 +16,8 @@ Required properties: Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC. Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC. Use "sifive,fu540-c000-gem" for SiFive FU540-C000 SoC. + Use "microchip,sama7g5-emac" for Microchip SAMA7G5 ethernet interface. + Use "microchip,sama7g5-gem" for Microchip SAMA7G5 gigabit ethernet interface. Or the generic form: "cdns,emac". - reg: Address and length of the register set for the device For "sifive,fu540-c000-gem", second range is required to specify the diff --git a/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml b/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml index ca3904bf90e0..477066e2b821 100644 --- a/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml +++ b/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml @@ -76,7 +76,7 @@ examples: reg = <0x27>; interrupt-parent = <&gpa1>; - interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <3 IRQ_TYPE_EDGE_RISING>; en-gpios = <&gpf1 4 GPIO_ACTIVE_HIGH>; wake-gpios = <&gpj0 2 GPIO_ACTIVE_HIGH>; diff --git a/Documentation/driver-api/auxiliary_bus.rst b/Documentation/driver-api/auxiliary_bus.rst new file mode 100644 index 000000000000..2312506b0674 --- /dev/null +++ b/Documentation/driver-api/auxiliary_bus.rst @@ -0,0 +1,234 @@ +.. SPDX-License-Identifier: GPL-2.0-only + +============= +Auxiliary Bus +============= + +In some subsystems, the functionality of the core device (PCI/ACPI/other) is +too complex for a single device to be managed by a monolithic driver +(e.g. Sound Open Firmware), multiple devices might implement a common +intersection of functionality (e.g. NICs + RDMA), or a driver may want to +export an interface for another subsystem to drive (e.g. SIOV Physical Function +export Virtual Function management). A split of the functionality into child-devices representing sub-domains of functionality makes it possible to +compartmentalize, layer, and distribute domain-specific concerns via a Linux +device-driver model. + +An example of this kind of requirement is the audio subsystem where a single +IP is handling multiple entities such as HDMI, Soundwire, local devices such as +mics/speakers etc. The split of the core's functionality can be arbitrary or +be defined by the DSP firmware topology and include hooks for test/debug. This +allows for the audio core device to be minimal and focused on hardware-specific +control and communication. + +Each auxiliary_device represents a part of its parent functionality.
The +generic behavior can be extended and specialized as needed by encapsulating an +auxiliary_device within other domain-specific structures and the use of .ops +callbacks. Devices on the auxiliary bus do not share any structures and the use +of a communication channel with the parent is domain-specific. + +Note that ops are intended as a way to augment instance behavior within a class +of auxiliary devices; it is not the mechanism for exporting common +infrastructure from the parent. Consider EXPORT_SYMBOL_NS() to convey +infrastructure from the parent module to the auxiliary module(s). + + +When Should the Auxiliary Bus Be Used +===================================== + +The auxiliary bus is to be used when a driver and one or more kernel modules, +which share a common header file with the driver, need a mechanism to connect and +provide access to a shared object allocated by the auxiliary_device's +registering driver. The registering driver for the auxiliary_device(s) and the +kernel module(s) registering auxiliary_drivers can be from the same subsystem, +or from multiple subsystems. + +The emphasis here is on a common generic interface that keeps subsystem +customization out of the bus infrastructure. + +One example is a PCI network device that is RDMA-capable and exports a child +device to be driven by an auxiliary_driver in the RDMA subsystem. The PCI +driver allocates and registers an auxiliary_device for each physical +function on the NIC. The RDMA driver registers an auxiliary_driver that claims +each of these auxiliary_devices. This conveys data/ops published by the parent +PCI device/driver to the RDMA auxiliary_driver. + +Another use case is for the PCI device to be split out into multiple sub +functions. For each sub function an auxiliary_device is created. A PCI sub +function driver binds to such devices and creates one or more class +devices of its own. A PCI sub function auxiliary device is likely to be contained in a +struct with additional attributes such as user defined sub function number and +optional attributes such as resources and a link to the parent device. These +attributes could be used by systemd/udev and hence should be initialized +before a driver binds to an auxiliary_device. + +A key requirement for utilizing the auxiliary bus is that there is no +dependency on a physical bus, device, register accesses or regmap support. +These individual devices split from the core cannot live on the platform bus as +they are not physical devices that are controlled by DT/ACPI. The same +argument applies for not using MFD in this scenario as MFD relies on individual +function devices being physical devices. + +Auxiliary Device +================ + +An auxiliary_device represents a part of its parent device's functionality. It +is given a name that, combined with the registering driver's KBUILD_MODNAME, +creates a match_name that is used for driver binding, and an id that, combined +with the match_name, provides a unique name to register with the bus subsystem. + +Registering an auxiliary_device is a two-step process. First call +auxiliary_device_init(), which checks several aspects of the auxiliary_device +struct and performs a device_initialize(). After this step completes, any +error state must have a call to auxiliary_device_uninit() in its resolution path. +The second step in registering an auxiliary_device is to perform a call to +auxiliary_device_add(), which sets the name of the device and adds the device to +the bus.
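As a sketch of the two steps and their error unwinding (hypothetical names; the struct foo wrapper is a stand-in for the parent object described later in this document):

.. code-block:: c

    #include <linux/auxiliary_bus.h>
    #include <linux/slab.h>

    /* Hypothetical wrapper; the Example Usage section shows a fuller one. */
    struct foo {
            struct auxiliary_device auxdev;
    };

    static void foo_dev_release(struct device *dev)
    {
            struct auxiliary_device *auxdev = to_auxiliary_dev(dev);

            kfree(container_of(auxdev, struct foo, auxdev));
    }

    static int foo_register_auxdev(struct device *parent)
    {
            struct foo *f;
            int ret;

            f = kzalloc(sizeof(*f), GFP_KERNEL);
            if (!f)
                    return -ENOMEM;

            f->auxdev.name = "foo_dev";
            f->auxdev.id = 0;       /* a second "foo_dev" needs a different id */
            f->auxdev.dev.parent = parent;
            f->auxdev.dev.release = foo_dev_release;

            /* Step one: validate and device_initialize() */
            ret = auxiliary_device_init(&f->auxdev);
            if (ret) {
                    kfree(f);       /* device_initialize() did not run; free directly */
                    return ret;
            }

            /* Step two: the macro supplies KBUILD_MODNAME for the match_name */
            ret = auxiliary_device_add(&f->auxdev);
            if (ret)
                    auxiliary_device_uninit(&f->auxdev); /* frees via foo_dev_release() */

            return ret;
    }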
+ +Unregistering an auxiliary_device is also a two-step process to mirror the +registration process. First call auxiliary_device_delete(), then call +auxiliary_device_uninit(). + +.. code-block:: c + + struct auxiliary_device { + struct device dev; + const char *name; + u32 id; + }; + +If two auxiliary_devices both with a match_name "mod.foo" are registered onto +the bus, they must have unique id values (e.g. "x" and "y") so that the +registered devices' names are "mod.foo.x" and "mod.foo.y". If match_name + id +are not unique, then the device_add fails and generates an error message. + +The auxiliary_device.dev.type.release or auxiliary_device.dev.release must be +populated with a non-NULL pointer to successfully register the auxiliary_device. + +The auxiliary_device.dev.parent must also be populated. + +Auxiliary Device Memory Model and Lifespan +------------------------------------------ + +The registering driver is the entity that allocates memory for the +auxiliary_device and registers it on the auxiliary bus. It is important to note +that, as opposed to the platform bus, the registering driver is wholly +responsible for the management of the memory used for the driver object. + +A parent object, defined in the shared header file, contains the +auxiliary_device. It also contains a pointer to the shared object(s), which +is also defined in the shared header. Both the parent object and the shared +object(s) are allocated by the registering driver. This layout allows the +auxiliary_driver's registering module to perform a container_of() call to go +from the pointer to the auxiliary_device, which is passed during the call to the +auxiliary_driver's probe function, up to the parent object, and then have +access to the shared object(s). + +The memory for the auxiliary_device is freed only in its release() callback +flow as defined by its registering driver. + +The memory for the shared object(s) must have a lifespan equal to, or greater +than, the lifespan of the memory for the auxiliary_device. The auxiliary_driver +should only consider that this shared object is valid as long as the +auxiliary_device is still registered on the auxiliary bus. It is up to the +registering driver to manage (e.g. free or keep available) the memory for the +shared object beyond the life of the auxiliary_device. + +The registering driver must unregister all auxiliary devices before its own +driver.remove() is completed. + +Auxiliary Drivers +================= + +Auxiliary drivers follow the standard driver model convention, where +discovery/enumeration is handled by the core, and drivers +provide probe() and remove() methods. They support power management +and shutdown notifications using the standard conventions. + +.. code-block:: c + + struct auxiliary_driver { + int (*probe)(struct auxiliary_device *, + const struct auxiliary_device_id *id); + void (*remove)(struct auxiliary_device *); + void (*shutdown)(struct auxiliary_device *); + int (*suspend)(struct auxiliary_device *, pm_message_t); + int (*resume)(struct auxiliary_device *); + struct device_driver driver; + const struct auxiliary_device_id *id_table; + }; + +Auxiliary drivers register themselves with the bus by calling +auxiliary_driver_register(). The id_table contains the match_names of auxiliary +devices that a driver can bind with.
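For instance, a minimal auxiliary_driver module sketch (hypothetical names; it reuses the struct foo wrapper and the "foo_mod.foo_dev" match_name from the Example Usage section below) could look like:

.. code-block:: c

    #include <linux/auxiliary_bus.h>
    #include <linux/module.h>

    /* Parent object shared via a common header, as in Example Usage below. */
    struct foo {
            struct auxiliary_device auxdev;
            void (*connect)(struct auxiliary_device *auxdev);
            void (*disconnect)(struct auxiliary_device *auxdev);
            void *data;
    };

    static int foo_probe(struct auxiliary_device *auxdev,
                         const struct auxiliary_device_id *id)
    {
            /* Walk up to the parent object to reach the shared data/ops. */
            struct foo *f = container_of(auxdev, struct foo, auxdev);

            f->connect(auxdev);
            return 0;
    }

    static void foo_remove(struct auxiliary_device *auxdev)
    {
            struct foo *f = container_of(auxdev, struct foo, auxdev);

            f->disconnect(auxdev);
    }

    static const struct auxiliary_device_id foo_id_table[] = {
            { .name = "foo_mod.foo_dev" }, /* parent's KBUILD_MODNAME + name */
            {},
    };
    MODULE_DEVICE_TABLE(auxiliary, foo_id_table);

    static struct auxiliary_driver foo_auxiliary_drv = {
            .probe = foo_probe,
            .remove = foo_remove,
            .id_table = foo_id_table,
    };

    static int __init foo_drv_init(void)
    {
            return auxiliary_driver_register(&foo_auxiliary_drv);
    }
    module_init(foo_drv_init);

    static void __exit foo_drv_exit(void)
    {
            auxiliary_driver_unregister(&foo_auxiliary_drv);
    }
    module_exit(foo_drv_exit);

    MODULE_LICENSE("GPL v2");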
+ +Example Usage +============= + +Auxiliary devices are created and registered by a subsystem-level core device +that needs to break up its functionality into smaller fragments. One way to +extend the scope of an auxiliary_device is to encapsulate it within a domain-specific structure defined by the parent device. This structure contains the +auxiliary_device and any associated shared data/callbacks needed to establish +the connection with the parent. + +An example is: + +.. code-block:: c + + struct foo { + struct auxiliary_device auxdev; + void (*connect)(struct auxiliary_device *auxdev); + void (*disconnect)(struct auxiliary_device *auxdev); + void *data; + }; + +The parent device then registers the auxiliary_device by calling +auxiliary_device_init(), and then auxiliary_device_add(), with the pointer to +the auxdev member of the above structure. The parent provides a name for the +auxiliary_device that, combined with the parent's KBUILD_MODNAME, creates a +match_name that is used for matching and binding with a driver. + +Whenever an auxiliary_driver is registered, based on the match_name, the +auxiliary_driver's probe() is invoked for the matching devices. The +auxiliary_driver can also be encapsulated inside custom drivers that make the +core device's functionality extensible by adding additional domain-specific ops +as follows: + +.. code-block:: c + + struct my_ops { + void (*send)(struct auxiliary_device *auxdev); + void (*receive)(struct auxiliary_device *auxdev); + }; + + + struct my_driver { + struct auxiliary_driver auxiliary_drv; + const struct my_ops ops; + }; + +An example of this type of usage is: + +.. code-block:: c + + const struct auxiliary_device_id my_auxiliary_id_table[] = { + { .name = "foo_mod.foo_dev" }, + { }, + }; + + const struct my_ops my_custom_ops = { + .send = my_tx, + .receive = my_rx, + }; + + const struct my_driver my_drv = { + .auxiliary_drv = { + .name = "myauxiliarydrv", + .id_table = my_auxiliary_id_table, + .probe = my_probe, + .remove = my_remove, + .shutdown = my_shutdown, + }, + .ops = my_custom_ops, + }; diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst index f357f3eb400c..86759a74b7f1 100644 --- a/Documentation/driver-api/index.rst +++ b/Documentation/driver-api/index.rst @@ -72,6 +72,7 @@ available subsections can be seen below. thermal/index fpga/index acpi/index + auxiliary_bus backlight/lp855x-driver.rst connector console diff --git a/Documentation/networking/ppp_generic.rst b/Documentation/networking/ppp_generic.rst index e60504377900..5a10abce5964 100644 --- a/Documentation/networking/ppp_generic.rst +++ b/Documentation/networking/ppp_generic.rst @@ -314,6 +314,22 @@ channel are: it is connected to. It will return an EINVAL error if the channel is not connected to an interface. +* PPPIOCBRIDGECHAN bridges a channel with another. The argument should + point to an int containing the channel number of the channel to bridge + to. Once two channels are bridged, frames presented to one channel by + ppp_input() are passed to the bridge instance for onward transmission. + This allows frames to be switched from one channel into another: for + example, to pass PPPoE frames into a PPPoL2TP session. Since channel + bridging interrupts the normal ppp_input() path, a given channel may + not be part of a bridge at the same time as being part of a unit. + This ioctl will return an EALREADY error if the channel is already + part of a bridge or unit, or ENXIO if the requested channel does not + exist. + +* PPPIOCUNBRIDGECHAN performs the inverse of PPPIOCBRIDGECHAN, unbridging + a channel pair. This ioctl will return an EINVAL error if the channel + does not form part of a bridge. + * All other ioctl commands are passed to the channel ioctl() function.
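To make the bridging flow concrete, here is a hedged userspace sketch (hypothetical helper names; it assumes chan_fd is a /dev/ppp descriptor already attached to the first channel with PPPIOCATTCHAN, and that chan2 holds the other channel's number, e.g. as obtained via PPPIOCGCHAN on its socket):

.. code-block:: c

    #include <sys/ioctl.h>
    #include <linux/ppp-ioctl.h>

    /* Bridge the channel behind chan_fd to the channel numbered chan2,
     * e.g. to switch PPPoE frames into a PPPoL2TP session. */
    static int bridge_ppp_channels(int chan_fd, int chan2)
    {
            if (ioctl(chan_fd, PPPIOCBRIDGECHAN, &chan2) < 0)
                    return -1; /* EALREADY: already part of a bridge or unit */
            return 0;
    }

    /* Undo the bridge; EINVAL means chan_fd's channel was not bridged. */
    static int unbridge_ppp_channel(int chan_fd)
    {
            return ioctl(chan_fd, PPPIOCUNBRIDGECHAN);
    }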
The ioctl calls that are available on an instance that is attached to diff --git a/MAINTAINERS b/MAINTAINERS index 321b5fb0dd92..5c1a6ba5ef26 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1486,10 +1486,20 @@ F: Documentation/devicetree/bindings/iommu/arm,smmu* F: drivers/iommu/arm/ F: drivers/iommu/io-pgtable-arm* +ARM AND ARM64 SoC SUB-ARCHITECTURES (COMMON PARTS) +M: Arnd Bergmann <[email protected]> +M: Olof Johansson <[email protected]> +L: [email protected] (moderated for non-subscribers) +S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git +F: arch/arm/boot/dts/Makefile +F: arch/arm64/boot/dts/Makefile + ARM SUB-ARCHITECTURES L: [email protected] (moderated for non-subscribers) S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git F: arch/arm/mach-*/ F: arch/arm/plat-*/ @@ -2014,7 +2024,6 @@ M: Philipp Zabel <[email protected]> S: Maintained ARM/Marvell Dove/MV78xx0/Orion SOC support -M: Jason Cooper <[email protected]> M: Andrew Lunn <[email protected]> M: Sebastian Hesselbarth <[email protected]> M: Gregory Clement <[email protected]> @@ -2031,7 +2040,6 @@ F: arch/arm/plat-orion/ F: drivers/soc/dove/ ARM/Marvell Kirkwood and Armada 370, 375, 38x, 39x, XP, 3700, 7K/8K, CN9130 SOC support -M: Jason Cooper <[email protected]> M: Andrew Lunn <[email protected]> M: Gregory Clement <[email protected]> M: Sebastian Hesselbarth <[email protected]> @@ -3194,8 +3202,9 @@ F: drivers/mtd/devices/block2mtd.c BLUETOOTH DRIVERS M: Marcel Holtmann <[email protected]> M: Johan Hedberg <[email protected]> +M: Luiz Augusto von Dentz <[email protected]> -S: Maintained +S: Supported W: http://www.bluez.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git @@ -3204,8 +3213,9 @@ F: drivers/bluetooth/ BLUETOOTH SUBSYSTEM M: Marcel Holtmann <[email protected]> M: Johan Hedberg <[email protected]> +M: Luiz Augusto von Dentz <[email protected]> -S: Maintained +S: Supported W: http://www.bluez.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git @@ -3237,7 +3247,7 @@ R: Martin KaFai Lau <[email protected]> R: Song Liu <[email protected]> R: Yonghong Song <[email protected]> R: John Fastabend <[email protected]> -R: KP Singh <[email protected]> +R: KP Singh <[email protected]> S: Supported @@ -3356,7 +3366,7 @@ F: arch/x86/net/ X: arch/x86/net/bpf_jit_comp32.c BPF LSM (Security Audit and Enforcement using BPF) -M: KP Singh <[email protected]> +M: KP Singh <[email protected]> R: Florent Revest <[email protected]> R: Brendan Jackman <[email protected]> @@ -4285,6 +4295,7 @@ B: https://github.com/ClangBuiltLinux/linux/issues C: irc://chat.freenode.net/clangbuiltlinux F: Documentation/kbuild/llvm.rst F: scripts/clang-tools/ +F: scripts/lld-version.sh K: \b(?i:clang|llvm)\b CLEANCACHE API @@ -9247,7 +9258,6 @@ F: kernel/irq/ IRQCHIP DRIVERS M: Thomas Gleixner <[email protected]> -M: Jason Cooper <[email protected]> M: Marc Zyngier <[email protected]> S: Maintained @@ -10548,6 +10558,13 @@ S: Supported F: Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst F: drivers/net/ethernet/marvell/octeontx2/af/ +MARVELL PRESTERA ETHERNET SWITCH DRIVER +M: Vadym Kochan <[email protected]> +M: 
Taras Chornyi <[email protected]> +S: Supported +W: https://github.com/Marvell-switching/switchdev-prestera +F: drivers/net/ethernet/marvell/prestera/ + MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER M: Nicolas Pitre <[email protected]> S: Odd Fixes @@ -13396,7 +13413,6 @@ F: drivers/pci/controller/mobiveil/pcie-mobiveil* PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support) M: Thomas Petazzoni <[email protected]> -M: Jason Cooper <[email protected]> L: [email protected] (moderated for non-subscribers) S: Maintained @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 10 SUBLEVEL = 0 -EXTRAVERSION = -rc6 +EXTRAVERSION = -rc7 NAME = Kleptomaniac Octopus # *DOCUMENTATION* @@ -826,7 +826,9 @@ else DEBUG_CFLAGS += -g endif +ifneq ($(LLVM_IAS),1) KBUILD_AFLAGS += -Wa,-gdwarf-2 +endif ifdef CONFIG_DEBUG_INFO_DWARF4 DEBUG_CFLAGS += -gdwarf-4 @@ -944,7 +946,7 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types) KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init) # change __FILE__ to the relative path from the srctree -KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) +KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) # ensure -fcf-protection is disabled when using retpoline as it is # incompatible with -mindirect-branch=thunk-extern @@ -982,6 +984,12 @@ ifeq ($(CONFIG_RELR),y) LDFLAGS_vmlinux += --pack-dyn-relocs=relr endif +# We never want expected sections to be placed heuristically by the +# linker. All sections should be explicitly named in the linker script. +ifdef CONFIG_LD_ORPHAN_WARN +LDFLAGS_vmlinux += --orphan-handling=warn +endif + # Align the bit size of userspace programs with the kernel KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS)) KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS)) diff --git a/arch/Kconfig b/arch/Kconfig index 56b6ccc0e32d..ba4e966484ab 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -1028,6 +1028,15 @@ config HAVE_STATIC_CALL_INLINE bool depends on HAVE_STATIC_CALL +config ARCH_WANT_LD_ORPHAN_WARN + bool + help + An arch should select this symbol once all linker sections are explicitly + included, size-asserted, or discarded in the linker scripts. This is + important because we never want expected sections to be placed heuristically + by the linker, since the locations of such sections can change between linker + versions. + source "kernel/gcov/Kconfig" source "scripts/gcc-plugins/Kconfig" diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index fe2f17eb2b50..002e0cf025f5 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -35,6 +35,7 @@ config ARM select ARCH_USE_CMPXCHG_LOCKREF select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select ARCH_WANT_IPC_PARSE_VERSION + select ARCH_WANT_LD_ORPHAN_WARN select BINFMT_FLAT_ARGVP_ENVP_ON_STACK select BUILDTIME_TABLE_SORT if MMU select CLONE_BACKWARDS diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 4d76eab2b22d..e15f76ca2887 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -16,10 +16,6 @@ LDFLAGS_vmlinux += --be8 KBUILD_LDFLAGS_MODULE += --be8 endif -# We never want expected sections to be placed heuristically by the -# linker. All sections should be explicitly named in the linker script. 
-LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn) - GZFLAGS :=-9 #KBUILD_CFLAGS +=-pipe diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 47f001ca5499..e1567418a2b1 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -129,7 +129,9 @@ LDFLAGS_vmlinux += --no-undefined # Delete all temporary local symbols LDFLAGS_vmlinux += -X # Report orphan sections -LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn) +ifdef CONFIG_LD_ORPHAN_WARN +LDFLAGS_vmlinux += --orphan-handling=warn +endif # Next argument is a linker script LDFLAGS_vmlinux += -T diff --git a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi index 265f5f3dbff6..24f793ca2886 100644 --- a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi +++ b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi @@ -551,7 +551,7 @@ pinctrl_i2c3: i2c3grp { fsl,pins = < - MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1 + MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b8b1 MX6QDL_PAD_GPIO_16__I2C3_SDA 0x4001b8b1 >; }; diff --git a/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi index 93909796885a..b9b698f72b26 100644 --- a/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi +++ b/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi @@ -166,7 +166,6 @@ MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030 MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030 MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030 - MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1 >; }; diff --git a/arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts b/arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts index adde62d6fce7..342304f5653a 100644 --- a/arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts +++ b/arch/arm/boot/dts/mmp2-olpc-xo-1-75.dts @@ -223,8 +223,7 @@ }; &ssp3 { - /delete-property/ #address-cells; - /delete-property/ #size-cells; + #address-cells = <0>; spi-slave; status = "okay"; ready-gpios = <&gpio 125 GPIO_ACTIVE_HIGH>; diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi.dts b/arch/arm/boot/dts/sun7i-a20-bananapi.dts index bb3987e101c2..0b3d9ae75650 100644 --- a/arch/arm/boot/dts/sun7i-a20-bananapi.dts +++ b/arch/arm/boot/dts/sun7i-a20-bananapi.dts @@ -132,7 +132,7 @@ pinctrl-names = "default"; pinctrl-0 = <&gmac_rgmii_pins>; phy-handle = <&phy1>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-supply = <®_gmac_3v3>; status = "okay"; }; diff --git a/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts b/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts index fce2f7fcd084..bf38c66c1815 100644 --- a/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts +++ b/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts @@ -1,5 +1,5 @@ /* - * Copyright 2015 Adam Sampson <[email protected]> + * Copyright 2015-2020 Adam Sampson <[email protected]> * * This file is dual-licensed: you can use it either under the terms * of the GPL or the X11 license, at your option. 
Note that this dual @@ -115,7 +115,7 @@ pinctrl-names = "default"; pinctrl-0 = <&gmac_rgmii_pins>; phy-handle = <&phy1>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; status = "okay"; }; diff --git a/arch/arm/boot/dts/sun8i-s3-pinecube.dts b/arch/arm/boot/dts/sun8i-s3-pinecube.dts index 9bab6b7f4014..4aa0ee897a0a 100644 --- a/arch/arm/boot/dts/sun8i-s3-pinecube.dts +++ b/arch/arm/boot/dts/sun8i-s3-pinecube.dts @@ -10,7 +10,7 @@ / { model = "PineCube IP Camera"; - compatible = "pine64,pinecube", "allwinner,sun8i-s3"; + compatible = "pine64,pinecube", "sochip,s3", "allwinner,sun8i-v3"; aliases { serial0 = &uart2; diff --git a/arch/arm/boot/dts/sun8i-v3s.dtsi b/arch/arm/boot/dts/sun8i-v3s.dtsi index 0c7341676921..89abd4cc7e23 100644 --- a/arch/arm/boot/dts/sun8i-v3s.dtsi +++ b/arch/arm/boot/dts/sun8i-v3s.dtsi @@ -539,7 +539,7 @@ gic: interrupt-controller@1c81000 { compatible = "arm,gic-400"; reg = <0x01c81000 0x1000>, - <0x01c82000 0x1000>, + <0x01c82000 0x2000>, <0x01c84000 0x2000>, <0x01c86000 0x2000>; interrupt-controller; diff --git a/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts b/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts index 15c22b06fc4b..47954551f573 100644 --- a/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts +++ b/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts @@ -120,7 +120,7 @@ pinctrl-names = "default"; pinctrl-0 = <&gmac_rgmii_pins>; phy-handle = <&phy1>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-supply = <®_dc1sw>; status = "okay"; }; @@ -198,16 +198,16 @@ }; ®_dc1sw { - regulator-min-microvolt = <3000000>; - regulator-max-microvolt = <3000000>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; regulator-name = "vcc-gmac-phy"; }; ®_dcdc1 { regulator-always-on; - regulator-min-microvolt = <3000000>; - regulator-max-microvolt = <3000000>; - regulator-name = "vcc-3v0"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-3v3"; }; ®_dcdc2 { diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 34793aabdb65..58df9fd79a76 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -81,7 +81,6 @@ CONFIG_PARTITION_ADVANCED=y CONFIG_BINFMT_MISC=y CONFIG_CMA=y CONFIG_ZSMALLOC=m -CONFIG_ZSMALLOC_PGTABLE_MAPPING=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/mach-imx/anatop.c b/arch/arm/mach-imx/anatop.c index d841bed8664d..7bb47eb3fc07 100644 --- a/arch/arm/mach-imx/anatop.c +++ b/arch/arm/mach-imx/anatop.c @@ -136,7 +136,7 @@ void __init imx_init_revision_from_anatop(void) src_np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-src"); - src_base = of_iomap(np, 0); + src_base = of_iomap(src_np, 0); of_node_put(src_np); WARN_ON(!src_base); sbmr2 = readl_relaxed(src_base + SRC_SBMR2); diff --git a/arch/arm/mach-keystone/memory.h b/arch/arm/mach-keystone/memory.h index 9147565d0581..1b9ed1271e05 100644 --- a/arch/arm/mach-keystone/memory.h +++ b/arch/arm/mach-keystone/memory.h @@ -6,9 +6,6 @@ #ifndef __MEMORY_H #define __MEMORY_H -#define MAX_PHYSMEM_BITS 36 -#define SECTION_SIZE_BITS 34 - #define KEYSTONE_LOW_PHYS_START 0x80000000ULL #define KEYSTONE_LOW_PHYS_SIZE 0x80000000ULL /* 2G */ #define KEYSTONE_LOW_PHYS_END (KEYSTONE_LOW_PHYS_START + \ diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c index 144b9caa935c..a720259099ed 100644 --- a/arch/arm/mach-omap1/board-osk.c +++ b/arch/arm/mach-omap1/board-osk.c @@ -288,7 +288,7 @@ static struct gpiod_lookup_table 
osk_usb_gpio_table = { .dev_id = "ohci", .table = { /* Power GPIO on the I2C-attached TPS65010 */ - GPIO_LOOKUP("i2c-tps65010", 1, "power", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("tps65010", 0, "power", GPIO_ACTIVE_HIGH), GPIO_LOOKUP(OMAP_GPIO_LABEL, 9, "overcurrent", GPIO_ACTIVE_HIGH), }, diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c index 06da2747a90b..19635721013d 100644 --- a/arch/arm/mach-sunxi/sunxi.c +++ b/arch/arm/mach-sunxi/sunxi.c @@ -66,6 +66,7 @@ static const char * const sun8i_board_dt_compat[] = { "allwinner,sun8i-h2-plus", "allwinner,sun8i-h3", "allwinner,sun8i-r40", + "allwinner,sun8i-v3", "allwinner,sun8i-v3s", NULL, }; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1515f6f153a0..a6b5b7ef40ae 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -81,6 +81,7 @@ config ARM64 select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT select ARCH_WANT_FRAME_POINTERS select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36) + select ARCH_WANT_LD_ORPHAN_WARN select ARCH_HAS_UBSAN_SANITIZE_ALL select ARM_AMBA select ARM_ARCH_TIMER diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 5789c2d18d43..6a87d592bd00 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -28,10 +28,6 @@ LDFLAGS_vmlinux += --fix-cortex-a53-843419 endif endif -# We never want expected sections to be placed heuristically by the -# linker. All sections should be explicitly named in the linker script. -LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn) - ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y) ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y) $(warning LSE atomics not supported by binutils) diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts index 9ebb9e07fae3..d4069749d721 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts @@ -79,7 +79,7 @@ &emac { pinctrl-names = "default"; pinctrl-0 = <&rgmii_pins>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <&ext_rgmii_phy>; phy-supply = <®_dc1sw>; status = "okay"; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts index 4f9ba53ffaae..9d93fe153689 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts @@ -96,7 +96,7 @@ pinctrl-0 = <&emac_rgmii_pins>; phy-supply = <®_gmac_3v3>; phy-handle = <&ext_rgmii_phy>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; status = "okay"; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts index fceb298bfd53..29a081e72a9b 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-one-plus.dts @@ -27,7 +27,7 @@ &emac { pinctrl-names = "default"; pinctrl-0 = <&ext_rgmii_pins>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <&ext_rgmii_phy>; phy-supply = <®_gmac_3v3>; allwinner,rx-delay-ps = <200>; diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index e9f13fe08492..5181872f9452 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -152,6 +152,7 @@ config PPC select ARCH_USE_QUEUED_SPINLOCKS if PPC_QUEUED_SPINLOCKS select ARCH_WANT_IPC_PARSE_VERSION select ARCH_WANT_IRQS_OFF_ACTIVATE_MM + select ARCH_WANT_LD_ORPHAN_WARN 
select ARCH_WEAK_RELEASE_ACQUIRE select BINFMT_ELF select BUILDTIME_TABLE_SORT diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 16b8336f91dd..5c8c06215dd4 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -123,7 +123,6 @@ endif LDFLAGS_vmlinux-y := -Bstatic LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-y) -LDFLAGS_vmlinux += $(call ld-option,--orphan-handling=warn) ifdef CONFIG_PPC64 ifeq ($(call cc-option-yn,-mcmodel=medium),y) diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index e0b52940e43c..750918451dd2 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -242,6 +242,18 @@ extern void radix_init_pseries(void); static inline void radix_init_pseries(void) { }; #endif +#ifdef CONFIG_HOTPLUG_CPU +#define arch_clear_mm_cpumask_cpu(cpu, mm) \ + do { \ + if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { \ + atomic_dec(&(mm)->context.active_cpus); \ + cpumask_clear_cpu(cpu, mm_cpumask(mm)); \ + } \ + } while (0) + +void cleanup_cpu_mmu_context(void); +#endif + static inline int get_user_context(mm_context_t *ctx, unsigned long ea) { int index = ea >> MAX_EA_BITS_PER_CONTEXT; diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 85215e79db42..a0ebc29f30b2 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -1214,12 +1214,9 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu) { /* We have a block of xive->nr_servers VPs. We just need to check - * raw vCPU ids are below the expected limit for this guest's - * core stride ; kvmppc_pack_vcpu_id() will pack them down to an - * index that can be safely used to compute a VP id that belongs - * to the VP block. + * packed vCPU ids are below that. */ - return cpu < xive->nr_servers * xive->kvm->arch.emul_smt_mode; + return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers; } int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp) diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index 5e147986400d..55b4a8bd408a 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -5,7 +5,7 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) -obj-y := fault.o mem.o pgtable.o mmap.o \ +obj-y := fault.o mem.o pgtable.o mmap.o maccess.o \ init_$(BITS).o pgtable_$(BITS).o \ pgtable-frag.o ioremap.o ioremap_$(BITS).o \ init-common.o mmu_context.o drmem.o diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c index 0203cdf48c54..52e170bd95ae 100644 --- a/arch/powerpc/mm/book3s64/hash_native.c +++ b/arch/powerpc/mm/book3s64/hash_native.c @@ -68,7 +68,7 @@ static __always_inline void tlbiel_hash_set_isa300(unsigned int set, unsigned in rs = ((unsigned long)pid << PPC_BITLSHIFT(31)); asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) - : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r) + : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "i"(r) : "memory"); } @@ -92,16 +92,15 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is) asm volatile("ptesync": : :"memory"); /* - * Flush the first set of the TLB, and any caching of partition table - * entries. Then flush the remaining sets of the TLB. Hash mode uses - * partition scoped TLB translations. + * Flush the partition table cache if this is HV mode. 
*/ - tlbiel_hash_set_isa300(0, is, 0, 2, 0); - for (set = 1; set < num_sets; set++) - tlbiel_hash_set_isa300(set, is, 0, 0, 0); + if (early_cpu_has_feature(CPU_FTR_HVMODE)) + tlbiel_hash_set_isa300(0, is, 0, 2, 0); /* - * Now invalidate the process table cache. + * Now invalidate the process table cache. UPRT=0 HPT modes (what + * current hardware implements) do not use the process table, but + * add the flushes anyway. * * From ISA v3.0B p. 1078: * The following forms are invalid. @@ -110,6 +109,14 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is) */ tlbiel_hash_set_isa300(0, is, 0, 2, 1); + /* + * Then flush the sets of the TLB proper. Hash mode uses + * partition scoped TLB translations, which may be flushed + * in !HV mode. + */ + for (set = 0; set < num_sets; set++) + tlbiel_hash_set_isa300(set, is, 0, 0, 0); + ppc_after_tlbiel_barrier(); asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory"); diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c index 1c54821de7bf..0c8557220ae2 100644 --- a/arch/powerpc/mm/book3s64/mmu_context.c +++ b/arch/powerpc/mm/book3s64/mmu_context.c @@ -17,6 +17,7 @@ #include <linux/export.h> #include <linux/gfp.h> #include <linux/slab.h> +#include <linux/cpu.h> #include <asm/mmu_context.h> #include <asm/pgalloc.h> @@ -307,3 +308,22 @@ void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next) isync(); } #endif + +/** + * cleanup_cpu_mmu_context - Clean up MMU details for this CPU (newly offlined) + * + * This clears the CPU from mm_cpumask for all processes, and then flushes the + * local TLB to ensure TLB coherency in case the CPU is onlined again. + * + * KVM guest translations are not necessarily flushed here. If KVM started + * using mm_cpumask or the Linux APIs which do, this would have to be resolved. 
+ */ +#ifdef CONFIG_HOTPLUG_CPU +void cleanup_cpu_mmu_context(void) +{ + int cpu = smp_processor_id(); + + clear_tasks_mm_cpumask(cpu); + tlbiel_all(); +} +#endif diff --git a/arch/powerpc/mm/maccess.c b/arch/powerpc/mm/maccess.c new file mode 100644 index 000000000000..fa9a7a718fc6 --- /dev/null +++ b/arch/powerpc/mm/maccess.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/uaccess.h> +#include <linux/kernel.h> + +bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) +{ + return is_kernel_addr((unsigned long)unsafe_src); +} diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 63f61d8b55e5..f2bf98bdcea2 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -742,8 +742,7 @@ static int __init parse_numa_properties(void) of_node_put(cpu); } - if (likely(nid > 0)) - node_set_online(nid); + node_set_online(nid); } get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells); diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index 74ebe664b016..adae2a6712e1 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -911,6 +911,8 @@ static int smp_core99_cpu_disable(void) mpic_cpu_set_priority(0xf); + cleanup_cpu_mmu_context(); + return 0; } diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 46115231a3b2..4426a109ec2f 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -211,11 +211,16 @@ static void __init pnv_init(void) add_preferred_console("hvc", 0, NULL); if (!radix_enabled()) { + size_t size = sizeof(struct slb_entry) * mmu_slb_size; int i; /* Allocate per cpu area to save old slb contents during MCE */ - for_each_possible_cpu(i) - paca_ptrs[i]->mce_faulty_slbs = memblock_alloc_node(mmu_slb_size, __alignof__(*paca_ptrs[i]->mce_faulty_slbs), cpu_to_node(i)); + for_each_possible_cpu(i) { + paca_ptrs[i]->mce_faulty_slbs = + memblock_alloc_node(size, + __alignof__(struct slb_entry), + cpu_to_node(i)); + } } } diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 54c4ba45c7ce..cbb67813cd5d 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -143,6 +143,9 @@ static int pnv_smp_cpu_disable(void) xive_smp_disable_cpu(); else xics_migrate_irqs_away(); + + cleanup_cpu_mmu_context(); + return 0; } diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index f2837e33bf5d..a02012f1b04a 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -90,6 +90,9 @@ static int pseries_cpu_disable(void) xive_smp_disable_cpu(); else xics_migrate_irqs_away(); + + cleanup_cpu_mmu_context(); + return 0; } diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 133f6adcb39c..b3ac2455faad 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -458,7 +458,8 @@ again: return hwirq; } - virq = irq_create_mapping(NULL, hwirq); + virq = irq_create_mapping_affinity(NULL, hwirq, + entry->affinity); if (!virq) { pr_debug("rtas_msi: Failed mapping hwirq %d\n", hwirq); diff --git a/arch/sparc/lib/csum_copy.S b/arch/sparc/lib/csum_copy.S index 0c0268e77155..d839956407a7 100644 --- a/arch/sparc/lib/csum_copy.S +++ b/arch/sparc/lib/csum_copy.S @@ -71,7 +71,7 @@ FUNC_NAME: /* %o0=src, %o1=dst, %o2=len */ LOAD(prefetch, %o0 + 
0x000, #n_reads) xor %o0, %o1, %g1 - mov 1, %o3 + mov -1, %o3 clr %o4 andcc %g1, 0x3, %g0 bne,pn %icc, 95f diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index f6946b81f74a..fbf26e0f7a6a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -100,6 +100,7 @@ config X86 select ARCH_WANT_DEFAULT_BPF_JIT if X86_64 select ARCH_WANTS_DYNAMIC_TASK_STRUCT select ARCH_WANT_HUGE_PMD_SHARE + select ARCH_WANT_LD_ORPHAN_WARN select ARCH_WANTS_THP_SWAP if X86_64 select BUILDTIME_TABLE_SORT select CLKEVT_I8253 diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 154259f18b8b..1bf21746f4ce 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -209,9 +209,6 @@ ifdef CONFIG_X86_64 LDFLAGS_vmlinux += -z max-page-size=0x200000 endif -# We never want expected sections to be placed heuristically by the -# linker. All sections should be explicitly named in the linker script. -LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn) archscripts: scripts_basic $(Q)$(MAKE) $(build)=arch/x86/tools relocs diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index ee249088cbfe..40b8fd375d52 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -61,7 +61,9 @@ KBUILD_LDFLAGS += $(call ld-option,--no-ld-generated-unwind-info) # Compressed kernel should be built as PIE since it may be loaded at any # address by the bootloader. LDFLAGS_vmlinux := -pie $(call ld-option, --no-dynamic-linker) -LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn) +ifdef CONFIG_LD_ORPHAN_WARN +LDFLAGS_vmlinux += --orphan-handling=warn +endif LDFLAGS_vmlinux += -T hostprogs := mkpiggy diff --git a/arch/x86/boot/compressed/sev-es.c b/arch/x86/boot/compressed/sev-es.c index 954cb2702e23..27826c265aab 100644 --- a/arch/x86/boot/compressed/sev-es.c +++ b/arch/x86/boot/compressed/sev-es.c @@ -32,13 +32,12 @@ struct ghcb *boot_ghcb; */ static bool insn_has_rep_prefix(struct insn *insn) { + insn_byte_t p; int i; insn_get_prefixes(insn); - for (i = 0; i < insn->prefixes.nbytes; i++) { - insn_byte_t p = insn->prefixes.bytes[i]; - + for_each_insn_prefix(insn, i, p) { if (p == 0xf2 || p == 0xf3) return true; } diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index b47cc4226934..485c5066f8b8 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1916,7 +1916,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d * that caused the PEBS record. It's called collision. * If collision happened, the record will be dropped. */ - if (p->status != (1ULL << bit)) { + if (pebs_status != (1ULL << bit)) { for_each_set_bit(i, (unsigned long *)&pebs_status, size) error[i]++; continue; @@ -1940,7 +1940,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d if (error[bit]) { perf_log_lost_samples(event, error[bit]); - if (perf_event_account_interrupt(event)) + if (iregs && perf_event_account_interrupt(event)) x86_pmu_stop(event, 0); } diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h index 5c1ae3eff9d4..a8c3d284fa46 100644 --- a/arch/x86/include/asm/insn.h +++ b/arch/x86/include/asm/insn.h @@ -201,6 +201,21 @@ static inline int insn_offset_immediate(struct insn *insn) return insn_offset_displacement(insn) + insn->displacement.nbytes; } +/** + * for_each_insn_prefix() -- Iterate prefixes in the instruction + * @insn: Pointer to struct insn. + * @idx: Index storage. + * @prefix: Prefix byte. + * + * Iterate prefix bytes of given @insn. 
Each prefix byte is stored in @prefix + * and the index is stored in @idx (note that this @idx is just for a cursor, + * do not change it.) + * Since prefixes.nbytes can be bigger than 4 if some prefixes + * are repeated, it cannot be used for looping over the prefixes. + */ +#define for_each_insn_prefix(insn, idx, prefix) \ + for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++) + #define POP_SS_OPCODE 0x1f #define MOV_SREG_OPCODE 0x8e diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 1b98f8c12b96..235f5cde06fc 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -161,7 +161,7 @@ static int __init early_set_hub_type(void) /* UV4/4A only have a revision difference */ case UV4_HUB_PART_NUMBER: uv_min_hub_revision_id = node_id.s.revision - + UV4_HUB_REVISION_BASE; + + UV4_HUB_REVISION_BASE - 1; uv_hub_type_set(UV4); if (uv_min_hub_revision_id == UV4A_HUB_REVISION_BASE) uv_hub_type_set(UV4|UV4A); diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index e5f4ee8f4c3b..e8b5f1cf1ae8 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -570,6 +570,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r) if (d) { cpumask_set_cpu(cpu, &d->cpu_mask); + if (r->cache.arch_has_per_cpu_cfg) + rdt_domain_reconfigure_cdp(r); return; } @@ -923,6 +925,7 @@ static __init void rdt_init_res_defs_intel(void) r->rid == RDT_RESOURCE_L2CODE) { r->cache.arch_has_sparse_bitmaps = false; r->cache.arch_has_empty_bitmaps = false; + r->cache.arch_has_per_cpu_cfg = false; } else if (r->rid == RDT_RESOURCE_MBA) { r->msr_base = MSR_IA32_MBA_THRTL_BASE; r->msr_update = mba_wrmsr_intel; @@ -943,6 +946,7 @@ static __init void rdt_init_res_defs_amd(void) r->rid == RDT_RESOURCE_L2CODE) { r->cache.arch_has_sparse_bitmaps = true; r->cache.arch_has_empty_bitmaps = true; + r->cache.arch_has_per_cpu_cfg = true; } else if (r->rid == RDT_RESOURCE_MBA) { r->msr_base = MSR_IA32_MBA_BW_BASE; r->msr_update = mba_wrmsr_amd; diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 80fa997fae60..f65d3c0dbc41 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -360,6 +360,8 @@ struct msr_param { * executing entities * @arch_has_sparse_bitmaps: True if a bitmap like f00f is valid. * @arch_has_empty_bitmaps: True if the '0' bitmap is valid. + * @arch_has_per_cpu_cfg: True if QOS_CFG register for this cache + * level has CPU scope. 
*/ struct rdt_cache { unsigned int cbm_len; @@ -369,6 +371,7 @@ struct rdt_cache { unsigned int shareable_bits; bool arch_has_sparse_bitmaps; bool arch_has_empty_bitmaps; + bool arch_has_per_cpu_cfg; }; /** diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 6f4ca4bea625..f3418428682b 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1909,8 +1909,13 @@ static int set_cache_qos_cfg(int level, bool enable) r_l = &rdt_resources_all[level]; list_for_each_entry(d, &r_l->domains, list) { - /* Pick one CPU from each domain instance to update MSR */ - cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); + if (r_l->cache.arch_has_per_cpu_cfg) + /* Pick all the CPUs in the domain instance */ + for_each_cpu(cpu, &d->cpu_mask) + cpumask_set_cpu(cpu, cpu_mask); + else + /* Pick one CPU from each domain instance to update MSR */ + cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); } cpu = get_cpu(); /* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */ diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 3fdaa042823d..138bdb1fd136 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -255,12 +255,13 @@ static volatile u32 good_2byte_insns[256 / 32] = { static bool is_prefix_bad(struct insn *insn) { + insn_byte_t p; int i; - for (i = 0; i < insn->prefixes.nbytes; i++) { + for_each_insn_prefix(insn, i, p) { insn_attr_t attr; - attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]); + attr = inat_get_opcode_attribute(p); switch (attr) { case INAT_MAKE_PREFIX(INAT_PFX_ES): case INAT_MAKE_PREFIX(INAT_PFX_CS): @@ -715,6 +716,7 @@ static const struct uprobe_xol_ops push_xol_ops = { static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) { u8 opc1 = OPCODE1(insn); + insn_byte_t p; int i; switch (opc1) { @@ -746,8 +748,8 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) * Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix. * No one uses these insns, reject any branch insns with such prefix. */ - for (i = 0; i < insn->prefixes.nbytes; i++) { - if (insn->prefixes.bytes[i] == 0x66) + for_each_insn_prefix(insn, i, p) { + if (p == 0x66) return -ENOTSUPP; } diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c index 58f7fb95c7f4..4229950a5d78 100644 --- a/arch/x86/lib/insn-eval.c +++ b/arch/x86/lib/insn-eval.c @@ -63,13 +63,12 @@ static bool is_string_insn(struct insn *insn) */ bool insn_has_rep_prefix(struct insn *insn) { + insn_byte_t p; int i; insn_get_prefixes(insn); - for (i = 0; i < insn->prefixes.nbytes; i++) { - insn_byte_t p = insn->prefixes.bytes[i]; - + for_each_insn_prefix(insn, i, p) { if (p == 0xf2 || p == 0xf3) return true; } @@ -95,14 +94,15 @@ static int get_seg_reg_override_idx(struct insn *insn) { int idx = INAT_SEG_REG_DEFAULT; int num_overrides = 0, i; + insn_byte_t p; insn_get_prefixes(insn); /* Look for any segment override prefixes. 
*/ - for (i = 0; i < insn->prefixes.nbytes; i++) { + for_each_insn_prefix(insn, i, p) { insn_attr_t attr; - attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]); + attr = inat_get_opcode_attribute(p); switch (attr) { case INAT_MAKE_PREFIX(INAT_PFX_CS): idx = INAT_SEG_REG_CS; diff --git a/block/blk-merge.c b/block/blk-merge.c index bcf5e4580603..97b7c2821565 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -144,7 +144,7 @@ static struct bio *blk_bio_write_same_split(struct request_queue *q, static inline unsigned get_max_io_size(struct request_queue *q, struct bio *bio) { - unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector); + unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0); unsigned max_sectors = sectors; unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT; unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT; diff --git a/block/blk-settings.c b/block/blk-settings.c index 9741d1d83e98..659cdb8a07fe 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -547,7 +547,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, t->io_min = max(t->io_min, b->io_min); t->io_opt = lcm_not_zero(t->io_opt, b->io_opt); - t->chunk_sectors = lcm_not_zero(t->chunk_sectors, b->chunk_sectors); + + /* Set non-power-of-2 compatible chunk_sectors boundary */ + if (b->chunk_sectors) + t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors); /* Physical block size a multiple of the logical block size? */ if (t->physical_block_size & (t->logical_block_size - 1)) { diff --git a/drivers/accessibility/speakup/spk_ttyio.c b/drivers/accessibility/speakup/spk_ttyio.c index 669392f31d4e..6284aff434a1 100644 --- a/drivers/accessibility/speakup/spk_ttyio.c +++ b/drivers/accessibility/speakup/spk_ttyio.c @@ -47,27 +47,20 @@ static int spk_ttyio_ldisc_open(struct tty_struct *tty) { struct spk_ldisc_data *ldisc_data; + if (tty != speakup_tty) + /* Somebody tried to use this line discipline outside speakup */ + return -ENODEV; + if (!tty->ops->write) return -EOPNOTSUPP; - mutex_lock(&speakup_tty_mutex); - if (speakup_tty) { - mutex_unlock(&speakup_tty_mutex); - return -EBUSY; - } - speakup_tty = tty; - ldisc_data = kmalloc(sizeof(*ldisc_data), GFP_KERNEL); - if (!ldisc_data) { - speakup_tty = NULL; - mutex_unlock(&speakup_tty_mutex); + if (!ldisc_data) return -ENOMEM; - } init_completion(&ldisc_data->completion); ldisc_data->buf_free = true; - speakup_tty->disc_data = ldisc_data; - mutex_unlock(&speakup_tty_mutex); + tty->disc_data = ldisc_data; return 0; } @@ -191,9 +184,25 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth) tty_unlock(tty); + mutex_lock(&speakup_tty_mutex); + speakup_tty = tty; ret = tty_set_ldisc(tty, N_SPEAKUP); if (ret) - pr_err("speakup: Failed to set N_SPEAKUP on tty\n"); + speakup_tty = NULL; + mutex_unlock(&speakup_tty_mutex); + + if (!ret) + /* Success */ + return 0; + + pr_err("speakup: Failed to set N_SPEAKUP on tty\n"); + + tty_lock(tty); + if (tty->ops->close) + tty->ops->close(tty, NULL); + tty_unlock(tty); + + tty_kclose(tty); return ret; } diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 8d7001712062..040be48ce046 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -1,6 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 menu "Generic Driver Options" +config AUXILIARY_BUS + bool + config UEVENT_HELPER bool "Support for uevent helper" help diff --git a/drivers/base/Makefile b/drivers/base/Makefile index 41369fc7004f..5e7bf9669a81 100644 --- a/drivers/base/Makefile +++ 
b/drivers/base/Makefile @@ -7,6 +7,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \ attribute_container.o transport_class.o \ topology.o container.o property.o cacheinfo.o \ swnode.o +obj-$(CONFIG_AUXILIARY_BUS) += auxiliary.o obj-$(CONFIG_DEVTMPFS) += devtmpfs.o obj-y += power/ obj-$(CONFIG_ISA_BUS_API) += isa.o diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c new file mode 100644 index 000000000000..f303daadf843 --- /dev/null +++ b/drivers/base/auxiliary.c @@ -0,0 +1,274 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2020 Intel Corporation + * + * Please see Documentation/driver-api/auxiliary_bus.rst for more information. + */ + +#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__ + +#include <linux/device.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> +#include <linux/string.h> +#include <linux/auxiliary_bus.h> + +static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id, + const struct auxiliary_device *auxdev) +{ + for (; id->name[0]; id++) { + const char *p = strrchr(dev_name(&auxdev->dev), '.'); + int match_size; + + if (!p) + continue; + match_size = p - dev_name(&auxdev->dev); + + /* use dev_name(&auxdev->dev) prefix before last '.' char to match to */ + if (strlen(id->name) == match_size && + !strncmp(dev_name(&auxdev->dev), id->name, match_size)) + return id; + } + return NULL; +} + +static int auxiliary_match(struct device *dev, struct device_driver *drv) +{ + struct auxiliary_device *auxdev = to_auxiliary_dev(dev); + struct auxiliary_driver *auxdrv = to_auxiliary_drv(drv); + + return !!auxiliary_match_id(auxdrv->id_table, auxdev); +} + +static int auxiliary_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + const char *name, *p; + + name = dev_name(dev); + p = strrchr(name, '.'); + + return add_uevent_var(env, "MODALIAS=%s%.*s", AUXILIARY_MODULE_PREFIX, + (int)(p - name), name); +} + +static const struct dev_pm_ops auxiliary_dev_pm_ops = { + SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume) +}; + +static int auxiliary_bus_probe(struct device *dev) +{ + struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver); + struct auxiliary_device *auxdev = to_auxiliary_dev(dev); + int ret; + + ret = dev_pm_domain_attach(dev, true); + if (ret) { + dev_warn(dev, "Failed to attach to PM Domain : %d\n", ret); + return ret; + } + + ret = auxdrv->probe(auxdev, auxiliary_match_id(auxdrv->id_table, auxdev)); + if (ret) + dev_pm_domain_detach(dev, true); + + return ret; +} + +static int auxiliary_bus_remove(struct device *dev) +{ + struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver); + struct auxiliary_device *auxdev = to_auxiliary_dev(dev); + + if (auxdrv->remove) + auxdrv->remove(auxdev); + dev_pm_domain_detach(dev, true); + + return 0; +} + +static void auxiliary_bus_shutdown(struct device *dev) +{ + struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver); + struct auxiliary_device *auxdev = to_auxiliary_dev(dev); + + if (auxdrv->shutdown) + auxdrv->shutdown(auxdev); +} + +static struct bus_type auxiliary_bus_type = { + .name = "auxiliary", + .probe = auxiliary_bus_probe, + .remove = auxiliary_bus_remove, + .shutdown = auxiliary_bus_shutdown, + .match = auxiliary_match, + .uevent = auxiliary_uevent, + .pm = &auxiliary_dev_pm_ops, +}; + +/** + * auxiliary_device_init - check 
auxiliary_device and initialize + * @auxdev: auxiliary device struct + * + * This is the first step in the two-step process to register an + * auxiliary_device. + * + * When this function returns an error code, then the device_initialize will + * *not* have been performed, and the caller will be responsible to free any + * memory allocated for the auxiliary_device in the error path directly. + * + * It returns 0 on success. On success, the device_initialize has been + * performed. After this point any error unwinding will need to include a call + * to auxiliary_device_uninit(). In this post-initialize error scenario, a call + * to the device's .release callback will be triggered, and all memory clean-up + * is expected to be handled there. + */ +int auxiliary_device_init(struct auxiliary_device *auxdev) +{ + struct device *dev = &auxdev->dev; + + if (!dev->parent) { + pr_err("auxiliary_device has a NULL dev->parent\n"); + return -EINVAL; + } + + if (!auxdev->name) { + pr_err("auxiliary_device has a NULL name\n"); + return -EINVAL; + } + + dev->bus = &auxiliary_bus_type; + device_initialize(&auxdev->dev); + return 0; +} +EXPORT_SYMBOL_GPL(auxiliary_device_init); + +/** + * __auxiliary_device_add - add an auxiliary bus device + * @auxdev: auxiliary bus device to add to the bus + * @modname: name of the parent device's driver module + * + * This is the second step in the two-step process to register an + * auxiliary_device. + * + * This function must be called after a successful call to + * auxiliary_device_init(), which will perform the device_initialize. This + * means that if this returns an error code, then a call to + * auxiliary_device_uninit() must be performed so that the .release callback + * will be triggered to free the memory associated with the auxiliary_device. + * + * The expectation is that users will call the "auxiliary_device_add" macro so + * that the caller's KBUILD_MODNAME is automatically inserted for the modname + * parameter. Only if a user requires a custom name would this version be + * called directly. + */ +int __auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname) +{ + struct device *dev = &auxdev->dev; + int ret; + + if (!modname) { + dev_err(dev, "auxiliary device modname is NULL\n"); + return -EINVAL; + } + + ret = dev_set_name(dev, "%s.%s.%d", modname, auxdev->name, auxdev->id); + if (ret) { + dev_err(dev, "auxiliary device dev_set_name failed: %d\n", ret); + return ret; + } + + ret = device_add(dev); + if (ret) + dev_err(dev, "adding auxiliary device failed!: %d\n", ret); + + return ret; +} +EXPORT_SYMBOL_GPL(__auxiliary_device_add); + +/** + * auxiliary_find_device - auxiliary device iterator for locating a particular device. + * @start: Device to begin with + * @data: Data to pass to match function + * @match: Callback function to check device + * + * This function returns a reference to a device that is 'found' + * for later use, as determined by the @match callback. + * + * The callback should return 0 if the device doesn't match and non-zero + * if it does. If the callback returns non-zero, this function will + * return to the caller and not iterate over any more devices. 
+ */ +struct auxiliary_device *auxiliary_find_device(struct device *start, + const void *data, + int (*match)(struct device *dev, const void *data)) +{ + struct device *dev; + + dev = bus_find_device(&auxiliary_bus_type, start, data, match); + if (!dev) + return NULL; + + return to_auxiliary_dev(dev); +} +EXPORT_SYMBOL_GPL(auxiliary_find_device); + +/** + * __auxiliary_driver_register - register a driver for auxiliary bus devices + * @auxdrv: auxiliary_driver structure + * @owner: owning module/driver + * @modname: KBUILD_MODNAME for parent driver + */ +int __auxiliary_driver_register(struct auxiliary_driver *auxdrv, + struct module *owner, const char *modname) +{ + if (WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table)) + return -EINVAL; + + if (auxdrv->name) + auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s.%s", modname, + auxdrv->name); + else + auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s", modname); + if (!auxdrv->driver.name) + return -ENOMEM; + + auxdrv->driver.owner = owner; + auxdrv->driver.bus = &auxiliary_bus_type; + auxdrv->driver.mod_name = modname; + + return driver_register(&auxdrv->driver); +} +EXPORT_SYMBOL_GPL(__auxiliary_driver_register); + +/** + * auxiliary_driver_unregister - unregister a driver + * @auxdrv: auxiliary_driver structure + */ +void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv) +{ + driver_unregister(&auxdrv->driver); + kfree(auxdrv->driver.name); +} +EXPORT_SYMBOL_GPL(auxiliary_driver_unregister); + +static int __init auxiliary_bus_init(void) +{ + return bus_register(&auxiliary_bus_type); +} + +static void __exit auxiliary_bus_exit(void) +{ + bus_unregister(&auxiliary_bus_type); +} + +module_init(auxiliary_bus_init); +module_exit(auxiliary_bus_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Auxiliary Bus"); +MODULE_AUTHOR("David Ertman <[email protected]>"); +MODULE_AUTHOR("Kiran Patil <[email protected]>"); diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 88ce5f0ffc4b..41ff2071d7ef 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -437,31 +437,38 @@ int btintel_read_version_tlv(struct hci_dev *hdev, struct intel_version_tlv *ver tlv = (struct intel_tlv *)skb->data; switch (tlv->type) { case INTEL_TLV_CNVI_TOP: - version->cnvi_top = get_unaligned_le32(tlv->val); + version->cnvi_top = + __le32_to_cpu(get_unaligned_le32(tlv->val)); break; case INTEL_TLV_CNVR_TOP: - version->cnvr_top = get_unaligned_le32(tlv->val); + version->cnvr_top = + __le32_to_cpu(get_unaligned_le32(tlv->val)); break; case INTEL_TLV_CNVI_BT: - version->cnvi_bt = get_unaligned_le32(tlv->val); + version->cnvi_bt = + __le32_to_cpu(get_unaligned_le32(tlv->val)); break; case INTEL_TLV_CNVR_BT: - version->cnvr_bt = get_unaligned_le32(tlv->val); + version->cnvr_bt = + __le32_to_cpu(get_unaligned_le32(tlv->val)); break; case INTEL_TLV_DEV_REV_ID: - version->dev_rev_id = get_unaligned_le16(tlv->val); + version->dev_rev_id = + __le16_to_cpu(get_unaligned_le16(tlv->val)); break; case INTEL_TLV_IMAGE_TYPE: version->img_type = tlv->val[0]; break; case INTEL_TLV_TIME_STAMP: - version->timestamp = get_unaligned_le16(tlv->val); + version->timestamp = + __le16_to_cpu(get_unaligned_le16(tlv->val)); break; case INTEL_TLV_BUILD_TYPE: version->build_type = tlv->val[0]; break; case INTEL_TLV_BUILD_NUM: - version->build_num = get_unaligned_le32(tlv->val); + version->build_num = + __le32_to_cpu(get_unaligned_le32(tlv->val)); break; case INTEL_TLV_SECURE_BOOT: version->secure_boot = tlv->val[0]; diff --git 
a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h index 78cc64b42b30..6511b091caf5 100644 --- a/drivers/bluetooth/btintel.h +++ b/drivers/bluetooth/btintel.h @@ -132,6 +132,12 @@ struct intel_debug_features { __u8 page1[16]; } __packed; +#define INTEL_HW_PLATFORM(cnvx_bt) ((u8)(((cnvx_bt) & 0x0000ff00) >> 8)) +#define INTEL_HW_VARIANT(cnvx_bt) ((u8)(((cnvx_bt) & 0x003f0000) >> 16)) +#define INTEL_CNVX_TOP_TYPE(cnvx_top) ((cnvx_top) & 0x00000fff) +#define INTEL_CNVX_TOP_STEP(cnvx_top) (((cnvx_top) & 0x0f000000) >> 24) +#define INTEL_CNVX_TOP_PACK_SWAB(t, s) __swab16(((__u16)(((t) << 4) | (s)))) + #if IS_ENABLED(CONFIG_BT_INTEL) int btintel_check_bdaddr(struct hci_dev *hdev); diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c index ba45c59bd9f3..5f9f02795631 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -704,7 +704,7 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) err = mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to power on data RAM (%d)", err); - return err; + goto free_fw; } fw_ptr = fw->data; diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index ce9dcffdc5bf..f85a55add9be 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -14,12 +14,11 @@ #define VERSION "0.1" -int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version, +int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver, enum qca_btsoc_type soc_type) { struct sk_buff *skb; struct edl_event_hdr *edl; - struct qca_btsoc_version *ver; char cmd; int err = 0; u8 event_type = HCI_EV_VENDOR; @@ -70,9 +69,9 @@ int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version, } if (soc_type >= QCA_WCN3991) - memmove(&edl->data, &edl->data[1], sizeof(*ver)); - - ver = (struct qca_btsoc_version *)(edl->data); + memcpy(ver, edl->data + 1, sizeof(*ver)); + else + memcpy(ver, &edl->data, sizeof(*ver)); bt_dev_info(hdev, "QCA Product ID :0x%08x", le32_to_cpu(ver->product_id)); @@ -83,13 +82,7 @@ int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version, bt_dev_info(hdev, "QCA Patch Version:0x%08x", le16_to_cpu(ver->patch_ver)); - /* QCA chipset version can be decided by patch and SoC - * version, combination with upper 2 bytes from SoC - * and lower 2 bytes from patch will be used. 
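The INTEL_HW_*/INTEL_CNVX_* macros added to btintel.h above are plain bit-field extractions from the 32-bit CNVi/CNVR words. A worked decode with illustrative values — 0x00173700 and 0x01000210 are made up for the example, not real hardware IDs:

	u32 cnvi_bt  = 0x00173700;
	u32 cnvi_top = 0x01000210;

	INTEL_HW_PLATFORM(cnvi_bt);	/* (0x00173700 & 0x0000ff00) >> 8  == 0x37 */
	INTEL_HW_VARIANT(cnvi_bt);	/* (0x00173700 & 0x003f0000) >> 16 == 0x17 */
	INTEL_CNVX_TOP_TYPE(cnvi_top);	/* 0x01000210 & 0xfff == 0x210 */
	INTEL_CNVX_TOP_STEP(cnvi_top);	/* (0x01000210 & 0x0f000000) >> 24 == 0x1 */
	INTEL_CNVX_TOP_PACK_SWAB(0x210, 0x1);
					/* swab16((0x210 << 4) | 0x1) == swab16(0x2101) == 0x0121 */

The packed-and-swapped value is what the btusb firmware-name builder further down stitches into the "ibt-%04x-%04x" file name.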
- */ - *soc_version = (le32_to_cpu(ver->soc_id) << 16) | - (le16_to_cpu(ver->rom_ver) & 0x0000ffff); - if (*soc_version == 0) + if (ver->soc_id == 0 || ver->rom_ver == 0) err = -EILSEQ; out: @@ -446,15 +439,20 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr) EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome); int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, - enum qca_btsoc_type soc_type, u32 soc_ver, + enum qca_btsoc_type soc_type, struct qca_btsoc_version ver, const char *firmware_name) { struct qca_fw_config config; int err; u8 rom_ver = 0; + u32 soc_ver; bt_dev_dbg(hdev, "QCA setup on UART"); + soc_ver = get_soc_ver(ver.soc_id, ver.rom_ver); + + bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver); + config.user_baud_rate = baudrate; /* Download rampatch file */ @@ -491,9 +489,15 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, if (firmware_name) snprintf(config.fwname, sizeof(config.fwname), "qca/%s", firmware_name); - else if (qca_is_wcn399x(soc_type)) - snprintf(config.fwname, sizeof(config.fwname), - "qca/crnv%02x.bin", rom_ver); + else if (qca_is_wcn399x(soc_type)) { + if (ver.soc_id == QCA_WCN3991_SOC_ID) { + snprintf(config.fwname, sizeof(config.fwname), + "qca/crnv%02xu.bin", rom_ver); + } else { + snprintf(config.fwname, sizeof(config.fwname), + "qca/crnv%02x.bin", rom_ver); + } + } else if (soc_type == QCA_QCA6390) snprintf(config.fwname, sizeof(config.fwname), "qca/htnv%02x.bin", rom_ver); diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h index d81b74c408a5..e73b8f8775bd 100644 --- a/drivers/bluetooth/btqca.h +++ b/drivers/bluetooth/btqca.h @@ -34,6 +34,18 @@ #define QCA_HCI_CC_OPCODE 0xFC00 #define QCA_HCI_CC_SUCCESS 0x00 +#define QCA_WCN3991_SOC_ID (0x40014320) + +/* QCA chipset version can be decided by patch and SoC + * version, combination with upper 2 bytes from SoC + * and lower 2 bytes from patch will be used. 
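The comment moved into btqca.h here is implemented by the get_soc_ver() helper defined immediately below it. A worked instance, using the WCN3991 SoC ID from this hunk and a hypothetical rom_ver:

	__le32 soc_id  = cpu_to_le32(QCA_WCN3991_SOC_ID);	/* 0x40014320 */
	__le16 rom_ver = cpu_to_le16(0x0200);			/* hypothetical */
	u32 soc_ver    = get_soc_ver(soc_id, rom_ver);
	/* (0x40014320 << 16) | 0x0200 == 0x43200200: the SoC ID's low
	 * half-word supplies the upper 2 bytes and the ROM version the
	 * lower 2, exactly as the comment describes. */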
+ */ +#define get_soc_ver(soc_id, rom_ver) \ + ((le32_to_cpu(soc_id) << 16) | (le16_to_cpu(rom_ver))) + +#define QCA_FW_BUILD_VER_LEN 255 + + enum qca_baudrate { QCA_BAUDRATE_115200 = 0, QCA_BAUDRATE_57600, @@ -136,9 +148,9 @@ enum qca_btsoc_type { int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr); int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, - enum qca_btsoc_type soc_type, u32 soc_ver, + enum qca_btsoc_type soc_type, struct qca_btsoc_version ver, const char *firmware_name); -int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version, +int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver, enum qca_btsoc_type); int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr); int qca_send_pre_shutdown_cmd(struct hci_dev *hdev); @@ -155,13 +167,15 @@ static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdad } static inline int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, - enum qca_btsoc_type soc_type, u32 soc_ver, + enum qca_btsoc_type soc_type, + struct qca_btsoc_version ver, const char *firmware_name) { return -EOPNOTSUPP; } -static inline int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version, +static inline int qca_read_soc_version(struct hci_dev *hdev, + struct qca_btsoc_version *ver, enum qca_btsoc_type) { return -EOPNOTSUPP; diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index 3a9afc905f24..a4f7cace66b0 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -18,23 +18,25 @@ #define VERSION "0.1" #define RTL_EPATCH_SIGNATURE "Realtech" -#define RTL_ROM_LMP_3499 0x3499 #define RTL_ROM_LMP_8723A 0x1200 #define RTL_ROM_LMP_8723B 0x8723 -#define RTL_ROM_LMP_8723D 0x8873 #define RTL_ROM_LMP_8821A 0x8821 #define RTL_ROM_LMP_8761A 0x8761 #define RTL_ROM_LMP_8822B 0x8822 +#define RTL_ROM_LMP_8852A 0x8852 #define RTL_CONFIG_MAGIC 0x8723ab55 #define IC_MATCH_FL_LMPSUBV (1 << 0) #define IC_MATCH_FL_HCIREV (1 << 1) #define IC_MATCH_FL_HCIVER (1 << 2) #define IC_MATCH_FL_HCIBUS (1 << 3) -#define IC_INFO(lmps, hcir) \ - .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV, \ +#define IC_INFO(lmps, hcir, hciv, bus) \ + .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV | \ + IC_MATCH_FL_HCIVER | IC_MATCH_FL_HCIBUS, \ .lmp_subver = (lmps), \ - .hci_rev = (hcir) + .hci_rev = (hcir), \ + .hci_ver = (hciv), \ + .hci_bus = (bus) struct id_table { __u16 match_flags; @@ -55,119 +57,100 @@ struct btrtl_device_info { int fw_len; u8 *cfg_data; int cfg_len; + bool drop_fw; }; static const struct id_table ic_id_table[] = { - { IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_8723A, 0x0, - .config_needed = false, - .has_rom_version = false, - .fw_name = "rtl_bt/rtl8723a_fw.bin", - .cfg_name = NULL }, - - { IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_3499, 0x0, + /* 8723A */ + { IC_INFO(RTL_ROM_LMP_8723A, 0xb, 0x6, HCI_USB), .config_needed = false, .has_rom_version = false, .fw_name = "rtl_bt/rtl8723a_fw.bin", .cfg_name = NULL }, /* 8723BS */ - { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV | - IC_MATCH_FL_HCIVER | IC_MATCH_FL_HCIBUS, - .lmp_subver = RTL_ROM_LMP_8723B, - .hci_rev = 0xb, - .hci_ver = 6, - .hci_bus = HCI_UART, + { IC_INFO(RTL_ROM_LMP_8723B, 0xb, 0x6, HCI_UART), .config_needed = true, .has_rom_version = true, .fw_name = "rtl_bt/rtl8723bs_fw.bin", .cfg_name = "rtl_bt/rtl8723bs_config" }, /* 8723B */ - { IC_INFO(RTL_ROM_LMP_8723B, 0xb), + { IC_INFO(RTL_ROM_LMP_8723B, 0xb, 0x6, HCI_USB), .config_needed = false, .has_rom_version = true, .fw_name = 
"rtl_bt/rtl8723b_fw.bin", .cfg_name = "rtl_bt/rtl8723b_config" }, /* 8723D */ - { IC_INFO(RTL_ROM_LMP_8723B, 0xd), + { IC_INFO(RTL_ROM_LMP_8723B, 0xd, 0x8, HCI_USB), .config_needed = true, .has_rom_version = true, .fw_name = "rtl_bt/rtl8723d_fw.bin", .cfg_name = "rtl_bt/rtl8723d_config" }, /* 8723DS */ - { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV | - IC_MATCH_FL_HCIVER | IC_MATCH_FL_HCIBUS, - .lmp_subver = RTL_ROM_LMP_8723B, - .hci_rev = 0xd, - .hci_ver = 8, - .hci_bus = HCI_UART, + { IC_INFO(RTL_ROM_LMP_8723B, 0xd, 0x8, HCI_UART), .config_needed = true, .has_rom_version = true, .fw_name = "rtl_bt/rtl8723ds_fw.bin", .cfg_name = "rtl_bt/rtl8723ds_config" }, - /* 8723DU */ - { IC_INFO(RTL_ROM_LMP_8723D, 0x826C), - .config_needed = true, - .has_rom_version = true, - .fw_name = "rtl_bt/rtl8723d_fw.bin", - .cfg_name = "rtl_bt/rtl8723d_config" }, - /* 8821A */ - { IC_INFO(RTL_ROM_LMP_8821A, 0xa), + { IC_INFO(RTL_ROM_LMP_8821A, 0xa, 0x6, HCI_USB), .config_needed = false, .has_rom_version = true, .fw_name = "rtl_bt/rtl8821a_fw.bin", .cfg_name = "rtl_bt/rtl8821a_config" }, /* 8821C */ - { IC_INFO(RTL_ROM_LMP_8821A, 0xc), + { IC_INFO(RTL_ROM_LMP_8821A, 0xc, 0x8, HCI_USB), .config_needed = false, .has_rom_version = true, .fw_name = "rtl_bt/rtl8821c_fw.bin", .cfg_name = "rtl_bt/rtl8821c_config" }, /* 8761A */ - { IC_INFO(RTL_ROM_LMP_8761A, 0xa), + { IC_INFO(RTL_ROM_LMP_8761A, 0xa, 0x6, HCI_USB), .config_needed = false, .has_rom_version = true, .fw_name = "rtl_bt/rtl8761a_fw.bin", .cfg_name = "rtl_bt/rtl8761a_config" }, /* 8761B */ - { IC_INFO(RTL_ROM_LMP_8761A, 0xb), + { IC_INFO(RTL_ROM_LMP_8761A, 0xb, 0xa, HCI_USB), .config_needed = false, .has_rom_version = true, .fw_name = "rtl_bt/rtl8761b_fw.bin", .cfg_name = "rtl_bt/rtl8761b_config" }, /* 8822C with UART interface */ - { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV | - IC_MATCH_FL_HCIBUS, - .lmp_subver = RTL_ROM_LMP_8822B, - .hci_rev = 0x000c, - .hci_ver = 0x0a, - .hci_bus = HCI_UART, + { IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_UART), .config_needed = true, .has_rom_version = true, .fw_name = "rtl_bt/rtl8822cs_fw.bin", .cfg_name = "rtl_bt/rtl8822cs_config" }, /* 8822C with USB interface */ - { IC_INFO(RTL_ROM_LMP_8822B, 0xc), + { IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_USB), .config_needed = false, .has_rom_version = true, .fw_name = "rtl_bt/rtl8822cu_fw.bin", .cfg_name = "rtl_bt/rtl8822cu_config" }, /* 8822B */ - { IC_INFO(RTL_ROM_LMP_8822B, 0xb), + { IC_INFO(RTL_ROM_LMP_8822B, 0xb, 0x7, HCI_USB), .config_needed = true, .has_rom_version = true, .fw_name = "rtl_bt/rtl8822b_fw.bin", .cfg_name = "rtl_bt/rtl8822b_config" }, + + /* 8852A */ + { IC_INFO(RTL_ROM_LMP_8852A, 0xa, 0xb, HCI_USB), + .config_needed = false, + .has_rom_version = true, + .fw_name = "rtl_bt/rtl8852au_fw.bin", + .cfg_name = "rtl_bt/rtl8852au_config" }, }; static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev, @@ -275,6 +258,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, { RTL_ROM_LMP_8821A, 10 }, /* 8821C */ { RTL_ROM_LMP_8822B, 13 }, /* 8822C */ { RTL_ROM_LMP_8761A, 14 }, /* 8761B */ + { RTL_ROM_LMP_8852A, 18 }, /* 8852A */ }; min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3; @@ -563,6 +547,8 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, u16 hci_rev, lmp_subver; u8 hci_ver; int ret; + u16 opcode; + u8 cmd[2]; btrtl_dev = kzalloc(sizeof(*btrtl_dev), GFP_KERNEL); if (!btrtl_dev) { @@ -584,6 +570,49 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, hci_ver = 
resp->hci_ver; hci_rev = le16_to_cpu(resp->hci_rev); lmp_subver = le16_to_cpu(resp->lmp_subver); + + if (resp->hci_ver == 0x8 && le16_to_cpu(resp->hci_rev) == 0x826c && + resp->lmp_ver == 0x8 && le16_to_cpu(resp->lmp_subver) == 0xa99e) + btrtl_dev->drop_fw = true; + + if (btrtl_dev->drop_fw) { + opcode = hci_opcode_pack(0x3f, 0x66); + cmd[0] = opcode & 0xff; + cmd[1] = opcode >> 8; + + skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL); + if (!skb) + goto out_free; + + skb_put_data(skb, cmd, sizeof(cmd)); + hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; + + hdev->send(hdev, skb); + + /* Ensure the above vendor command is sent to controller and + * process has done. + */ + msleep(200); + + /* Read the local version again. Expect to have the vanilla + * version as cold boot. + */ + skb = btrtl_read_local_version(hdev); + if (IS_ERR(skb)) { + ret = PTR_ERR(skb); + goto err_free; + } + + resp = (struct hci_rp_read_local_version *)skb->data; + rtl_dev_info(hdev, "examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x lmp_subver=%04x", + resp->hci_ver, resp->hci_rev, + resp->lmp_ver, resp->lmp_subver); + + hci_ver = resp->hci_ver; + hci_rev = le16_to_cpu(resp->hci_rev); + lmp_subver = le16_to_cpu(resp->lmp_subver); + } +out_free: kfree_skb(skb); btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver, @@ -654,12 +683,12 @@ int btrtl_download_firmware(struct hci_dev *hdev, switch (btrtl_dev->ic_info->lmp_subver) { case RTL_ROM_LMP_8723A: - case RTL_ROM_LMP_3499: return btrtl_setup_rtl8723a(hdev, btrtl_dev); case RTL_ROM_LMP_8723B: case RTL_ROM_LMP_8821A: case RTL_ROM_LMP_8761A: case RTL_ROM_LMP_8822B: + case RTL_ROM_LMP_8852A: return btrtl_setup_rtl8723b(hdev, btrtl_dev); default: rtl_dev_info(hdev, "assuming no firmware upload needed"); @@ -835,3 +864,5 @@ MODULE_FIRMWARE("rtl_bt/rtl8821a_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8821a_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8822b_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8822b_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8852au_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8852au_config.bin"); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 1005b6e8ff74..03b83aa91277 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -60,6 +60,7 @@ static struct usb_driver btusb_driver; #define BTUSB_WIDEBAND_SPEECH 0x400000 #define BTUSB_VALID_LE_STATES 0x800000 #define BTUSB_QCA_WCN6855 0x1000000 +#define BTUSB_INTEL_NEWGEN 0x2000000 static const struct usb_device_id btusb_table[] = { /* Generic Bluetooth USB device */ @@ -365,7 +366,7 @@ static const struct usb_device_id blacklist_table[] = { BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_NEW | BTUSB_WIDEBAND_SPEECH }, - { USB_DEVICE(0x8087, 0x0032), .driver_info = BTUSB_INTEL_NEW | + { USB_DEVICE(0x8087, 0x0032), .driver_info = BTUSB_INTEL_NEWGEN | BTUSB_WIDEBAND_SPEECH}, { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR }, { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL }, @@ -386,6 +387,10 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + /* Realtek 8852AE Bluetooth devices */ + { USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + /* Realtek Bluetooth devices */ { USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01), .driver_info = BTUSB_REALTEK }, @@ -394,6 +399,9 @@ static const struct usb_device_id blacklist_table[] = { { USB_VENDOR_AND_INTERFACE_INFO(0x0e8d, 0xe0, 0x01, 0x01), .driver_info = 
BTUSB_MEDIATEK }, + /* Additional MediaTek MT7615E Bluetooth devices */ + { USB_DEVICE(0x13d3, 0x3560), .driver_info = BTUSB_MEDIATEK}, + /* Additional Realtek 8723AE Bluetooth devices */ { USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3394), .driver_info = BTUSB_REALTEK }, @@ -425,8 +433,26 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK }, /* Additional Realtek 8822CE Bluetooth devices */ - { USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK }, - { USB_DEVICE(0x13d3, 0x3548), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x04c5, 0x161f), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0b05, 0x18ef), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3548), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3549), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3553), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3555), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x2ff8, 0x3051), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x1358, 0xc123), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0bda, 0xc123), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, /* Silicon Wave based devices */ { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE }, @@ -1763,9 +1789,12 @@ static int btusb_setup_bcm92035(struct hci_dev *hdev) static int btusb_setup_csr(struct hci_dev *hdev) { + struct btusb_data *data = hci_get_drvdata(hdev); + u16 bcdDevice = le16_to_cpu(data->udev->descriptor.bcdDevice); struct hci_rp_read_local_version *rp; struct sk_buff *skb; bool is_fake = false; + int ret; BT_DBG("%s", hdev->name); @@ -1832,6 +1861,12 @@ static int btusb_setup_csr(struct hci_dev *hdev) le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_4_0) is_fake = true; + /* Other clones which beat all the above checks */ + else if (bcdDevice == 0x0134 && + le16_to_cpu(rp->lmp_subver) == 0x0c5c && + le16_to_cpu(rp->hci_ver) == BLUETOOTH_VER_2_0) + is_fake = true; + if (is_fake) { bt_dev_warn(hdev, "CSR: Unbranded CSR clone detected; adding workarounds..."); @@ -1848,6 +1883,43 @@ static int btusb_setup_csr(struct hci_dev *hdev) */ clear_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); clear_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + + /* + * Special workaround for clones with a Barrot 8041a02 chip, + * these clones are really messed-up: + * 1. Their bulk rx endpoint will never report any data unless + * the device was suspended at least once (yes really). + * 2. They will not wakeup when autosuspended and receiving data + * on their bulk rx endpoint from e.g. a keyboard or mouse + * (IOW remote-wakeup support is broken for the bulk endpoint). + * + * To fix 1. enable runtime-suspend, force-suspend the + * hci and then wake-it up by disabling runtime-suspend. + * + * To fix 2. clear the hci's can_wake flag, this way the hci + * will still be autosuspended when it is not open. 
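The workaround code for the two problems described in the comment above lands in the hunk just below. Restated in isolation as a sketch of the pattern (udev being the bound USB device), since the call order is what matters:

	pm_runtime_allow(&udev->dev);			/* let the core autosuspend it */
	if (pm_runtime_suspend(&udev->dev) >= 0)
		msleep(200);				/* let the suspend actually land */
	pm_runtime_forbid(&udev->dev);			/* forbid also resumes the device */
	device_set_wakeup_capable(&udev->dev, false);	/* remote wakeup is broken: don't rely on it */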
+ */ + if (bcdDevice == 0x8891 && + le16_to_cpu(rp->lmp_subver) == 0x1012 && + le16_to_cpu(rp->hci_rev) == 0x0810 && + le16_to_cpu(rp->hci_ver) == BLUETOOTH_VER_4_0) { + bt_dev_warn(hdev, "CSR: detected a fake CSR dongle using a Barrot 8041a02 chip, this chip is very buggy and may have issues\n"); + + pm_runtime_allow(&data->udev->dev); + + ret = pm_runtime_suspend(&data->udev->dev); + if (ret >= 0) + msleep(200); + else + bt_dev_err(hdev, "Failed to suspend the device for Barrot 8041a02 receive-issue workaround\n"); + + pm_runtime_forbid(&data->udev->dev); + + device_set_wakeup_capable(&data->udev->dev, false); + /* Re-enable autosuspend if this was requested */ + if (enable_autosuspend) + usb_enable_autosuspend(data->udev); + } } kfree_skb(skb); @@ -2359,6 +2431,182 @@ static bool btusb_setup_intel_new_get_fw_name(struct intel_version *ver, return true; } +static void btusb_setup_intel_newgen_get_fw_name(const struct intel_version_tlv *ver_tlv, + char *fw_name, size_t len, + const char *suffix) +{ + /* The firmware file name for new generation controllers will be + * ibt-<cnvi_top type+cnvi_top step>-<cnvr_top type+cnvr_top step> + */ + snprintf(fw_name, len, "intel/ibt-%04x-%04x.%s", + INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver_tlv->cnvi_top), + INTEL_CNVX_TOP_STEP(ver_tlv->cnvi_top)), + INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver_tlv->cnvr_top), + INTEL_CNVX_TOP_STEP(ver_tlv->cnvr_top)), + suffix); +} + +static int btusb_intel_download_firmware_newgen(struct hci_dev *hdev, + struct intel_version_tlv *ver, + u32 *boot_param) +{ + const struct firmware *fw; + char fwname[64]; + int err; + struct btusb_data *data = hci_get_drvdata(hdev); + + if (!ver || !boot_param) + return -EINVAL; + + /* The hardware platform number has a fixed value of 0x37 and + * for now only accept this single value. + */ + if (INTEL_HW_PLATFORM(ver->cnvi_bt) != 0x37) { + bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)", + INTEL_HW_PLATFORM(ver->cnvi_bt)); + return -EINVAL; + } + + /* The firmware variant determines if the device is in bootloader + * mode or is running operational firmware. The value 0x03 identifies + * the bootloader and the value 0x23 identifies the operational + * firmware. + * + * When the operational firmware is already present, then only + * the check for valid Bluetooth device address is needed. This + * determines if the device will be added as configured or + * unconfigured controller. + * + * It is not possible to use the Secure Boot Parameters in this + * case since that command is only available in bootloader mode. + */ + if (ver->img_type == 0x03) { + clear_bit(BTUSB_BOOTLOADER, &data->flags); + btintel_check_bdaddr(hdev); + return 0; + } + + /* Check for supported iBT hardware variants of this firmware + * loading method. + * + * This check has been put in place to ensure correct forward + * compatibility options when newer hardware variants come along. + */ + switch (INTEL_HW_VARIANT(ver->cnvi_bt)) { + case 0x17: /* TyP */ + case 0x18: /* Slr */ + case 0x19: /* Slr-F */ + break; + default: + bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)", + INTEL_HW_VARIANT(ver->cnvi_bt)); + return -EINVAL; + } + + /* If the device is not in bootloader mode, then the only possible + * choice is to return an error and abort the device initialization. 
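With the name-construction helper above, the TLV words decoded by the btintel.h macros map straight to a firmware path. Continuing the illustrative CNVx values from that earlier hunk (again made up, not real IDs):

	/* cnvi_top = 0x01000210 -> type 0x210, step 0x1 -> swab16(0x2101) == 0x0121
	 * cnvr_top = 0x02000410 -> type 0x410, step 0x2 -> swab16(0x4102) == 0x0241 */
	char fwname[64];

	btusb_setup_intel_newgen_get_fw_name(&ver_tlv, fwname, sizeof(fwname), "sfi");
	/* fwname == "intel/ibt-0121-0241.sfi"; the same helper is invoked
	 * with a "ddc" suffix later during setup */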
+ */ + if (ver->img_type != 0x01) { + bt_dev_err(hdev, "Unsupported Intel firmware variant (0x%x)", + ver->img_type); + return -ENODEV; + } + + /* It is required that every single firmware fragment is acknowledged + * with a command complete event. If the boot parameters indicate + * that this bootloader does not send them, then abort the setup. + */ + if (ver->limited_cce != 0x00) { + bt_dev_err(hdev, "Unsupported Intel firmware loading method (0x%x)", + ver->limited_cce); + return -EINVAL; + } + + /* Secure boot engine type should be either 1 (ECDSA) or 0 (RSA) */ + if (ver->sbe_type > 0x01) { + bt_dev_err(hdev, "Unsupported Intel secure boot engine type (0x%x)", + ver->sbe_type); + return -EINVAL; + } + + /* If the OTP has no valid Bluetooth device address, then there will + * also be no valid address for the operational firmware. + */ + if (!bacmp(&ver->otp_bd_addr, BDADDR_ANY)) { + bt_dev_info(hdev, "No device address configured"); + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + } + + btusb_setup_intel_newgen_get_fw_name(ver, fwname, sizeof(fwname), "sfi"); + err = request_firmware(&fw, fwname, &hdev->dev); + if (err < 0) { + bt_dev_err(hdev, "Failed to load Intel firmware file (%d)", err); + return err; + } + + bt_dev_info(hdev, "Found device firmware: %s", fwname); + + if (fw->size < 644) { + bt_dev_err(hdev, "Invalid size of firmware file (%zu)", + fw->size); + err = -EBADF; + goto done; + } + + set_bit(BTUSB_DOWNLOADING, &data->flags); + + /* Start firmware downloading and get boot parameter */ + err = btintel_download_firmware_newgen(hdev, fw, boot_param, + INTEL_HW_VARIANT(ver->cnvi_bt), + ver->sbe_type); + if (err < 0) { + /* When FW download fails, send Intel Reset to retry + * FW download. + */ + btintel_reset_to_bootloader(hdev); + goto done; + } + set_bit(BTUSB_FIRMWARE_LOADED, &data->flags); + + bt_dev_info(hdev, "Waiting for firmware download to complete"); + + /* Before switching the device into operational mode and with that + * booting the loaded firmware, wait for the bootloader notification + * that all fragments have been successfully received. + * + * When the event processing receives the notification, then the + * BTUSB_DOWNLOADING flag will be cleared. + * + * The firmware loading should not take longer than 5 seconds + * and thus just timeout if that happens and fail the setup + * of this device. 
+ */ + err = wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING, + TASK_INTERRUPTIBLE, + msecs_to_jiffies(5000)); + if (err == -EINTR) { + bt_dev_err(hdev, "Firmware loading interrupted"); + goto done; + } + + if (err) { + bt_dev_err(hdev, "Firmware loading timeout"); + err = -ETIMEDOUT; + btintel_reset_to_bootloader(hdev); + goto done; + } + + if (test_bit(BTUSB_FIRMWARE_FAILED, &data->flags)) { + bt_dev_err(hdev, "Firmware loading failed"); + err = -ENOEXEC; + goto done; + } + +done: + release_firmware(fw); + return err; +} + static int btusb_intel_download_firmware(struct hci_dev *hdev, struct intel_version *ver, struct intel_boot_params *params, @@ -2693,6 +2941,134 @@ finish: return 0; } +static int btusb_setup_intel_newgen(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + u32 boot_param; + char ddcname[64]; + ktime_t calltime, delta, rettime; + unsigned long long duration; + int err; + struct intel_debug_features features; + struct intel_version_tlv version; + + bt_dev_dbg(hdev, ""); + + /* Set the default boot parameter to 0x0 and it is updated to + * SKU specific boot parameter after reading Intel_Write_Boot_Params + * command while downloading the firmware. + */ + boot_param = 0x00000000; + + calltime = ktime_get(); + + /* Read the Intel version information to determine if the device + * is in bootloader mode or if it already has operational firmware + * loaded. + */ + err = btintel_read_version_tlv(hdev, &version); + if (err) { + bt_dev_err(hdev, "Intel Read version failed (%d)", err); + btintel_reset_to_bootloader(hdev); + return err; + } + + btintel_version_info_tlv(hdev, &version); + + err = btusb_intel_download_firmware_newgen(hdev, &version, &boot_param); + if (err) + return err; + + /* check if controller is already having an operational firmware */ + if (version.img_type == 0x03) + goto finish; + + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long)ktime_to_ns(delta) >> 10; + + bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration); + + calltime = ktime_get(); + + set_bit(BTUSB_BOOTING, &data->flags); + + err = btintel_send_intel_reset(hdev, boot_param); + if (err) { + bt_dev_err(hdev, "Intel Soft Reset failed (%d)", err); + btintel_reset_to_bootloader(hdev); + return err; + } + + /* The bootloader will not indicate when the device is ready. This + * is done by the operational firmware sending bootup notification. + * + * Booting into operational firmware should not take longer than + * 1 second. However if that happens, then just fail the setup + * since something went wrong. + */ + bt_dev_info(hdev, "Waiting for device to boot"); + + err = wait_on_bit_timeout(&data->flags, BTUSB_BOOTING, + TASK_INTERRUPTIBLE, + msecs_to_jiffies(1000)); + + if (err == -EINTR) { + bt_dev_err(hdev, "Device boot interrupted"); + return -EINTR; + } + + if (err) { + bt_dev_err(hdev, "Device boot timeout"); + btintel_reset_to_bootloader(hdev); + return -ETIMEDOUT; + } + + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long)ktime_to_ns(delta) >> 10; + + bt_dev_info(hdev, "Device booted in %llu usecs", duration); + + clear_bit(BTUSB_BOOTLOADER, &data->flags); + + btusb_setup_intel_newgen_get_fw_name(&version, ddcname, sizeof(ddcname), + "ddc"); + /* Once the device is running in operational mode, it needs to + * apply the device configuration (DDC) parameters. 
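A side note on the ktime bookkeeping in btusb_setup_intel_newgen() above: the ">> 10" converts nanoseconds to "usecs" by dividing by 1024 rather than 1000, about 2.3% low, trading exactness for a cheap shift:

	delta    = ktime_sub(rettime, calltime);
	duration = (unsigned long long)ktime_to_ns(delta) >> 10;
	/* e.g. 1,000,000 ns >> 10 == 976, logged as 976 "usecs" instead of 1000 */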
+ * + * The device can work without DDC parameters, so even if it + * fails to load the file, no need to fail the setup. + */ + btintel_load_ddc_config(hdev, ddcname); + + /* Read the Intel supported features and if new exception formats + * supported, need to load the additional DDC config to enable. + */ + btintel_read_debug_features(hdev, &features); + + /* Set DDC mask for available debug features */ + btintel_set_debug_features(hdev, &features); + + /* Read the Intel version information after loading the FW */ + err = btintel_read_version_tlv(hdev, &version); + if (err) + return err; + + btintel_version_info_tlv(hdev, &version); + +finish: + /* Set the event mask for Intel specific vendor events. This enables + * a few extra events that are useful during general operation. It + * does not enable any debugging related events. + * + * The device will function correctly without these events enabled + * and thus no need to fail the setup. + */ + btintel_set_event_mask(hdev, false); + + return 0; +} static int btusb_shutdown_intel(struct hci_dev *hdev) { struct sk_buff *skb; @@ -3067,7 +3443,7 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to power on data RAM (%d)", err); - return err; + goto err_release_fw; } fw_ptr = fw->data; @@ -3444,12 +3820,14 @@ static int btusb_set_bdaddr_wcn6855(struct hci_dev *hdev, #define QCA_SYSCFG_UPDATED 0x40 #define QCA_PATCH_UPDATED 0x80 #define QCA_DFU_TIMEOUT 3000 +#define QCA_FLAG_MULTI_NVM 0x80 struct qca_version { __le32 rom_version; __le32 patch_version; __le32 ram_version; - __le32 ref_clock; + __le16 board_id; + __le16 flag; __u8 reserved[4]; } __packed; @@ -3632,8 +4010,14 @@ static int btusb_setup_qca_load_nvm(struct hci_dev *hdev, char fwname[64]; int err; - snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x.bin", - le32_to_cpu(ver->rom_version)); + if (((ver->flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) { + snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x_%04x.bin", + le32_to_cpu(ver->rom_version), + le16_to_cpu(ver->board_id)); + } else { + snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x.bin", + le32_to_cpu(ver->rom_version)); + } err = request_firmware(&fw, fwname, &hdev->dev); if (err) { @@ -3700,6 +4084,11 @@ static int btusb_setup_qca(struct hci_dev *hdev) return err; } + err = btusb_qca_send_vendor_req(udev, QCA_GET_TARGET_VERSION, &ver, + sizeof(ver)); + if (err < 0) + return err; + if (!(status & QCA_SYSCFG_UPDATED)) { err = btusb_setup_qca_load_nvm(hdev, &ver, info); if (err < 0) @@ -4078,6 +4467,24 @@ static int btusb_probe(struct usb_interface *intf, set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks); } + if (id->driver_info & BTUSB_INTEL_NEWGEN) { + hdev->manufacturer = 2; + hdev->send = btusb_send_frame_intel; + hdev->setup = btusb_setup_intel_newgen; + hdev->shutdown = btusb_shutdown_intel_new; + hdev->hw_error = btintel_hw_error; + hdev->set_diag = btintel_set_diag; + hdev->set_bdaddr = btintel_set_bdaddr; + hdev->cmd_timeout = btusb_intel_cmd_timeout; + set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); + set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks); + + data->recv_event = btusb_recv_event_intel; + data->recv_bulk = btusb_recv_bulk_intel; + set_bit(BTUSB_BOOTLOADER, &data->flags); + } + if (id->driver_info & BTUSB_MARVELL) hdev->set_bdaddr = btusb_set_bdaddr_marvell; diff --git a/drivers/bluetooth/hci_h5.c 
b/drivers/bluetooth/hci_h5.c index 981d96cc7695..7be16a7f653b 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -245,6 +245,9 @@ static int h5_close(struct hci_uart *hu) skb_queue_purge(&h5->rel); skb_queue_purge(&h5->unrel); + kfree_skb(h5->rx_skb); + h5->rx_skb = NULL; + if (h5->vnd && h5->vnd->close) h5->vnd->close(h5); @@ -1001,6 +1004,7 @@ static struct h5_vnd rtl_vnd = { #ifdef CONFIG_ACPI static const struct acpi_device_id h5_acpi_match[] = { #ifdef CONFIG_BT_HCIUART_RTL + { "OBDA0623", (kernel_ulong_t)&rtl_vnd }, { "OBDA8723", (kernel_ulong_t)&rtl_vnd }, #endif { }, diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c index 8bfe024d1fcd..eb1e736efeeb 100644 --- a/drivers/bluetooth/hci_ll.c +++ b/drivers/bluetooth/hci_ll.c @@ -626,6 +626,7 @@ static int ll_setup(struct hci_uart *hu) gpiod_set_value_cansleep(lldev->enable_gpio, 0); msleep(5); gpiod_set_value_cansleep(lldev->enable_gpio, 1); + mdelay(100); err = serdev_device_wait_for_cts(serdev, true, 200); if (err) { bt_dev_err(hu->hdev, "Failed to get CTS"); diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 244b8feba523..4a963682c702 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -50,6 +50,8 @@ #define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000 #define CMD_TRANS_TIMEOUT_MS 100 #define MEMDUMP_TIMEOUT_MS 8000 +#define IBS_DISABLE_SSR_TIMEOUT_MS (MEMDUMP_TIMEOUT_MS + 1000) +#define FW_DOWNLOAD_TIMEOUT_MS 3000 /* susclk rate */ #define SUSCLK_RATE_32KHZ 32768 @@ -68,16 +70,18 @@ #define QCA_MEMDUMP_BYTE 0xFB enum qca_flags { - QCA_IBS_ENABLED, + QCA_IBS_DISABLED, QCA_DROP_VENDOR_EVENT, QCA_SUSPENDING, QCA_MEMDUMP_COLLECTION, QCA_HW_ERROR_EVENT, - QCA_SSR_TRIGGERED + QCA_SSR_TRIGGERED, + QCA_BT_OFF }; enum qca_capabilities { QCA_CAP_WIDEBAND_SPEECH = BIT(0), + QCA_CAP_VALID_LE_STATES = BIT(1), }; /* HCI_IBS transmit side sleep protocol states */ @@ -630,7 +634,7 @@ static void qca_debugfs_init(struct hci_dev *hdev) ibs_dir = debugfs_create_dir("ibs", hdev->debugfs); /* read only */ - mode = S_IRUGO; + mode = 0444; debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state); debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state); debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir, @@ -657,7 +661,7 @@ static void qca_debugfs_init(struct hci_dev *hdev) debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms); /* read/write */ - mode = S_IRUGO | S_IWUSR; + mode = 0644; debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans); debugfs_create_u32("tx_idle_delay", mode, ibs_dir, &qca->tx_idle_delay); @@ -869,7 +873,7 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb) * Out-Of-Band(GPIOs control) sleep is selected. * Don't wake the device up when suspending. */ - if (!test_bit(QCA_IBS_ENABLED, &qca->flags) || + if (test_bit(QCA_IBS_DISABLED, &qca->flags) || test_bit(QCA_SUSPENDING, &qca->flags)) { skb_queue_tail(&qca->txq, skb); spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); @@ -1014,7 +1018,7 @@ static void qca_controller_memdump(struct work_struct *work) * the controller to send the dump is 8 seconds. let us * start timer to handle this asynchronous activity. 
*/ - clear_bit(QCA_IBS_ENABLED, &qca->flags); + set_bit(QCA_IBS_DISABLED, &qca->flags); set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); dump = (void *) skb->data; dump_size = __le32_to_cpu(dump->dump_size); @@ -1301,7 +1305,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) /* Give the controller time to process the request */ if (qca_is_wcn399x(qca_soc_type(hu))) - msleep(10); + usleep_range(1000, 10000); else msleep(300); @@ -1349,7 +1353,7 @@ static int qca_send_power_pulse(struct hci_uart *hu, bool on) if (on) msleep(100); else - msleep(10); + usleep_range(1000, 10000); return 0; } @@ -1618,6 +1622,7 @@ static int qca_power_on(struct hci_dev *hdev) struct hci_uart *hu = hci_get_drvdata(hdev); enum qca_btsoc_type soc_type = qca_soc_type(hu); struct qca_serdev *qcadev; + struct qca_data *qca = hu->priv; int ret = 0; /* Non-serdev device usually is powered by external power @@ -1637,6 +1642,7 @@ static int qca_power_on(struct hci_dev *hdev) } } + clear_bit(QCA_BT_OFF, &qca->flags); return ret; } @@ -1649,14 +1655,14 @@ static int qca_setup(struct hci_uart *hu) enum qca_btsoc_type soc_type = qca_soc_type(hu); const char *firmware_name = qca_get_firmware_name(hu); int ret; - int soc_ver = 0; + struct qca_btsoc_version ver; ret = qca_check_speeds(hu); if (ret) return ret; /* Patch downloading has to be done without IBS mode */ - clear_bit(QCA_IBS_ENABLED, &qca->flags); + set_bit(QCA_IBS_DISABLED, &qca->flags); /* Enable controller to do both LE scan and BR/EDR inquiry * simultaneously. @@ -1671,16 +1677,16 @@ static int qca_setup(struct hci_uart *hu) retry: ret = qca_power_on(hdev); if (ret) - return ret; + goto out; clear_bit(QCA_SSR_TRIGGERED, &qca->flags); if (qca_is_wcn399x(soc_type)) { set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); - ret = qca_read_soc_version(hdev, &soc_ver, soc_type); + ret = qca_read_soc_version(hdev, &ver, soc_type); if (ret) - return ret; + goto out; } else { qca_set_speed(hu, QCA_INIT_SPEED); } @@ -1690,24 +1696,23 @@ retry: if (speed) { ret = qca_set_speed(hu, QCA_OPER_SPEED); if (ret) - return ret; + goto out; qca_baudrate = qca_get_baudrate_value(speed); } if (!qca_is_wcn399x(soc_type)) { /* Get QCA version information */ - ret = qca_read_soc_version(hdev, &soc_ver, soc_type); + ret = qca_read_soc_version(hdev, &ver, soc_type); if (ret) - return ret; + goto out; } - bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver); /* Setup patch / NVM configurations */ - ret = qca_uart_setup(hdev, qca_baudrate, soc_type, soc_ver, + ret = qca_uart_setup(hdev, qca_baudrate, soc_type, ver, firmware_name); if (!ret) { - set_bit(QCA_IBS_ENABLED, &qca->flags); + clear_bit(QCA_IBS_DISABLED, &qca->flags); qca_debugfs_init(hdev); hu->hdev->hw_error = qca_hw_error; hu->hdev->cmd_timeout = qca_cmd_timeout; @@ -1720,20 +1725,22 @@ retry: * patch/nvm-config is found, so run with original fw/config. 
*/ ret = 0; - } else { - if (retries < MAX_INIT_RETRIES) { - qca_power_shutdown(hu); - if (hu->serdev) { - serdev_device_close(hu->serdev); - ret = serdev_device_open(hu->serdev); - if (ret) { - bt_dev_err(hdev, "failed to open port"); - return ret; - } + } + +out: + if (ret && retries < MAX_INIT_RETRIES) { + bt_dev_warn(hdev, "Retry BT power ON:%d", retries); + qca_power_shutdown(hu); + if (hu->serdev) { + serdev_device_close(hu->serdev); + ret = serdev_device_open(hu->serdev); + if (ret) { + bt_dev_err(hdev, "failed to open port"); + return ret; } - retries++; - goto retry; } + retries++; + goto retry; } /* Setup bdaddr */ @@ -1780,7 +1787,7 @@ static const struct qca_device_data qca_soc_data_wcn3991 = { { "vddch0", 450000 }, }, .num_vregs = 4, - .capabilities = QCA_CAP_WIDEBAND_SPEECH, + .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES, }; static const struct qca_device_data qca_soc_data_wcn3998 = { @@ -1813,7 +1820,7 @@ static void qca_power_shutdown(struct hci_uart *hu) * data in skb's. */ spin_lock_irqsave(&qca->hci_ibs_lock, flags); - clear_bit(QCA_IBS_ENABLED, &qca->flags); + set_bit(QCA_IBS_DISABLED, &qca->flags); qca_flush(hu); spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); @@ -1830,6 +1837,8 @@ static void qca_power_shutdown(struct hci_uart *hu) } else if (qcadev->bt_en) { gpiod_set_value_cansleep(qcadev->bt_en, 0); } + + set_bit(QCA_BT_OFF, &qca->flags); } static int qca_power_off(struct hci_dev *hdev) @@ -2017,11 +2026,17 @@ static int qca_serdev_probe(struct serdev_device *serdev) hdev->shutdown = qca_power_off; } - /* Wideband speech support must be set per driver since it can't be - * queried via hci. - */ - if (data && (data->capabilities & QCA_CAP_WIDEBAND_SPEECH)) - set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); + if (data) { + /* Wideband speech support must be set per driver since it can't + * be queried via hci. Same with the valid le states quirk. + */ + if (data->capabilities & QCA_CAP_WIDEBAND_SPEECH) + set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, + &hdev->quirks); + + if (data->capabilities & QCA_CAP_VALID_LE_STATES) + set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); + } return 0; } @@ -2081,11 +2096,34 @@ static int __maybe_unused qca_suspend(struct device *dev) bool tx_pending = false; int ret = 0; u8 cmd; + u32 wait_timeout = 0; set_bit(QCA_SUSPENDING, &qca->flags); - /* Device is downloading patch or doesn't support in-band sleep. */ - if (!test_bit(QCA_IBS_ENABLED, &qca->flags)) + if (test_bit(QCA_BT_OFF, &qca->flags)) + return 0; + + if (test_bit(QCA_IBS_DISABLED, &qca->flags)) { + wait_timeout = test_bit(QCA_SSR_TRIGGERED, &qca->flags) ? + IBS_DISABLE_SSR_TIMEOUT_MS : + FW_DOWNLOAD_TIMEOUT_MS; + + /* QCA_IBS_DISABLED flag is set to true, During FW download + * and during memory dump collection. It is reset to false, + * After FW download complete and after memory dump collections. 
+ */ + wait_on_bit_timeout(&qca->flags, QCA_IBS_DISABLED, + TASK_UNINTERRUPTIBLE, msecs_to_jiffies(wait_timeout)); + + if (test_bit(QCA_IBS_DISABLED, &qca->flags)) { + bt_dev_err(hu->hdev, "SSR or FW download time out"); + ret = -ETIMEDOUT; + goto error; + } + } + + /* After memory dump collection, Controller is powered off.*/ + if (test_bit(QCA_BT_OFF, &qca->flags)) return 0; cancel_work_sync(&qca->ws_awake_device); diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig index 3b393cb07295..3061896503f3 100644 --- a/drivers/clk/imx/Kconfig +++ b/drivers/clk/imx/Kconfig @@ -5,8 +5,8 @@ config MXC_CLK depends on ARCH_MXC || COMPILE_TEST config MXC_CLK_SCU - tristate "IMX SCU clock" - depends on ARCH_MXC || COMPILE_TEST + tristate + depends on ARCH_MXC depends on IMX_SCU && HAVE_ARM_SMCCC config CLK_IMX1 diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c index d900f6bf53d0..892e91b92f2c 100644 --- a/drivers/clk/renesas/r9a06g032-clocks.c +++ b/drivers/clk/renesas/r9a06g032-clocks.c @@ -55,7 +55,7 @@ struct r9a06g032_clkdesc { u16 sel, g1, r1, g2, r2; } dual; }; -} __packed; +}; #define I_GATE(_clk, _rst, _rdy, _midle, _scon, _mirack, _mistat) \ { .gate = _clk, .reset = _rst, \ diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index d08ac824c993..fd95edeb702b 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -29,7 +29,7 @@ #define PM_API_FEATURE_CHECK_MAX_ORDER 7 static bool feature_check_enabled; -DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER); +static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER); /** * struct pm_api_feature_data - PM API Feature data diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig index 7cd5a29fc437..5645226ca3ce 100644 --- a/drivers/fpga/Kconfig +++ b/drivers/fpga/Kconfig @@ -142,6 +142,7 @@ config FPGA_DFL tristate "FPGA Device Feature List (DFL) support" select FPGA_BRIDGE select FPGA_REGION + depends on HAS_IOMEM help Device Feature List (DFL) defines a feature list structure that creates a linked list of feature headers within the MMIO space diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c index 5bda38e0780f..2bc173c352ce 100644 --- a/drivers/gpio/gpio-arizona.c +++ b/drivers/gpio/gpio-arizona.c @@ -192,6 +192,7 @@ static int arizona_gpio_probe(struct platform_device *pdev) ret = devm_gpiochip_add_data(&pdev->dev, &arizona_gpio->gpio_chip, arizona_gpio); if (ret < 0) { + pm_runtime_disable(&pdev->dev); dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret); return ret; diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c index 2a9046c0fb16..4275c18a097a 100644 --- a/drivers/gpio/gpio-dwapb.c +++ b/drivers/gpio/gpio-dwapb.c @@ -724,6 +724,8 @@ static int dwapb_gpio_probe(struct platform_device *pdev) return err; } + platform_set_drvdata(pdev, gpio); + return 0; } diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c index ad61daf6c212..865ab2b34fdd 100644 --- a/drivers/gpio/gpio-eic-sprd.c +++ b/drivers/gpio/gpio-eic-sprd.c @@ -598,7 +598,7 @@ static int sprd_eic_probe(struct platform_device *pdev) */ res = platform_get_resource(pdev, IORESOURCE_MEM, i); if (!res) - continue; + break; sprd_eic->base[i] = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(sprd_eic->base[i])) diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 433e2c3f3fd5..2f245594a90a 100644 --- a/drivers/gpio/gpio-mvebu.c +++ 
b/drivers/gpio/gpio-mvebu.c @@ -1197,6 +1197,13 @@ static int mvebu_gpio_probe(struct platform_device *pdev) devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip); + /* Some MVEBU SoCs have simple PWM support for GPIO lines */ + if (IS_ENABLED(CONFIG_PWM)) { + err = mvebu_pwm_probe(pdev, mvchip, id); + if (err) + return err; + } + /* Some gpio controllers do not provide irq support */ if (!have_irqs) return 0; @@ -1206,7 +1213,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev) if (!mvchip->domain) { dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n", mvchip->chip.label); - return -ENODEV; + err = -ENODEV; + goto err_pwm; } err = irq_alloc_domain_generic_chips( @@ -1254,14 +1262,12 @@ static int mvebu_gpio_probe(struct platform_device *pdev) mvchip); } - /* Some MVEBU SoCs have simple PWM support for GPIO lines */ - if (IS_ENABLED(CONFIG_PWM)) - return mvebu_pwm_probe(pdev, mvchip, id); - return 0; err_domain: irq_domain_remove(mvchip->domain); +err_pwm: + pwmchip_remove(&mvchip->mvpwm->chip); return err; } diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index 0b5a17ab996f..3521c1dc3ac0 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c @@ -574,7 +574,7 @@ static int zynq_gpio_irq_reqres(struct irq_data *d) struct gpio_chip *chip = irq_data_get_irq_chip_data(d); int ret; - ret = pm_runtime_get_sync(chip->parent); + ret = pm_runtime_resume_and_get(chip->parent); if (ret < 0) return ret; @@ -942,7 +942,7 @@ static int zynq_gpio_probe(struct platform_device *pdev) pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); - ret = pm_runtime_get_sync(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) goto err_pm_dis; diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 089ddcaa9bc6..6e3c4d7a7d14 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1806,6 +1806,11 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_request); */ void gpiochip_generic_free(struct gpio_chip *gc, unsigned offset) { +#ifdef CONFIG_PINCTRL + if (list_empty(&gc->gpiodev->pin_ranges)) + return; +#endif + pinctrl_gpio_free(gc->gpiodev->base + offset); } EXPORT_SYMBOL_GPL(gpiochip_generic_free); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 957934926b24..1b56dbc1f304 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -459,6 +459,7 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf) struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_bo *bo; struct amdgpu_bo_param bp; + struct drm_gem_object *gobj; int ret; memset(&bp, 0, sizeof(bp)); @@ -469,17 +470,20 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf) bp.type = ttm_bo_type_sg; bp.resv = resv; dma_resv_lock(resv, NULL); - ret = amdgpu_bo_create(adev, &bp, &bo); + ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_CPU, + 0, ttm_bo_type_sg, resv, &gobj); if (ret) goto error; + bo = gem_to_amdgpu_bo(gobj); bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; if (dma_buf->ops != &amdgpu_dmabuf_ops) bo->prime_shared_count = 1; dma_resv_unlock(resv); - return &bo->tbo.base; + return gobj; error: dma_resv_unlock(resv); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 7e8265da9f25..e8c76bd8c501 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -66,26 +66,12 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, bp.type = type; bp.resv = resv; bp.preferred_domain = initial_domain; -retry: bp.flags = flags; bp.domain = initial_domain; r = amdgpu_bo_create(adev, &bp, &bo); - if (r) { - if (r != -ERESTARTSYS) { - if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { - flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - goto retry; - } - - if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { - initial_domain |= AMDGPU_GEM_DOMAIN_GTT; - goto retry; - } - DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n", - size, initial_domain, alignment, r); - } + if (r) return r; - } + *obj = &bo->tbo.base; return 0; @@ -225,7 +211,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, uint64_t size = args->in.bo_size; struct dma_resv *resv = NULL; struct drm_gem_object *gobj; - uint32_t handle; + uint32_t handle, initial_domain; int r; /* reject invalid gem flags */ @@ -269,9 +255,28 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, resv = vm->root.base.bo->tbo.base.resv; } +retry: + initial_domain = (u32)(0xffffffff & args->in.domains); r = amdgpu_gem_object_create(adev, size, args->in.alignment, - (u32)(0xffffffff & args->in.domains), + initial_domain, flags, ttm_bo_type_device, resv, &gobj); + if (r) { + if (r != -ERESTARTSYS) { + if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { + flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + goto retry; + } + + if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { + initial_domain |= AMDGPU_GEM_DOMAIN_GTT; + goto retry; + } + DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n", + size, initial_domain, args->in.alignment, r); + } + return r; + } + if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) { if (!r) { struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 36604d751d62..3e4892b7b7d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -499,6 +499,9 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev) else size = amdgpu_gmc_get_vbios_fb_size(adev); + if (adev->mman.keep_stolen_vga_memory) + size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION); + /* set to 0 if the pre-OS buffer uses up most of vram */ if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) size = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 4e36551ab50b..82cd8e55595a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1172,7 +1172,7 @@ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) con->dir, &con->disable_ras_err_cnt_harvest); } -void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, +static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, struct ras_fs_if *head) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); @@ -1194,7 +1194,6 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) { -#if defined(CONFIG_DEBUG_FS) struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj; struct ras_fs_if fs_info; @@ -1203,7 +1202,7 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) * it won't be called in resume path, no need to check * suspend and gpu reset status */ - if (!con) + if (!IS_ENABLED(CONFIG_DEBUG_FS) 
|| !con) return; amdgpu_ras_debugfs_create_ctrl_node(adev); @@ -1217,10 +1216,9 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) amdgpu_ras_debugfs_create(adev, &fs_info); } } -#endif } -void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, +static void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, struct ras_common_if *head) { struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); @@ -1234,7 +1232,6 @@ void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev) { -#if defined(CONFIG_DEBUG_FS) struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj, *tmp; @@ -1243,7 +1240,6 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev) } con->dir = NULL; -#endif } /* debugfs end */ @@ -1291,7 +1287,8 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev) static int amdgpu_ras_fs_fini(struct amdgpu_device *adev) { - amdgpu_ras_debugfs_remove_all(adev); + if (IS_ENABLED(CONFIG_DEBUG_FS)) + amdgpu_ras_debugfs_remove_all(adev); amdgpu_ras_sysfs_remove_all(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 6b8d7bb83bb3..ec398ed7deb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -607,14 +607,8 @@ int amdgpu_ras_sysfs_create(struct amdgpu_device *adev, int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev, struct ras_common_if *head); -void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, - struct ras_fs_if *head); - void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev); -void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, - struct ras_common_if *head); - int amdgpu_ras_error_query(struct amdgpu_device *adev, struct ras_query_if *info); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 9f3952723c63..2a485052e3ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -186,7 +186,7 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev) if (err) goto out; - err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[0]); + err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[i]); if (err) goto out; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index e074f7ed388c..b5f8f3d731cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -1011,6 +1011,11 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp); + /* Stall DPG before WPTR/RPTR reset */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), + UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, + ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); + /* set the write pointer delay */ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0); @@ -1033,6 +1038,10 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); + /* Unstall DPG */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), + 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); + return 0; } @@ -1556,8 +1565,14 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev, UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + /* Stall DPG before 
WPTR/RPTR reset */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), + UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, + ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); + /* Restore */ ring = &adev->vcn.inst[inst_idx].ring_enc[0]; + ring->wptr = 0; WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr); WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4); @@ -1565,14 +1580,16 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev, WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); ring = &adev->vcn.inst[inst_idx].ring_enc[1]; + ring->wptr = 0; WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr); WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4); WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); - WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, - RREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF); + /* Unstall DPG */ + WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), + 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); @@ -1630,10 +1647,6 @@ static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) - WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2, - lower_32_bits(ring->wptr) | 0x80000000); - if (ring->use_doorbell) { adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 222f1df1a6b6..8cc51cec988a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1736,6 +1736,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep, } mutex_unlock(&p->mutex); + dma_buf_put(dmabuf); args->handle = MAKE_HANDLE(args->gpu_id, idr_handle); @@ -1745,6 +1746,7 @@ err_free: amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL); err_unlock: mutex_unlock(&p->mutex); + dma_buf_put(dmabuf); return r; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 9b6809f309f4..0f7749e9424d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1058,9 +1058,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) goto error; } - /* Update the actual used number of crtc */ - adev->mode_info.num_crtc = adev->dm.display_indexes_num; - /* create fake encoders for MST */ dm_dp_create_fake_mst_encoders(adev); @@ -3251,6 +3248,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) enum dc_connection_type new_connection_type = dc_connection_none; const struct dc_plane_cap *plane; + dm->display_indexes_num = dm->dc->caps.max_streams; + /* Update the actual used number of crtc */ + adev->mode_info.num_crtc = adev->dm.display_indexes_num; + link_cnt = dm->dc->caps.max_links; if (amdgpu_dm_mode_config_init(dm->adev)) { DRM_ERROR("DM: Failed to initialize mode config\n"); @@ -3312,8 +3313,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) goto fail; } - 
dm->display_indexes_num = dm->dc->caps.max_streams; - /* loops over all connectors on the board */ for (i = 0; i < link_cnt; i++) { struct dc_link *link = NULL; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 2f8fee05547a..6b431db146cd 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -163,8 +163,17 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base, new_clocks->dppclk_khz = 100000; } - if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { - if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) + /* + * Temporarily ignore the 0 cases for disp and dpp clks. + * We may have a new feature that requires 0 clks in the future. + */ + if (new_clocks->dppclk_khz == 0 || new_clocks->dispclk_khz == 0) { + new_clocks->dppclk_khz = clk_mgr_base->clks.dppclk_khz; + new_clocks->dispclk_khz = clk_mgr_base->clks.dispclk_khz; + } + + if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) { + if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz) dpp_clock_lowered = true; clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz; update_dppclk = true; @@ -570,7 +579,7 @@ static struct clk_bw_params rn_bw_params = { }; -static struct wm_table ddr4_wm_table = { +static struct wm_table ddr4_wm_table_gs = { .entries = { { .wm_inst = WM_A, @@ -607,7 +616,7 @@ static struct wm_table ddr4_wm_table = { } }; -static struct wm_table lpddr4_wm_table = { +static struct wm_table lpddr4_wm_table_gs = { .entries = { { .wm_inst = WM_A, @@ -681,6 +690,80 @@ static struct wm_table lpddr4_wm_table_with_disabled_ppt = { } }; +static struct wm_table ddr4_wm_table_rn = { + .entries = { + { + .wm_inst = WM_A, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 9.09, + .sr_enter_plus_exit_time_us = 10.14, + .valid = true, + }, + { + .wm_inst = WM_B, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 10.12, + .sr_enter_plus_exit_time_us = 11.48, + .valid = true, + }, + { + .wm_inst = WM_C, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 10.12, + .sr_enter_plus_exit_time_us = 11.48, + .valid = true, + }, + { + .wm_inst = WM_D, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.72, + .sr_exit_time_us = 10.12, + .sr_enter_plus_exit_time_us = 11.48, + .valid = true, + }, + } +}; + +static struct wm_table lpddr4_wm_table_rn = { + .entries = { + { + .wm_inst = WM_A, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.65333, + .sr_exit_time_us = 7.32, + .sr_enter_plus_exit_time_us = 8.38, + .valid = true, + }, + { + .wm_inst = WM_B, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.65333, + .sr_exit_time_us = 9.82, + .sr_enter_plus_exit_time_us = 11.196, + .valid = true, + }, + { + .wm_inst = WM_C, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.65333, + .sr_exit_time_us = 9.89, + .sr_enter_plus_exit_time_us = 11.24, + .valid = true, + }, + { + .wm_inst = WM_D, + .wm_type = WM_TYPE_PSTATE_CHG, + .pstate_latency_us = 11.65333, + .sr_exit_time_us = 9.748, + .sr_enter_plus_exit_time_us = 11.102, + .valid = true, + }, + } +}; + static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage) { int i; @@ -762,6 +845,11 @@ void rn_clk_mgr_construct( struct dc_debug_options *debug = &ctx->dc->debug; struct dpm_clocks
clock_table = { 0 }; enum pp_smu_status status = 0; + int is_green_sardine = 0; + +#if defined(CONFIG_DRM_AMD_DC_DCN) + is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev); +#endif clk_mgr->base.ctx = ctx; clk_mgr->base.funcs = &dcn21_funcs; @@ -802,10 +890,16 @@ void rn_clk_mgr_construct( if (clk_mgr->periodic_retraining_disabled) { rn_bw_params.wm_table = lpddr4_wm_table_with_disabled_ppt; } else { - rn_bw_params.wm_table = lpddr4_wm_table; + if (is_green_sardine) + rn_bw_params.wm_table = lpddr4_wm_table_gs; + else + rn_bw_params.wm_table = lpddr4_wm_table_rn; } } else { - rn_bw_params.wm_table = ddr4_wm_table; + if (is_green_sardine) + rn_bw_params.wm_table = ddr4_wm_table_gs; + else + rn_bw_params.wm_table = ddr4_wm_table_rn; } /* Saved clocks configured at boot for debug purposes */ rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index fec87a2e210c..5b0cedfa824a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -3394,10 +3394,13 @@ uint32_t dc_bandwidth_in_kbps_from_timing( { uint32_t bits_per_channel = 0; uint32_t kbps; + struct fixed31_32 link_bw_kbps; if (timing->flags.DSC) { - kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel); - kbps = kbps / 160 + ((kbps % 160) ? 1 : 0); + link_bw_kbps = dc_fixpt_from_int(timing->pix_clk_100hz); + link_bw_kbps = dc_fixpt_div_int(link_bw_kbps, 160); + link_bw_kbps = dc_fixpt_mul_int(link_bw_kbps, timing->dsc_cfg.bits_per_pixel); + kbps = dc_fixpt_ceil(link_bw_kbps); return kbps; } diff --git a/drivers/gpu/drm/amd/pm/inc/smu10.h b/drivers/gpu/drm/amd/pm/inc/smu10.h index b96520528240..9e837a5014c5 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu10.h +++ b/drivers/gpu/drm/amd/pm/inc/smu10.h @@ -136,14 +136,12 @@ #define FEATURE_CORE_CSTATES_MASK (1 << FEATURE_CORE_CSTATES_BIT) /* Workload bits */ -#define WORKLOAD_DEFAULT_BIT 0 -#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1 -#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2 -#define WORKLOAD_PPLIB_VIDEO_BIT 3 -#define WORKLOAD_PPLIB_VR_BIT 4 -#define WORKLOAD_PPLIB_COMPUTE_BIT 5 -#define WORKLOAD_PPLIB_CUSTOM_BIT 6 -#define WORKLOAD_PPLIB_COUNT 7 +#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0 +#define WORKLOAD_PPLIB_VIDEO_BIT 2 +#define WORKLOAD_PPLIB_VR_BIT 3 +#define WORKLOAD_PPLIB_COMPUTE_BIT 4 +#define WORKLOAD_PPLIB_CUSTOM_BIT 5 +#define WORKLOAD_PPLIB_COUNT 6 typedef struct { /* MP1_EXT_SCRATCH0 */ diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c index 719597c5d27d..6606511891e3 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c @@ -24,6 +24,8 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> +#include <linux/pci.h> + #include <drm/amdgpu_drm.h> #include "processpptables.h" #include <atom-types.h> @@ -984,6 +986,8 @@ static int init_thermal_controller( struct pp_hwmgr *hwmgr, const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table) { + struct amdgpu_device *adev = hwmgr->adev; + hwmgr->thermal_controller.ucType = powerplay_table->sThermalController.ucType; hwmgr->thermal_controller.ucI2cLine = @@ -1008,7 +1012,104 @@ static int init_thermal_controller( ATOM_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType, PHM_PlatformCaps_ThermalController); - 
hwmgr->thermal_controller.use_hw_fan_control = 1; + if (powerplay_table->usTableSize >= sizeof(ATOM_PPLIB_POWERPLAYTABLE3)) { + const ATOM_PPLIB_POWERPLAYTABLE3 *powerplay_table3 = + (const ATOM_PPLIB_POWERPLAYTABLE3 *)powerplay_table; + + if (0 == le16_to_cpu(powerplay_table3->usFanTableOffset)) { + hwmgr->thermal_controller.use_hw_fan_control = 1; + return 0; + } else { + const ATOM_PPLIB_FANTABLE *fan_table = + (const ATOM_PPLIB_FANTABLE *)(((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table3->usFanTableOffset)); + + if (1 <= fan_table->ucFanTableFormat) { + hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst = + fan_table->ucTHyst; + hwmgr->thermal_controller.advanceFanControlParameters.usTMin = + le16_to_cpu(fan_table->usTMin); + hwmgr->thermal_controller.advanceFanControlParameters.usTMed = + le16_to_cpu(fan_table->usTMed); + hwmgr->thermal_controller.advanceFanControlParameters.usTHigh = + le16_to_cpu(fan_table->usTHigh); + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin = + le16_to_cpu(fan_table->usPWMMin); + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed = + le16_to_cpu(fan_table->usPWMMed); + hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh = + le16_to_cpu(fan_table->usPWMHigh); + hwmgr->thermal_controller.advanceFanControlParameters.usTMax = 10900; + hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay = 100000; + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + } + + if (2 <= fan_table->ucFanTableFormat) { + const ATOM_PPLIB_FANTABLE2 *fan_table2 = + (const ATOM_PPLIB_FANTABLE2 *)(((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table3->usFanTableOffset)); + hwmgr->thermal_controller.advanceFanControlParameters.usTMax = + le16_to_cpu(fan_table2->usTMax); + } + + if (3 <= fan_table->ucFanTableFormat) { + const ATOM_PPLIB_FANTABLE3 *fan_table3 = + (const ATOM_PPLIB_FANTABLE3 *) (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table3->usFanTableOffset)); + + hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode = + fan_table3->ucFanControlMode; + + if ((3 == fan_table->ucFanTableFormat) && + (0x67B1 == adev->pdev->device)) + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM = + 47; + else + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM = + le16_to_cpu(fan_table3->usFanPWMMax); + + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity = + 4836; + hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity = + le16_to_cpu(fan_table3->usFanOutputSensitivity); + } + + if (6 <= fan_table->ucFanTableFormat) { + const ATOM_PPLIB_FANTABLE4 *fan_table4 = + (const ATOM_PPLIB_FANTABLE4 *)(((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table3->usFanTableOffset)); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_FanSpeedInTableIsRPM); + + hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM = + le16_to_cpu(fan_table4->usFanRPMMax); + } + + if (7 <= fan_table->ucFanTableFormat) { + const ATOM_PPLIB_FANTABLE5 *fan_table5 = + (const ATOM_PPLIB_FANTABLE5 *)(((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table3->usFanTableOffset)); + + if (0x67A2 == adev->pdev->device || + 0x67A9 == adev->pdev->device || + 0x67B9 == adev->pdev->device) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_GeminiRegulatorFanControlSupport); + 
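/*
 * A sketch of the versioned-table pattern used throughout this
 * function: each ucFanTableFormat revision only appends fields, so the
 * parser gates on the format number before casting to the larger
 * layout, and converts every multi-byte field with le16_to_cpu()
 * because ATOM tables are little-endian on any host. The structs below
 * are illustrative stand-ins, not the real ATOM layouts:
 */
struct fan_tbl_v1 { u8 format; __le16 tmin; };
struct fan_tbl_v2 { struct fan_tbl_v1 base; __le16 tmax; };

static u16 fan_tbl_tmax(const struct fan_tbl_v1 *t)
{
	if (t->format >= 2)	/* v2+ appends a TMax field */
		return le16_to_cpu(((const struct fan_tbl_v2 *)t)->tmax);
	return 10900;		/* pre-v2 default, as hard-coded above */
}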
hwmgr->thermal_controller.advanceFanControlParameters.usFanCurrentLow = + le16_to_cpu(fan_table5->usFanCurrentLow); + hwmgr->thermal_controller.advanceFanControlParameters.usFanCurrentHigh = + le16_to_cpu(fan_table5->usFanCurrentHigh); + hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMLow = + le16_to_cpu(fan_table5->usFanRPMLow); + hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMHigh = + le16_to_cpu(fan_table5->usFanRPMHigh); + } + } + } + } return 0; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c index cf60f3992303..e6f40ee9f313 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c @@ -1297,15 +1297,9 @@ static int conv_power_profile_to_pplib_workload(int power_profile) int pplib_workload = 0; switch (power_profile) { - case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT: - pplib_workload = WORKLOAD_DEFAULT_BIT; - break; case PP_SMC_POWER_PROFILE_FULLSCREEN3D: pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT; break; - case PP_SMC_POWER_PROFILE_POWERSAVING: - pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT; - break; case PP_SMC_POWER_PROFILE_VIDEO: pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT; break; @@ -1315,6 +1309,9 @@ static int conv_power_profile_to_pplib_workload(int power_profile) case PP_SMC_POWER_PROFILE_COMPUTE: pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT; break; + case PP_SMC_POWER_PROFILE_CUSTOM: + pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT; + break; } return pplib_workload; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 895d89bea7fa..cf7c4f0e0a0b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -217,7 +217,7 @@ static struct cmn2asic_mapping sienna_cichlid_workload_map[PP_SMC_POWER_PROFILE_ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), }; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 2380759ddf48..6db96fa1df09 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1164,7 +1164,12 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, if (ret) return ret; - crystal_clock_freq = amdgpu_asic_get_xclk(adev); + /* + * crystal_clock_freq div by 4 is required since the fan control + * module refers to 25MHz + */ + + crystal_clock_freq = amdgpu_asic_get_xclk(adev) / 4; tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); WREG32_SOC15(THM, 0, mmCG_TACH_CTRL, REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL), diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 99e682563d47..aabf09f89cad 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -18021,16 +18021,6 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915) if (!HAS_GMCH(i915)) sanitize_watermarks(i915); - /* - * Force all active planes to recompute their states. 
So that on - * mode_setcrtc after probe, all the intel_plane_state variables - * are already calculated and there is no assert_plane warnings - * during bootup. - */ - ret = intel_initial_commit(dev); - if (ret) - drm_dbg_kms(&i915->drm, "Initial commit in probe failed.\n"); - return 0; } @@ -18039,11 +18029,21 @@ int intel_modeset_init(struct drm_i915_private *i915) { int ret; - intel_overlay_setup(i915); - if (!HAS_DISPLAY(i915)) return 0; + /* + * Force all active planes to recompute their states. So that on + * mode_setcrtc after probe, all the intel_plane_state variables + * are already calculated and there is no assert_plane warnings + * during bootup. + */ + ret = intel_initial_commit(&i915->drm); + if (ret) + drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret); + + intel_overlay_setup(i915); + ret = intel_fbdev_init(&i915->drm); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index bf1e9cf1c0f3..9bc59fd2f95f 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -573,7 +573,7 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, return 0; } /* Also take into account max slice width */ - min_slice_count = min_t(u8, min_slice_count, + min_slice_count = max_t(u8, min_slice_count, DIV_ROUND_UP(mode_hdisplay, max_slice_width)); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 1904e6e5ea64..b07dc1156a0e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -3097,7 +3097,7 @@ static void retire_requests(struct intel_timeline *tl, struct i915_request *end) break; } -static void eb_request_add(struct i915_execbuffer *eb) +static int eb_request_add(struct i915_execbuffer *eb, int err) { struct i915_request *rq = eb->request; struct intel_timeline * const tl = i915_request_timeline(rq); @@ -3118,6 +3118,7 @@ static void eb_request_add(struct i915_execbuffer *eb) /* Serialise with context_close via the add_to_timeline */ i915_request_set_error_once(rq, -ENOENT); __i915_request_skip(rq); + err = -ENOENT; /* override any transient errors */ } __i915_request_queue(rq, &attr); @@ -3127,6 +3128,8 @@ static void eb_request_add(struct i915_execbuffer *eb) retire_requests(tl, prev); mutex_unlock(&tl->mutex); + + return err; } static const i915_user_extension_fn execbuf_extensions[] = { @@ -3332,7 +3335,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, err = eb_submit(&eb, batch); err_request: i915_request_get(eb.request); - eb_request_add(&eb); + err = eb_request_add(&eb, err); if (eb.fences) signal_fence_array(&eb); diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c index cf6e05ea4d8f..a24cc1ff08a0 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -101,18 +101,37 @@ static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) intel_gt_pm_put_async(b->irq_engine->gt); } +static void intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) +{ + spin_lock(&b->irq_lock); + if (b->irq_armed) + __intel_breadcrumbs_disarm_irq(b); + spin_unlock(&b->irq_lock); +} + static void add_signaling_context(struct intel_breadcrumbs *b, struct intel_context *ce) { - intel_context_get(ce); - list_add_tail(&ce->signal_link, &b->signalers); + lockdep_assert_held(&ce->signal_lock); + + spin_lock(&b->signalers_lock); + 
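/* Aside: signalers_lock serialises writers only. signal_irq_work()
 * and print_signals() below walk this list under rcu_read_lock()
 * alone, which is why additions and removals use list_add_rcu() and
 * list_del_rcu(), and why intel_context_free() further down defers
 * the actual kfree via call_rcu(). */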
list_add_rcu(&ce->signal_link, &b->signalers); + spin_unlock(&b->signalers_lock); } -static void remove_signaling_context(struct intel_breadcrumbs *b, +static bool remove_signaling_context(struct intel_breadcrumbs *b, struct intel_context *ce) { - list_del(&ce->signal_link); - intel_context_put(ce); + lockdep_assert_held(&ce->signal_lock); + + if (!list_empty(&ce->signals)) + return false; + + spin_lock(&b->signalers_lock); + list_del_rcu(&ce->signal_link); + spin_unlock(&b->signalers_lock); + + return true; } static inline bool __request_completed(const struct i915_request *rq) @@ -175,6 +194,8 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl) static bool __signal_request(struct i915_request *rq) { + GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)); + if (!__dma_fence_signal(&rq->fence)) { i915_request_put(rq); return false; @@ -195,15 +216,12 @@ static void signal_irq_work(struct irq_work *work) struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work); const ktime_t timestamp = ktime_get(); struct llist_node *signal, *sn; - struct intel_context *ce, *cn; - struct list_head *pos, *next; + struct intel_context *ce; signal = NULL; if (unlikely(!llist_empty(&b->signaled_requests))) signal = llist_del_all(&b->signaled_requests); - spin_lock(&b->irq_lock); - /* * Keep the irq armed until the interrupt after all listeners are gone. * @@ -229,47 +247,44 @@ static void signal_irq_work(struct irq_work *work) * interrupt draw less ire from other users of the system and tools * like powertop. */ - if (!signal && b->irq_armed && list_empty(&b->signalers)) - __intel_breadcrumbs_disarm_irq(b); + if (!signal && READ_ONCE(b->irq_armed) && list_empty(&b->signalers)) + intel_breadcrumbs_disarm_irq(b); - list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) { - GEM_BUG_ON(list_empty(&ce->signals)); + rcu_read_lock(); + list_for_each_entry_rcu(ce, &b->signalers, signal_link) { + struct i915_request *rq; - list_for_each_safe(pos, next, &ce->signals) { - struct i915_request *rq = - list_entry(pos, typeof(*rq), signal_link); + list_for_each_entry_rcu(rq, &ce->signals, signal_link) { + bool release; - GEM_BUG_ON(!check_signal_order(ce, rq)); if (!__request_completed(rq)) break; + if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, + &rq->fence.flags)) + break; + /* * Queue for execution after dropping the signaling * spinlock as the callback chain may end up adding * more signalers to the same context or engine. */ - clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); + spin_lock(&ce->signal_lock); + list_del_rcu(&rq->signal_link); + release = remove_signaling_context(b, ce); + spin_unlock(&ce->signal_lock); + if (__signal_request(rq)) /* We own signal_node now, xfer to local list */ signal = slist_add(&rq->signal_node, signal); - } - /* - * We process the list deletion in bulk, only using a list_add - * (not list_move) above but keeping the status of - * rq->signal_link known with the I915_FENCE_FLAG_SIGNAL bit. 
- */ - if (!list_is_first(pos, &ce->signals)) { - /* Advance the list to the first incomplete request */ - __list_del_many(&ce->signals, pos); - if (&ce->signals == pos) { /* now empty */ + if (release) { add_retire(b, ce->timeline); - remove_signaling_context(b, ce); + intel_context_put(ce); } } } - - spin_unlock(&b->irq_lock); + rcu_read_unlock(); llist_for_each_safe(signal, sn, signal) { struct i915_request *rq = @@ -298,14 +313,15 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine) if (!b) return NULL; - spin_lock_init(&b->irq_lock); + b->irq_engine = irq_engine; + + spin_lock_init(&b->signalers_lock); INIT_LIST_HEAD(&b->signalers); init_llist_head(&b->signaled_requests); + spin_lock_init(&b->irq_lock); init_irq_work(&b->irq_work, signal_irq_work); - b->irq_engine = irq_engine; - return b; } @@ -347,9 +363,9 @@ void intel_breadcrumbs_free(struct intel_breadcrumbs *b) kfree(b); } -static void insert_breadcrumb(struct i915_request *rq, - struct intel_breadcrumbs *b) +static void insert_breadcrumb(struct i915_request *rq) { + struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs; struct intel_context *ce = rq->context; struct list_head *pos; @@ -371,6 +387,7 @@ static void insert_breadcrumb(struct i915_request *rq, } if (list_empty(&ce->signals)) { + intel_context_get(ce); add_signaling_context(b, ce); pos = &ce->signals; } else { @@ -396,8 +413,9 @@ static void insert_breadcrumb(struct i915_request *rq, break; } } - list_add(&rq->signal_link, pos); + list_add_rcu(&rq->signal_link, pos); GEM_BUG_ON(!check_signal_order(ce, rq)); + GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)); set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); /* @@ -410,7 +428,7 @@ static void insert_breadcrumb(struct i915_request *rq, bool i915_request_enable_breadcrumb(struct i915_request *rq) { - struct intel_breadcrumbs *b; + struct intel_context *ce = rq->context; /* Serialises with i915_request_retire() using rq->lock */ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) @@ -425,67 +443,30 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq) if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) return true; - /* - * rq->engine is locked by rq->engine->active.lock. That however - * is not known until after rq->engine has been dereferenced and - * the lock acquired. Hence we acquire the lock and then validate - * that rq->engine still matches the lock we hold for it. - * - * Here, we are using the breadcrumb lock as a proxy for the - * rq->engine->active.lock, and we know that since the breadcrumb - * will be serialised within i915_request_submit/i915_request_unsubmit, - * the engine cannot change while active as long as we hold the - * breadcrumb lock on that engine. - * - * From the dma_fence_enable_signaling() path, we are outside of the - * request submit/unsubmit path, and so we must be more careful to - * acquire the right lock. - */ - b = READ_ONCE(rq->engine)->breadcrumbs; - spin_lock(&b->irq_lock); - while (unlikely(b != READ_ONCE(rq->engine)->breadcrumbs)) { - spin_unlock(&b->irq_lock); - b = READ_ONCE(rq->engine)->breadcrumbs; - spin_lock(&b->irq_lock); - } - - /* - * Now that we are finally serialised with request submit/unsubmit, - * [with b->irq_lock] and with i915_request_retire() [via checking - * SIGNALED with rq->lock] confirm the request is indeed active. If - * it is no longer active, the breadcrumb will be attached upon - * i915_request_submit(). 
- */ + spin_lock(&ce->signal_lock); if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) - insert_breadcrumb(rq, b); - - spin_unlock(&b->irq_lock); + insert_breadcrumb(rq); + spin_unlock(&ce->signal_lock); return true; } void i915_request_cancel_breadcrumb(struct i915_request *rq) { - struct intel_breadcrumbs *b = rq->engine->breadcrumbs; + struct intel_context *ce = rq->context; + bool release; - /* - * We must wait for b->irq_lock so that we know the interrupt handler - * has released its reference to the intel_context and has completed - * the DMA_FENCE_FLAG_SIGNALED_BIT/I915_FENCE_FLAG_SIGNAL dance (if - * required). - */ - spin_lock(&b->irq_lock); - if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) { - struct intel_context *ce = rq->context; + if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) + return; - list_del(&rq->signal_link); - if (list_empty(&ce->signals)) - remove_signaling_context(b, ce); + spin_lock(&ce->signal_lock); + list_del_rcu(&rq->signal_link); + release = remove_signaling_context(rq->engine->breadcrumbs, ce); + spin_unlock(&ce->signal_lock); + if (release) + intel_context_put(ce); - clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); - i915_request_put(rq); - } - spin_unlock(&b->irq_lock); + i915_request_put(rq); } static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p) @@ -495,18 +476,17 @@ static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p) drm_printf(p, "Signals:\n"); - spin_lock_irq(&b->irq_lock); - list_for_each_entry(ce, &b->signalers, signal_link) { - list_for_each_entry(rq, &ce->signals, signal_link) { + rcu_read_lock(); + list_for_each_entry_rcu(ce, &b->signalers, signal_link) { + list_for_each_entry_rcu(rq, &ce->signals, signal_link) drm_printf(p, "\t[%llx:%llx%s] @ %dms\n", rq->fence.context, rq->fence.seqno, i915_request_completed(rq) ? "!" : i915_request_started(rq) ? "*" : "", jiffies_to_msecs(jiffies - rq->emitted_jiffies)); - } } - spin_unlock_irq(&b->irq_lock); + rcu_read_unlock(); } void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine, diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h index 3fa19820b37a..a74bb3062bd8 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h @@ -29,18 +29,16 @@ * the overhead of waking that client is much preferred. 
*/ struct intel_breadcrumbs { - spinlock_t irq_lock; /* protects the lists used in hardirq context */ - /* Not all breadcrumbs are attached to physical HW */ struct intel_engine_cs *irq_engine; + spinlock_t signalers_lock; /* protects the list of signalers */ struct list_head signalers; struct llist_head signaled_requests; + spinlock_t irq_lock; /* protects the interrupt from hardirq context */ struct irq_work irq_work; /* for use from inside irq_lock */ - unsigned int irq_enabled; - bool irq_armed; }; diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 92a3f25c4006..349e7fa1488d 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -25,11 +25,18 @@ static struct intel_context *intel_context_alloc(void) return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL); } -void intel_context_free(struct intel_context *ce) +static void rcu_context_free(struct rcu_head *rcu) { + struct intel_context *ce = container_of(rcu, typeof(*ce), rcu); + kmem_cache_free(global.slab_ce, ce); } +void intel_context_free(struct intel_context *ce) +{ + call_rcu(&ce->rcu, rcu_context_free); +} + struct intel_context * intel_context_create(struct intel_engine_cs *engine) { @@ -356,8 +363,7 @@ static int __intel_context_active(struct i915_active *active) } void -intel_context_init(struct intel_context *ce, - struct intel_engine_cs *engine) +intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine) { GEM_BUG_ON(!engine->cops); GEM_BUG_ON(!engine->gt->vm); @@ -373,7 +379,8 @@ intel_context_init(struct intel_context *ce, ce->vm = i915_vm_get(engine->gt->vm); - INIT_LIST_HEAD(&ce->signal_link); + /* NB ce->signal_link/lock is used under RCU */ + spin_lock_init(&ce->signal_lock); INIT_LIST_HEAD(&ce->signals); mutex_init(&ce->pin_mutex); diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 552cb57a2e8c..52fa9c132746 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -25,6 +25,7 @@ DECLARE_EWMA(runtime, 3, 8); struct i915_gem_context; struct i915_gem_ww_ctx; struct i915_vma; +struct intel_breadcrumbs; struct intel_context; struct intel_ring; @@ -44,7 +45,16 @@ struct intel_context_ops { }; struct intel_context { - struct kref ref; + /* + * Note: Some fields may be accessed under RCU. + * + * Unless otherwise noted a field can safely be assumed to be protected + * by strong reference counting. + */ + union { + struct kref ref; /* no kref_get_unless_zero()! */ + struct rcu_head rcu; + }; struct intel_engine_cs *engine; struct intel_engine_cs *inflight; @@ -54,8 +64,15 @@ struct intel_context { struct i915_address_space *vm; struct i915_gem_context __rcu *gem_context; - struct list_head signal_link; - struct list_head signals; + /* + * @signal_lock protects the list of requests that need signaling, + * @signals. While there are any requests that need signaling, + * we add the context to the breadcrumbs worker, and remove it + * upon completion/cancellation of the last request. 
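 * (Aside: ce->signal_lock is the outer lock in this scheme;
 * b->signalers_lock nests inside it, as add_signaling_context()
 * and remove_signaling_context() above show.)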
+ */ + struct list_head signal_link; /* Accessed under RCU */ + struct list_head signals; /* Guarded by signal_lock */ + spinlock_t signal_lock; /* protects signals, the list of requests */ struct i915_vma *state; struct intel_ring *ring; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 0952bf157234..724b2cb897d3 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2788,6 +2788,9 @@ static void __execlists_hold(struct i915_request *rq) static bool execlists_hold(struct intel_engine_cs *engine, struct i915_request *rq) { + if (i915_request_on_hold(rq)) + return false; + spin_lock_irq(&engine->active.lock); if (i915_request_completed(rq)) { /* too late! */ @@ -3169,8 +3172,10 @@ static void execlists_submission_tasklet(unsigned long data) spin_unlock_irqrestore(&engine->active.lock, flags); /* Recheck after serialising with direct-submission */ - if (unlikely(timeout && preempt_timeout(engine))) + if (unlikely(timeout && preempt_timeout(engine))) { + cancel_timer(&engine->execlists.preempt); execlists_reset(engine, "preemption time out"); + } } } diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 313e51e7d4f7..413dadfac2d1 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -59,8 +59,7 @@ struct drm_i915_mocs_table { #define _L3_CACHEABILITY(value) ((value) << 4) /* Helper defines */ -#define GEN9_NUM_MOCS_ENTRIES 62 /* 62 out of 64 - 63 & 64 are reserved. */ -#define GEN11_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */ +#define GEN9_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */ /* (e)LLC caching options */ /* @@ -131,7 +130,19 @@ static const struct drm_i915_mocs_entry skl_mocs_table[] = { GEN9_MOCS_ENTRIES, MOCS_ENTRY(I915_MOCS_CACHED, LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3), - L3_3_WB) + L3_3_WB), + + /* + * mocs:63 + * - used by the L3 for all of its evictions. + * Thus it is expected to allow LLC cacheability to enable coherent + * flows to be maintained. + * - used to force L3 uncachable cycles. + * Thus it is expected to make the surface L3 uncacheable. 
+ */ + MOCS_ENTRY(63, + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), + L3_1_UC) }; /* NOTE: the LE_TGT_CACHE is not used on Broxton */ @@ -316,11 +327,11 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915, if (INTEL_GEN(i915) >= 12) { table->size = ARRAY_SIZE(tgl_mocs_table); table->table = tgl_mocs_table; - table->n_entries = GEN11_NUM_MOCS_ENTRIES; + table->n_entries = GEN9_NUM_MOCS_ENTRIES; } else if (IS_GEN(i915, 11)) { table->size = ARRAY_SIZE(icl_mocs_table); table->table = icl_mocs_table; - table->n_entries = GEN11_NUM_MOCS_ENTRIES; + table->n_entries = GEN9_NUM_MOCS_ENTRIES; } else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) { table->size = ARRAY_SIZE(skl_mocs_table); table->n_entries = GEN9_NUM_MOCS_ENTRIES; diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index e6a00eea0631..c1c9cc0ad3b9 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -883,6 +883,10 @@ void intel_rps_park(struct intel_rps *rps) adj = -2; rps->last_adj = adj; rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq); + if (rps->cur_freq < rps->efficient_freq) { + rps->cur_freq = rps->efficient_freq; + rps->last_adj = 0; + } GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq); } diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c index f011ea42487e..5982b62f913d 100644 --- a/drivers/gpu/drm/i915/gt/shmem_utils.c +++ b/drivers/gpu/drm/i915/gt/shmem_utils.c @@ -73,7 +73,7 @@ void *shmem_pin_map(struct file *file) mapping_set_unevictable(file->f_mapping); return vaddr; err_page: - while (--i >= 0) + while (i--) put_page(pages[i]); kvfree(pages); return NULL; @@ -103,10 +103,13 @@ static int __shmem_rw(struct file *file, loff_t off, return PTR_ERR(page); vaddr = kmap(page); - if (write) + if (write) { memcpy(vaddr + offset_in_page(off), ptr, this); - else + set_page_dirty(page); + } else { memcpy(ptr, vaddr + offset_in_page(off), this); + } + mark_page_accessed(page); kunmap(page); put_page(page); diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 874af6db6103..620b6fab2c5c 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -177,10 +177,8 @@ struct i915_request { struct intel_ring *ring; struct intel_timeline __rcu *timeline; - union { - struct list_head signal_link; - struct llist_node signal_node; - }; + struct list_head signal_link; + struct llist_node signal_node; /* * The rcu epoch of when this request was allocated. 
Used to judiciously diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index 23a6132c5f4e..412e21604a05 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -211,8 +211,8 @@ static int igt_gem_ww_ctx(void *arg) return PTR_ERR(obj); obj2 = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); + if (IS_ERR(obj2)) { + err = PTR_ERR(obj2); goto put1; } diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c index b721b8b262ce..9e1224d54729 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c @@ -22,6 +22,7 @@ #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane.h> #include <drm/drm_plane_helper.h> #include <drm/drm_vblank.h> @@ -484,17 +485,27 @@ static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane, writel(ctrl, mxsfb->base + LCDC_AS_CTRL); } +static bool mxsfb_format_mod_supported(struct drm_plane *plane, + uint32_t format, + uint64_t modifier) +{ + return modifier == DRM_FORMAT_MOD_LINEAR; +} + static const struct drm_plane_helper_funcs mxsfb_plane_primary_helper_funcs = { + .prepare_fb = drm_gem_fb_prepare_fb, .atomic_check = mxsfb_plane_atomic_check, .atomic_update = mxsfb_plane_primary_atomic_update, }; static const struct drm_plane_helper_funcs mxsfb_plane_overlay_helper_funcs = { + .prepare_fb = drm_gem_fb_prepare_fb, .atomic_check = mxsfb_plane_atomic_check, .atomic_update = mxsfb_plane_overlay_atomic_update, }; static const struct drm_plane_funcs mxsfb_plane_funcs = { + .format_mod_supported = mxsfb_format_mod_supported, .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = drm_plane_cleanup, diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 56b335a55966..7daa12eec01b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1214,8 +1214,8 @@ retry: } reg->bus.offset = handle; - ret = 0; } + ret = 0; break; default: ret = -EINVAL; diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index 033fd30074b0..282e4c837cd9 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -195,8 +195,7 @@ static void sdi_bridge_mode_set(struct drm_bridge *bridge, sdi->pixelclock = adjusted_mode->clock * 1000; } -static void sdi_bridge_enable(struct drm_bridge *bridge, - struct drm_bridge_state *bridge_state) +static void sdi_bridge_enable(struct drm_bridge *bridge) { struct sdi_device *sdi = drm_bridge_to_sdi(bridge); struct dispc_clock_info dispc_cinfo; @@ -259,8 +258,7 @@ err_get_dispc: regulator_disable(sdi->vdds_sdi_reg); } -static void sdi_bridge_disable(struct drm_bridge *bridge, - struct drm_bridge_state *bridge_state) +static void sdi_bridge_disable(struct drm_bridge *bridge) { struct sdi_device *sdi = drm_bridge_to_sdi(bridge); @@ -278,8 +276,8 @@ static const struct drm_bridge_funcs sdi_bridge_funcs = { .mode_valid = sdi_bridge_mode_valid, .mode_fixup = sdi_bridge_mode_fixup, .mode_set = sdi_bridge_mode_set, - .atomic_enable = sdi_bridge_enable, - .atomic_disable = sdi_bridge_disable, + .enable = sdi_bridge_enable, + .disable = sdi_bridge_disable, }; static void sdi_bridge_init(struct sdi_device *sdi) diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c 
b/drivers/gpu/drm/panel/panel-sony-acx565akm.c index e95fdfb16b6c..ba0b3ead150f 100644 --- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c @@ -629,7 +629,7 @@ static int acx565akm_probe(struct spi_device *spi) lcd->spi = spi; mutex_init(&lcd->mutex); - lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW); + lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(lcd->reset_gpio)) { dev_err(&spi->dev, "failed to get reset GPIO\n"); return PTR_ERR(lcd->reset_gpio); diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c index f292c6a6e20f..41edd0a421b2 100644 --- a/drivers/gpu/drm/rockchip/rockchip_lvds.c +++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c @@ -544,7 +544,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master, struct device_node *port, *endpoint; int ret = 0, child_count = 0; const char *name; - u32 endpoint_id; + u32 endpoint_id = 0; lvds->drm_dev = drm_dev; port = of_graph_get_port_by_id(dev->of_node, 1); diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index ba9d1c3e7cac..e4baf07992a4 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -90,7 +90,7 @@ static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp) if (!fpriv) return -ENOMEM; - idr_init(&fpriv->contexts); + idr_init_base(&fpriv->contexts, 1); mutex_init(&fpriv->lock); filp->driver_priv = fpriv; diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index 5a4fd0dbf4cf..47d26b5d9945 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -129,7 +129,6 @@ int tegra_output_probe(struct tegra_output *output) if (!output->ddc) { err = -EPROBE_DEFER; - of_node_put(ddc); return err; } } diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index e88a17c2937f..cc2aa2308a51 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -397,7 +397,6 @@ struct tegra_sor; struct tegra_sor_ops { const char *name; int (*probe)(struct tegra_sor *sor); - int (*remove)(struct tegra_sor *sor); void (*audio_enable)(struct tegra_sor *sor); void (*audio_disable)(struct tegra_sor *sor); }; @@ -2942,6 +2941,24 @@ static const struct drm_encoder_helper_funcs tegra_sor_dp_helpers = { .atomic_check = tegra_sor_encoder_atomic_check, }; +static void tegra_sor_disable_regulator(void *data) +{ + struct regulator *reg = data; + + regulator_disable(reg); +} + +static int tegra_sor_enable_regulator(struct tegra_sor *sor, struct regulator *reg) +{ + int err; + + err = regulator_enable(reg); + if (err) + return err; + + return devm_add_action_or_reset(sor->dev, tegra_sor_disable_regulator, reg); +} + static int tegra_sor_hdmi_probe(struct tegra_sor *sor) { int err; @@ -2953,7 +2970,7 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor) return PTR_ERR(sor->avdd_io_supply); } - err = regulator_enable(sor->avdd_io_supply); + err = tegra_sor_enable_regulator(sor, sor->avdd_io_supply); if (err < 0) { dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n", err); @@ -2967,7 +2984,7 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor) return PTR_ERR(sor->vdd_pll_supply); } - err = regulator_enable(sor->vdd_pll_supply); + err = tegra_sor_enable_regulator(sor, sor->vdd_pll_supply); if (err < 0) { dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n", err); @@ -2981,7 +2998,7 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor) return 
PTR_ERR(sor->hdmi_supply); } - err = regulator_enable(sor->hdmi_supply); + err = tegra_sor_enable_regulator(sor, sor->hdmi_supply); if (err < 0) { dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err); return err; @@ -2992,19 +3009,9 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor) return 0; } -static int tegra_sor_hdmi_remove(struct tegra_sor *sor) -{ - regulator_disable(sor->hdmi_supply); - regulator_disable(sor->vdd_pll_supply); - regulator_disable(sor->avdd_io_supply); - - return 0; -} - static const struct tegra_sor_ops tegra_sor_hdmi_ops = { .name = "HDMI", .probe = tegra_sor_hdmi_probe, - .remove = tegra_sor_hdmi_remove, .audio_enable = tegra_sor_hdmi_audio_enable, .audio_disable = tegra_sor_hdmi_audio_disable, }; @@ -3017,7 +3024,7 @@ static int tegra_sor_dp_probe(struct tegra_sor *sor) if (IS_ERR(sor->avdd_io_supply)) return PTR_ERR(sor->avdd_io_supply); - err = regulator_enable(sor->avdd_io_supply); + err = tegra_sor_enable_regulator(sor, sor->avdd_io_supply); if (err < 0) return err; @@ -3025,25 +3032,16 @@ static int tegra_sor_dp_probe(struct tegra_sor *sor) if (IS_ERR(sor->vdd_pll_supply)) return PTR_ERR(sor->vdd_pll_supply); - err = regulator_enable(sor->vdd_pll_supply); + err = tegra_sor_enable_regulator(sor, sor->vdd_pll_supply); if (err < 0) return err; return 0; } -static int tegra_sor_dp_remove(struct tegra_sor *sor) -{ - regulator_disable(sor->vdd_pll_supply); - regulator_disable(sor->avdd_io_supply); - - return 0; -} - static const struct tegra_sor_ops tegra_sor_dp_ops = { .name = "DP", .probe = tegra_sor_dp_probe, - .remove = tegra_sor_dp_remove, }; static int tegra_sor_init(struct host1x_client *client) @@ -3145,6 +3143,7 @@ static int tegra_sor_init(struct host1x_client *client) if (err < 0) { dev_err(sor->dev, "failed to deassert SOR reset: %d\n", err); + clk_disable_unprepare(sor->clk); return err; } @@ -3152,12 +3151,17 @@ static int tegra_sor_init(struct host1x_client *client) } err = clk_prepare_enable(sor->clk_safe); - if (err < 0) + if (err < 0) { + clk_disable_unprepare(sor->clk); return err; + } err = clk_prepare_enable(sor->clk_dp); - if (err < 0) + if (err < 0) { + clk_disable_unprepare(sor->clk_safe); + clk_disable_unprepare(sor->clk); return err; + } return 0; } @@ -3764,17 +3768,16 @@ static int tegra_sor_probe(struct platform_device *pdev) return err; err = tegra_output_probe(&sor->output); - if (err < 0) { - dev_err(&pdev->dev, "failed to probe output: %d\n", err); - return err; - } + if (err < 0) + return dev_err_probe(&pdev->dev, err, + "failed to probe output\n"); if (sor->ops && sor->ops->probe) { err = sor->ops->probe(sor); if (err < 0) { dev_err(&pdev->dev, "failed to probe %s: %d\n", sor->ops->name, err); - goto output; + goto remove; } } @@ -3955,9 +3958,6 @@ unregister: rpm_disable: pm_runtime_disable(&pdev->dev); remove: - if (sor->ops && sor->ops->remove) - sor->ops->remove(sor); -output: tegra_output_remove(&sor->output); return err; } @@ -3976,12 +3976,6 @@ static int tegra_sor_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); - if (sor->ops && sor->ops->remove) { - err = sor->ops->remove(sor); - if (err < 0) - dev_err(&pdev->dev, "failed to remove SOR: %d\n", err); - } - tegra_output_remove(&sor->output); return 0; diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index a97a9d058198..a49e0ed4a599 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -734,6 +734,7 @@ config I2C_LPC2K config I2C_MLXBF tristate "Mellanox BlueField I2C controller" depends on 
MELLANOX_PLATFORM && ARM64 + select I2C_SLAVE help Enabling this option will add I2C SMBus support for Mellanox BlueField system. diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index c98529c76348..e6f8d6e45a15 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -412,6 +412,19 @@ static void i2c_imx_dma_free(struct imx_i2c_struct *i2c_imx) dma->chan_using = NULL; } +static void i2c_imx_clear_irq(struct imx_i2c_struct *i2c_imx, unsigned int bits) +{ + unsigned int temp; + + /* + * i2sr_clr_opcode is the value to clear all interrupts. Here we want to + * clear only <bits>, so we write ~i2sr_clr_opcode with just <bits> + * toggled. This is required because i.MX needs W0C and Vybrid uses W1C. + */ + temp = ~i2c_imx->hwdata->i2sr_clr_opcode ^ bits; + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR); +} + static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool atomic) { unsigned long orig_jiffies = jiffies; @@ -424,8 +437,7 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool a /* check for arbitration lost */ if (temp & I2SR_IAL) { - temp &= ~I2SR_IAL; - imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR); + i2c_imx_clear_irq(i2c_imx, I2SR_IAL); return -EAGAIN; } @@ -469,7 +481,7 @@ static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx, bool atomic) */ readb_poll_timeout_atomic(addr, regval, regval & I2SR_IIF, 5, 1000 + 100); i2c_imx->i2csr = regval; - imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR); + i2c_imx_clear_irq(i2c_imx, I2SR_IIF | I2SR_IAL); } else { wait_event_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10); } @@ -478,6 +490,16 @@ static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx, bool atomic) dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__); return -ETIMEDOUT; } + + /* check for arbitration lost */ + if (i2c_imx->i2csr & I2SR_IAL) { + dev_dbg(&i2c_imx->adapter.dev, "<%s> Arbitration lost\n", __func__); + i2c_imx_clear_irq(i2c_imx, I2SR_IAL); + + i2c_imx->i2csr = 0; + return -EAGAIN; + } + dev_dbg(&i2c_imx->adapter.dev, "<%s> TRX complete\n", __func__); i2c_imx->i2csr = 0; return 0; @@ -593,6 +615,8 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx, bool atomic) /* Stop I2C transaction */ dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); + if (!(temp & I2CR_MSTA)) + i2c_imx->stopped = 1; temp &= ~(I2CR_MSTA | I2CR_MTX); if (i2c_imx->dma) temp &= ~I2CR_DMAEN; @@ -623,9 +647,7 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id) if (temp & I2SR_IIF) { /* save status register */ i2c_imx->i2csr = temp; - temp &= ~I2SR_IIF; - temp |= (i2c_imx->hwdata->i2sr_clr_opcode & I2SR_IIF); - imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR); + i2c_imx_clear_irq(i2c_imx, I2SR_IIF); wake_up(&i2c_imx->queue); return IRQ_HANDLED; } @@ -758,9 +780,12 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, */ dev_dbg(dev, "<%s> clear MSTA\n", __func__); temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); + if (!(temp & I2CR_MSTA)) + i2c_imx->stopped = 1; temp &= ~(I2CR_MSTA | I2CR_MTX); imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); - i2c_imx_bus_busy(i2c_imx, 0, false); + if (!i2c_imx->stopped) + i2c_imx_bus_busy(i2c_imx, 0, false); } else { /* * For i2c master receiver repeat restart operation like: @@ -885,9 +910,12 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, dev_dbg(&i2c_imx->adapter.dev, "<%s> clear MSTA\n", __func__); temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); 
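/*
 * The i2c_imx_clear_irq() helper added above is worth unpacking:
 * "~i2sr_clr_opcode ^ bits" handles both register flavours with one
 * expression. Idealizing an 8-bit status register: on write-1-to-clear
 * parts (Vybrid) the clear-all opcode is 0xff, so the result is just
 * "bits" -- only the requested flags are written as 1. On
 * write-0-to-clear parts (i.MX) the opcode is 0x00, so the result is
 * "~bits" -- the requested flags are written as 0 while every other
 * bit stays 1 and is left untouched. An illustrative helper:
 */
static inline unsigned int w0c_w1c_clear(unsigned int clr_opcode,
					 unsigned int bits)
{
	return ~clr_opcode ^ bits;	/* 0xff -> bits; 0x00 -> ~bits */
}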
+ if (!(temp & I2CR_MSTA)) + i2c_imx->stopped = 1; temp &= ~(I2CR_MSTA | I2CR_MTX); imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); - i2c_imx_bus_busy(i2c_imx, 0, atomic); + if (!i2c_imx->stopped) + i2c_imx_bus_busy(i2c_imx, 0, atomic); } else { /* * For i2c master receiver repeat restart operation like: diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c index 33574d40ea9c..2fb0532d8a16 100644 --- a/drivers/i2c/busses/i2c-mlxbf.c +++ b/drivers/i2c/busses/i2c-mlxbf.c @@ -1258,9 +1258,9 @@ static int mlxbf_i2c_get_gpio(struct platform_device *pdev, return -EFAULT; gpio_res->io = devm_ioremap(dev, params->start, size); - if (IS_ERR(gpio_res->io)) { + if (!gpio_res->io) { devm_release_mem_region(dev, params->start, size); - return PTR_ERR(gpio_res->io); + return -ENOMEM; } return 0; @@ -1323,9 +1323,9 @@ static int mlxbf_i2c_get_corepll(struct platform_device *pdev, return -EFAULT; corepll_res->io = devm_ioremap(dev, params->start, size); - if (IS_ERR(corepll_res->io)) { + if (!corepll_res->io) { devm_release_mem_region(dev, params->start, size); - return PTR_ERR(corepll_res->io); + return -ENOMEM; } return 0; @@ -1717,9 +1717,9 @@ static int mlxbf_i2c_init_coalesce(struct platform_device *pdev, return -EFAULT; coalesce_res->io = ioremap(params->start, size); - if (IS_ERR(coalesce_res->io)) { + if (!coalesce_res->io) { release_mem_region(params->start, size); - return PTR_ERR(coalesce_res->io); + return -ENOMEM; } priv->coalesce = coalesce_res; diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c index f13735beca58..1c259b5188de 100644 --- a/drivers/i2c/busses/i2c-qcom-cci.c +++ b/drivers/i2c/busses/i2c-qcom-cci.c @@ -194,9 +194,9 @@ static irqreturn_t cci_isr(int irq, void *dev) if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_ERROR)) { if (val & CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR || val & CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR) - cci->master[0].status = -ENXIO; + cci->master[1].status = -ENXIO; else - cci->master[0].status = -EIO; + cci->master[1].status = -EIO; writel(CCI_HALT_REQ_I2C_M1_Q0Q1, cci->base + CCI_HALT_REQ); ret = IRQ_HANDLED; diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index fbc04b60cfd1..5a47915869ae 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -801,7 +801,8 @@ static int qup_i2c_bam_schedule_desc(struct qup_i2c_dev *qup) if (ret || qup->bus_err || qup->qup_err) { reinit_completion(&qup->xfer); - if (qup_i2c_change_state(qup, QUP_RUN_STATE)) { + ret = qup_i2c_change_state(qup, QUP_RUN_STATE); + if (ret) { dev_err(qup->dev, "change to run state timed out"); goto desc_err; } diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 7ee7ffe22ae3..d79335506ecd 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1140,6 +1140,20 @@ static bool __init intel_idle_max_cstate_reached(int cstate) return false; } +static bool __init intel_idle_state_needs_timer_stop(struct cpuidle_state *state) +{ + unsigned long eax = flg2MWAIT(state->flags); + + if (boot_cpu_has(X86_FEATURE_ARAT)) + return false; + + /* + * Switch over to one-shot tick broadcast if the target C-state + * is deeper than C1. 
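 * (In the MWAIT hint, the C-state index sits above the
 * MWAIT_SUBSTATE_SIZE low bits, and C1/C1E encode as 0 there, so a
 * non-zero field always requests a state deeper than C1.)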
+ */ + return !!((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK); +} + #ifdef CONFIG_ACPI_PROCESSOR_CSTATE #include <acpi/processor.h> @@ -1210,20 +1224,6 @@ static bool __init intel_idle_acpi_cst_extract(void) return false; } -static bool __init intel_idle_state_needs_timer_stop(struct cpuidle_state *state) -{ - unsigned long eax = flg2MWAIT(state->flags); - - if (boot_cpu_has(X86_FEATURE_ARAT)) - return false; - - /* - * Switch over to one-shot tick broadcast if the target C-state - * is deeper than C1. - */ - return !!((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK); -} - static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { int cstate, limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count); diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 8017c40dd110..7989b7e1d1c0 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -1269,9 +1269,6 @@ ssize_t rdma_query_gid_table(struct ib_device *device, unsigned long flags; rdma_for_each_port(device, port_num) { - if (!rdma_ib_or_roce(device, port_num)) - continue; - table = rdma_gid_table(device, port_num); read_lock_irqsave(&table->rwlock, flags); for (i = 0; i < table->sz; i++) { diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 012156624b82..5afd142fe8c7 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1522,6 +1522,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, id.local_id); if (IS_ERR(cm_id_priv->timewait_info)) { ret = PTR_ERR(cm_id_priv->timewait_info); + cm_id_priv->timewait_info = NULL; goto out; } @@ -2114,6 +2115,7 @@ static int cm_req_handler(struct cm_work *work) id.local_id); if (IS_ERR(cm_id_priv->timewait_info)) { ret = PTR_ERR(cm_id_priv->timewait_info); + cm_id_priv->timewait_info = NULL; goto destroy; } cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id; diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index 191e0843f090..4e940fc50bba 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -940,8 +940,8 @@ int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, 1); EFA_SET(¶ms.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE, 1); - params.cur_qp_state = qp_attr->cur_qp_state; - params.qp_state = qp_attr->qp_state; + params.cur_qp_state = cur_state; + params.qp_state = new_state; } if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c index 70c8fd67ee2f..084652e2b15a 100644 --- a/drivers/infiniband/hw/mlx5/counters.c +++ b/drivers/infiniband/hw/mlx5/counters.c @@ -138,13 +138,6 @@ static int mlx5_ib_create_counters(struct ib_counters *counters, } -static bool is_mdev_switchdev_mode(const struct mlx5_core_dev *mdev) -{ - return MLX5_ESWITCH_MANAGER(mdev) && - mlx5_ib_eswitch_mode(mdev->priv.eswitch) == - MLX5_ESWITCH_OFFLOADS; -} - static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev, u8 port_num) { diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c index 5c3d052ac30b..9164cc069ad4 100644 --- a/drivers/infiniband/hw/mlx5/ib_rep.c +++ b/drivers/infiniband/hw/mlx5/ib_rep.c @@ -13,7 +13,7 @@ mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) struct mlx5_ib_dev *ibdev; int vport_index; - ibdev = mlx5_ib_get_uplink_ibdev(dev->priv.eswitch); + ibdev = 
mlx5_eswitch_uplink_get_proto_dev(dev->priv.eswitch, REP_IB); vport_index = rep->vport_index; ibdev->port[vport_index].rep = rep; @@ -33,6 +33,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) const struct mlx5_ib_profile *profile; struct mlx5_ib_dev *ibdev; int vport_index; + int ret; if (rep->vport == MLX5_VPORT_UPLINK) profile = &raw_eth_profile; @@ -46,8 +47,8 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) ibdev->port = kcalloc(num_ports, sizeof(*ibdev->port), GFP_KERNEL); if (!ibdev->port) { - ib_dealloc_device(&ibdev->ib_dev); - return -ENOMEM; + ret = -ENOMEM; + goto fail_port; } ibdev->is_rep = true; @@ -58,12 +59,24 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) ibdev->mdev = dev; ibdev->num_ports = num_ports; - if (!__mlx5_ib_add(ibdev, profile)) - return -EINVAL; + ret = __mlx5_ib_add(ibdev, profile); + if (ret) + goto fail_add; rep->rep_data[REP_IB].priv = ibdev; return 0; + +fail_add: + kfree(ibdev->port); +fail_port: + ib_dealloc_device(&ibdev->ib_dev); + return ret; +} + +static void *mlx5_ib_rep_to_dev(struct mlx5_eswitch_rep *rep) +{ + return rep->rep_data[REP_IB].priv; } static void @@ -83,59 +96,18 @@ mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep) __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX); } -static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep) -{ - return mlx5_ib_rep_to_dev(rep); -} - static const struct mlx5_eswitch_rep_ops rep_ops = { .load = mlx5_ib_vport_rep_load, .unload = mlx5_ib_vport_rep_unload, - .get_proto_dev = mlx5_ib_vport_get_proto_dev, + .get_proto_dev = mlx5_ib_rep_to_dev, }; -void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev) -{ - struct mlx5_eswitch *esw = mdev->priv.eswitch; - - mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB); -} - -void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev) -{ - struct mlx5_eswitch *esw = mdev->priv.eswitch; - - mlx5_eswitch_unregister_vport_reps(esw, REP_IB); -} - -u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw) -{ - return mlx5_eswitch_mode(esw); -} - -struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw, - u16 vport_num) -{ - return mlx5_eswitch_get_proto_dev(esw, vport_num, REP_IB); -} - struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw, u16 vport_num) { return mlx5_eswitch_get_proto_dev(esw, vport_num, REP_ETH); } -struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw) -{ - return mlx5_eswitch_uplink_get_proto_dev(esw, REP_IB); -} - -struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, - u16 vport_num) -{ - return mlx5_eswitch_vport_rep(esw, vport_num); -} - struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev, struct mlx5_ib_sq *sq, u16 port) @@ -154,3 +126,49 @@ struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev, return mlx5_eswitch_add_send_to_vport_rule(esw, rep->vport, sq->base.mqp.qpn); } + +static int mlx5r_rep_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev); + struct mlx5_core_dev *mdev = idev->mdev; + struct mlx5_eswitch *esw; + + esw = mdev->priv.eswitch; + mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB); + return 0; +} + +static void mlx5r_rep_remove(struct auxiliary_device *adev) +{ + struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev); + struct mlx5_core_dev *mdev = idev->mdev; + struct mlx5_eswitch 
*esw; + + esw = mdev->priv.eswitch; + mlx5_eswitch_unregister_vport_reps(esw, REP_IB); +} + +static const struct auxiliary_device_id mlx5r_rep_id_table[] = { + { .name = MLX5_ADEV_NAME ".rdma-rep", }, + {}, +}; + +MODULE_DEVICE_TABLE(auxiliary, mlx5r_rep_id_table); + +static struct auxiliary_driver mlx5r_rep_driver = { + .name = "rep", + .probe = mlx5r_rep_probe, + .remove = mlx5r_rep_remove, + .id_table = mlx5r_rep_id_table, +}; + +int mlx5r_rep_init(void) +{ + return auxiliary_driver_register(&mlx5r_rep_driver); +} + +void mlx5r_rep_cleanup(void) +{ + auxiliary_driver_unregister(&mlx5r_rep_driver); +} diff --git a/drivers/infiniband/hw/mlx5/ib_rep.h b/drivers/infiniband/hw/mlx5/ib_rep.h index 5b30d3fa8f8d..ce1dcb105dbd 100644 --- a/drivers/infiniband/hw/mlx5/ib_rep.h +++ b/drivers/infiniband/hw/mlx5/ib_rep.h @@ -12,47 +12,16 @@ extern const struct mlx5_ib_profile raw_eth_profile; #ifdef CONFIG_MLX5_ESWITCH -u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw); -struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw, - u16 vport_num); -struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw); -struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, - u16 vport_num); -void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev); -void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev); +int mlx5r_rep_init(void); +void mlx5r_rep_cleanup(void); struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev, struct mlx5_ib_sq *sq, u16 port); struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw, u16 vport_num); #else /* CONFIG_MLX5_ESWITCH */ -static inline u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw) -{ - return MLX5_ESWITCH_NONE; -} - -static inline -struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw, - u16 vport_num) -{ - return NULL; -} - -static inline -struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw) -{ - return NULL; -} - -static inline -struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, - u16 vport_num) -{ - return NULL; -} - -static inline void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev) {} -static inline void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev) {} +static inline int mlx5r_rep_init(void) { return 0; } +static inline void mlx5r_rep_cleanup(void) {} static inline struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev, struct mlx5_ib_sq *sq, @@ -68,10 +37,4 @@ struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw, return NULL; } #endif - -static inline -struct mlx5_ib_dev *mlx5_ib_rep_to_dev(struct mlx5_eswitch_rep *rep) -{ - return rep->rep_data[REP_IB].priv; -} #endif /* __MLX5_IB_REP_H__ */ diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 246e3cbe0b2c..55545f1286e5 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -4593,8 +4593,8 @@ void __mlx5_ib_remove(struct mlx5_ib_dev *dev, ib_dealloc_device(&dev->ib_dev); } -void *__mlx5_ib_add(struct mlx5_ib_dev *dev, - const struct mlx5_ib_profile *profile) +int __mlx5_ib_add(struct mlx5_ib_dev *dev, + const struct mlx5_ib_profile *profile) { int err; int i; @@ -4610,13 +4610,16 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev, } dev->ib_active = true; - - return dev; + return 0; err_out: - __mlx5_ib_remove(dev, profile, i); - - return NULL; + /* Clean up stages which were initialized */ + while (i) { + i--; + if (profile->stage[i].cleanup) + profile->stage[i].cleanup(dev); 
+ } + return -ENOMEM; } static const struct mlx5_ib_profile pf_profile = { @@ -4739,8 +4742,11 @@ const struct mlx5_ib_profile raw_eth_profile = { NULL), }; -static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev) +static int mlx5r_mp_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) { + struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev); + struct mlx5_core_dev *mdev = idev->mdev; struct mlx5_ib_multiport_info *mpi; struct mlx5_ib_dev *dev; bool bound = false; @@ -4748,15 +4754,14 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev) mpi = kzalloc(sizeof(*mpi), GFP_KERNEL); if (!mpi) - return NULL; + return -ENOMEM; mpi->mdev = mdev; - err = mlx5_query_nic_vport_system_image_guid(mdev, &mpi->sys_image_guid); if (err) { kfree(mpi); - return NULL; + return err; } mutex_lock(&mlx5_ib_multiport_mutex); @@ -4777,40 +4782,46 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev) } mutex_unlock(&mlx5_ib_multiport_mutex); - return mpi; + dev_set_drvdata(&adev->dev, mpi); + return 0; +} + +static void mlx5r_mp_remove(struct auxiliary_device *adev) +{ + struct mlx5_ib_multiport_info *mpi; + + mpi = dev_get_drvdata(&adev->dev); + mutex_lock(&mlx5_ib_multiport_mutex); + if (mpi->ibdev) + mlx5_ib_unbind_slave_port(mpi->ibdev, mpi); + list_del(&mpi->list); + mutex_unlock(&mlx5_ib_multiport_mutex); + kfree(mpi); } -static void *mlx5_ib_add(struct mlx5_core_dev *mdev) +static int mlx5r_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) { + struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev); + struct mlx5_core_dev *mdev = idev->mdev; const struct mlx5_ib_profile *profile; + int port_type_cap, num_ports, ret; enum rdma_link_layer ll; struct mlx5_ib_dev *dev; - int port_type_cap; - int num_ports; - - if (MLX5_ESWITCH_MANAGER(mdev) && - mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) { - if (!mlx5_core_mp_enabled(mdev)) - mlx5_ib_register_vport_reps(mdev); - return mdev; - } port_type_cap = MLX5_CAP_GEN(mdev, port_type); ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); - if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET) - return mlx5_ib_add_slave_port(mdev); - num_ports = max(MLX5_CAP_GEN(mdev, num_ports), MLX5_CAP_GEN(mdev, num_vhca_ports)); dev = ib_alloc_device(mlx5_ib_dev, ib_dev); if (!dev) - return NULL; + return -ENOMEM; dev->port = kcalloc(num_ports, sizeof(*dev->port), GFP_KERNEL); if (!dev->port) { ib_dealloc_device(&dev->ib_dev); - return NULL; + return -ENOMEM; } dev->mdev = mdev; @@ -4821,38 +4832,50 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) else profile = &pf_profile; - return __mlx5_ib_add(dev, profile); + ret = __mlx5_ib_add(dev, profile); + if (ret) { + kfree(dev->port); + ib_dealloc_device(&dev->ib_dev); + return ret; + } + + dev_set_drvdata(&adev->dev, dev); + return 0; } -static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) +static void mlx5r_remove(struct auxiliary_device *adev) { - struct mlx5_ib_multiport_info *mpi; struct mlx5_ib_dev *dev; - if (MLX5_ESWITCH_MANAGER(mdev) && context == mdev) { - mlx5_ib_unregister_vport_reps(mdev); - return; - } - - if (mlx5_core_is_mp_slave(mdev)) { - mpi = context; - mutex_lock(&mlx5_ib_multiport_mutex); - if (mpi->ibdev) - mlx5_ib_unbind_slave_port(mpi->ibdev, mpi); - list_del(&mpi->list); - mutex_unlock(&mlx5_ib_multiport_mutex); - kfree(mpi); - return; - } - - dev = context; + dev = dev_get_drvdata(&adev->dev); __mlx5_ib_remove(dev, dev->profile, 
MLX5_IB_STAGE_MAX); } -static struct mlx5_interface mlx5_ib_interface = { - .add = mlx5_ib_add, - .remove = mlx5_ib_remove, - .protocol = MLX5_INTERFACE_PROTOCOL_IB, +static const struct auxiliary_device_id mlx5r_mp_id_table[] = { + { .name = MLX5_ADEV_NAME ".multiport", }, + {}, +}; + +static const struct auxiliary_device_id mlx5r_id_table[] = { + { .name = MLX5_ADEV_NAME ".rdma", }, + {}, +}; + +MODULE_DEVICE_TABLE(auxiliary, mlx5r_mp_id_table); +MODULE_DEVICE_TABLE(auxiliary, mlx5r_id_table); + +static struct auxiliary_driver mlx5r_mp_driver = { + .name = "multiport", + .probe = mlx5r_mp_probe, + .remove = mlx5r_mp_remove, + .id_table = mlx5r_mp_id_table, +}; + +static struct auxiliary_driver mlx5r_driver = { + .name = "rdma", + .probe = mlx5r_probe, + .remove = mlx5r_remove, + .id_table = mlx5r_id_table, }; unsigned long mlx5_ib_get_xlt_emergency_page(void) @@ -4868,7 +4891,7 @@ void mlx5_ib_put_xlt_emergency_page(void) static int __init mlx5_ib_init(void) { - int err; + int ret; xlt_emergency_page = __get_free_page(GFP_KERNEL); if (!xlt_emergency_page) @@ -4883,15 +4906,33 @@ static int __init mlx5_ib_init(void) } mlx5_ib_odp_init(); + ret = mlx5r_rep_init(); + if (ret) + goto rep_err; + ret = auxiliary_driver_register(&mlx5r_mp_driver); + if (ret) + goto mp_err; + ret = auxiliary_driver_register(&mlx5r_driver); + if (ret) + goto drv_err; + return 0; - err = mlx5_register_interface(&mlx5_ib_interface); - - return err; +drv_err: + auxiliary_driver_unregister(&mlx5r_mp_driver); +mp_err: + mlx5r_rep_cleanup(); +rep_err: + destroy_workqueue(mlx5_ib_event_wq); + free_page((unsigned long)xlt_emergency_page); + return ret; } static void __exit mlx5_ib_cleanup(void) { - mlx5_unregister_interface(&mlx5_ib_interface); + auxiliary_driver_unregister(&mlx5r_driver); + auxiliary_driver_unregister(&mlx5r_mp_driver); + mlx5r_rep_cleanup(); + destroy_workqueue(mlx5_ib_event_wq); mutex_destroy(&xlt_emergency_page_mutex); free_page(xlt_emergency_page); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index b1f2b34e5955..ea5243815cf6 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -1317,8 +1317,8 @@ extern const struct mmu_interval_notifier_ops mlx5_mn_ops; void __mlx5_ib_remove(struct mlx5_ib_dev *dev, const struct mlx5_ib_profile *profile, int stage); -void *__mlx5_ib_add(struct mlx5_ib_dev *dev, - const struct mlx5_ib_profile *profile); +int __mlx5_ib_add(struct mlx5_ib_dev *dev, + const struct mlx5_ib_profile *profile); int mlx5_ib_get_vf_config(struct ib_device *device, int vf, u8 port, struct ifla_vf_info *info); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 019642ff24a7..511c95bb3d01 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1936,6 +1936,15 @@ static int qedr_create_user_qp(struct qedr_dev *dev, } if (rdma_protocol_iwarp(&dev->ibdev, 1)) { + qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset; + + /* calculate the db_rec_db2 data since it is constant so no + * need to reflect from user + */ + qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid); + qp->urq.db_rec_db2_data.data.value = + cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD); + rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr, &qp->urq.db_rec_db2_data, DB_REC_WIDTH_32B, diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index c77cdb3b62b5..8c73377ac82c 100644 --- a/drivers/input/joystick/xpad.c +++ 
b/drivers/input/joystick/xpad.c @@ -241,6 +241,7 @@ static const struct xpad_device { { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, + { 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 }, { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 }, { 0x12ab, 0x0303, "Mortal Kombat Klassic FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, @@ -418,6 +419,7 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */ XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */ XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */ + XPAD_XBOX360_VENDOR(0x1209), /* Ardwiino Controllers */ XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */ diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index cae1a3fae83a..d14a65683c5e 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c @@ -9,6 +9,7 @@ #include <linux/module.h> #include <linux/input.h> #include <linux/init.h> +#include <linux/irq.h> #include <linux/kernel.h> #include <linux/acpi.h> #include <linux/dmi.h> diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index a4c9b9652560..7ecb65176c1a 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -219,6 +219,13 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"), DMI_MATCH(DMI_PRODUCT_NAME, "C15B"), }, }, + { + /* ByteSpeed LLC C15B */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"), + DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"), + }, + }, { } }; diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 944cbb519c6d..abae23af0791 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -1471,7 +1471,8 @@ static int __init i8042_setup_aux(void) if (error) goto err_free_ports; - if (aux_enable()) + error = aux_enable(); + if (error) goto err_free_irq; i8042_aux_irq_registered = true; diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index 98f17fa3a892..b6f75367a284 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -2183,11 +2183,11 @@ static int mxt_initialize(struct mxt_data *data) msleep(MXT_FW_RESET_TIME); } - error = mxt_acquire_irq(data); + error = mxt_check_retrigen(data); if (error) return error; - error = mxt_check_retrigen(data); + error = mxt_acquire_irq(data); if (error) return error; diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index 89647700bab2..494b42a31b7a 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -257,7 +257,7 @@ #define DTE_IRQ_REMAP_INTCTL_MASK (0x3ULL << 60) #define DTE_IRQ_TABLE_LEN_MASK (0xfULL << 1) #define DTE_IRQ_REMAP_INTCTL (2ULL << 60) -#define DTE_IRQ_TABLE_LEN (8ULL << 1) +#define DTE_IRQ_TABLE_LEN (9ULL << 1) #define DTE_IRQ_REMAP_ENABLE 1ULL #define PAGE_MODE_NONE 0x00 diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 9644424591da..4bc453f5bbaa 100644 ---
a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -712,10 +712,6 @@ static bool block_size_is_power_of_two(struct cache *cache) return cache->sectors_per_block_shift >= 0; } -/* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */ -#if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 -__always_inline -#endif static dm_block_t block_div(dm_block_t b, uint32_t n) { do_div(b, n); diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 3fc3757def55..5a7a1b90e671 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -3462,7 +3462,7 @@ static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error, int r; if (a->alg_string) { - *hash = crypto_alloc_shash(a->alg_string, 0, 0); + *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY); if (IS_ERR(*hash)) { *error = error_alg; r = PTR_ERR(*hash); @@ -3519,7 +3519,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error) struct journal_completion comp; comp.ic = ic; - ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0); + ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY); if (IS_ERR(ic->journal_crypt)) { *error = "Invalid journal cipher"; r = PTR_ERR(ic->journal_crypt); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index ce543b761be7..7eeb7c4169c9 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -18,7 +18,6 @@ #include <linux/mutex.h> #include <linux/delay.h> #include <linux/atomic.h> -#include <linux/lcm.h> #include <linux/blk-mq.h> #include <linux/mount.h> #include <linux/dax.h> @@ -1247,12 +1246,6 @@ void dm_table_event_callback(struct dm_table *t, void dm_table_event(struct dm_table *t) { - /* - * You can no longer call dm_table_event() from interrupt - * context, use a bottom half instead. 
- */ - BUG_ON(in_interrupt()); - mutex_lock(&_event_lock); if (t->event_fn) t->event_fn(t->event_context); @@ -1455,10 +1448,6 @@ int dm_calculate_queue_limits(struct dm_table *table, zone_sectors = ti_limits.chunk_sectors; } - /* Stack chunk_sectors if target-specific splitting is required */ - if (ti->max_io_len) - ti_limits.chunk_sectors = lcm_not_zero(ti->max_io_len, - ti_limits.chunk_sectors); /* Set I/O hints portion of queue limits */ if (ti->type->io_hints) ti->type->io_hints(ti, &ti_limits); diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 9ae4ce7df95c..d5223a0e5cc5 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -319,7 +319,7 @@ err1: #else static int persistent_memory_claim(struct dm_writecache *wc) { - BUG(); + return -EOPNOTSUPP; } #endif @@ -2041,7 +2041,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) struct wc_memory_superblock s; static struct dm_arg _args[] = { - {0, 10, "Invalid number of feature args"}, + {0, 16, "Invalid number of feature args"}, }; as.argc = argc; @@ -2479,6 +2479,8 @@ static void writecache_status(struct dm_target *ti, status_type_t type, extra_args += 2; if (wc->autocommit_time_set) extra_args += 2; + if (wc->max_age != MAX_AGE_UNSPECIFIED) + extra_args += 2; if (wc->cleaner) extra_args++; if (wc->writeback_fua_set) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index c18fc2548518..4e0cbfe3f14d 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -476,8 +476,10 @@ static int dm_blk_report_zones(struct gendisk *disk, sector_t sector, return -EAGAIN; map = dm_get_live_table(md, &srcu_idx); - if (!map) - return -EIO; + if (!map) { + ret = -EIO; + goto out; + } do { struct dm_target *tgt; @@ -507,7 +509,6 @@ out: static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, struct block_device **bdev) - __acquires(md->io_barrier) { struct dm_target *tgt; struct dm_table *map; @@ -541,7 +542,6 @@ retry: } static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) - __releases(md->io_barrier) { dm_put_live_table(md, srcu_idx); } @@ -1037,15 +1037,18 @@ static sector_t max_io_len(struct dm_target *ti, sector_t sector) sector_t max_len; /* - * Does the target need to split even further? - * - q->limits.chunk_sectors reflects ti->max_io_len so - * blk_max_size_offset() provides required splitting. - * - blk_max_size_offset() also respects q->limits.max_sectors + * Does the target need to split IO even further? + * - varied (per target) IO splitting is a tenet of DM; this + * explains why stacked chunk_sectors based splitting via + * blk_max_size_offset() isn't possible here. So pass in + * ti->max_io_len to override stacked chunk_sectors. */ - max_len = blk_max_size_offset(ti->table->md->queue, - target_offset); - if (len > max_len) - len = max_len; + if (ti->max_io_len) { + max_len = blk_max_size_offset(ti->table->md->queue, + target_offset, ti->max_io_len); + if (len > max_len) + len = max_len; + } return len; } @@ -1196,11 +1199,9 @@ static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, * ->zero_page_range() is mandatory dax operation. If we are * here, something is wrong. 
*/ - dm_put_live_table(md, srcu_idx); goto out; } ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages); - out: dm_put_live_table(md, srcu_idx); diff --git a/drivers/media/cec/usb/pulse8/pulse8-cec.c b/drivers/media/cec/usb/pulse8/pulse8-cec.c index e4d8446b87da..04b13cdc38d2 100644 --- a/drivers/media/cec/usb/pulse8/pulse8-cec.c +++ b/drivers/media/cec/usb/pulse8/pulse8-cec.c @@ -88,13 +88,15 @@ enum pulse8_msgcodes { MSGCODE_SET_PHYSICAL_ADDRESS, /* 0x20 */ MSGCODE_GET_DEVICE_TYPE, MSGCODE_SET_DEVICE_TYPE, - MSGCODE_GET_HDMI_VERSION, + MSGCODE_GET_HDMI_VERSION, /* Removed in FW >= 10 */ MSGCODE_SET_HDMI_VERSION, MSGCODE_GET_OSD_NAME, MSGCODE_SET_OSD_NAME, MSGCODE_WRITE_EEPROM, MSGCODE_GET_ADAPTER_TYPE, /* 0x28 */ MSGCODE_SET_ACTIVE_SOURCE, + MSGCODE_GET_AUTO_POWER_ON, /* New for FW >= 10 */ + MSGCODE_SET_AUTO_POWER_ON, MSGCODE_FRAME_EOM = 0x80, MSGCODE_FRAME_ACK = 0x40, @@ -143,6 +145,8 @@ static const char * const pulse8_msgnames[] = { "WRITE_EEPROM", "GET_ADAPTER_TYPE", "SET_ACTIVE_SOURCE", + "GET_AUTO_POWER_ON", + "SET_AUTO_POWER_ON", }; static const char *pulse8_msgname(u8 cmd) @@ -579,12 +583,14 @@ static int pulse8_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) if (err) goto unlock; - cmd[0] = MSGCODE_SET_HDMI_VERSION; - cmd[1] = adap->log_addrs.cec_version; - err = pulse8_send_and_wait(pulse8, cmd, 2, - MSGCODE_COMMAND_ACCEPTED, 0); - if (err) - goto unlock; + if (pulse8->vers < 10) { + cmd[0] = MSGCODE_SET_HDMI_VERSION; + cmd[1] = adap->log_addrs.cec_version; + err = pulse8_send_and_wait(pulse8, cmd, 2, + MSGCODE_COMMAND_ACCEPTED, 0); + if (err) + goto unlock; + } if (adap->log_addrs.osd_name[0]) { size_t osd_len = strlen(adap->log_addrs.osd_name); @@ -650,7 +656,6 @@ static void pulse8_disconnect(struct serio *serio) struct pulse8 *pulse8 = serio_get_drvdata(serio); cec_unregister_adapter(pulse8->adap); - pulse8->serio = NULL; serio_set_drvdata(serio, NULL); serio_close(serio); } @@ -692,6 +697,14 @@ static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio, dev_dbg(pulse8->dev, "Autonomous mode: %s", data[0] ? "on" : "off"); + if (pulse8->vers >= 10) { + cmd[0] = MSGCODE_GET_AUTO_POWER_ON; + err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1); + if (!err) + dev_dbg(pulse8->dev, "Auto Power On: %s", + data[0] ? 
"on" : "off"); + } + cmd[0] = MSGCODE_GET_DEVICE_TYPE; err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1); if (err) @@ -753,12 +766,15 @@ static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio, dev_dbg(pulse8->dev, "Physical address: %x.%x.%x.%x\n", cec_phys_addr_exp(*pa)); - cmd[0] = MSGCODE_GET_HDMI_VERSION; - err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1); - if (err) - return err; - log_addrs->cec_version = data[0]; - dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version); + log_addrs->cec_version = CEC_OP_CEC_VERSION_1_4; + if (pulse8->vers < 10) { + cmd[0] = MSGCODE_GET_HDMI_VERSION; + err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1); + if (err) + return err; + log_addrs->cec_version = data[0]; + dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version); + } cmd[0] = MSGCODE_GET_OSD_NAME; err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 0); @@ -830,8 +846,10 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv) pulse8->adap = cec_allocate_adapter(&pulse8_cec_adap_ops, pulse8, dev_name(&serio->dev), caps, 1); err = PTR_ERR_OR_ZERO(pulse8->adap); - if (err < 0) - goto free_device; + if (err < 0) { + kfree(pulse8); + return err; + } pulse8->dev = &serio->dev; serio_set_drvdata(serio, pulse8); @@ -874,8 +892,6 @@ close_serio: serio_close(serio); delete_adap: cec_delete_adapter(pulse8->adap); -free_device: - kfree(pulse8); return err; } diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index 4eab6d81cce1..89e38392509c 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -414,6 +414,17 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, vb->index = q->num_buffers + buffer; vb->type = q->type; vb->memory = memory; + /* + * We need to set these flags here so that the videobuf2 core + * will call ->prepare()/->finish() cache sync/flush on vb2 + * buffers when appropriate. However, we can avoid explicit + * ->prepare() and ->finish() cache sync for DMABUF buffers, + * because DMA exporter takes care of it. 
+ */ + if (q->memory != VB2_MEMORY_DMABUF) { + vb->need_cache_sync_on_prepare = 1; + vb->need_cache_sync_on_finish = 1; + } for (plane = 0; plane < num_planes; ++plane) { vb->planes[plane].length = plane_sizes[plane]; vb->planes[plane].min_length = plane_sizes[plane]; diff --git a/drivers/media/rc/mtk-cir.c b/drivers/media/rc/mtk-cir.c index 5051a5e5244b..65a136c0fac2 100644 --- a/drivers/media/rc/mtk-cir.c +++ b/drivers/media/rc/mtk-cir.c @@ -151,15 +151,12 @@ static inline u32 mtk_chk_period(struct mtk_ir *ir) { u32 val; - /* Period of raw software sampling in ns */ - val = DIV_ROUND_CLOSEST(1000000000ul, - clk_get_rate(ir->bus) / ir->data->div); - /* * Period for software decoder used in the * unit of raw software sampling */ - val = DIV_ROUND_CLOSEST(MTK_IR_SAMPLE, val); + val = DIV_ROUND_CLOSEST(clk_get_rate(ir->bus), + USEC_PER_SEC * ir->data->div / MTK_IR_SAMPLE); dev_dbg(ir->dev, "@pwm clk = \t%lu\n", clk_get_rate(ir->bus) / ir->data->div); @@ -412,7 +409,7 @@ static int mtk_ir_probe(struct platform_device *pdev) mtk_irq_enable(ir, MTK_IRINT_EN); dev_info(dev, "Initialized MT7623 IR driver, sample period = %dus\n", - DIV_ROUND_CLOSEST(MTK_IR_SAMPLE, 1000)); + MTK_IR_SAMPLE); return 0; diff --git a/drivers/media/test-drivers/vidtv/vidtv_channel.c b/drivers/media/test-drivers/vidtv/vidtv_channel.c index 8ad6c0744d36..7838e6272712 100644 --- a/drivers/media/test-drivers/vidtv/vidtv_channel.c +++ b/drivers/media/test-drivers/vidtv/vidtv_channel.c @@ -504,11 +504,11 @@ void vidtv_channel_si_destroy(struct vidtv_mux *m) { u32 i; - vidtv_psi_pat_table_destroy(m->si.pat); - for (i = 0; i < m->si.pat->num_pmt; ++i) vidtv_psi_pmt_table_destroy(m->si.pmt_secs[i]); + vidtv_psi_pat_table_destroy(m->si.pat); + kfree(m->si.pmt_secs); vidtv_psi_sdt_table_destroy(m->si.sdt); vidtv_psi_nit_table_destroy(m->si.nit); diff --git a/drivers/media/test-drivers/vidtv/vidtv_psi.h b/drivers/media/test-drivers/vidtv/vidtv_psi.h index 340c9fb8d583..fdc825e54138 100644 --- a/drivers/media/test-drivers/vidtv/vidtv_psi.h +++ b/drivers/media/test-drivers/vidtv/vidtv_psi.h @@ -420,7 +420,7 @@ void vidtv_psi_desc_assign(struct vidtv_psi_desc **to, struct vidtv_psi_desc *desc); /** - * vidtv_psi_pmt_desc_assign - Assigns a descriptor loop at some point in a PMT section. + * vidtv_pmt_desc_assign - Assigns a descriptor loop at some point in a PMT section. * @pmt: The PMT section that will contain the descriptor loop * @to: Where in the PMT to assign this descriptor loop to * @desc: The descriptor loop that will be assigned. @@ -434,7 +434,7 @@ void vidtv_pmt_desc_assign(struct vidtv_psi_table_pmt *pmt, struct vidtv_psi_desc *desc); /** - * vidtv_psi_sdt_desc_assign - Assigns a descriptor loop at some point in a SDT. + * vidtv_sdt_desc_assign - Assigns a descriptor loop at some point in a SDT. * @sdt: The SDT that will contain the descriptor loop * @to: Where in the PMT to assign this descriptor loop to * @desc: The descriptor loop that will be assigned. @@ -474,7 +474,7 @@ void vidtv_psi_pmt_stream_assign(struct vidtv_psi_table_pmt *pmt, struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc); /** - * vidtv_psi_create_sec_for_each_pat_entry - Create a PMT section for each + * vidtv_psi_pmt_create_sec_for_each_pat_entry - Create a PMT section for each * program found in the PAT * @pat: The PAT to look for programs. 
* @pcr_pid: packet ID for the PCR to be used for the program described in this @@ -743,7 +743,7 @@ struct vidtv_psi_table_eit { struct vidtv_psi_table_eit *vidtv_psi_eit_table_init(u16 network_id, u16 transport_stream_id, - u16 service_id); + __be16 service_id); /** * struct vidtv_psi_eit_write_args - Arguments for writing an EIT section diff --git a/drivers/media/test-drivers/vidtv/vidtv_s302m.c b/drivers/media/test-drivers/vidtv/vidtv_s302m.c index ce7dd6cafc8b..d79b65854627 100644 --- a/drivers/media/test-drivers/vidtv/vidtv_s302m.c +++ b/drivers/media/test-drivers/vidtv/vidtv_s302m.c @@ -467,8 +467,10 @@ struct vidtv_encoder e->is_video_encoder = false; ctx = kzalloc(priv_sz, GFP_KERNEL); - if (!ctx) + if (!ctx) { + kfree(e); return NULL; + } e->ctx = ctx; ctx->last_duration = 0; diff --git a/drivers/media/test-drivers/vidtv/vidtv_ts.h b/drivers/media/test-drivers/vidtv/vidtv_ts.h index 10838a2b8389..f5e8e1f37f05 100644 --- a/drivers/media/test-drivers/vidtv/vidtv_ts.h +++ b/drivers/media/test-drivers/vidtv/vidtv_ts.h @@ -44,7 +44,7 @@ struct vidtv_mpeg_ts { u8 adaptation_field:1; u8 scrambling:2; } __packed; - struct vidtv_mpeg_ts_adaption adaption[]; + struct vidtv_mpeg_ts_adaption *adaption; } __packed; /** diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c index 20572224099a..783bbdcb1e61 100644 --- a/drivers/misc/habanalabs/common/device.c +++ b/drivers/misc/habanalabs/common/device.c @@ -231,16 +231,16 @@ delete_cdev_device: static void device_cdev_sysfs_del(struct hl_device *hdev) { - /* device_release() won't be called so must free devices explicitly */ - if (!hdev->cdev_sysfs_created) { - kfree(hdev->dev_ctrl); - kfree(hdev->dev); - return; - } + if (!hdev->cdev_sysfs_created) + goto put_devices; hl_sysfs_fini(hdev); cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl); cdev_device_del(&hdev->cdev, hdev->dev); + +put_devices: + put_device(hdev->dev); + put_device(hdev->dev_ctrl); } /* @@ -1371,9 +1371,9 @@ sw_fini: early_fini: device_early_fini(hdev); free_dev_ctrl: - kfree(hdev->dev_ctrl); + put_device(hdev->dev_ctrl); free_dev: - kfree(hdev->dev); + put_device(hdev->dev); out_disabled: hdev->disabled = true; if (add_cdev_sysfs_on_err) diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index 84227819e4d1..bfe223abf142 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -1626,6 +1626,7 @@ static int vm_ctx_init_with_ranges(struct hl_ctx *ctx, goto host_hpage_range_err; } } else { + kfree(ctx->host_huge_va_range); ctx->host_huge_va_range = ctx->host_va_range; } diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig index c06581ffa7bd..f5fd5b786607 100644 --- a/drivers/misc/mei/Kconfig +++ b/drivers/misc/mei/Kconfig @@ -46,14 +46,4 @@ config INTEL_MEI_TXE Supported SoCs: Intel Bay Trail -config INTEL_MEI_VIRTIO - tristate "Intel MEI interface emulation with virtio framework" - select INTEL_MEI - depends on X86 && PCI && VIRTIO_PCI - help - This module implements mei hw emulation over virtio transport. - The module will be called mei_virtio. - Enable this if your virtual machine supports virtual mei - device over virtio. 
- source "drivers/misc/mei/hdcp/Kconfig" diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile index 52aefaab5c1b..f1c76f7ee804 100644 --- a/drivers/misc/mei/Makefile +++ b/drivers/misc/mei/Makefile @@ -22,9 +22,6 @@ obj-$(CONFIG_INTEL_MEI_TXE) += mei-txe.o mei-txe-objs := pci-txe.o mei-txe-objs += hw-txe.o -obj-$(CONFIG_INTEL_MEI_VIRTIO) += mei-virtio.o -mei-virtio-objs := hw-virtio.o - mei-$(CONFIG_EVENT_TRACING) += mei-trace.o CFLAGS_mei-trace.o = -I$(src) diff --git a/drivers/misc/mei/hw-virtio.c b/drivers/misc/mei/hw-virtio.c deleted file mode 100644 index 899dc1c5e7ca..000000000000 --- a/drivers/misc/mei/hw-virtio.c +++ /dev/null @@ -1,874 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Intel Management Engine Interface (Intel MEI) Linux driver - * Copyright (c) 2018-2020, Intel Corporation. - */ -#include <linux/err.h> -#include <linux/module.h> -#include <linux/pm_runtime.h> -#include <linux/scatterlist.h> -#include <linux/spinlock.h> -#include <linux/slab.h> -#include <linux/virtio.h> -#include <linux/virtio_config.h> -#include <linux/virtio_ids.h> -#include <linux/atomic.h> - -#include "mei_dev.h" -#include "hbm.h" -#include "client.h" - -#define MEI_VIRTIO_RPM_TIMEOUT 500 -/* ACRN virtio device types */ -#ifndef VIRTIO_ID_MEI -#define VIRTIO_ID_MEI 0xFFFE /* virtio mei */ -#endif - -/** - * struct mei_virtio_cfg - settings passed from the virtio backend - * @buf_depth: read buffer depth in slots (4bytes) - * @hw_ready: hw is ready for operation - * @host_reset: synchronize reset with virtio backend - * @reserved: reserved for alignment - * @fw_status: FW status - */ -struct mei_virtio_cfg { - u32 buf_depth; - u8 hw_ready; - u8 host_reset; - u8 reserved[2]; - u32 fw_status[MEI_FW_STATUS_MAX]; -} __packed; - -struct mei_virtio_hw { - struct mei_device mdev; - char name[32]; - - struct virtqueue *in; - struct virtqueue *out; - - bool host_ready; - struct work_struct intr_handler; - - u32 *recv_buf; - u8 recv_rdy; - size_t recv_sz; - u32 recv_idx; - u32 recv_len; - - /* send buffer */ - atomic_t hbuf_ready; - const void *send_hdr; - const void *send_buf; - - struct mei_virtio_cfg cfg; -}; - -#define to_virtio_hw(_dev) container_of(_dev, struct mei_virtio_hw, mdev) - -/** - * mei_virtio_fw_status() - read status register of mei - * @dev: mei device - * @fw_status: fw status register values - * - * Return: always 0 - */ -static int mei_virtio_fw_status(struct mei_device *dev, - struct mei_fw_status *fw_status) -{ - struct virtio_device *vdev = dev_to_virtio(dev->dev); - - fw_status->count = MEI_FW_STATUS_MAX; - virtio_cread_bytes(vdev, offsetof(struct mei_virtio_cfg, fw_status), - fw_status->status, sizeof(fw_status->status)); - return 0; -} - -/** - * mei_virtio_pg_state() - translate internal pg state - * to the mei power gating state - * There is no power management in ACRN mode always return OFF - * @dev: mei device - * - * Return: - * * MEI_PG_OFF - if aliveness is on (always) - * * MEI_PG_ON - (never) - */ -static inline enum mei_pg_state mei_virtio_pg_state(struct mei_device *dev) -{ - return MEI_PG_OFF; -} - -/** - * mei_virtio_hw_config() - configure hw dependent settings - * - * @dev: mei device - * - * Return: always 0 - */ -static int mei_virtio_hw_config(struct mei_device *dev) -{ - return 0; -} - -/** - * mei_virtio_hbuf_empty_slots() - counts write empty slots. 
- * @dev: the device structure - * - * Return: always return frontend buf size if buffer is ready, 0 otherwise - */ -static int mei_virtio_hbuf_empty_slots(struct mei_device *dev) -{ - struct mei_virtio_hw *hw = to_virtio_hw(dev); - - return (atomic_read(&hw->hbuf_ready) == 1) ? hw->cfg.buf_depth : 0; -} - -/** - * mei_virtio_hbuf_is_ready() - checks if write buffer is ready - * @dev: the device structure - * - * Return: true if hbuf is ready - */ -static bool mei_virtio_hbuf_is_ready(struct mei_device *dev) -{ - struct mei_virtio_hw *hw = to_virtio_hw(dev); - - return atomic_read(&hw->hbuf_ready) == 1; -} - -/** - * mei_virtio_hbuf_max_depth() - returns depth of FE write buffer. - * @dev: the device structure - * - * Return: size of frontend write buffer in bytes - */ -static u32 mei_virtio_hbuf_depth(const struct mei_device *dev) -{ - struct mei_virtio_hw *hw = to_virtio_hw(dev); - - return hw->cfg.buf_depth; -} - -/** - * mei_virtio_intr_clear() - clear and stop interrupts - * @dev: the device structure - */ -static void mei_virtio_intr_clear(struct mei_device *dev) -{ - /* - * In our virtio solution, there are two types of interrupts, - * vq interrupt and config change interrupt. - * 1) start/reset rely on virtio config changed interrupt; - * 2) send/recv rely on virtio virtqueue interrupts. - * They are all virtual interrupts. So, we don't have corresponding - * operation to do here. - */ -} - -/** - * mei_virtio_intr_enable() - enables mei BE virtqueues callbacks - * @dev: the device structure - */ -static void mei_virtio_intr_enable(struct mei_device *dev) -{ - struct mei_virtio_hw *hw = to_virtio_hw(dev); - struct virtio_device *vdev = dev_to_virtio(dev->dev); - - virtio_config_enable(vdev); - - virtqueue_enable_cb(hw->in); - virtqueue_enable_cb(hw->out); -} - -/** - * mei_virtio_intr_disable() - disables mei BE virtqueues callbacks - * - * @dev: the device structure - */ -static void mei_virtio_intr_disable(struct mei_device *dev) -{ - struct mei_virtio_hw *hw = to_virtio_hw(dev); - struct virtio_device *vdev = dev_to_virtio(dev->dev); - - virtio_config_disable(vdev); - - virtqueue_disable_cb(hw->in); - virtqueue_disable_cb(hw->out); -} - -/** - * mei_virtio_synchronize_irq() - wait for pending IRQ handlers for all - * virtqueue - * @dev: the device structure - */ -static void mei_virtio_synchronize_irq(struct mei_device *dev) -{ - struct mei_virtio_hw *hw = to_virtio_hw(dev); - - /* - * Now, all IRQ handlers are converted to workqueue. - * Change synchronize irq to flush this work. - */ - flush_work(&hw->intr_handler); -} - -static void mei_virtio_free_outbufs(struct mei_virtio_hw *hw) -{ - kfree(hw->send_hdr); - kfree(hw->send_buf); - hw->send_hdr = NULL; - hw->send_buf = NULL; -} - -/** - * mei_virtio_write_message() - writes a message to mei virtio back-end service. 
- * @dev: the device structure - * @hdr: mei header of message - * @hdr_len: header length - * @data: message payload will be written - * @data_len: message payload length - * - * Return: - * * 0: on success - * * -EIO: if write has failed - * * -ENOMEM: on memory allocation failure - */ -static int mei_virtio_write_message(struct mei_device *dev, - const void *hdr, size_t hdr_len, - const void *data, size_t data_len) -{ - struct mei_virtio_hw *hw = to_virtio_hw(dev); - struct scatterlist sg[2]; - const void *hbuf, *dbuf; - int ret; - - if (WARN_ON(!atomic_add_unless(&hw->hbuf_ready, -1, 0))) - return -EIO; - - hbuf = kmemdup(hdr, hdr_len, GFP_KERNEL); - hw->send_hdr = hbuf; - - dbuf = kmemdup(data, data_len, GFP_KERNEL); - hw->send_buf = dbuf; - - if (!hbuf || !dbuf) { - ret = -ENOMEM; - goto fail; - } - - sg_init_table(sg, 2); - sg_set_buf(&sg[0], hbuf, hdr_len); - sg_set_buf(&sg[1], dbuf, data_len); - - ret = virtqueue_add_outbuf(hw->out, sg, 2, hw, GFP_KERNEL); - if (ret) { - dev_err(dev->dev, "failed to add outbuf\n"); - goto fail; - } - - virtqueue_kick(hw->out); - return 0; -fail: - - mei_virtio_free_outbufs(hw); - - return ret; -} - -/** - * mei_virtio_count_full_read_slots() - counts read full slots. - * @dev: the device structure - * - * Return: -EOVERFLOW if overflow, otherwise filled slots count - */ -static int mei_virtio_count_full_read_slots(struct mei_device *dev) -{ - struct mei_virtio_hw *hw = to_virtio_hw(dev); - - if (hw->recv_idx > hw->recv_len) - return -EOVERFLOW; - - return hw->recv_len - hw->recv_idx; -} - -/** - * mei_virtio_read_hdr() - Reads 32bit dword from mei virtio receive buffer - * - * @dev: the device structure - * - * Return: 32bit dword of receive buffer (u32) - */ -static inline u32 mei_virtio_read_hdr(const struct mei_device *dev) -{ - struct mei_virtio_hw *hw = to_virtio_hw(dev); - - WARN_ON(hw->cfg.buf_depth < hw->recv_idx + 1); - - return hw->recv_buf[hw->recv_idx++]; -} - -static int mei_virtio_read(struct mei_device *dev, unsigned char *buffer, - unsigned long len) -{ - struct mei_virtio_hw *hw = to_virtio_hw(dev); - u32 slots = mei_data2slots(len); - - if (WARN_ON(hw->cfg.buf_depth < hw->recv_idx + slots)) - return -EOVERFLOW; - - /* - * Assumption: There is only one MEI message in recv_buf each time. - * Backend service need follow this rule too. - */ - memcpy(buffer, hw->recv_buf + hw->recv_idx, len); - hw->recv_idx += slots; - - return 0; -} - -static bool mei_virtio_pg_is_enabled(struct mei_device *dev) -{ - return false; -} - -static bool mei_virtio_pg_in_transition(struct mei_device *dev) -{ - return false; -} - -static void mei_virtio_add_recv_buf(struct mei_virtio_hw *hw) -{ - struct scatterlist sg; - - if (hw->recv_rdy) /* not needed */ - return; - - /* refill the recv_buf to IN virtqueue to get next message */ - sg_init_one(&sg, hw->recv_buf, mei_slots2data(hw->cfg.buf_depth)); - hw->recv_len = 0; - hw->recv_idx = 0; - hw->recv_rdy = 1; - virtqueue_add_inbuf(hw->in, &sg, 1, hw->recv_buf, GFP_KERNEL); - virtqueue_kick(hw->in); -} - -/** - * mei_virtio_hw_is_ready() - check whether the BE(hw) has turned ready - * @dev: mei device - * Return: bool - */ -static bool mei_virtio_hw_is_ready(struct mei_device *dev) -{ - struct mei_virtio_hw *hw = to_virtio_hw(dev); - struct virtio_device *vdev = dev_to_virtio(dev->dev); - - virtio_cread(vdev, struct mei_virtio_cfg, - hw_ready, &hw->cfg.hw_ready); - - dev_dbg(dev->dev, "hw ready %d\n", hw->cfg.hw_ready); - - return hw->cfg.hw_ready; -} - -/** - * mei_virtio_hw_reset - resets virtio hw. 
- * - * @dev: the device structure - * @intr_enable: virtio use data/config callbacks - * - * Return: 0 on success an error code otherwise - */ -static int mei_virtio_hw_reset(struct mei_device *dev, bool intr_enable) -{ - struct mei_virtio_hw *hw = to_virtio_hw(dev); - struct virtio_device *vdev = dev_to_virtio(dev->dev); - - dev_dbg(dev->dev, "hw reset\n"); - - dev->recvd_hw_ready = false; - hw->host_ready = false; - atomic_set(&hw->hbuf_ready, 0); - hw->recv_len = 0; - hw->recv_idx = 0; - - hw->cfg.host_reset = 1; - virtio_cwrite(vdev, struct mei_virtio_cfg, - host_reset, &hw->cfg.host_reset); - - mei_virtio_hw_is_ready(dev); - - if (intr_enable) - mei_virtio_intr_enable(dev); - - return 0; -} - -/** - * mei_virtio_hw_reset_release() - release device from the reset - * @dev: the device structure - */ -static void mei_virtio_hw_reset_release(struct mei_device *dev) -{ - struct mei_virtio_hw *hw = to_virtio_hw(dev); - struct virtio_device *vdev = dev_to_virtio(dev->dev); - - dev_dbg(dev->dev, "hw reset release\n"); - hw->cfg.host_reset = 0; - virtio_cwrite(vdev, struct mei_virtio_cfg, - host_reset, &hw->cfg.host_reset); -} - -/** - * mei_virtio_hw_ready_wait() - wait until the virtio(hw) has turned ready - * or timeout is reached - * @dev: mei device - * - * Return: 0 on success, error otherwise - */ -static int mei_virtio_hw_ready_wait(struct mei_device *dev) -{ - mutex_unlock(&dev->device_lock); - wait_event_timeout(dev->wait_hw_ready, - dev->recvd_hw_ready, - mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT)); - mutex_lock(&dev->device_lock); - if (!dev->recvd_hw_ready) { - dev_err(dev->dev, "wait hw ready failed\n"); - return -ETIMEDOUT; - } - - dev->recvd_hw_ready = false; - return 0; -} - -/** - * mei_virtio_hw_start() - hw start routine - * @dev: mei device - * - * Return: 0 on success, error otherwise - */ -static int mei_virtio_hw_start(struct mei_device *dev) -{ - struct mei_virtio_hw *hw = to_virtio_hw(dev); - int ret; - - dev_dbg(dev->dev, "hw start\n"); - mei_virtio_hw_reset_release(dev); - - ret = mei_virtio_hw_ready_wait(dev); - if (ret) - return ret; - - mei_virtio_add_recv_buf(hw); - atomic_set(&hw->hbuf_ready, 1); - dev_dbg(dev->dev, "hw is ready\n"); - hw->host_ready = true; - - return 0; -} - -/** - * mei_virtio_host_is_ready() - check whether the FE has turned ready - * @dev: mei device - * - * Return: bool - */ -static bool mei_virtio_host_is_ready(struct mei_device *dev) -{ - struct mei_virtio_hw *hw = to_virtio_hw(dev); - - dev_dbg(dev->dev, "host ready %d\n", hw->host_ready); - - return hw->host_ready; -} - -/** - * mei_virtio_data_in() - The callback of recv virtqueue of virtio mei - * @vq: receiving virtqueue - */ -static void mei_virtio_data_in(struct virtqueue *vq) -{ - struct mei_virtio_hw *hw = vq->vdev->priv; - - /* disable interrupts (enabled again from in the interrupt worker) */ - virtqueue_disable_cb(hw->in); - - schedule_work(&hw->intr_handler); -} - -/** - * mei_virtio_data_out() - The callback of send virtqueue of virtio mei - * @vq: transmitting virtqueue - */ -static void mei_virtio_data_out(struct virtqueue *vq) -{ - struct mei_virtio_hw *hw = vq->vdev->priv; - - schedule_work(&hw->intr_handler); -} - -static void mei_virtio_intr_handler(struct work_struct *work) -{ - struct mei_virtio_hw *hw = - container_of(work, struct mei_virtio_hw, intr_handler); - struct mei_device *dev = &hw->mdev; - LIST_HEAD(complete_list); - s32 slots; - int rets = 0; - void *data; - unsigned int len; - - mutex_lock(&dev->device_lock); - - if (dev->dev_state == MEI_DEV_DISABLED) 
{ - dev_warn(dev->dev, "Interrupt in disabled state.\n"); - mei_virtio_intr_disable(dev); - goto end; - } - - /* check if ME wants a reset */ - if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { - dev_warn(dev->dev, "BE service not ready: resetting.\n"); - schedule_work(&dev->reset_work); - goto end; - } - - /* check if we need to start the dev */ - if (!mei_host_is_ready(dev)) { - if (mei_hw_is_ready(dev)) { - dev_dbg(dev->dev, "we need to start the dev.\n"); - dev->recvd_hw_ready = true; - wake_up(&dev->wait_hw_ready); - } else { - dev_warn(dev->dev, "Spurious Interrupt\n"); - } - goto end; - } - - /* read */ - if (hw->recv_rdy) { - data = virtqueue_get_buf(hw->in, &len); - if (!data || !len) { - dev_dbg(dev->dev, "No data %d", len); - } else { - dev_dbg(dev->dev, "data_in %d\n", len); - WARN_ON(data != hw->recv_buf); - hw->recv_len = mei_data2slots(len); - hw->recv_rdy = 0; - } - } - - /* write */ - if (!atomic_read(&hw->hbuf_ready)) { - if (!virtqueue_get_buf(hw->out, &len)) { - dev_warn(dev->dev, "Failed to getbuf\n"); - } else { - mei_virtio_free_outbufs(hw); - atomic_inc(&hw->hbuf_ready); - } - } - - /* check slots available for reading */ - slots = mei_count_full_read_slots(dev); - while (slots > 0) { - dev_dbg(dev->dev, "slots to read = %08x\n", slots); - rets = mei_irq_read_handler(dev, &complete_list, &slots); - - if (rets && - (dev->dev_state != MEI_DEV_RESETTING && - dev->dev_state != MEI_DEV_POWER_DOWN)) { - dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n", - rets); - schedule_work(&dev->reset_work); - goto end; - } - } - - dev->hbuf_is_ready = mei_hbuf_is_ready(dev); - - mei_irq_write_handler(dev, &complete_list); - - dev->hbuf_is_ready = mei_hbuf_is_ready(dev); - - mei_irq_compl_handler(dev, &complete_list); - - mei_virtio_add_recv_buf(hw); - -end: - if (dev->dev_state != MEI_DEV_DISABLED) { - if (!virtqueue_enable_cb(hw->in)) - schedule_work(&hw->intr_handler); - } - - mutex_unlock(&dev->device_lock); -} - -static void mei_virtio_config_changed(struct virtio_device *vdev) -{ - struct mei_virtio_hw *hw = vdev->priv; - struct mei_device *dev = &hw->mdev; - - virtio_cread(vdev, struct mei_virtio_cfg, - hw_ready, &hw->cfg.hw_ready); - - if (dev->dev_state == MEI_DEV_DISABLED) { - dev_dbg(dev->dev, "disabled state don't start\n"); - return; - } - - /* Run intr handler once to handle reset notify */ - schedule_work(&hw->intr_handler); -} - -static void mei_virtio_remove_vqs(struct virtio_device *vdev) -{ - struct mei_virtio_hw *hw = vdev->priv; - - virtqueue_detach_unused_buf(hw->in); - hw->recv_len = 0; - hw->recv_idx = 0; - hw->recv_rdy = 0; - - virtqueue_detach_unused_buf(hw->out); - - mei_virtio_free_outbufs(hw); - - vdev->config->del_vqs(vdev); -} - -/* - * There are two virtqueues, one is for send and another is for recv. 
- */ -static int mei_virtio_init_vqs(struct mei_virtio_hw *hw, - struct virtio_device *vdev) -{ - struct virtqueue *vqs[2]; - - vq_callback_t *cbs[] = { - mei_virtio_data_in, - mei_virtio_data_out, - }; - static const char * const names[] = { - "in", - "out", - }; - int ret; - - ret = virtio_find_vqs(vdev, 2, vqs, cbs, names, NULL); - if (ret) - return ret; - - hw->in = vqs[0]; - hw->out = vqs[1]; - - return 0; -} - -static const struct mei_hw_ops mei_virtio_ops = { - .fw_status = mei_virtio_fw_status, - .pg_state = mei_virtio_pg_state, - - .host_is_ready = mei_virtio_host_is_ready, - - .hw_is_ready = mei_virtio_hw_is_ready, - .hw_reset = mei_virtio_hw_reset, - .hw_config = mei_virtio_hw_config, - .hw_start = mei_virtio_hw_start, - - .pg_in_transition = mei_virtio_pg_in_transition, - .pg_is_enabled = mei_virtio_pg_is_enabled, - - .intr_clear = mei_virtio_intr_clear, - .intr_enable = mei_virtio_intr_enable, - .intr_disable = mei_virtio_intr_disable, - .synchronize_irq = mei_virtio_synchronize_irq, - - .hbuf_free_slots = mei_virtio_hbuf_empty_slots, - .hbuf_is_ready = mei_virtio_hbuf_is_ready, - .hbuf_depth = mei_virtio_hbuf_depth, - - .write = mei_virtio_write_message, - - .rdbuf_full_slots = mei_virtio_count_full_read_slots, - .read_hdr = mei_virtio_read_hdr, - .read = mei_virtio_read, -}; - -static int mei_virtio_probe(struct virtio_device *vdev) -{ - struct mei_virtio_hw *hw; - int ret; - - hw = devm_kzalloc(&vdev->dev, sizeof(*hw), GFP_KERNEL); - if (!hw) - return -ENOMEM; - - vdev->priv = hw; - - INIT_WORK(&hw->intr_handler, mei_virtio_intr_handler); - - ret = mei_virtio_init_vqs(hw, vdev); - if (ret) - goto vqs_failed; - - virtio_cread(vdev, struct mei_virtio_cfg, - buf_depth, &hw->cfg.buf_depth); - - hw->recv_buf = kzalloc(mei_slots2data(hw->cfg.buf_depth), GFP_KERNEL); - if (!hw->recv_buf) { - ret = -ENOMEM; - goto hbuf_failed; - } - atomic_set(&hw->hbuf_ready, 0); - - virtio_device_ready(vdev); - - mei_device_init(&hw->mdev, &vdev->dev, &mei_virtio_ops); - - pm_runtime_get_noresume(&vdev->dev); - pm_runtime_set_active(&vdev->dev); - pm_runtime_enable(&vdev->dev); - - ret = mei_start(&hw->mdev); - if (ret) - goto mei_start_failed; - - pm_runtime_set_autosuspend_delay(&vdev->dev, MEI_VIRTIO_RPM_TIMEOUT); - pm_runtime_use_autosuspend(&vdev->dev); - - ret = mei_register(&hw->mdev, &vdev->dev); - if (ret) - goto mei_failed; - - pm_runtime_put(&vdev->dev); - - return 0; - -mei_failed: - mei_stop(&hw->mdev); -mei_start_failed: - mei_cancel_work(&hw->mdev); - mei_disable_interrupts(&hw->mdev); - kfree(hw->recv_buf); -hbuf_failed: - vdev->config->del_vqs(vdev); -vqs_failed: - return ret; -} - -static int __maybe_unused mei_virtio_pm_runtime_idle(struct device *device) -{ - struct virtio_device *vdev = dev_to_virtio(device); - struct mei_virtio_hw *hw = vdev->priv; - - dev_dbg(&vdev->dev, "rpm: mei_virtio : runtime_idle\n"); - - if (!hw) - return -ENODEV; - - if (mei_write_is_idle(&hw->mdev)) - pm_runtime_autosuspend(device); - - return -EBUSY; -} - -static int __maybe_unused mei_virtio_pm_runtime_suspend(struct device *device) -{ - return 0; -} - -static int __maybe_unused mei_virtio_pm_runtime_resume(struct device *device) -{ - return 0; -} - -static int __maybe_unused mei_virtio_freeze(struct virtio_device *vdev) -{ - struct mei_virtio_hw *hw = vdev->priv; - - dev_dbg(&vdev->dev, "freeze\n"); - - if (!hw) - return -ENODEV; - - mei_stop(&hw->mdev); - mei_disable_interrupts(&hw->mdev); - cancel_work_sync(&hw->intr_handler); - vdev->config->reset(vdev); - mei_virtio_remove_vqs(vdev); - - 
return 0; -} - -static int __maybe_unused mei_virtio_restore(struct virtio_device *vdev) -{ - struct mei_virtio_hw *hw = vdev->priv; - int ret; - - dev_dbg(&vdev->dev, "restore\n"); - - if (!hw) - return -ENODEV; - - ret = mei_virtio_init_vqs(hw, vdev); - if (ret) - return ret; - - virtio_device_ready(vdev); - - ret = mei_restart(&hw->mdev); - if (ret) - return ret; - - /* Start timer if stopped in suspend */ - schedule_delayed_work(&hw->mdev.timer_work, HZ); - - return 0; -} - -static const struct dev_pm_ops mei_virtio_pm_ops = { - SET_RUNTIME_PM_OPS(mei_virtio_pm_runtime_suspend, - mei_virtio_pm_runtime_resume, - mei_virtio_pm_runtime_idle) -}; - -static void mei_virtio_remove(struct virtio_device *vdev) -{ - struct mei_virtio_hw *hw = vdev->priv; - - mei_stop(&hw->mdev); - mei_disable_interrupts(&hw->mdev); - cancel_work_sync(&hw->intr_handler); - mei_deregister(&hw->mdev); - vdev->config->reset(vdev); - mei_virtio_remove_vqs(vdev); - kfree(hw->recv_buf); - pm_runtime_disable(&vdev->dev); -} - -static struct virtio_device_id id_table[] = { - { VIRTIO_ID_MEI, VIRTIO_DEV_ANY_ID }, - { } -}; - -static struct virtio_driver mei_virtio_driver = { - .id_table = id_table, - .probe = mei_virtio_probe, - .remove = mei_virtio_remove, - .config_changed = mei_virtio_config_changed, - .driver = { - .name = KBUILD_MODNAME, - .owner = THIS_MODULE, - .pm = &mei_virtio_pm_ops, - }, -#ifdef CONFIG_PM_SLEEP - .freeze = mei_virtio_freeze, - .restore = mei_virtio_restore, -#endif -}; - -module_virtio_driver(mei_virtio_driver); -MODULE_DEVICE_TABLE(virtio, id_table); -MODULE_DESCRIPTION("Virtio MEI frontend driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 8d3df0be0355..42e27a298218 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -580,7 +580,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp)); - if (idata->rpmb || (cmd.flags & MMC_RSP_R1B)) { + if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { /* * Ensure RPMB/R1B command has completed by polling CMD13 * "Send Status". 
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index a704745e5882..004fbfc23672 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -446,7 +446,7 @@ struct msdc_host { static const struct mtk_mmc_compatible mt8135_compat = { .clk_div_bits = 8, - .recheck_sdio_irq = false, + .recheck_sdio_irq = true, .hs400_tune = false, .pad_tune_reg = MSDC_PAD_TUNE, .async_fifo = false, @@ -485,7 +485,7 @@ static const struct mtk_mmc_compatible mt8183_compat = { static const struct mtk_mmc_compatible mt2701_compat = { .clk_div_bits = 12, - .recheck_sdio_irq = false, + .recheck_sdio_irq = true, .hs400_tune = false, .pad_tune_reg = MSDC_PAD_TUNE0, .async_fifo = true, @@ -511,7 +511,7 @@ static const struct mtk_mmc_compatible mt2712_compat = { static const struct mtk_mmc_compatible mt7622_compat = { .clk_div_bits = 12, - .recheck_sdio_irq = false, + .recheck_sdio_irq = true, .hs400_tune = false, .pad_tune_reg = MSDC_PAD_TUNE0, .async_fifo = true, @@ -524,7 +524,7 @@ static const struct mtk_mmc_compatible mt7622_compat = { static const struct mtk_mmc_compatible mt8516_compat = { .clk_div_bits = 12, - .recheck_sdio_irq = false, + .recheck_sdio_irq = true, .hs400_tune = false, .pad_tune_reg = MSDC_PAD_TUNE0, .async_fifo = true, @@ -535,7 +535,7 @@ static const struct mtk_mmc_compatible mt8516_compat = { static const struct mtk_mmc_compatible mt7620_compat = { .clk_div_bits = 8, - .recheck_sdio_irq = false, + .recheck_sdio_irq = true, .hs400_tune = false, .pad_tune_reg = MSDC_PAD_TUNE, .async_fifo = false, @@ -548,6 +548,7 @@ static const struct mtk_mmc_compatible mt7620_compat = { static const struct mtk_mmc_compatible mt6779_compat = { .clk_div_bits = 12, + .recheck_sdio_irq = false, .hs400_tune = false, .pad_tune_reg = MSDC_PAD_TUNE0, .async_fifo = true, @@ -2603,7 +2604,6 @@ static int msdc_drv_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM static void msdc_save_reg(struct msdc_host *host) { u32 tune_reg = host->dev_comp->pad_tune_reg; @@ -2662,7 +2662,7 @@ static void msdc_restore_reg(struct msdc_host *host) __msdc_enable_sdio_irq(host, 1); } -static int msdc_runtime_suspend(struct device *dev) +static int __maybe_unused msdc_runtime_suspend(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct msdc_host *host = mmc_priv(mmc); @@ -2672,7 +2672,7 @@ static int msdc_runtime_suspend(struct device *dev) return 0; } -static int msdc_runtime_resume(struct device *dev) +static int __maybe_unused msdc_runtime_resume(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct msdc_host *host = mmc_priv(mmc); @@ -2681,11 +2681,28 @@ static int msdc_runtime_resume(struct device *dev) msdc_restore_reg(host); return 0; } -#endif + +static int __maybe_unused msdc_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + int ret; + + if (mmc->caps2 & MMC_CAP2_CQE) { + ret = cqhci_suspend(mmc); + if (ret) + return ret; + } + + return pm_runtime_force_suspend(dev); +} + +static int __maybe_unused msdc_resume(struct device *dev) +{ + return pm_runtime_force_resume(dev); +} static const struct dev_pm_ops msdc_dev_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, - pm_runtime_force_resume) + SET_SYSTEM_SLEEP_PM_OPS(msdc_suspend, msdc_resume) SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL) }; diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index d25a4b50c2f3..3b8d456e857d 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ 
b/drivers/mmc/host/sdhci-of-arasan.c @@ -1186,16 +1186,19 @@ static struct sdhci_arasan_of_data sdhci_arasan_versal_data = { static struct sdhci_arasan_of_data intel_keembay_emmc_data = { .soc_ctl_map = &intel_keembay_soc_ctl_map, .pdata = &sdhci_keembay_emmc_pdata, + .clk_ops = &arasan_clk_ops, }; static struct sdhci_arasan_of_data intel_keembay_sd_data = { .soc_ctl_map = &intel_keembay_soc_ctl_map, .pdata = &sdhci_keembay_sd_pdata, + .clk_ops = &arasan_clk_ops, }; static struct sdhci_arasan_of_data intel_keembay_sdio_data = { .soc_ctl_map = &intel_keembay_soc_ctl_map, .pdata = &sdhci_keembay_sdio_pdata, + .clk_ops = &arasan_clk_ops, }; static const struct of_device_id sdhci_arasan_of_match[] = { diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index cb4149fd12e0..ac4e7874a3f1 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -927,9 +927,9 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) switch (ios->power_mode) { case MMC_POWER_OFF: tmio_mmc_power_off(host); - /* Downgrade ensures a sane state for tuning HW (e.g. SCC) */ - if (host->mmc->ops->hs400_downgrade) - host->mmc->ops->hs400_downgrade(host->mmc); + /* For R-Car Gen2+, we need to reset SDHI specific SCC */ + if (host->pdata->flags & TMIO_MMC_MIN_RCAR2) + host->reset(host); host->set_clock(host, 0); break; case MMC_POWER_UP: diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c index 0c352b39ad4b..ff1697f899ba 100644 --- a/drivers/mtd/nand/raw/ams-delta.c +++ b/drivers/mtd/nand/raw/ams-delta.c @@ -218,7 +218,9 @@ static int gpio_nand_setup_interface(struct nand_chip *this, int csline, static int gpio_nand_attach_chip(struct nand_chip *chip) { chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; + + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; return 0; } diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c index 7892022bd6dd..7b6b354f2d39 100644 --- a/drivers/mtd/nand/raw/au1550nd.c +++ b/drivers/mtd/nand/raw/au1550nd.c @@ -239,7 +239,9 @@ static int au1550nd_exec_op(struct nand_chip *this, static int au1550nd_attach_chip(struct nand_chip *chip) { chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; + + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; return 0; } diff --git a/drivers/mtd/nand/raw/gpio.c b/drivers/mtd/nand/raw/gpio.c index eb03b8cea1cb..fb7a086de35e 100644 --- a/drivers/mtd/nand/raw/gpio.c +++ b/drivers/mtd/nand/raw/gpio.c @@ -164,7 +164,9 @@ static int gpio_nand_exec_op(struct nand_chip *chip, static int gpio_nand_attach_chip(struct nand_chip *chip) { chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; + + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; return 0; } diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c index fb4c0b11689f..bcd4a556c959 100644 --- a/drivers/mtd/nand/raw/mpc5121_nfc.c +++ b/drivers/mtd/nand/raw/mpc5121_nfc.c @@ -606,7 +606,9 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd) static int mpc5121_nfc_attach_chip(struct nand_chip *chip) { chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; + + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; return 0; } diff --git 
a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c index e3bb65fd3ab2..66211c9311d2 100644 --- a/drivers/mtd/nand/raw/orion_nand.c +++ b/drivers/mtd/nand/raw/orion_nand.c @@ -86,7 +86,9 @@ static void orion_nand_read_buf(struct nand_chip *chip, uint8_t *buf, int len) static int orion_nand_attach_chip(struct nand_chip *chip) { chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; + + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; return 0; } diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c index 4dfff34800f4..68c08772d7c2 100644 --- a/drivers/mtd/nand/raw/pasemi_nand.c +++ b/drivers/mtd/nand/raw/pasemi_nand.c @@ -77,7 +77,9 @@ static int pasemi_device_ready(struct nand_chip *chip) static int pasemi_attach_chip(struct nand_chip *chip) { chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; + + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; return 0; } diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c index 93d9f1694dc1..7711e1020c21 100644 --- a/drivers/mtd/nand/raw/plat_nand.c +++ b/drivers/mtd/nand/raw/plat_nand.c @@ -22,7 +22,9 @@ struct plat_nand_data { static int plat_nand_attach_chip(struct nand_chip *chip) { chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; + + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; return 0; } diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c index 107208311987..70f8305c9b6e 100644 --- a/drivers/mtd/nand/raw/socrates_nand.c +++ b/drivers/mtd/nand/raw/socrates_nand.c @@ -120,7 +120,9 @@ static int socrates_nand_device_ready(struct nand_chip *nand_chip) static int socrates_attach_chip(struct nand_chip *chip) { chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; + + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; return 0; } diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c index efc5bf5434e0..26751976e502 100644 --- a/drivers/mtd/nand/raw/xway_nand.c +++ b/drivers/mtd/nand/raw/xway_nand.c @@ -149,7 +149,9 @@ static void xway_write_buf(struct nand_chip *chip, const u_char *buf, int len) static int xway_attach_chip(struct nand_chip *chip) { chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; + + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; return 0; } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index e0880a3840d7..5fe5232cc3f3 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4746,15 +4746,13 @@ void bond_setup(struct net_device *bond_dev) NETIF_F_HW_VLAN_CTAG_FILTER; bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; -#ifdef CONFIG_XFRM_OFFLOAD - bond_dev->hw_features |= BOND_XFRM_FEATURES; -#endif /* CONFIG_XFRM_OFFLOAD */ bond_dev->features |= bond_dev->hw_features; bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; #ifdef CONFIG_XFRM_OFFLOAD - /* Disable XFRM features if this isn't an active-backup config */ - if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) - bond_dev->features &= ~BOND_XFRM_FEATURES; + bond_dev->hw_features |= BOND_XFRM_FEATURES; + /* Only enable XFRM features if this is an active-backup config */ + if 
(BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) + bond_dev->features |= BOND_XFRM_FEATURES; #endif /* CONFIG_XFRM_OFFLOAD */ } diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 9abfaae1c6f7..a4e4e15f574d 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -745,6 +745,19 @@ const struct bond_option *bond_opt_get(unsigned int option) return &bond_opts[option]; } +static void bond_set_xfrm_features(struct net_device *bond_dev, u64 mode) +{ + if (!IS_ENABLED(CONFIG_XFRM_OFFLOAD)) + return; + + if (mode == BOND_MODE_ACTIVEBACKUP) + bond_dev->wanted_features |= BOND_XFRM_FEATURES; + else + bond_dev->wanted_features &= ~BOND_XFRM_FEATURES; + + netdev_update_features(bond_dev); +} + static int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval) { @@ -767,13 +780,8 @@ static int bond_option_mode_set(struct bonding *bond, if (newval->value == BOND_MODE_ALB) bond->params.tlb_dynamic_lb = 1; -#ifdef CONFIG_XFRM_OFFLOAD - if (newval->value == BOND_MODE_ACTIVEBACKUP) - bond->dev->wanted_features |= BOND_XFRM_FEATURES; - else - bond->dev->wanted_features &= ~BOND_XFRM_FEATURES; - netdev_change_features(bond->dev); -#endif /* CONFIG_XFRM_OFFLOAD */ + if (bond->dev->reg_state == NETREG_REGISTERED) + bond_set_xfrm_features(bond->dev, newval->value); /* don't cache arp_validate between modes */ bond->params.arp_validate = BOND_ARP_VALIDATE_NONE; diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index e85f20d18d67..038fe1036df2 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -1940,15 +1940,8 @@ static const struct of_device_id flexcan_of_match[] = { }; MODULE_DEVICE_TABLE(of, flexcan_of_match); -static const struct platform_device_id flexcan_id_table[] = { - { .name = "flexcan", .driver_data = (kernel_ulong_t)&fsl_p1010_devtype_data, }, - { /* sentinel */ }, -}; -MODULE_DEVICE_TABLE(platform, flexcan_id_table); - static int flexcan_probe(struct platform_device *pdev) { - const struct of_device_id *of_id; const struct flexcan_devtype_data *devtype_data; struct net_device *dev; struct flexcan_priv *priv; @@ -1997,15 +1990,7 @@ static int flexcan_probe(struct platform_device *pdev) if (IS_ERR(regs)) return PTR_ERR(regs); - of_id = of_match_device(flexcan_of_match, &pdev->dev); - if (of_id) { - devtype_data = of_id->data; - } else if (platform_get_device_id(pdev)->driver_data) { - devtype_data = (struct flexcan_devtype_data *) - platform_get_device_id(pdev)->driver_data; - } else { - return -ENODEV; - } + devtype_data = of_device_get_match_data(&pdev->dev); if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) && !(devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)) { @@ -2235,7 +2220,6 @@ static struct platform_driver flexcan_driver = { }, .probe = flexcan_probe, .remove = flexcan_remove, - .id_table = flexcan_id_table, }; module_platform_driver(flexcan_driver); diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig index e3eb69b76cf5..45ad1b3f0cd0 100644 --- a/drivers/net/can/m_can/Kconfig +++ b/drivers/net/can/m_can/Kconfig @@ -7,6 +7,13 @@ menuconfig CAN_M_CAN if CAN_M_CAN +config CAN_M_CAN_PCI + tristate "Generic PCI Bus based M_CAN driver" + depends on PCI + help + Say Y here if you want to support Bosch M_CAN controller connected + to the pci bus. 
+ config CAN_M_CAN_PLATFORM tristate "Bosch M_CAN support for io-mapped devices" depends on HAS_IOMEM diff --git a/drivers/net/can/m_can/Makefile b/drivers/net/can/m_can/Makefile index 52a4a6fbe527..ef7963ff2006 100644 --- a/drivers/net/can/m_can/Makefile +++ b/drivers/net/can/m_can/Makefile @@ -4,5 +4,6 @@ # obj-$(CONFIG_CAN_M_CAN) += m_can.o +obj-$(CONFIG_CAN_M_CAN_PCI) += m_can_pci.o obj-$(CONFIG_CAN_M_CAN_PLATFORM) += m_can_platform.o obj-$(CONFIG_CAN_M_CAN_TCAN4X5X) += tcan4x5x.o diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 05c978d1c53d..06c136961c7c 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -380,10 +380,6 @@ void m_can_config_endisable(struct m_can_classdev *cdev, bool enable) cccr &= ~CCCR_CSR; if (enable) { - /* Clear the Clock stop request if it was set */ - if (cccr & CCCR_CSR) - cccr &= ~CCCR_CSR; - /* enable m_can configuration */ m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT); udelay(5); @@ -1830,10 +1826,9 @@ int m_can_class_register(struct m_can_classdev *m_can_dev) int ret; if (m_can_dev->pm_clock_support) { - pm_runtime_enable(m_can_dev->dev); ret = m_can_clk_start(m_can_dev); if (ret) - goto pm_runtime_fail; + return ret; } ret = m_can_dev_setup(m_can_dev); @@ -1859,11 +1854,6 @@ int m_can_class_register(struct m_can_classdev *m_can_dev) */ clk_disable: m_can_clk_stop(m_can_dev); -pm_runtime_fail: - if (ret) { - if (m_can_dev->pm_clock_support) - pm_runtime_disable(m_can_dev->dev); - } return ret; } diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c new file mode 100644 index 000000000000..04010ee0407c --- /dev/null +++ b/drivers/net/can/m_can/m_can_pci.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCI Specific M_CAN Glue + * + * Copyright (C) 2018-2020 Intel Corporation + * Author: Felipe Balbi (Intel) + * Author: Jarkko Nikula <[email protected]> + * Author: Raymond Tan <[email protected]> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/pm_runtime.h> + +#include "m_can.h" + +#define M_CAN_PCI_MMIO_BAR 0 + +#define M_CAN_CLOCK_FREQ_EHL 100000000 +#define CTL_CSR_INT_CTL_OFFSET 0x508 + +struct m_can_pci_priv { + void __iomem *base; +}; + +static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg) +{ + struct m_can_pci_priv *priv = cdev->device_data; + + return readl(priv->base + reg); +} + +static u32 iomap_read_fifo(struct m_can_classdev *cdev, int offset) +{ + struct m_can_pci_priv *priv = cdev->device_data; + + return readl(priv->base + offset); +} + +static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val) +{ + struct m_can_pci_priv *priv = cdev->device_data; + + writel(val, priv->base + reg); + + return 0; +} + +static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, int val) +{ + struct m_can_pci_priv *priv = cdev->device_data; + + writel(val, priv->base + offset); + + return 0; +} + +static struct m_can_ops m_can_pci_ops = { + .read_reg = iomap_read_reg, + .write_reg = iomap_write_reg, + .write_fifo = iomap_write_fifo, + .read_fifo = iomap_read_fifo, +}; + +static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) +{ + struct device *dev = &pci->dev; + struct m_can_classdev *mcan_class; + struct m_can_pci_priv *priv; + void __iomem *base; + int ret; + + ret = pcim_enable_device(pci); + if (ret) + return ret; + + pci_set_master(pci); + + ret = pcim_iomap_regions(pci, BIT(M_CAN_PCI_MMIO_BAR), 
pci_name(pci)); + if (ret) + return ret; + + base = pcim_iomap_table(pci)[M_CAN_PCI_MMIO_BAR]; + + if (!base) { + dev_err(dev, "failed to map BARs\n"); + return -ENOMEM; + } + + priv = devm_kzalloc(&pci->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + mcan_class = m_can_class_allocate_dev(&pci->dev); + if (!mcan_class) + return -ENOMEM; + + priv->base = base; + + ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_ALL_TYPES); + if (ret < 0) + return ret; + + mcan_class->device_data = priv; + mcan_class->dev = &pci->dev; + mcan_class->net->irq = pci_irq_vector(pci, 0); + mcan_class->pm_clock_support = 1; + mcan_class->can.clock.freq = id->driver_data; + mcan_class->ops = &m_can_pci_ops; + + pci_set_drvdata(pci, mcan_class->net); + + ret = m_can_class_register(mcan_class); + if (ret) + goto err; + + /* Enable interrupt control at CAN wrapper IP */ + writel(0x1, base + CTL_CSR_INT_CTL_OFFSET); + + pm_runtime_set_autosuspend_delay(dev, 1000); + pm_runtime_use_autosuspend(dev); + pm_runtime_put_noidle(dev); + pm_runtime_allow(dev); + + return 0; + +err: + pci_free_irq_vectors(pci); + return ret; +} + +static void m_can_pci_remove(struct pci_dev *pci) +{ + struct net_device *dev = pci_get_drvdata(pci); + struct m_can_classdev *mcan_class = netdev_priv(dev); + struct m_can_pci_priv *priv = mcan_class->device_data; + + pm_runtime_forbid(&pci->dev); + pm_runtime_get_noresume(&pci->dev); + + /* Disable interrupt control at CAN wrapper IP */ + writel(0x0, priv->base + CTL_CSR_INT_CTL_OFFSET); + + m_can_class_unregister(mcan_class); + pci_free_irq_vectors(pci); +} + +static __maybe_unused int m_can_pci_suspend(struct device *dev) +{ + return m_can_class_suspend(dev); +} + +static __maybe_unused int m_can_pci_resume(struct device *dev) +{ + return m_can_class_resume(dev); +} + +static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops, + m_can_pci_suspend, m_can_pci_resume); + +static const struct pci_device_id m_can_pci_id_table[] = { + { PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, }, + { PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, }, + { } /* Terminating Entry */ +}; +MODULE_DEVICE_TABLE(pci, m_can_pci_id_table); + +static struct pci_driver m_can_pci_driver = { + .name = "m_can_pci", + .probe = m_can_pci_probe, + .remove = m_can_pci_remove, + .id_table = m_can_pci_id_table, + .driver = { + .pm = &m_can_pci_pm_ops, + }, +}; + +module_pci_driver(m_can_pci_driver); + +MODULE_AUTHOR("Felipe Balbi (Intel)"); +MODULE_AUTHOR("Jarkko Nikula <[email protected]>"); +MODULE_AUTHOR("Raymond Tan <[email protected]>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller on PCI bus"); diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c index c45a889a1afd..36ef791da388 100644 --- a/drivers/net/can/m_can/m_can_platform.c +++ b/drivers/net/can/m_can/m_can_platform.c @@ -115,8 +115,15 @@ static int m_can_plat_probe(struct platform_device *pdev) m_can_init_ram(mcan_class); - return m_can_class_register(mcan_class); + pm_runtime_enable(mcan_class->dev); + ret = m_can_class_register(mcan_class); + if (ret) + goto out_runtime_disable; + + return ret; +out_runtime_disable: + pm_runtime_disable(mcan_class->dev); probe_fail: m_can_class_free_dev(mcan_class->net); return ret; diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c index 450c5cfcb3fc..3c1912c0430b 100644 --- a/drivers/net/can/rx-offload.c +++ b/drivers/net/can/rx-offload.c @@ -157,7 +157,7 @@ can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned 
int n) /* There was a problem reading the mailbox, propagate * error value. */ - if (unlikely(IS_ERR(skb))) { + if (IS_ERR(skb)) { offload->dev->stats.rx_dropped++; offload->dev->stats.rx_fifo_errors++; diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c index 03a68bb486fd..40070c930202 100644 --- a/drivers/net/can/softing/softing_main.c +++ b/drivers/net/can/softing/softing_main.c @@ -382,8 +382,13 @@ static int softing_netdev_open(struct net_device *ndev) /* check or determine and set bittime */ ret = open_candev(ndev); - if (!ret) - ret = softing_startstop(ndev, 1); + if (ret) + return ret; + + ret = softing_startstop(ndev, 1); + if (ret < 0) + close_candev(ndev); + return ret; } diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index 20cbd5c446f5..77129d5f410b 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -965,7 +965,10 @@ static u8 mcp251xfd_get_normal_mode(const struct mcp251xfd_priv *priv) { u8 mode; - if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + + if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) + mode = MCP251XFD_REG_CON_MODE_INT_LOOPBACK; + else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) mode = MCP251XFD_REG_CON_MODE_LISTENONLY; else if (priv->can.ctrlmode & CAN_CTRLMODE_FD) mode = MCP251XFD_REG_CON_MODE_MIXED; @@ -2881,9 +2884,9 @@ static int mcp251xfd_probe(struct spi_device *spi) priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter; priv->can.bittiming_const = &mcp251xfd_bittiming_const; priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const; - priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | - CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_FD | - CAN_CTRLMODE_FD_NON_ISO; + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | + CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING | + CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO; priv->ndev = ndev; priv->spi = spi; priv->rx_int = rx_int; diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 6408402a44f5..99bf8fed6536 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -870,6 +870,46 @@ mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset) return ARRAY_SIZE(mt7530_mib); } +static int +mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) +{ + struct mt7530_priv *priv = ds->priv; + unsigned int secs = msecs / 1000; + unsigned int tmp_age_count; + unsigned int error = -1; + unsigned int age_count; + unsigned int age_unit; + + /* Applied timer is (AGE_CNT + 1) * (AGE_UNIT + 1) seconds */ + if (secs < 1 || secs > (AGE_CNT_MAX + 1) * (AGE_UNIT_MAX + 1)) + return -ERANGE; + + /* iterate through all possible age_count to find the closest pair */ + for (tmp_age_count = 0; tmp_age_count <= AGE_CNT_MAX; ++tmp_age_count) { + unsigned int tmp_age_unit = secs / (tmp_age_count + 1) - 1; + + if (tmp_age_unit <= AGE_UNIT_MAX) { + unsigned int tmp_error = secs - + (tmp_age_count + 1) * (tmp_age_unit + 1); + + /* found a closer pair */ + if (error > tmp_error) { + error = tmp_error; + age_count = tmp_age_count; + age_unit = tmp_age_unit; + } + + /* found the exact match, so break the loop */ + if (!error) + break; + } + } + + mt7530_write(priv, MT7530_AAC, AGE_CNT(age_count) | AGE_UNIT(age_unit)); + + return 0; +} + static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface) { struct mt7530_priv *priv = ds->priv; @@ -2564,6 +2604,7 @@ static const struct 
dsa_switch_ops mt7530_switch_ops = { .phy_write = mt753x_phy_write, .get_ethtool_stats = mt7530_get_ethtool_stats, .get_sset_count = mt7530_get_sset_count, + .set_ageing_time = mt7530_set_ageing_time, .port_enable = mt7530_port_enable, .port_disable = mt7530_port_disable, .port_change_mtu = mt7530_port_change_mtu, diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index ee3523a7537e..32d8969b3ace 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -161,6 +161,19 @@ enum mt7530_vlan_egress_attr { MT7530_VLAN_EGRESS_STACK = 3, }; +/* Register for address age control */ +#define MT7530_AAC 0xa0 +/* Disable ageing */ +#define AGE_DIS BIT(20) +/* Age count */ +#define AGE_CNT_MASK GENMASK(19, 12) +#define AGE_CNT_MAX 0xff +#define AGE_CNT(x) (AGE_CNT_MASK & ((x) << 12)) +/* Age unit */ +#define AGE_UNIT_MASK GENMASK(11, 0) +#define AGE_UNIT_MAX 0xfff +#define AGE_UNIT(x) (AGE_UNIT_MASK & (x)) + /* Register for port STP state control */ #define MT7530_SSP_P(x) (0x2000 + ((x) * 0x100)) #define FID_PST(x) ((x) & 0x3) diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c index bac9a8a68e50..40bd67a5c8e9 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_atu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c @@ -333,7 +333,7 @@ static int mv88e6xxx_g1_atu_move(struct mv88e6xxx_chip *chip, u16 fid, mask = chip->info->atu_move_port_mask; shift = bitmap_weight(&mask, 16); - entry.state = 0xf, /* Full EntryState means Move */ + entry.state = 0xf; /* Full EntryState means Move */ entry.portvec = from_port & mask; entry.portvec |= (to_port & mask) << shift; diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index ada75fa15861..7dc230677b78 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -588,7 +588,6 @@ static int felix_setup(struct dsa_switch *ds) struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); int port, err; - int tc; err = felix_init_structs(felix, ds->num_ports); if (err) @@ -627,12 +626,6 @@ static int felix_setup(struct dsa_switch *ds) ocelot_write_rix(ocelot, ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)), ANA_PGID_PGID, PGID_UC); - /* Setup the per-traffic class flooding PGIDs */ - for (tc = 0; tc < FELIX_NUM_TC; tc++) - ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) | - ANA_FLOODING_FLD_BROADCAST(PGID_MC) | - ANA_FLOODING_FLD_UNICAST(PGID_UC), - ANA_FLOODING, tc); ds->mtu_enforcement_ingress = true; ds->configure_vlan_while_not_filtering = true; diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 3e925b8d5306..2e5bbdca5ea4 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -1429,6 +1429,7 @@ static int felix_pci_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, felix); ocelot = &felix->ocelot; ocelot->dev = &pdev->dev; + ocelot->num_flooding_pgids = FELIX_NUM_TC; felix->info = &felix_info_vsc9959; felix->switch_base = pci_resource_start(pdev, felix->info->switch_pci_bar); diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c index 1d420c4a2f0f..ebbaf6817ec8 100644 --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c @@ -1210,6 +1210,7 @@ static int seville_probe(struct platform_device *pdev) ocelot = &felix->ocelot; ocelot->dev = &pdev->dev; + ocelot->num_flooding_pgids = 1; felix->info = &seville_info_vsc9953; res = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c index 307466b90489..83d481ef9273 100644 --- a/drivers/net/dsa/rtl8366.c +++ b/drivers/net/dsa/rtl8366.c @@ -384,7 +384,6 @@ int rtl8366_vlan_prepare(struct dsa_switch *ds, int port, { struct realtek_smi *smi = ds->priv; u16 vid; - int ret; for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++) if (!smi->ops->is_vlan_valid(smi, vid)) @@ -397,11 +396,7 @@ int rtl8366_vlan_prepare(struct dsa_switch *ds, int port, * FIXME: what's with this 4k business? * Just rtl8366_enable_vlan() seems inconclusive. */ - ret = rtl8366_enable_vlan4k(smi, true); - if (ret) - return ret; - - return 0; + return rtl8366_enable_vlan4k(smi, true); } EXPORT_SYMBOL_GPL(rtl8366_vlan_prepare); diff --git a/drivers/net/ethernet/agere/Kconfig b/drivers/net/ethernet/agere/Kconfig index d92516ae59cc..9cd750184947 100644 --- a/drivers/net/ethernet/agere/Kconfig +++ b/drivers/net/ethernet/agere/Kconfig @@ -21,6 +21,7 @@ config ET131X tristate "Agere ET-1310 Gigabit Ethernet support" depends on PCI select PHYLIB + select CRC32 help This driver supports Agere ET-1310 ethernet adapters. diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 5f8769aa469d..02087d443e73 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -71,7 +71,8 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev, dma_addr_t addr) { if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) { - pr_err("DMA address has more bits that the device supports\n"); + netdev_err(ena_dev->net_device, + "DMA address has more bits that the device supports\n"); return -EINVAL; } @@ -83,6 +84,7 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev, static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue) { + struct ena_com_dev *ena_dev = admin_queue->ena_dev; struct ena_com_admin_sq *sq = &admin_queue->sq; u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth); @@ -90,7 +92,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue) &sq->dma_addr, GFP_KERNEL); if (!sq->entries) { - pr_err("Memory allocation failed\n"); + netdev_err(ena_dev->net_device, "Memory allocation failed\n"); return -ENOMEM; } @@ -105,6 +107,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue) static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue) { + struct ena_com_dev *ena_dev = admin_queue->ena_dev; struct ena_com_admin_cq *cq = &admin_queue->cq; u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth); @@ -112,7 +115,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue) &cq->dma_addr, GFP_KERNEL); if (!cq->entries) { - pr_err("Memory allocation failed\n"); + netdev_err(ena_dev->net_device, "Memory allocation failed\n"); return -ENOMEM; } @@ -135,7 +138,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev, &aenq->dma_addr, GFP_KERNEL); if (!aenq->entries) { - pr_err("Memory allocation failed\n"); + netdev_err(ena_dev->net_device, "Memory allocation failed\n"); return -ENOMEM; } @@ -156,7 +159,8 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev, writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF); if (unlikely(!aenq_handlers)) { - pr_err("AENQ handlers pointer is NULL\n"); + netdev_err(ena_dev->net_device, + "AENQ handlers pointer is NULL\n"); return -EINVAL; } @@ -176,18 +180,21 @@ static struct ena_comp_ctx *get_comp_ctxt(struct 
ena_com_admin_queue *admin_queu u16 command_id, bool capture) { if (unlikely(command_id >= admin_queue->q_depth)) { - pr_err("Command id is larger than the queue size. cmd_id: %u queue size %d\n", - command_id, admin_queue->q_depth); + netdev_err(admin_queue->ena_dev->net_device, + "Command id is larger than the queue size. cmd_id: %u queue size %d\n", + command_id, admin_queue->q_depth); return NULL; } if (unlikely(!admin_queue->comp_ctx)) { - pr_err("Completion context is NULL\n"); + netdev_err(admin_queue->ena_dev->net_device, + "Completion context is NULL\n"); return NULL; } if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) { - pr_err("Completion context is occupied\n"); + netdev_err(admin_queue->ena_dev->net_device, + "Completion context is occupied\n"); return NULL; } @@ -217,7 +224,8 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu /* In case of queue FULL */ cnt = (u16)atomic_read(&admin_queue->outstanding_cmds); if (cnt >= admin_queue->q_depth) { - pr_debug("Admin queue is full.\n"); + netdev_dbg(admin_queue->ena_dev->net_device, + "Admin queue is full.\n"); admin_queue->stats.out_of_space++; return ERR_PTR(-ENOSPC); } @@ -259,6 +267,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue) { + struct ena_com_dev *ena_dev = admin_queue->ena_dev; size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx); struct ena_comp_ctx *comp_ctx; u16 i; @@ -266,7 +275,7 @@ static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue) admin_queue->comp_ctx = devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL); if (unlikely(!admin_queue->comp_ctx)) { - pr_err("Memory allocation failed\n"); + netdev_err(ena_dev->net_device, "Memory allocation failed\n"); return -ENOMEM; } @@ -337,7 +346,8 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, } if (!io_sq->desc_addr.virt_addr) { - pr_err("Memory allocation failed\n"); + netdev_err(ena_dev->net_device, + "Memory allocation failed\n"); return -ENOMEM; } } @@ -363,7 +373,8 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); if (!io_sq->bounce_buf_ctrl.base_buffer) { - pr_err("Bounce buffer memory allocation failed\n"); + netdev_err(ena_dev->net_device, + "Bounce buffer memory allocation failed\n"); return -ENOMEM; } @@ -423,7 +434,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, } if (!io_cq->cdesc_addr.virt_addr) { - pr_err("Memory allocation failed\n"); + netdev_err(ena_dev->net_device, "Memory allocation failed\n"); return -ENOMEM; } @@ -444,7 +455,8 @@ static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *a comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false); if (unlikely(!comp_ctx)) { - pr_err("comp_ctx is NULL. Changing the admin queue running state\n"); + netdev_err(admin_queue->ena_dev->net_device, + "comp_ctx is NULL. 
Changing the admin queue running state\n"); admin_queue->running_state = false; return; } @@ -496,10 +508,12 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu admin_queue->stats.completed_cmd += comp_num; } -static int ena_com_comp_status_to_errno(u8 comp_status) +static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue, + u8 comp_status) { if (unlikely(comp_status != 0)) - pr_err("Admin command failed[%u]\n", comp_status); + netdev_err(admin_queue->ena_dev->net_device, + "Admin command failed[%u]\n", comp_status); switch (comp_status) { case ENA_ADMIN_SUCCESS: @@ -546,7 +560,8 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c break; if (time_is_before_jiffies(timeout)) { - pr_err("Wait for completion (polling) timeout\n"); + netdev_err(admin_queue->ena_dev->net_device, + "Wait for completion (polling) timeout\n"); /* ENA didn't have any completion */ spin_lock_irqsave(&admin_queue->q_lock, flags); admin_queue->stats.no_completion++; @@ -562,7 +577,8 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c } if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) { - pr_err("Command was aborted\n"); + netdev_err(admin_queue->ena_dev->net_device, + "Command was aborted\n"); spin_lock_irqsave(&admin_queue->q_lock, flags); admin_queue->stats.aborted_cmd++; spin_unlock_irqrestore(&admin_queue->q_lock, flags); @@ -573,7 +589,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", comp_ctx->status); - ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); + ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status); err: comp_ctxt_release(admin_queue, comp_ctx); return ret; @@ -615,7 +631,8 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev) sizeof(resp)); if (unlikely(ret)) - pr_err("Failed to set LLQ configurations: %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to set LLQ configurations: %d\n", ret); return ret; } @@ -637,8 +654,9 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, llq_info->header_location_ctrl = llq_default_cfg->llq_header_location; } else { - pr_err("Invalid header location control, supported: 0x%x\n", - supported_feat); + netdev_err(ena_dev->net_device, + "Invalid header location control, supported: 0x%x\n", + supported_feat); return -EINVAL; } @@ -652,14 +670,16 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) { llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY; } else { - pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n", - supported_feat); + netdev_err(ena_dev->net_device, + "Invalid desc_stride_ctrl, supported: 0x%x\n", + supported_feat); return -EINVAL; } - pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", - llq_default_cfg->llq_stride_ctrl, supported_feat, - llq_info->desc_stride_ctrl); + netdev_err(ena_dev->net_device, + "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", + llq_default_cfg->llq_stride_ctrl, + supported_feat, llq_info->desc_stride_ctrl); } } else { llq_info->desc_stride_ctrl = 0; @@ -680,20 +700,23 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B; llq_info->desc_list_entry_size = 256; } 
else { - pr_err("Invalid entry_size_ctrl, supported: 0x%x\n", - supported_feat); + netdev_err(ena_dev->net_device, + "Invalid entry_size_ctrl, supported: 0x%x\n", + supported_feat); return -EINVAL; } - pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", - llq_default_cfg->llq_ring_entry_size, supported_feat, - llq_info->desc_list_entry_size); + netdev_err(ena_dev->net_device, + "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", + llq_default_cfg->llq_ring_entry_size, supported_feat, + llq_info->desc_list_entry_size); } if (unlikely(llq_info->desc_list_entry_size & 0x7)) { /* The desc list entry size should be whole multiply of 8 * This requirement comes from __iowrite64_copy() */ - pr_err("Illegal entry size %d\n", llq_info->desc_list_entry_size); + netdev_err(ena_dev->net_device, "Illegal entry size %d\n", + llq_info->desc_list_entry_size); return -EINVAL; } @@ -716,14 +739,16 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) { llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8; } else { - pr_err("Invalid descs_num_before_header, supported: 0x%x\n", - supported_feat); + netdev_err(ena_dev->net_device, + "Invalid descs_num_before_header, supported: 0x%x\n", + supported_feat); return -EINVAL; } - pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", - llq_default_cfg->llq_num_decs_before_header, - supported_feat, llq_info->descs_num_before_header); + netdev_err(ena_dev->net_device, + "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", + llq_default_cfg->llq_num_decs_before_header, + supported_feat, llq_info->descs_num_before_header); } /* Check for accelerated queue supported */ llq_accel_mode_get = llq_features->accel_mode.u.get; @@ -739,7 +764,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, rc = ena_com_set_llq(ena_dev); if (rc) - pr_err("Cannot set LLQ configuration: %d\n", rc); + netdev_err(ena_dev->net_device, + "Cannot set LLQ configuration: %d\n", rc); return rc; } @@ -766,15 +792,17 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com spin_unlock_irqrestore(&admin_queue->q_lock, flags); if (comp_ctx->status == ENA_CMD_COMPLETED) { - pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n", - comp_ctx->cmd_opcode, - admin_queue->auto_polling ? "ON" : "OFF"); + netdev_err(admin_queue->ena_dev->net_device, + "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n", + comp_ctx->cmd_opcode, + admin_queue->auto_polling ? "ON" : "OFF"); /* Check if fallback to polling is enabled */ if (admin_queue->auto_polling) admin_queue->polling = true; } else { - pr_err("The ena device didn't send a completion for the admin cmd %d status %d\n", - comp_ctx->cmd_opcode, comp_ctx->status); + netdev_err(admin_queue->ena_dev->net_device, + "The ena device didn't send a completion for the admin cmd %d status %d\n", + comp_ctx->cmd_opcode, comp_ctx->status); } /* Check if shifted to polling mode. 
* This will happen if there is a completion without an interrupt @@ -787,7 +815,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com } } - ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); + ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status); err: comp_ctxt_release(admin_queue, comp_ctx); return ret; @@ -834,15 +862,17 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) } if (unlikely(i == timeout)) { - pr_err("Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n", - mmio_read->seq_num, offset, read_resp->req_id, - read_resp->reg_off); + netdev_err(ena_dev->net_device, + "Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n", + mmio_read->seq_num, offset, read_resp->req_id, + read_resp->reg_off); ret = ENA_MMIO_READ_TIMEOUT; goto err; } if (read_resp->reg_off != offset) { - pr_err("Read failure: wrong offset provided\n"); + netdev_err(ena_dev->net_device, + "Read failure: wrong offset provided\n"); ret = ENA_MMIO_READ_TIMEOUT; } else { ret = read_resp->reg_val; @@ -901,7 +931,8 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev, sizeof(destroy_resp)); if (unlikely(ret && (ret != -ENODEV))) - pr_err("Failed to destroy io sq error: %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to destroy io sq error: %d\n", ret); return ret; } @@ -951,7 +982,8 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) { - pr_err("Reg read timeout occurred\n"); + netdev_err(ena_dev->net_device, + "Reg read timeout occurred\n"); return -ETIME; } @@ -991,7 +1023,8 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, int ret; if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) { - pr_debug("Feature %d isn't supported\n", feature_id); + netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", + feature_id); return -EOPNOTSUPP; } @@ -1010,7 +1043,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, &get_cmd.control_buffer.address, control_buf_dma_addr); if (unlikely(ret)) { - pr_err("Memory address set failed\n"); + netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } @@ -1027,8 +1060,9 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, sizeof(*get_resp)); if (unlikely(ret)) - pr_err("Failed to submit get_feature command %d error: %d\n", - feature_id, ret); + netdev_err(ena_dev->net_device, + "Failed to submit get_feature command %d error: %d\n", + feature_id, ret); return ret; } @@ -1130,9 +1164,10 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, if ((get_resp.u.ind_table.min_size > log_size) || (get_resp.u.ind_table.max_size < log_size)) { - pr_err("Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n", - 1 << log_size, 1 << get_resp.u.ind_table.min_size, - 1 << get_resp.u.ind_table.max_size); + netdev_err(ena_dev->net_device, + "Indirect table size doesn't fit. 
requested size: %d while min is:%d and max %d\n", + 1 << log_size, 1 << get_resp.u.ind_table.min_size, + 1 << get_resp.u.ind_table.max_size); return -EINVAL; } @@ -1223,7 +1258,8 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, &create_cmd.sq_ba, io_sq->desc_addr.phys_addr); if (unlikely(ret)) { - pr_err("Memory address set failed\n"); + netdev_err(ena_dev->net_device, + "Memory address set failed\n"); return ret; } } @@ -1234,7 +1270,8 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, (struct ena_admin_acq_entry *)&cmd_completion, sizeof(cmd_completion)); if (unlikely(ret)) { - pr_err("Failed to create IO SQ. error: %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to create IO SQ. error: %d\n", ret); return ret; } @@ -1252,7 +1289,8 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, cmd_completion.llq_descriptors_offset); } - pr_debug("Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth); + netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n", + io_sq->idx, io_sq->q_depth); return ret; } @@ -1286,7 +1324,8 @@ static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution; if (unlikely(!intr_delay_resolution)) { - pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n"); + netdev_err(ena_dev->net_device, + "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n"); intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION; } @@ -1321,22 +1360,25 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size, comp, comp_size); if (IS_ERR(comp_ctx)) { - if (comp_ctx == ERR_PTR(-ENODEV)) - pr_debug("Failed to submit command [%ld]\n", - PTR_ERR(comp_ctx)); + ret = PTR_ERR(comp_ctx); + if (ret == -ENODEV) + netdev_dbg(admin_queue->ena_dev->net_device, + "Failed to submit command [%d]\n", ret); else - pr_err("Failed to submit command [%ld]\n", - PTR_ERR(comp_ctx)); + netdev_err(admin_queue->ena_dev->net_device, + "Failed to submit command [%d]\n", ret); - return PTR_ERR(comp_ctx); + return ret; } ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue); if (unlikely(ret)) { if (admin_queue->running_state) - pr_err("Failed to process command. ret = %d\n", ret); + netdev_err(admin_queue->ena_dev->net_device, + "Failed to process command. ret = %d\n", ret); else - pr_debug("Failed to process command. ret = %d\n", ret); + netdev_dbg(admin_queue->ena_dev->net_device, + "Failed to process command. ret = %d\n", ret); } return ret; } @@ -1365,7 +1407,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev, &create_cmd.cq_ba, io_cq->cdesc_addr.phys_addr); if (unlikely(ret)) { - pr_err("Memory address set failed\n"); + netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } @@ -1375,7 +1417,8 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev, (struct ena_admin_acq_entry *)&cmd_completion, sizeof(cmd_completion)); if (unlikely(ret)) { - pr_err("Failed to create IO CQ. error: %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to create IO CQ. 
error: %d\n", ret); return ret; } @@ -1394,7 +1437,8 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev, (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + cmd_completion.numa_node_register_offset); - pr_debug("Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth); + netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n", + io_cq->idx, io_cq->q_depth); return ret; } @@ -1404,8 +1448,9 @@ int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, struct ena_com_io_cq **io_cq) { if (qid >= ENA_TOTAL_NUM_QUEUES) { - pr_err("Invalid queue number %d but the max is %d\n", qid, - ENA_TOTAL_NUM_QUEUES); + netdev_err(ena_dev->net_device, + "Invalid queue number %d but the max is %d\n", qid, + ENA_TOTAL_NUM_QUEUES); return -EINVAL; } @@ -1471,7 +1516,8 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, sizeof(destroy_resp)); if (unlikely(ret && (ret != -ENODEV))) - pr_err("Failed to destroy IO CQ. error: %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to destroy IO CQ. error: %d\n", ret); return ret; } @@ -1513,13 +1559,14 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0); if (ret) { - pr_info("Can't get aenq configuration\n"); + dev_info(ena_dev->dmadev, "Can't get aenq configuration\n"); return ret; } if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) { - pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n", - get_resp.u.aenq.supported_groups, groups_flag); + netdev_warn(ena_dev->net_device, + "Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n", + get_resp.u.aenq.supported_groups, groups_flag); return -EOPNOTSUPP; } @@ -1538,7 +1585,8 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) sizeof(resp)); if (unlikely(ret)) - pr_err("Failed to config AENQ ret: %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to config AENQ ret: %d\n", ret); return ret; } @@ -1546,20 +1594,21 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) int ena_com_get_dma_width(struct ena_com_dev *ena_dev) { u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); - int width; + u32 width; if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) { - pr_err("Reg read timeout occurred\n"); + netdev_err(ena_dev->net_device, "Reg read timeout occurred\n"); return -ETIME; } width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >> ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT; - pr_debug("ENA dma width: %d\n", width); + netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width); if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) { - pr_err("DMA width illegal value: %d\n", width); + netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n", + width); return -EINVAL; } @@ -1583,23 +1632,24 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev) if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) { - pr_err("Reg read timeout occurred\n"); + netdev_err(ena_dev->net_device, "Reg read timeout occurred\n"); return -ETIME; } - pr_info("ENA device version: %d.%d\n", - (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> - ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, - ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); + dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n", + (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> + ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, + ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); - pr_info("ENA controller version: %d.%d.%d 
implementation version %d\n", - (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >> - ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, - (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >> - ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT, - (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK), - (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >> - ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT); + dev_info(ena_dev->dmadev, + "ENA controller version: %d.%d.%d implementation version %d\n", + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >> + ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >> + ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT, + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK), + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >> + ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT); ctrl_ver_masked = (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) | @@ -1608,7 +1658,8 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev) /* Validate the ctrl version without the implementation ID */ if (ctrl_ver_masked < MIN_ENA_CTRL_VER) { - pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n"); + netdev_err(ena_dev->net_device, + "ENA ctrl version is lower than the minimal ctrl version the driver supports\n"); return -1; } @@ -1741,12 +1792,13 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev, dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) { - pr_err("Reg read timeout occurred\n"); + netdev_err(ena_dev->net_device, "Reg read timeout occurred\n"); return -ETIME; } if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) { - pr_err("Device isn't ready, abort com init\n"); + netdev_err(ena_dev->net_device, + "Device isn't ready, abort com init\n"); return -ENODEV; } @@ -1823,8 +1875,9 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev, int ret; if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) { - pr_err("Qid (%d) is bigger than max num of queues (%d)\n", - ctx->qid, ENA_TOTAL_NUM_QUEUES); + netdev_err(ena_dev->net_device, + "Qid (%d) is bigger than max num of queues (%d)\n", + ctx->qid, ENA_TOTAL_NUM_QUEUES); return -EINVAL; } @@ -1882,8 +1935,9 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid) struct ena_com_io_cq *io_cq; if (qid >= ENA_TOTAL_NUM_QUEUES) { - pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid, - ENA_TOTAL_NUM_QUEUES); + netdev_err(ena_dev->net_device, + "Qid (%d) is bigger than max num of queues (%d)\n", + qid, ENA_TOTAL_NUM_QUEUES); return; } @@ -2035,8 +2089,9 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data) timestamp = (u64)aenq_common->timestamp_low | ((u64)aenq_common->timestamp_high << 32); - pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n", - aenq_common->group, aenq_common->syndrome, timestamp); + netdev_dbg(ena_dev->net_device, + "AENQ! 
Group[%x] Syndrome[%x] timestamp: [%llus]\n", + aenq_common->group, aenq_common->syndrome, timestamp); /* Handle specific event*/ handler_cb = ena_com_get_specific_aenq_cb(ena_dev, @@ -2079,19 +2134,20 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev, if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || (cap == ENA_MMIO_READ_TIMEOUT))) { - pr_err("Reg read32 timeout occurred\n"); + netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n"); return -ETIME; } if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) { - pr_err("Device isn't ready, can't reset device\n"); + netdev_err(ena_dev->net_device, + "Device isn't ready, can't reset device\n"); return -EINVAL; } timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >> ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT; if (timeout == 0) { - pr_err("Invalid timeout value\n"); + netdev_err(ena_dev->net_device, "Invalid timeout value\n"); return -EINVAL; } @@ -2107,7 +2163,8 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev, rc = wait_for_reset_state(ena_dev, timeout, ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK); if (rc != 0) { - pr_err("Reset indication didn't turn on\n"); + netdev_err(ena_dev->net_device, + "Reset indication didn't turn on\n"); return rc; } @@ -2115,7 +2172,8 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev, writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); rc = wait_for_reset_state(ena_dev, timeout, 0); if (rc != 0) { - pr_err("Reset indication didn't turn off\n"); + netdev_err(ena_dev->net_device, + "Reset indication didn't turn off\n"); return rc; } @@ -2152,7 +2210,8 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev, sizeof(*get_resp)); if (unlikely(ret)) - pr_err("Failed to get stats. error: %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to get stats. error: %d\n", ret); return ret; } @@ -2187,7 +2246,7 @@ int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, return ret; } -int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu) +int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu) { struct ena_com_admin_queue *admin_queue; struct ena_admin_set_feat_cmd cmd; @@ -2195,7 +2254,8 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu) int ret; if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) { - pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU); + netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", + ENA_ADMIN_MTU); return -EOPNOTSUPP; } @@ -2214,7 +2274,8 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu) sizeof(resp)); if (unlikely(ret)) - pr_err("Failed to set mtu %d. error: %d\n", mtu, ret); + netdev_err(ena_dev->net_device, + "Failed to set mtu %d. 
error: %d\n", mtu, ret); return ret; } @@ -2228,7 +2289,8 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev, ret = ena_com_get_feature(ena_dev, &resp, ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0); if (unlikely(ret)) { - pr_err("Failed to get offload capabilities %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to get offload capabilities %d\n", ret); return ret; } @@ -2248,8 +2310,8 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev) if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) { - pr_debug("Feature %d isn't supported\n", - ENA_ADMIN_RSS_HASH_FUNCTION); + netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", + ENA_ADMIN_RSS_HASH_FUNCTION); return -EOPNOTSUPP; } @@ -2260,8 +2322,9 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev) return ret; if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) { - pr_err("Func hash %d isn't supported by device, abort\n", - rss->hash_func); + netdev_err(ena_dev->net_device, + "Func hash %d isn't supported by device, abort\n", + rss->hash_func); return -EOPNOTSUPP; } @@ -2278,7 +2341,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev) &cmd.control_buffer.address, rss->hash_key_dma_addr); if (unlikely(ret)) { - pr_err("Memory address set failed\n"); + netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } @@ -2290,8 +2353,9 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev) (struct ena_admin_acq_entry *)&resp, sizeof(resp)); if (unlikely(ret)) { - pr_err("Failed to set hash function %d. error: %d\n", - rss->hash_func, ret); + netdev_err(ena_dev->net_device, + "Failed to set hash function %d. error: %d\n", + rss->hash_func, ret); return -EINVAL; } @@ -2322,7 +2386,8 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, return rc; if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) { - pr_err("Flow hash function %d isn't supported\n", func); + netdev_err(ena_dev->net_device, + "Flow hash function %d isn't supported\n", func); return -EOPNOTSUPP; } @@ -2330,8 +2395,9 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, case ENA_ADMIN_TOEPLITZ: if (key) { if (key_len != sizeof(hash_key->key)) { - pr_err("key len (%hu) doesn't equal the supported size (%zu)\n", - key_len, sizeof(hash_key->key)); + netdev_err(ena_dev->net_device, + "key len (%hu) doesn't equal the supported size (%zu)\n", + key_len, sizeof(hash_key->key)); return -EINVAL; } memcpy(hash_key->key, key, key_len); @@ -2343,7 +2409,8 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, rss->hash_init_val = init_val; break; default: - pr_err("Invalid hash function (%d)\n", func); + netdev_err(ena_dev->net_device, "Invalid hash function (%d)\n", + func); return -EINVAL; } @@ -2429,8 +2496,8 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) { - pr_debug("Feature %d isn't supported\n", - ENA_ADMIN_RSS_HASH_INPUT); + netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", + ENA_ADMIN_RSS_HASH_INPUT); return -EOPNOTSUPP; } @@ -2448,7 +2515,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) &cmd.control_buffer.address, rss->hash_ctrl_dma_addr); if (unlikely(ret)) { - pr_err("Memory address set failed\n"); + netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } cmd.control_buffer.length = sizeof(*hash_ctrl); @@ -2459,7 +2526,8 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) (struct 
ena_admin_acq_entry *)&resp, sizeof(resp)); if (unlikely(ret)) - pr_err("Failed to set hash input. error: %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to set hash input. error: %d\n", ret); return ret; } @@ -2509,9 +2577,10 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev) available_fields = hash_ctrl->selected_fields[i].fields & hash_ctrl->supported_fields[i].fields; if (available_fields != hash_ctrl->selected_fields[i].fields) { - pr_err("Hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n", - i, hash_ctrl->supported_fields[i].fields, - hash_ctrl->selected_fields[i].fields); + netdev_err(ena_dev->net_device, + "Hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n", + i, hash_ctrl->supported_fields[i].fields, + hash_ctrl->selected_fields[i].fields); return -EOPNOTSUPP; } } @@ -2535,7 +2604,8 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, int rc; if (proto >= ENA_ADMIN_RSS_PROTO_NUM) { - pr_err("Invalid proto num (%u)\n", proto); + netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n", + proto); return -EINVAL; } @@ -2547,8 +2617,9 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, /* Make sure all the fields are supported */ supported_fields = hash_ctrl->supported_fields[proto].fields; if ((hash_fields & supported_fields) != hash_fields) { - pr_err("Proto %d doesn't support the required fields %x. supports only: %x\n", - proto, hash_fields, supported_fields); + netdev_err(ena_dev->net_device, + "Proto %d doesn't support the required fields %x. supports only: %x\n", + proto, hash_fields, supported_fields); } hash_ctrl->selected_fields[proto].fields = hash_fields; @@ -2588,14 +2659,15 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) if (!ena_com_check_supported_feature_id( ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) { - pr_debug("Feature %d isn't supported\n", - ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG); + netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", + ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG); return -EOPNOTSUPP; } ret = ena_com_ind_tbl_convert_to_device(ena_dev); if (ret) { - pr_err("Failed to convert host indirection table to device table\n"); + netdev_err(ena_dev->net_device, + "Failed to convert host indirection table to device table\n"); return ret; } @@ -2612,7 +2684,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) &cmd.control_buffer.address, rss->rss_ind_tbl_dma_addr); if (unlikely(ret)) { - pr_err("Memory address set failed\n"); + netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } @@ -2626,7 +2698,8 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) sizeof(resp)); if (unlikely(ret)) - pr_err("Failed to set indirect table. error: %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to set indirect table. 
error: %d\n", ret); return ret; } @@ -2782,7 +2855,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) &cmd.u.host_attr.debug_ba, host_attr->debug_area_dma_addr); if (unlikely(ret)) { - pr_err("Memory address set failed\n"); + netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } @@ -2790,7 +2863,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) &cmd.u.host_attr.os_info_ba, host_attr->host_info_dma_addr); if (unlikely(ret)) { - pr_err("Memory address set failed\n"); + netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } @@ -2803,7 +2876,8 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) sizeof(resp)); if (unlikely(ret)) - pr_err("Failed to set host attributes: %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to set host attributes: %d\n", ret); return ret; } @@ -2815,12 +2889,14 @@ bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev) ENA_ADMIN_INTERRUPT_MODERATION); } -static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs, +static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev, + u32 coalesce_usecs, u32 intr_delay_resolution, u32 *intr_moder_interval) { if (!intr_delay_resolution) { - pr_err("Illegal interrupt delay granularity value\n"); + netdev_err(ena_dev->net_device, + "Illegal interrupt delay granularity value\n"); return -EFAULT; } @@ -2832,7 +2908,8 @@ static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs, int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, u32 tx_coalesce_usecs) { - return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs, + return ena_com_update_nonadaptive_moderation_interval(ena_dev, + tx_coalesce_usecs, ena_dev->intr_delay_resolution, &ena_dev->intr_moder_tx_interval); } @@ -2840,7 +2917,8 @@ int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_de int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, u32 rx_coalesce_usecs) { - return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs, + return ena_com_update_nonadaptive_moderation_interval(ena_dev, + rx_coalesce_usecs, ena_dev->intr_delay_resolution, &ena_dev->intr_moder_rx_interval); } @@ -2856,12 +2934,14 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) if (rc) { if (rc == -EOPNOTSUPP) { - pr_debug("Feature %d isn't supported\n", - ENA_ADMIN_INTERRUPT_MODERATION); + netdev_dbg(ena_dev->net_device, + "Feature %d isn't supported\n", + ENA_ADMIN_INTERRUPT_MODERATION); rc = 0; } else { - pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n", - rc); + netdev_err(ena_dev->net_device, + "Failed to get interrupt moderation admin cmd. 
rc: %d\n", + rc); } /* no moderation supported, disable adaptive support */ @@ -2909,7 +2989,8 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc)); if (unlikely(ena_dev->tx_max_header_size == 0)) { - pr_err("The size of the LLQ entry is smaller than needed\n"); + netdev_err(ena_dev->net_device, + "The size of the LLQ entry is smaller than needed\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index 55097750d062..343caf41e709 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -303,6 +303,7 @@ struct ena_com_dev { u8 __iomem *reg_bar; void __iomem *mem_bar; void *dmadev; + struct net_device *net_device; enum ena_admin_placement_policy_type tx_mem_queue_type; u32 tx_max_header_size; @@ -604,7 +605,7 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev, * * @return: 0 on Success and negative value otherwise. */ -int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu); +int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu); /* ena_com_get_offload_settings - Retrieve the device offloads capabilities * @ena_dev: ENA communication layer struct @@ -931,6 +932,26 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, struct ena_admin_feature_llq_desc *llq_features, struct ena_llq_configurations *llq_default_config); +/* ena_com_io_sq_to_ena_dev - Extract ena_com_dev using contained field io_sq. + * @io_sq: IO submit queue struct + * + * @return - ena_com_dev struct extracted from io_sq + */ +static inline struct ena_com_dev *ena_com_io_sq_to_ena_dev(struct ena_com_io_sq *io_sq) +{ + return container_of(io_sq, struct ena_com_dev, io_sq_queues[io_sq->qid]); +} + +/* ena_com_io_cq_to_ena_dev - Extract ena_com_dev using contained field io_cq. 
+ * @io_cq: IO completion queue struct + * + * @return - ena_com_dev struct extracted from io_cq + */ +static inline struct ena_com_dev *ena_com_io_cq_to_ena_dev(struct ena_com_io_cq *io_cq) +{ + return container_of(io_cq, struct ena_com_dev, io_cq_queues[io_cq->qid]); +} + static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev) { return ena_dev->adaptive_coalescing; diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c index 032ab9f20438..c3be751e7379 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c @@ -58,13 +58,15 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq, if (is_llq_max_tx_burst_exists(io_sq)) { if (unlikely(!io_sq->entries_in_tx_burst_left)) { - pr_err("Error: trying to send more packets than tx burst allows\n"); + netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Error: trying to send more packets than tx burst allows\n"); return -ENOSPC; } io_sq->entries_in_tx_burst_left--; - pr_debug("Decreasing entries_in_tx_burst_left of queue %d to %d\n", - io_sq->qid, io_sq->entries_in_tx_burst_left); + netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Decreasing entries_in_tx_burst_left of queue %d to %d\n", + io_sq->qid, io_sq->entries_in_tx_burst_left); } /* Make sure everything was written into the bounce buffer before @@ -102,12 +104,14 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq, if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) { - pr_err("Trying to write header larger than llq entry can accommodate\n"); + netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Trying to write header larger than llq entry can accommodate\n"); return -EFAULT; } if (unlikely(!bounce_buffer)) { - pr_err("Bounce buffer is NULL\n"); + netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Bounce buffer is NULL\n"); return -EFAULT; } @@ -125,7 +129,8 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq) bounce_buffer = pkt_ctrl->curr_bounce_buf; if (unlikely(!bounce_buffer)) { - pr_err("Bounce buffer is NULL\n"); + netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Bounce buffer is NULL\n"); return NULL; } @@ -250,8 +255,9 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, io_cq->cur_rx_pkt_cdesc_count = 0; io_cq->cur_rx_pkt_cdesc_start_idx = head_masked; - pr_debug("ENA q_id: %d packets were completed. first desc idx %u descs# %d\n", - io_cq->qid, *first_cdesc_idx, count); + netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device, + "ENA q_id: %d packets were completed.
first desc idx %u descs# %d\n", + io_cq->qid, *first_cdesc_idx, count); } else { io_cq->cur_rx_pkt_cdesc_count += count; count = 0; @@ -335,7 +341,8 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, return 0; } -static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, +static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq, + struct ena_com_rx_ctx *ena_rx_ctx, struct ena_eth_io_rx_cdesc_base *cdesc) { ena_rx_ctx->l3_proto = cdesc->status & @@ -357,10 +364,11 @@ static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT; - pr_debug("l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n", - ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, - ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err, - ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status); + netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device, + "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n", + ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, + ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err, + ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status); } /*****************************************************************************/ @@ -385,13 +393,15 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, /* num_bufs +1 for potential meta desc */ if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) { - pr_debug("Not enough space in the tx queue\n"); + netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Not enough space in the tx queue\n"); return -ENOMEM; } if (unlikely(header_len > io_sq->tx_max_header_size)) { - pr_err("Header size is too large %d max header: %d\n", - header_len, io_sq->tx_max_header_size); + netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Header size is too large %d max header: %d\n", + header_len, io_sq->tx_max_header_size); return -EINVAL; } @@ -405,7 +415,8 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta); if (unlikely(rc)) { - pr_err("Failed to create and store tx meta desc\n"); + netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Failed to create and store tx meta desc\n"); return rc; } @@ -529,12 +540,14 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, return 0; } - pr_debug("Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, - nb_hw_desc); + netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device, + "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, + nb_hw_desc); if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) { - pr_err("Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, - ena_rx_ctx->max_bufs); + netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device, + "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, + ena_rx_ctx->max_bufs); return -ENOSPC; } @@ -557,13 +570,15 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, /* Update SQ head ptr */ io_sq->next_to_comp += nb_hw_desc; - pr_debug("[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid, - io_sq->next_to_comp); + netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device, + "[%s][QID#%d] Updating SQ head to: %d\n", __func__, + io_sq->qid, io_sq->next_to_comp); /* Get rx flags from the last pkt */ - ena_com_rx_set_flags(ena_rx_ctx, cdesc); + ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc); ena_rx_ctx->descs = nb_hw_desc; + return 0; } @@ -588,11 +603,15 @@ int ena_com_add_single_rx_desc(struct 
ena_com_io_sq *io_sq, desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK | ENA_ETH_IO_RX_DESC_LAST_MASK | - (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) | - ENA_ETH_IO_RX_DESC_COMP_REQ_MASK; + ENA_ETH_IO_RX_DESC_COMP_REQ_MASK | + (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK); desc->req_id = req_id; + netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "[%s] Adding single RX desc, Queue: %u, req_id: %u\n", + __func__, io_sq->qid, req_id); + desc->buff_addr_lo = (u32)ena_buf->paddr; desc->buff_addr_hi = ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h index 2c16c218818a..689313ee25a8 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h @@ -140,8 +140,9 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq, llq_info->descs_per_entry); } - pr_debug("Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid, - num_descs, num_entries_needed); + netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Queue: %d num_descs: %d num_entries_needed: %d\n", + io_sq->qid, num_descs, num_entries_needed); return num_entries_needed > io_sq->entries_in_tx_burst_left; } @@ -151,14 +152,16 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq) u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst; u16 tail = io_sq->tail; - pr_debug("Write submission queue doorbell for queue: %d tail: %d\n", - io_sq->qid, tail); + netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Write submission queue doorbell for queue: %d tail: %d\n", + io_sq->qid, tail); writel(tail, io_sq->db_addr); if (is_llq_max_tx_burst_exists(io_sq)) { - pr_debug("Reset available entries in tx burst for queue %d to %d\n", - io_sq->qid, max_entries_in_tx_burst); + netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Reset available entries in tx burst for queue %d to %d\n", + io_sq->qid, max_entries_in_tx_burst); io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst; } @@ -176,8 +179,9 @@ static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq) need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH); if (unlikely(need_update)) { - pr_debug("Write completion queue doorbell for queue %d: head: %d\n", - io_cq->qid, head); + netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device, + "Write completion queue doorbell for queue %d: head: %d\n", + io_cq->qid, head); writel(head, io_cq->cq_head_db_reg); io_cq->last_head_update = head; } @@ -240,7 +244,8 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, *req_id = READ_ONCE(cdesc->req_id); if (unlikely(*req_id >= io_cq->q_depth)) { - pr_err("Invalid req id %d\n", cdesc->req_id); + netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device, + "Invalid req id %d\n", cdesc->req_id); return -EINVAL; } diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 6cdd9efe8df3..d6cc7aa612b7 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -95,6 +95,7 @@ static const struct ena_stats ena_stats_rx_strings[] = { ENA_STAT_RX_ENTRY(xdp_pass), ENA_STAT_RX_ENTRY(xdp_tx), ENA_STAT_RX_ENTRY(xdp_invalid), + ENA_STAT_RX_ENTRY(xdp_redirect), }; static const struct ena_stats ena_stats_ena_com_strings[] = { @@ -839,7 +840,7 @@ static int ena_set_channels(struct net_device *netdev, /* The check 
for max value is already done in ethtool */ if (count < ENA_MIN_NUM_IO_QUEUES || (ena_xdp_present(adapter) && - !ena_xdp_legal_queue_count(adapter, channels->combined_count))) + !ena_xdp_legal_queue_count(adapter, count))) return -EINVAL; return ena_update_queue_count(adapter, count); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 0e98f45c2b22..06596fa1f9fe 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -29,6 +29,8 @@ MODULE_LICENSE("GPL"); /* Time in jiffies before concluding the transmitter is hung. */ #define TX_TIMEOUT (5 * HZ) +#define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus()) + #define ENA_NAPI_BUDGET 64 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \ @@ -78,6 +80,15 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring, static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter, int first_index, int count); +/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */ +static void ena_increase_stat(u64 *statp, u64 cnt, + struct u64_stats_sync *syncp) +{ + u64_stats_update_begin(syncp); + (*statp) += cnt; + u64_stats_update_end(syncp); +} + static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct ena_adapter *adapter = netdev_priv(dev); @@ -90,9 +101,7 @@ static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue) return; adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD; - u64_stats_update_begin(&adapter->syncp); - adapter->dev_stats.tx_timeout++; - u64_stats_update_end(&adapter->syncp); + ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp); netif_err(adapter, tx_err, dev, "Transmit time out\n"); } @@ -152,9 +161,8 @@ static int ena_xmit_common(struct net_device *dev, if (unlikely(rc)) { netif_err(adapter, tx_queued, dev, "Failed to prepare tx bufs\n"); - u64_stats_update_begin(&ring->syncp); - ring->tx_stats.prepare_ctx_err++; - u64_stats_update_end(&ring->syncp); + ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, + &ring->syncp); if (rc != -ENOMEM) { adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE; @@ -225,18 +233,18 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget) return ret; } -static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring, - struct ena_tx_buffer *tx_info, - struct xdp_buff *xdp, - void **push_hdr, - u32 *push_len) +static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring, + struct ena_tx_buffer *tx_info, + struct xdp_frame *xdpf, + void **push_hdr, + u32 *push_len) { struct ena_adapter *adapter = xdp_ring->adapter; struct ena_com_buf *ena_buf; dma_addr_t dma = 0; u32 size; - tx_info->xdpf = xdp_convert_buff_to_frame(xdp); + tx_info->xdpf = xdpf; size = tx_info->xdpf->len; ena_buf = tx_info->bufs; @@ -262,9 +270,8 @@ static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring, return 0; error_report_dma_error: - u64_stats_update_begin(&xdp_ring->syncp); - xdp_ring->tx_stats.dma_mapping_err++; - u64_stats_update_end(&xdp_ring->syncp); + ena_increase_stat(&xdp_ring->tx_stats.dma_mapping_err, 1, + &xdp_ring->syncp); netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n"); xdp_return_frame_rx_napi(tx_info->xdpf); @@ -274,29 +281,24 @@ error_report_dma_error: return -EINVAL; } -static int ena_xdp_xmit_buff(struct net_device *dev, - struct xdp_buff *xdp, - int qid, - struct ena_rx_buffer *rx_info) +static int ena_xdp_xmit_frame(struct 
ena_ring *xdp_ring, + struct net_device *dev, + struct xdp_frame *xdpf, + int flags) { - struct ena_adapter *adapter = netdev_priv(dev); struct ena_com_tx_ctx ena_tx_ctx = {}; struct ena_tx_buffer *tx_info; - struct ena_ring *xdp_ring; u16 next_to_use, req_id; - int rc; void *push_hdr; u32 push_len; + int rc; - xdp_ring = &adapter->tx_ring[qid]; next_to_use = xdp_ring->next_to_use; req_id = xdp_ring->free_ids[next_to_use]; tx_info = &xdp_ring->tx_buffer_info[req_id]; tx_info->num_of_bufs = 0; - page_ref_inc(rx_info->page); - tx_info->xdp_rx_page = rx_info->page; - rc = ena_xdp_tx_map_buff(xdp_ring, tx_info, xdp, &push_hdr, &push_len); + rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len); if (unlikely(rc)) goto error_drop_packet; @@ -311,34 +313,82 @@ static int ena_xdp_xmit_buff(struct net_device *dev, tx_info, &ena_tx_ctx, next_to_use, - xdp->data_end - xdp->data); + xdpf->len); if (rc) goto error_unmap_dma; /* trigger the dma engine. ena_com_write_sq_doorbell() * has a mb */ - ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq); - u64_stats_update_begin(&xdp_ring->syncp); - xdp_ring->tx_stats.doorbells++; - u64_stats_update_end(&xdp_ring->syncp); + if (flags & XDP_XMIT_FLUSH) { + ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq); + ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1, + &xdp_ring->syncp); + } - return NETDEV_TX_OK; + return rc; error_unmap_dma: ena_unmap_tx_buff(xdp_ring, tx_info); tx_info->xdpf = NULL; error_drop_packet: - __free_page(tx_info->xdp_rx_page); - return NETDEV_TX_OK; + xdp_return_frame(xdpf); + return rc; } -static int ena_xdp_execute(struct ena_ring *rx_ring, - struct xdp_buff *xdp, - struct ena_rx_buffer *rx_info) +static int ena_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **frames, u32 flags) +{ + struct ena_adapter *adapter = netdev_priv(dev); + int qid, i, err, drops = 0; + struct ena_ring *xdp_ring; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) + return -ENETDOWN; + + /* We assume that all rings have the same XDP program */ + if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog)) + return -ENXIO; + + qid = smp_processor_id() % adapter->xdp_num_queues; + qid += adapter->xdp_first_ring; + xdp_ring = &adapter->tx_ring[qid]; + + /* Other CPU ids might try to send through this queue */ + spin_lock(&xdp_ring->xdp_tx_lock); + + for (i = 0; i < n; i++) { + err = ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0); + /* The descriptor is freed by ena_xdp_xmit_frame in case + * of an error.
+ */ + if (err) + drops++; + } + + /* Ring doorbell to make device aware of the packets */ + if (flags & XDP_XMIT_FLUSH) { + ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq); + ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1, + &xdp_ring->syncp); + } + + spin_unlock(&xdp_ring->xdp_tx_lock); + + /* Return number of packets sent */ + return n - drops; +} + +static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp) { struct bpf_prog *xdp_prog; + struct ena_ring *xdp_ring; u32 verdict = XDP_PASS; + struct xdp_frame *xdpf; u64 *xdp_stat; + int qid; rcu_read_lock(); xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog); @@ -348,28 +398,49 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, verdict = bpf_prog_run_xdp(xdp_prog, xdp); - if (verdict == XDP_TX) { - ena_xdp_xmit_buff(rx_ring->netdev, - xdp, - rx_ring->qid + rx_ring->adapter->num_io_queues, - rx_info); + switch (verdict) { + case XDP_TX: + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) { + trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); + xdp_stat = &rx_ring->rx_stats.xdp_aborted; + break; + } + + /* Find xmit queue */ + qid = rx_ring->qid + rx_ring->adapter->num_io_queues; + xdp_ring = &rx_ring->adapter->tx_ring[qid]; + + /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */ + spin_lock(&xdp_ring->xdp_tx_lock); + + ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf, XDP_XMIT_FLUSH); + spin_unlock(&xdp_ring->xdp_tx_lock); xdp_stat = &rx_ring->rx_stats.xdp_tx; - } else if (unlikely(verdict == XDP_ABORTED)) { + break; + case XDP_REDIRECT: + if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) { + xdp_stat = &rx_ring->rx_stats.xdp_redirect; + break; + } + fallthrough; + case XDP_ABORTED: trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); xdp_stat = &rx_ring->rx_stats.xdp_aborted; - } else if (unlikely(verdict == XDP_DROP)) { + break; + case XDP_DROP: xdp_stat = &rx_ring->rx_stats.xdp_drop; - } else if (unlikely(verdict == XDP_PASS)) { + break; + case XDP_PASS: xdp_stat = &rx_ring->rx_stats.xdp_pass; - } else { + break; + default: bpf_warn_invalid_xdp_action(verdict); xdp_stat = &rx_ring->rx_stats.xdp_invalid; } - u64_stats_update_begin(&rx_ring->syncp); - (*xdp_stat)++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(xdp_stat, 1, &rx_ring->syncp); out: rcu_read_unlock(); @@ -638,6 +709,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter, txr->smoothed_interval = ena_com_get_nonadaptive_moderation_interval_tx(ena_dev); txr->disable_meta_caching = adapter->disable_meta_caching; + spin_lock_init(&txr->xdp_tx_lock); /* Don't init RX queues for xdp queues */ if (!ENA_IS_XDP_INDEX(adapter, i)) { @@ -922,9 +994,8 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring, page = alloc_page(gfp); if (unlikely(!page)) { - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.page_alloc_fail++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, + &rx_ring->syncp); return -ENOMEM; } @@ -934,9 +1005,8 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring, dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE, DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(rx_ring->dev, dma))) { - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.dma_mapping_err++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1, + &rx_ring->syncp); __free_page(page); return -EIO; @@ -952,11 +1022,20 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring, return 0; } 
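The ena_xdp_xmit() implementation above is the standard batched .ndo_xdp_xmit shape: pick a TX ring from the current CPU id, take that ring's xdp_tx_lock (the same lock the XDP_TX verdict path takes), queue every frame, and ring the hardware doorbell once at the end only when the caller passed XDP_XMIT_FLUSH. A minimal sketch of that shape follows; the foo_* types and helpers are hypothetical stand-ins for the ENA-specific structures, not symbols from this driver.

	#include <linux/netdevice.h>
	#include <linux/smp.h>
	#include <linux/spinlock.h>
	#include <net/xdp.h>

	struct foo_ring {
		spinlock_t xdp_tx_lock;	/* shared by XDP_TX and XDP_REDIRECT */
	};

	struct foo_priv {
		struct foo_ring *xdp_rings;
		int xdp_num_queues;
	};

	/* Assumed helpers: enqueue one frame (0 on success, frame freed on
	 * error) and write the ring's doorbell register.
	 */
	int foo_xmit_frame(struct foo_ring *ring, struct xdp_frame *xdpf);
	void foo_ring_doorbell(struct foo_ring *ring);

	static int foo_xdp_xmit(struct net_device *dev, int n,
				struct xdp_frame **frames, u32 flags)
	{
		struct foo_priv *priv = netdev_priv(dev);
		struct foo_ring *ring;
		int i, nxmit = 0;

		if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
			return -EINVAL;

		/* Spread senders across the XDP rings; several CPUs can
		 * land on the same ring, hence the lock.
		 */
		ring = &priv->xdp_rings[smp_processor_id() % priv->xdp_num_queues];

		spin_lock(&ring->xdp_tx_lock);
		for (i = 0; i < n; i++)
			if (!foo_xmit_frame(ring, frames[i]))
				nxmit++;

		/* One doorbell for the whole batch, not one per frame */
		if (flags & XDP_XMIT_FLUSH)
			foo_ring_doorbell(ring);
		spin_unlock(&ring->xdp_tx_lock);

		return nxmit;
	}

Returning the number of frames actually accepted (the n - drops in the hunk above) lets the XDP core account for the remainder; ringing the doorbell per frame instead of per batch would defeat the point of the bulk interface.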
+static void ena_unmap_rx_buff(struct ena_ring *rx_ring, + struct ena_rx_buffer *rx_info) +{ + struct ena_com_buf *ena_buf = &rx_info->ena_buf; + + dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom, + ENA_PAGE_SIZE, + DMA_BIDIRECTIONAL); +} + static void ena_free_rx_page(struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info) { struct page *page = rx_info->page; - struct ena_com_buf *ena_buf = &rx_info->ena_buf; if (unlikely(!page)) { netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, @@ -964,9 +1043,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring, return; } - dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom, - ENA_PAGE_SIZE, - DMA_BIDIRECTIONAL); + ena_unmap_rx_buff(rx_ring, rx_info); __free_page(page); rx_info->page = NULL; @@ -1009,9 +1086,8 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) } if (unlikely(i < num)) { - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.refil_partial++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1, + &rx_ring->syncp); netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, "Refilled rx qid %d with only %d buffers (from %d)\n", rx_ring->qid, i, num); @@ -1187,9 +1263,7 @@ static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id, "Invalid req_id: %hu\n", req_id); - u64_stats_update_begin(&ring->syncp); - ring->tx_stats.bad_req_id++; - u64_stats_update_end(&ring->syncp); + ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp); /* Trigger device reset */ ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID; @@ -1300,9 +1374,8 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) if (netif_tx_queue_stopped(txq) && above_thresh && test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) { netif_tx_wake_queue(txq); - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.queue_wakeup++; - u64_stats_update_end(&tx_ring->syncp); + ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1, + &tx_ring->syncp); } __netif_tx_unlock(txq); } @@ -1321,9 +1394,8 @@ static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags) rx_ring->rx_copybreak); if (unlikely(!skb)) { - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.skb_alloc_fail++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1, + &rx_ring->syncp); netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, "Failed to allocate skb. 
frags: %d\n", frags); return NULL; @@ -1395,9 +1467,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, return NULL; do { - dma_unmap_page(rx_ring->dev, - dma_unmap_addr(&rx_info->ena_buf, paddr), - ENA_PAGE_SIZE, DMA_BIDIRECTIONAL); + ena_unmap_rx_buff(rx_ring, rx_info); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, rx_info->page_offset, len, ENA_PAGE_SIZE); @@ -1451,9 +1521,8 @@ static void ena_rx_checksum(struct ena_ring *rx_ring, (ena_rx_ctx->l3_csum_err))) { /* ipv4 checksum error */ skb->ip_summed = CHECKSUM_NONE; - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.bad_csum++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1, + &rx_ring->syncp); netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, "RX IPv4 header checksum error\n"); return; @@ -1464,9 +1533,8 @@ static void ena_rx_checksum(struct ena_ring *rx_ring, (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) { if (unlikely(ena_rx_ctx->l4_csum_err)) { /* TCP/UDP checksum error */ - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.bad_csum++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1, + &rx_ring->syncp); netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, "RX L4 checksum error\n"); skb->ip_summed = CHECKSUM_NONE; @@ -1475,13 +1543,11 @@ static void ena_rx_checksum(struct ena_ring *rx_ring, if (likely(ena_rx_ctx->l4_csum_checked)) { skb->ip_summed = CHECKSUM_UNNECESSARY; - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.csum_good++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.csum_good, 1, + &rx_ring->syncp); } else { - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.csum_unchecked++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1, + &rx_ring->syncp); skb->ip_summed = CHECKSUM_NONE; } } else { @@ -1529,7 +1595,7 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp) if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU)) return XDP_DROP; - ret = ena_xdp_execute(rx_ring, xdp, rx_info); + ret = ena_xdp_execute(rx_ring, xdp); /* The xdp program might expand the headers */ if (ret == XDP_PASS) { @@ -1559,6 +1625,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, struct sk_buff *skb; int refill_required; struct xdp_buff xdp; + int xdp_flags = 0; int total_len = 0; int xdp_verdict; int rc = 0; @@ -1606,22 +1673,25 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, &next_to_clean); if (unlikely(!skb)) { - /* The page might not actually be freed here since the - * page reference count is incremented in - * ena_xdp_xmit_buff(), and it will be decreased only - * when send completion was received from the device - */ - if (xdp_verdict == XDP_TX) - ena_free_rx_page(rx_ring, - &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]); for (i = 0; i < ena_rx_ctx.descs; i++) { - rx_ring->free_ids[next_to_clean] = - rx_ring->ena_bufs[i].req_id; + int req_id = rx_ring->ena_bufs[i].req_id; + + rx_ring->free_ids[next_to_clean] = req_id; next_to_clean = ENA_RX_RING_IDX_NEXT(next_to_clean, rx_ring->ring_size); + + /* Packets was passed for transmission, unmap it + * from RX side. 
+ */ + if (xdp_verdict == XDP_TX || xdp_verdict == XDP_REDIRECT) { + ena_unmap_rx_buff(rx_ring, + &rx_ring->rx_buffer_info[req_id]); + rx_ring->rx_buffer_info[req_id].page = NULL; + } } if (xdp_verdict != XDP_PASS) { + xdp_flags |= xdp_verdict; res_budget--; continue; } @@ -1667,20 +1737,21 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, ena_refill_rx_bufs(rx_ring, refill_required); } + if (xdp_flags & XDP_REDIRECT) + xdp_do_flush_map(); + return work_done; error: adapter = netdev_priv(rx_ring->netdev); if (rc == -ENOSPC) { - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.bad_desc_num++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, + &rx_ring->syncp); adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS; } else { - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.bad_req_id++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, + &rx_ring->syncp); adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; } @@ -1741,9 +1812,8 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring, tx_ring->smoothed_interval, true); - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.unmask_interrupt++; - u64_stats_update_end(&tx_ring->syncp); + ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1, + &tx_ring->syncp); /* It is a shared MSI-X. * Tx and Rx CQ have pointer to it. @@ -1823,7 +1893,7 @@ static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget) tx_pkts++; total_done += tx_info->tx_descs; - __free_page(tx_info->xdp_rx_page); + xdp_return_frame(xdpf); xdp_ring->free_ids[next_to_clean] = req_id; next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean, xdp_ring->ring_size); @@ -2550,9 +2620,8 @@ static int ena_up(struct ena_adapter *adapter) if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) netif_carrier_on(adapter->netdev); - u64_stats_update_begin(&adapter->syncp); - adapter->dev_stats.interface_up++; - u64_stats_update_end(&adapter->syncp); + ena_increase_stat(&adapter->dev_stats.interface_up, 1, + &adapter->syncp); set_bit(ENA_FLAG_DEV_UP, &adapter->flags); @@ -2590,9 +2659,8 @@ static void ena_down(struct ena_adapter *adapter) clear_bit(ENA_FLAG_DEV_UP, &adapter->flags); - u64_stats_update_begin(&adapter->syncp); - adapter->dev_stats.interface_down++; - u64_stats_update_end(&adapter->syncp); + ena_increase_stat(&adapter->dev_stats.interface_down, 1, + &adapter->syncp); netif_carrier_off(adapter->netdev); netif_tx_disable(adapter->netdev); @@ -2820,15 +2888,12 @@ static int ena_check_and_linearize_skb(struct ena_ring *tx_ring, (header_len < tx_ring->tx_max_header_size)) return 0; - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.linearize++; - u64_stats_update_end(&tx_ring->syncp); + ena_increase_stat(&tx_ring->tx_stats.linearize, 1, &tx_ring->syncp); rc = skb_linearize(skb); if (unlikely(rc)) { - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.linearize_failed++; - u64_stats_update_end(&tx_ring->syncp); + ena_increase_stat(&tx_ring->tx_stats.linearize_failed, 1, + &tx_ring->syncp); } return rc; @@ -2868,9 +2933,8 @@ static int ena_tx_map_skb(struct ena_ring *tx_ring, tx_ring->push_buf_intermediate_buf); *header_len = push_len; if (unlikely(skb->data != *push_hdr)) { - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.llq_buffer_copy++; - u64_stats_update_end(&tx_ring->syncp); + ena_increase_stat(&tx_ring->tx_stats.llq_buffer_copy, 1, + &tx_ring->syncp); delta = push_len - 
skb_head_len; } @@ -2927,9 +2991,8 @@ static int ena_tx_map_skb(struct ena_ring *tx_ring, return 0; error_report_dma_error: - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.dma_mapping_err++; - u64_stats_update_end(&tx_ring->syncp); + ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1, + &tx_ring->syncp); netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n"); tx_info->skb = NULL; @@ -3006,9 +3069,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) __func__, qid); netif_tx_stop_queue(txq); - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.queue_stop++; - u64_stats_update_end(&tx_ring->syncp); + ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1, + &tx_ring->syncp); /* There is a rare condition where this function decide to * stop the queue but meanwhile clean_tx_irq updates @@ -3023,9 +3085,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, ENA_TX_WAKEUP_THRESH)) { netif_tx_wake_queue(txq); - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.queue_wakeup++; - u64_stats_update_end(&tx_ring->syncp); + ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1, + &tx_ring->syncp); } } @@ -3034,9 +3095,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) * has a mb */ ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.doorbells++; - u64_stats_update_end(&tx_ring->syncp); + ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, + &tx_ring->syncp); } return NETDEV_TX_OK; @@ -3242,6 +3302,7 @@ static const struct net_device_ops ena_netdev_ops = { .ndo_set_mac_address = NULL, .ndo_validate_addr = eth_validate_addr, .ndo_bpf = ena_xdp, + .ndo_xdp_xmit = ena_xdp_xmit, }; static int ena_device_validate_params(struct ena_adapter *adapter, @@ -3671,9 +3732,8 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, rc = -EIO; } - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.missed_tx += missed_tx; - u64_stats_update_end(&tx_ring->syncp); + ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx, + &tx_ring->syncp); return rc; } @@ -3756,9 +3816,8 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter) rx_ring->empty_rx_queue++; if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.empty_rx_ring++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1, + &rx_ring->syncp); netif_err(adapter, drv, adapter->netdev, "Trigger refill for ring %d\n", i); @@ -3788,9 +3847,8 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter) if (unlikely(time_is_before_jiffies(keep_alive_expired))) { netif_err(adapter, drv, adapter->netdev, "Keep alive watchdog timeout.\n"); - u64_stats_update_begin(&adapter->syncp); - adapter->dev_stats.wd_expired++; - u64_stats_update_end(&adapter->syncp); + ena_increase_stat(&adapter->dev_stats.wd_expired, 1, + &adapter->syncp); adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); } @@ -3801,9 +3859,8 @@ static void check_for_admin_com_state(struct ena_adapter *adapter) if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) { netif_err(adapter, drv, adapter->netdev, "ENA admin queue is not in running state!\n"); - u64_stats_update_begin(&adapter->syncp); - adapter->dev_stats.admin_q_pause++; - 
u64_stats_update_end(&adapter->syncp); + ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1, + &adapter->syncp); adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); } @@ -4176,18 +4233,36 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ena_dev->dmadev = &pdev->dev; + netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), ENA_MAX_RINGS); + if (!netdev) { + dev_err(&pdev->dev, "alloc_etherdev_mq failed\n"); + rc = -ENOMEM; + goto err_free_region; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + adapter = netdev_priv(netdev); + adapter->ena_dev = ena_dev; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + ena_dev->net_device = netdev; + + pci_set_drvdata(pdev, adapter); + rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state); if (rc) { dev_err(&pdev->dev, "ENA device init failed\n"); if (rc == -ETIME) rc = -EPROBE_DEFER; - goto err_free_region; + goto err_netdev_destroy; } rc = ena_map_llq_mem_bar(pdev, ena_dev, bars); if (rc) { dev_err(&pdev->dev, "ENA llq bar mapping failed\n"); - goto err_free_ena_dev; + goto err_device_destroy; } calc_queue_ctx.ena_dev = ena_dev; @@ -4207,26 +4282,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_device_destroy; } - /* dev zeroed in init_etherdev */ - netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), max_num_io_queues); - if (!netdev) { - dev_err(&pdev->dev, "alloc_etherdev_mq failed\n"); - rc = -ENOMEM; - goto err_device_destroy; - } - - SET_NETDEV_DEV(netdev, &pdev->dev); - - adapter = netdev_priv(netdev); - pci_set_drvdata(pdev, adapter); - - adapter->ena_dev = ena_dev; - adapter->netdev = netdev; - adapter->pdev = pdev; - ena_set_conf_feat_params(adapter, &get_feat_ctx); - adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); adapter->reset_reason = ENA_REGS_RESET_NORMAL; adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size; @@ -4257,7 +4314,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) { dev_err(&pdev->dev, "Failed to query interrupt moderation feature\n"); - goto err_netdev_destroy; + goto err_device_destroy; } ena_init_io_rings(adapter, 0, @@ -4335,11 +4392,11 @@ err_free_msix: ena_disable_msix(adapter); err_worker_destroy: del_timer(&adapter->timer_service); -err_netdev_destroy: - free_netdev(netdev); err_device_destroy: ena_com_delete_host_info(ena_dev); ena_com_admin_destroy(ena_dev); +err_netdev_destroy: + free_netdev(netdev); err_free_region: ena_release_bars(ena_dev, pdev); err_free_ena_dev: @@ -4439,9 +4496,7 @@ static int __maybe_unused ena_suspend(struct device *dev_d) struct pci_dev *pdev = to_pci_dev(dev_d); struct ena_adapter *adapter = pci_get_drvdata(pdev); - u64_stats_update_begin(&adapter->syncp); - adapter->dev_stats.suspend++; - u64_stats_update_end(&adapter->syncp); + ena_increase_stat(&adapter->dev_stats.suspend, 1, &adapter->syncp); rtnl_lock(); if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { @@ -4462,9 +4517,7 @@ static int __maybe_unused ena_resume(struct device *dev_d) struct ena_adapter *adapter = dev_get_drvdata(dev_d); int rc; - u64_stats_update_begin(&adapter->syncp); - adapter->dev_stats.resume++; - u64_stats_update_end(&adapter->syncp); + ena_increase_stat(&adapter->dev_stats.resume, 1, &adapter->syncp); rtnl_lock(); rc = ena_restore_device(adapter); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h 
b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 30eb686749dc..74af15d62ee1 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -170,12 +170,6 @@ struct ena_tx_buffer { * the xdp queues */ struct xdp_frame *xdpf; - /* The rx page for the rx buffer that was received in rx and - * re transmitted on xdp tx queues as a result of XDP_TX action. - * We need to free the page once we finished cleaning the buffer in - * clean_xdp_irq() - */ - struct page *xdp_rx_page; /* Indicate if bufs[0] map the linear data of the skb. */ u8 map_linear_data; @@ -239,6 +233,7 @@ struct ena_stats_rx { u64 xdp_pass; u64 xdp_tx; u64 xdp_invalid; + u64 xdp_redirect; }; struct ena_ring { @@ -263,6 +258,7 @@ struct ena_ring { struct ena_com_io_sq *ena_com_io_sq; struct bpf_prog *xdp_bpf_prog; struct xdp_rxq_info xdp_rxq; + spinlock_t xdp_tx_lock; /* synchronize XDP TX/Redirect traffic */ u16 next_to_use; u16 next_to_clean; @@ -433,8 +429,8 @@ static inline bool ena_xdp_present_ring(struct ena_ring *ring) return !!ring->xdp_bpf_prog; } -static inline int ena_xdp_legal_queue_count(struct ena_adapter *adapter, - u32 queues) +static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter, + u32 queues) { return 2 * queues <= adapter->max_num_io_queues; } diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index 7b80d924632a..f016f2e12ee7 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -2549,7 +2549,6 @@ static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data) */ static s32 atl2_phy_setup_autoneg_adv(struct atl2_hw *hw) { - s32 ret_val; s16 mii_autoneg_adv_reg; /* Read the MII Auto-Neg Advertisement Register (Address 4). */ @@ -2605,12 +2604,7 @@ static s32 atl2_phy_setup_autoneg_adv(struct atl2_hw *hw) hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; - ret_val = atl2_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); - - if (ret_val) - return ret_val; - - return 0; + return atl2_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); } /* diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig index 85858163bac5..e432a68ac520 100644 --- a/drivers/net/ethernet/cadence/Kconfig +++ b/drivers/net/ethernet/cadence/Kconfig @@ -23,6 +23,7 @@ config MACB tristate "Cadence MACB/GEM support" depends on HAS_DMA && COMMON_CLK select PHYLINK + select CRC32 help The Cadence MACB ethernet interface is found on many Atmel AT32 and AT91 parts. 
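The macb hunks below stop writing hard-coded MACB_BIT(MII)/MACB_BIT(RMII)/GEM_BIT(RGMII) values into USRIO and instead dereference a per-SoC struct macb_usrio_config carried by struct macb_config, which is how the sama7g5 variants slot in a different register layout without touching the shared init path. This is the usual of_device_id .data pattern; a sketch under hypothetical bar_* names (the real driver resolves its config through its own probe helpers):

	#include <linux/bits.h>
	#include <linux/of_device.h>
	#include <linux/platform_device.h>
	#include <linux/types.h>

	struct bar_usrio {
		u32 mii, rmii, rgmii, refclk;	/* per-SoC USRIO bit layout */
	};

	struct bar_config {
		u32 caps;
		const struct bar_usrio *usrio;
	};

	static const struct bar_usrio bar_default_usrio = {
		.mii = BIT(0), .rmii = BIT(1), .rgmii = BIT(2), .refclk = BIT(3),
	};

	static const struct bar_config bar_soc_a_config = {
		.caps = 0,
		.usrio = &bar_default_usrio,
	};

	static const struct of_device_id bar_dt_ids[] = {
		{ .compatible = "vendor,soc-a", .data = &bar_soc_a_config },
		{ /* sentinel */ }
	};

	static int bar_probe(struct platform_device *pdev)
	{
		const struct bar_config *cfg = of_device_get_match_data(&pdev->dev);

		if (!cfg)
			return -EINVAL;
		/* Program USRIO from cfg->usrio here, as the macb hunks
		 * below do, instead of from compile-time bit macros.
		 */
		return 0;
	}

Keeping the tables const and resolving them once at probe time keeps compatible-string checks out of the data path, and supporting a new SoC becomes a new table entry rather than another if/else chain.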
This driver also supports the Cadence GEM (Gigabit diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 1f5da4e4f4b2..d8c68906525a 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -699,6 +699,7 @@ #define MACB_CAPS_GEM_HAS_PTP 0x00000040 #define MACB_CAPS_BD_RD_PREFETCH 0x00000080 #define MACB_CAPS_NEEDS_RSTONUBR 0x00000100 +#define MACB_CAPS_CLK_HW_CHG 0x04000000 #define MACB_CAPS_MACB_IS_EMAC 0x08000000 #define MACB_CAPS_FIFO_MODE 0x10000000 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 @@ -1147,6 +1148,14 @@ struct macb_pm_data { u32 usrio; }; +struct macb_usrio_config { + u32 mii; + u32 rmii; + u32 rgmii; + u32 refclk; + u32 hdfctlen; +}; + struct macb_config { u32 caps; unsigned int dma_burst_length; @@ -1155,6 +1164,7 @@ struct macb_config { struct clk **rx_clk, struct clk **tsu_clk); int (*init)(struct platform_device *pdev); int jumbo_max_len; + const struct macb_usrio_config *usrio; }; struct tsu_incr { @@ -1253,8 +1263,6 @@ struct macb { /* AT91RM9200 transmit queue (1 on wire + 1 queued) */ struct macb_tx_skb rm9200_txq[2]; - unsigned int rm9200_tx_tail; - unsigned int rm9200_tx_len; unsigned int max_tx_length; u64 ethtool_stats[GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES]; @@ -1288,6 +1296,7 @@ struct macb { u32 rx_intr_mask; struct macb_pm_data pm_data; + const struct macb_usrio_config *usrio; }; #ifdef CONFIG_MACB_USE_HWSTAMP diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 7b1d195787dc..d5d910916c2e 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -460,15 +460,14 @@ static void macb_init_buffers(struct macb *bp) /** * macb_set_tx_clk() - Set a clock to a new frequency - * @clk: Pointer to the clock to change + * @bp: pointer to struct macb * @speed: New frequency in Hz - * @dev: Pointer to the struct net_device */ -static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) +static void macb_set_tx_clk(struct macb *bp, int speed) { long ferr, rate, rate_rounded; - if (!clk) + if (!bp->tx_clk || !(bp->caps & MACB_CAPS_CLK_HW_CHG)) return; switch (speed) { @@ -485,7 +484,7 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) return; } - rate_rounded = clk_round_rate(clk, rate); + rate_rounded = clk_round_rate(bp->tx_clk, rate); if (rate_rounded < 0) return; @@ -495,11 +494,12 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) ferr = abs(rate_rounded - rate); ferr = DIV_ROUND_UP(ferr, rate / 100000); if (ferr > 5) - netdev_warn(dev, "unable to generate target frequency: %ld Hz\n", + netdev_warn(bp->dev, + "unable to generate target frequency: %ld Hz\n", rate); - if (clk_set_rate(clk, rate_rounded)) - netdev_err(dev, "adjusting tx_clk failed.\n"); + if (clk_set_rate(bp->tx_clk, rate_rounded)) + netdev_err(bp->dev, "adjusting tx_clk failed.\n"); } static void macb_validate(struct phylink_config *config, @@ -751,7 +751,7 @@ static void macb_mac_link_up(struct phylink_config *config, if (rx_pause) ctrl |= MACB_BIT(PAE); - macb_set_tx_clk(bp->tx_clk, speed, ndev); + macb_set_tx_clk(bp, speed); /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down * cleared the pipeline and control registers. 
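The macb_set_tx_clk() rework above keys off the new MACB_CAPS_CLK_HW_CHG capability (plus a non-NULL tx_clk) instead of a bare NULL check, so only SoCs that declare a hardware-adjustable TX clock ever touch the rate. The round/verify/set idiom it uses is worth isolating; a standalone sketch, assuming the driver's warning threshold of 5 parts per 100000 (roughly 50 ppm) is the intended tolerance:

	#include <linux/clk.h>
	#include <linux/kernel.h>

	/* speed is the PHY speed in Mbps, clk the MAC's TX clock */
	static void set_tx_clk_for_speed(struct clk *clk, int speed)
	{
		long rate, rounded, ferr;

		switch (speed) {
		case 10:   rate = 2500000;   break;
		case 100:  rate = 25000000;  break;
		case 1000: rate = 125000000; break;
		default:   return;
		}

		/* Ask the clock tree what it can actually deliver */
		rounded = clk_round_rate(clk, rate);
		if (rounded < 0)
			return;

		/* Relative error in units of rate/100000 (10 ppm each) */
		ferr = DIV_ROUND_UP(abs(rounded - rate), rate / 100000);
		if (ferr > 5)
			pr_warn("tx_clk off target: want %ld Hz, got %ld Hz\n",
				rate, rounded);

		if (clk_set_rate(clk, rounded))
			pr_err("adjusting tx_clk failed\n");
	}

clk_round_rate() is the side-effect-free probe here: the frequency-error warning is computed on what the PLL can really produce before clk_set_rate() commits it.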
@@ -3694,6 +3694,20 @@ static void macb_probe_queues(void __iomem *mem, *num_queues = hweight32(*queue_mask); } +static void macb_clks_disable(struct clk *pclk, struct clk *hclk, struct clk *tx_clk, + struct clk *rx_clk, struct clk *tsu_clk) +{ + struct clk_bulk_data clks[] = { + { .clk = tsu_clk, }, + { .clk = rx_clk, }, + { .clk = pclk, }, + { .clk = hclk, }, + { .clk = tx_clk }, + }; + + clk_bulk_disable_unprepare(ARRAY_SIZE(clks), clks); +} + static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk, struct clk **tsu_clk) @@ -3913,15 +3927,15 @@ static int macb_init(struct platform_device *pdev) if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { val = 0; if (phy_interface_mode_is_rgmii(bp->phy_interface)) - val = GEM_BIT(RGMII); + val = bp->usrio->rgmii; else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) - val = MACB_BIT(RMII); + val = bp->usrio->rmii; else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) - val = MACB_BIT(MII); + val = bp->usrio->mii; if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) - val |= MACB_BIT(CLKEN); + val |= bp->usrio->refclk; macb_or_gem_writel(bp, USRIO, val); } @@ -4032,7 +4046,6 @@ static int at91ether_start(struct macb *lp) MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE) | MACB_BIT(TCOMP) | - MACB_BIT(RM9200_TBRE) | MACB_BIT(ISR_ROVR) | MACB_BIT(HRESP)); @@ -4049,7 +4062,6 @@ static void at91ether_stop(struct macb *lp) MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE) | MACB_BIT(TCOMP) | - MACB_BIT(RM9200_TBRE) | MACB_BIT(ISR_ROVR) | MACB_BIT(HRESP)); @@ -4119,10 +4131,11 @@ static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct macb *lp = netdev_priv(dev); - unsigned long flags; - if (lp->rm9200_tx_len < 2) { - int desc = lp->rm9200_tx_tail; + if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) { + int desc = 0; + + netif_stop_queue(dev); /* Store packet information (to free when Tx completed) */ lp->rm9200_txq[desc].skb = skb; @@ -4136,15 +4149,6 @@ static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } - spin_lock_irqsave(&lp->lock, flags); - - lp->rm9200_tx_tail = (desc + 1) & 1; - lp->rm9200_tx_len++; - if (lp->rm9200_tx_len > 1) - netif_stop_queue(dev); - - spin_unlock_irqrestore(&lp->lock, flags); - /* Set address of the data in the Transmit Address register */ macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping); /* Set length of the packet in the Transmit Control register */ @@ -4210,8 +4214,6 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id) struct macb *lp = netdev_priv(dev); u32 intstatus, ctl; unsigned int desc; - unsigned int qlen; - u32 tsr; /* MAC Interrupt Status register indicates what interrupts are pending. * It is automatically cleared once read. @@ -4223,39 +4225,21 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id) at91ether_rx(dev); /* Transmit complete */ - if (intstatus & (MACB_BIT(TCOMP) | MACB_BIT(RM9200_TBRE))) { + if (intstatus & MACB_BIT(TCOMP)) { /* The TCOM bit is set even if the transmission failed */ if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) dev->stats.tx_errors++; - spin_lock(&lp->lock); - - tsr = macb_readl(lp, TSR); - - /* we have three possibilities here: - * - all pending packets transmitted (TGO, implies BNQ) - * - only first packet transmitted (!TGO && BNQ) - * - two frames pending (!TGO && !BNQ) - * Note that TGO ("transmit go") is called "IDLE" on RM9200. - */ - qlen = (tsr & MACB_BIT(TGO)) ? 
0 : - (tsr & MACB_BIT(RM9200_BNQ)) ? 1 : 2; - - while (lp->rm9200_tx_len > qlen) { - desc = (lp->rm9200_tx_tail - lp->rm9200_tx_len) & 1; + desc = 0; + if (lp->rm9200_txq[desc].skb) { dev_consume_skb_irq(lp->rm9200_txq[desc].skb); lp->rm9200_txq[desc].skb = NULL; dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping, lp->rm9200_txq[desc].size, DMA_TO_DEVICE); dev->stats.tx_packets++; dev->stats.tx_bytes += lp->rm9200_txq[desc].size; - lp->rm9200_tx_len--; } - - if (lp->rm9200_tx_len < 2 && netif_queue_stopped(dev)) - netif_wake_queue(dev); - - spin_unlock(&lp->lock); + netif_wake_queue(dev); } /* Work-around for EMAC Errata section 41.3.1 */ @@ -4406,8 +4390,10 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk, return err; mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL); - if (!mgmt) - return -ENOMEM; + if (!mgmt) { + err = -ENOMEM; + goto err_disable_clks; + } init.name = "sifive-gemgxl-mgmt"; init.ops = &fu540_c000_ops; @@ -4418,16 +4404,26 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk, mgmt->hw.init = &init; *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw); - if (IS_ERR(*tx_clk)) - return PTR_ERR(*tx_clk); + if (IS_ERR(*tx_clk)) { + err = PTR_ERR(*tx_clk); + goto err_disable_clks; + } err = clk_prepare_enable(*tx_clk); - if (err) + if (err) { dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); - else + *tx_clk = NULL; + goto err_disable_clks; + } else { dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name); + } return 0; + +err_disable_clks: + macb_clks_disable(*pclk, *hclk, *tx_clk, *rx_clk, *tsu_clk); + + return err; } static int fu540_c000_init(struct platform_device *pdev) @@ -4439,6 +4435,21 @@ static int fu540_c000_init(struct platform_device *pdev) return macb_init(pdev); } +static const struct macb_usrio_config macb_default_usrio = { + .mii = MACB_BIT(MII), + .rmii = MACB_BIT(RMII), + .rgmii = GEM_BIT(RGMII), + .refclk = MACB_BIT(CLKEN), +}; + +static const struct macb_usrio_config sama7g5_usrio = { + .mii = 0, + .rmii = 1, + .rgmii = 2, + .refclk = BIT(2), + .hdfctlen = BIT(6), +}; + static const struct macb_config fu540_c000_config = { .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO | MACB_CAPS_GEM_HAS_PTP, @@ -4446,12 +4457,14 @@ static const struct macb_config fu540_c000_config = { .clk_init = fu540_c000_clk_init, .init = fu540_c000_init, .jumbo_max_len = 10240, + .usrio = &macb_default_usrio, }; static const struct macb_config at91sam9260_config = { .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, .clk_init = macb_clk_init, .init = macb_init, + .usrio = &macb_default_usrio, }; static const struct macb_config sama5d3macb_config = { @@ -4459,6 +4472,7 @@ static const struct macb_config sama5d3macb_config = { | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, .clk_init = macb_clk_init, .init = macb_init, + .usrio = &macb_default_usrio, }; static const struct macb_config pc302gem_config = { @@ -4466,6 +4480,7 @@ static const struct macb_config pc302gem_config = { .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, + .usrio = &macb_default_usrio, }; static const struct macb_config sama5d2_config = { @@ -4473,6 +4488,7 @@ static const struct macb_config sama5d2_config = { .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, + .usrio = &macb_default_usrio, }; static const struct macb_config sama5d3_config = { @@ -4482,6 +4498,7 @@ static const struct macb_config sama5d3_config = { .clk_init 
= macb_clk_init, .init = macb_init, .jumbo_max_len = 10240, + .usrio = &macb_default_usrio, }; static const struct macb_config sama5d4_config = { @@ -4489,18 +4506,21 @@ static const struct macb_config sama5d4_config = { .dma_burst_length = 4, .clk_init = macb_clk_init, .init = macb_init, + .usrio = &macb_default_usrio, }; static const struct macb_config emac_config = { .caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC, .clk_init = at91ether_clk_init, .init = at91ether_init, + .usrio = &macb_default_usrio, }; static const struct macb_config np4_config = { .caps = MACB_CAPS_USRIO_DISABLED, .clk_init = macb_clk_init, .init = macb_init, + .usrio = &macb_default_usrio, }; static const struct macb_config zynqmp_config = { @@ -4511,6 +4531,7 @@ static const struct macb_config zynqmp_config = { .clk_init = macb_clk_init, .init = macb_init, .jumbo_max_len = 10240, + .usrio = &macb_default_usrio, }; static const struct macb_config zynq_config = { @@ -4519,6 +4540,23 @@ static const struct macb_config zynq_config = { .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, + .usrio = &macb_default_usrio, +}; + +static const struct macb_config sama7g5_gem_config = { + .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG, + .dma_burst_length = 16, + .clk_init = macb_clk_init, + .init = macb_init, + .usrio = &sama7g5_usrio, +}; + +static const struct macb_config sama7g5_emac_config = { + .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_USRIO_HAS_CLKEN, + .dma_burst_length = 16, + .clk_init = macb_clk_init, + .init = macb_init, + .usrio = &sama7g5_usrio, }; static const struct of_device_id macb_dt_ids[] = { @@ -4538,6 +4576,8 @@ static const struct of_device_id macb_dt_ids[] = { { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, { .compatible = "cdns,zynq-gem", .data = &zynq_config }, { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config }, + { .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config }, + { .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, macb_dt_ids); @@ -4640,6 +4680,8 @@ static int macb_probe(struct platform_device *pdev) bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); + bp->usrio = macb_config->usrio; + spin_lock_init(&bp->lock); /* setup capabilities */ @@ -4735,11 +4777,7 @@ err_out_free_netdev: free_netdev(dev); err_disable_clocks: - clk_disable_unprepare(tx_clk); - clk_disable_unprepare(hclk); - clk_disable_unprepare(pclk); - clk_disable_unprepare(rx_clk); - clk_disable_unprepare(tsu_clk); + macb_clks_disable(pclk, hclk, tx_clk, rx_clk, tsu_clk); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); @@ -4764,11 +4802,8 @@ static int macb_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); if (!pm_runtime_suspended(&pdev->dev)) { - clk_disable_unprepare(bp->tx_clk); - clk_disable_unprepare(bp->hclk); - clk_disable_unprepare(bp->pclk); - clk_disable_unprepare(bp->rx_clk); - clk_disable_unprepare(bp->tsu_clk); + macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, + bp->rx_clk, bp->tsu_clk); pm_runtime_set_suspended(&pdev->dev); } phylink_destroy(bp->phylink); @@ -4947,13 +4982,10 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev) struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); - if 
(!(device_may_wakeup(dev))) { - clk_disable_unprepare(bp->tx_clk); - clk_disable_unprepare(bp->hclk); - clk_disable_unprepare(bp->pclk); - clk_disable_unprepare(bp->rx_clk); - } - clk_disable_unprepare(bp->tsu_clk); + if (!(device_may_wakeup(dev))) + macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk); + else + macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk); return 0; } diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c index 7f90b828d159..1b7e8c91b541 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c @@ -987,9 +987,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb, struct fw_eth_tx_pkt_wr *wr; struct cpl_tx_pkt_core *cpl; u32 ctrl, iplen, maclen; -#if IS_ENABLED(CONFIG_IPV6) struct ipv6hdr *ip6; -#endif unsigned int ndesc; struct tcphdr *tcp; int len16, pktlen; @@ -1043,17 +1041,15 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb, cpl->len = htons(pktlen); memcpy(buf, skb->data, pktlen); - if (tx_info->ip_family == AF_INET) { + if (!IS_ENABLED(CONFIG_IPV6) || tx_info->ip_family == AF_INET) { /* we need to correct ip header len */ ip = (struct iphdr *)(buf + maclen); ip->tot_len = htons(pktlen - maclen); cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP); -#if IS_ENABLED(CONFIG_IPV6) } else { ip6 = (struct ipv6hdr *)(buf + maclen); ip6->payload_len = htons(pktlen - maclen - iplen); cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6); -#endif } cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) | diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.c b/drivers/net/ethernet/cisco/enic/vnic_cq.c index 9c682aff3834..519323460f26 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_cq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_cq.c @@ -36,8 +36,6 @@ void vnic_cq_free(struct vnic_cq *cq) int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, unsigned int desc_count, unsigned int desc_size) { - int err; - cq->index = index; cq->vdev = vdev; @@ -47,11 +45,7 @@ int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, return -EINVAL; } - err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); - if (err) - return err; - - return 0; + return vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); } void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 676e437d78f6..d402d83d9edd 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4677,7 +4677,6 @@ static int be_if_create(struct be_adapter *adapter) { u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS; u32 cap_flags = be_if_cap_flags(adapter); - int status; /* alloc required memory for other filtering fields */ adapter->pmac_id = kcalloc(be_max_uc(adapter), @@ -4700,13 +4699,8 @@ static int be_if_create(struct be_adapter *adapter) en_flags &= cap_flags; /* will enable all the needed filter flags in be_open() */ - status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags, + return be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags, &adapter->if_handle, 0); - - if (status) - return status; - - return 0; } int be_update_queues(struct be_adapter *adapter) diff --git a/drivers/net/ethernet/faraday/Kconfig 
b/drivers/net/ethernet/faraday/Kconfig index c2677ec0564d..3d1e9a302148 100644 --- a/drivers/net/ethernet/faraday/Kconfig +++ b/drivers/net/ethernet/faraday/Kconfig @@ -33,6 +33,7 @@ config FTGMAC100 depends on !64BIT || BROKEN select PHYLIB select MDIO_ASPEED if MACH_ASPEED_G6 + select CRC32 help This driver supports the FTGMAC100 Gigabit Ethernet controller from Faraday. It is used on Faraday A369, Andes AG102 and some diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index a1d53ddf1593..3f9175bdce77 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -25,6 +25,7 @@ config FEC depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \ ARCH_MXC || SOC_IMX28 || COMPILE_TEST) default ARCH_MXC || SOC_IMX28 if ARM + select CRC32 select PHYLIB imply PTP_1588_CLOCK help diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index e28510c282e5..4360ce4d3fb6 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -1625,17 +1625,13 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv) { struct dpaa_bp *dpaa_bp; int *countptr; - int res; dpaa_bp = priv->dpaa_bp; if (!dpaa_bp) return -EINVAL; countptr = this_cpu_ptr(dpaa_bp->percpu_count); - res = dpaa_eth_refill_bpool(dpaa_bp, countptr); - if (res) - return res; - return 0; + return dpaa_eth_refill_bpool(dpaa_bp, countptr); } /* Cleanup function for outgoing frame descriptors that were built on Tx path, diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c index 90cd243070d7..828c177df03d 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c @@ -269,6 +269,7 @@ static int dpaa2_pcs_create(struct dpaa2_mac *mac, if (!of_device_is_available(node)) { netdev_err(mac->net_dev, "pcs-handle node not available\n"); + of_node_put(node); return -ENODEV; } diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index 8ed1ebd5a183..89e558135432 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -143,8 +143,8 @@ static const struct { { ENETC_PM0_R255, "MAC rx 128-255 byte packets" }, { ENETC_PM0_R511, "MAC rx 256-511 byte packets" }, { ENETC_PM0_R1023, "MAC rx 512-1023 byte packets" }, - { ENETC_PM0_R1518, "MAC rx 1024-1518 byte packets" }, - { ENETC_PM0_R1519X, "MAC rx 1519 to max-octet packets" }, + { ENETC_PM0_R1522, "MAC rx 1024-1522 byte packets" }, + { ENETC_PM0_R1523X, "MAC rx 1523 to max-octet packets" }, { ENETC_PM0_ROVR, "MAC rx oversized packets" }, { ENETC_PM0_RJBR, "MAC rx jabber packets" }, { ENETC_PM0_RFRG, "MAC rx fragment packets" }, @@ -163,9 +163,13 @@ static const struct { { ENETC_PM0_TBCA, "MAC tx broadcast frames" }, { ENETC_PM0_TPKT, "MAC tx packets" }, { ENETC_PM0_TUND, "MAC tx undersized packets" }, + { ENETC_PM0_T64, "MAC tx 64 byte packets" }, { ENETC_PM0_T127, "MAC tx 65-127 byte packets" }, + { ENETC_PM0_T255, "MAC tx 128-255 byte packets" }, + { ENETC_PM0_T511, "MAC tx 256-511 byte packets" }, { ENETC_PM0_T1023, "MAC tx 512-1023 byte packets" }, - { ENETC_PM0_T1518, "MAC tx 1024-1518 byte packets" }, + { ENETC_PM0_T1522, "MAC tx 1024-1522 byte packets" }, + { ENETC_PM0_T1523X, "MAC tx 1523 to max-octet packets" }, { ENETC_PM0_TCNP, "MAC tx control packets" 
}, { ENETC_PM0_TDFR, "MAC tx deferred packets" }, { ENETC_PM0_TMCOL, "MAC tx multiple collisions" }, diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index d18f439f2b81..e1e950d48c92 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -267,8 +267,8 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_PM0_R255 0x8180 #define ENETC_PM0_R511 0x8188 #define ENETC_PM0_R1023 0x8190 -#define ENETC_PM0_R1518 0x8198 -#define ENETC_PM0_R1519X 0x81A0 +#define ENETC_PM0_R1522 0x8198 +#define ENETC_PM0_R1523X 0x81A0 #define ENETC_PM0_ROVR 0x81A8 #define ENETC_PM0_RJBR 0x81B0 #define ENETC_PM0_RFRG 0x81B8 @@ -287,9 +287,13 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_PM0_TBCA 0x8250 #define ENETC_PM0_TPKT 0x8260 #define ENETC_PM0_TUND 0x8268 +#define ENETC_PM0_T64 0x8270 #define ENETC_PM0_T127 0x8278 +#define ENETC_PM0_T255 0x8280 +#define ENETC_PM0_T511 0x8288 #define ENETC_PM0_T1023 0x8290 -#define ENETC_PM0_T1518 0x8298 +#define ENETC_PM0_T1522 0x8298 +#define ENETC_PM0_T1523X 0x82A0 #define ENETC_PM0_TCNP 0x82C0 #define ENETC_PM0_TDFR 0x82D0 #define ENETC_PM0_TMCOL 0x82D8 diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig index 34150182cc35..48bf8088795d 100644 --- a/drivers/net/ethernet/freescale/fman/Kconfig +++ b/drivers/net/ethernet/freescale/fman/Kconfig @@ -4,6 +4,7 @@ config FSL_FMAN depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST select GENERIC_ALLOCATOR select PHYLIB + select CRC32 default n help Freescale Data-Path Acceleration Architecture Frame Manager diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index c6481bd61239..9d58d8334467 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -430,7 +430,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) return -ENOMEM; priv = new_bus->priv; - new_bus->name = "Freescale PowerQUICC MII Bus", + new_bus->name = "Freescale PowerQUICC MII Bus"; new_bus->read = &fsl_pq_mdio_read; new_bus->write = &fsl_pq_mdio_write; new_bus->reset = &fsl_pq_mdio_reset; diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index f5c80229ea96..daf07c0f790b 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h @@ -38,6 +38,8 @@ #define NIC_TX_STATS_REPORT_NUM 0 #define NIC_RX_STATS_REPORT_NUM 4 +#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1)) + /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */ struct gve_rx_desc_queue { struct gve_rx_desc *desc_ring; /* the descriptor ring */ @@ -49,7 +51,8 @@ struct gve_rx_desc_queue { struct gve_rx_slot_page_info { struct page *page; void *page_address; - u32 page_offset; /* offset to write to in page */ + u8 page_offset; /* flipped to second half? 
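+			 * (a 0/1 flag now, not a byte offset; the offset
+			 * into the page is page_offset ? PAGE_SIZE / 2 : 0)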
*/ + u8 can_flip; }; /* A list of pages registered with the device during setup and used by a queue @@ -64,10 +67,11 @@ struct gve_queue_page_list { /* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */ struct gve_rx_data_queue { - struct gve_rx_data_slot *data_ring; /* read by NIC */ + union gve_rx_data_slot *data_ring; /* read by NIC */ dma_addr_t data_bus; /* dma mapping of the slots */ struct gve_rx_slot_page_info *page_info; /* page info of the buffers */ struct gve_queue_page_list *qpl; /* qpl assigned to this queue */ + u8 raw_addressing; /* use raw_addressing? */ }; struct gve_priv; @@ -82,6 +86,7 @@ struct gve_rx_ring { u32 cnt; /* free-running total number of completed packets */ u32 fill_cnt; /* free-running total number of descs and buffs posted */ u32 mask; /* masks the cnt and fill_cnt to the size of the ring */ + u32 db_threshold; /* threshold for posting new buffs and descs */ u64 rx_copybreak_pkt; /* free-running count of copybreak packets */ u64 rx_copied_pkt; /* free-running total number of copied packets */ u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */ @@ -107,12 +112,20 @@ struct gve_tx_iovec { u32 iov_padding; /* padding associated with this segment */ }; +struct gve_tx_dma_buf { + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); +}; + /* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc * ring entry but only used for a pkt_desc not a seg_desc */ struct gve_tx_buffer_state { struct sk_buff *skb; /* skb for this pkt */ - struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */ + union { + struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */ + struct gve_tx_dma_buf buf; + }; }; /* A TX buffer - each queue has one */ @@ -135,13 +148,17 @@ struct gve_tx_ring { __be32 last_nic_done ____cacheline_aligned; /* NIC tail pointer */ u64 pkt_done; /* free-running - total packets completed */ u64 bytes_done; /* free-running - total bytes completed */ + u64 dropped_pkt; /* free-running - total packets dropped */ + u64 dma_mapping_error; /* count of dma mapping errors */ /* Cacheline 2 -- Read-mostly fields */ union gve_tx_desc *desc ____cacheline_aligned; struct gve_tx_buffer_state *info; /* Maps 1:1 to a desc */ struct netdev_queue *netdev_txq; struct gve_queue_resources *q_resources; /* head and tail pointer idx */ + struct device *dev; u32 mask; /* masks req and done down to queue size */ + u8 raw_addressing; /* use raw_addressing? */ /* Slow-path fields */ u32 q_num ____cacheline_aligned; /* queue idx */ @@ -194,11 +211,12 @@ struct gve_priv { u16 tx_desc_cnt; /* num desc per ring */ u16 rx_desc_cnt; /* num desc per ring */ u16 tx_pages_per_qpl; /* tx buffer length */ - u16 rx_pages_per_qpl; /* rx buffer length */ + u16 rx_data_slot_cnt; /* rx buffer length */ u64 max_registered_pages; u64 num_registered_pages; /* num pages registered with NIC */ u32 rx_copybreak; /* copy packets smaller than this */ u16 default_num_queues; /* default num queues to set up */ + u8 raw_addressing; /* 1 if this dev supports raw addressing, 0 otherwise */ struct gve_queue_config tx_cfg; struct gve_queue_config rx_cfg; @@ -436,14 +454,14 @@ static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx) */ static inline u32 gve_num_tx_qpls(struct gve_priv *priv) { - return priv->tx_cfg.num_queues; + return priv->raw_addressing ? 
0 : priv->tx_cfg.num_queues; } /* Returns the number of rx queue page lists */ static inline u32 gve_num_rx_qpls(struct gve_priv *priv) { - return priv->rx_cfg.num_queues; + return priv->raw_addressing ? 0 : priv->rx_cfg.num_queues; } /* Returns a pointer to the next available tx qpl in the list of qpls @@ -497,15 +515,6 @@ static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv, return DMA_FROM_DEVICE; } -/* Returns true if the max mtu allows page recycling */ -static inline bool gve_can_recycle_pages(struct net_device *dev) -{ - /* We can't recycle the pages if we can't fit a packet into half a - * page. - */ - return dev->max_mtu <= PAGE_SIZE / 2; -} - /* buffers */ int gve_alloc_page(struct gve_priv *priv, struct device *dev, struct page **page, dma_addr_t *dma, diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index 24ae6a28a806..53864f200599 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -14,6 +14,57 @@ #define GVE_ADMINQ_SLEEP_LEN 20 #define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK 100 +#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error:\n" \ +"Expected: length=%d, feature_mask=%x.\n" \ +"Actual: length=%d, feature_mask=%x.\n" + +static +struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor, + struct gve_device_option *option) +{ + void *option_end, *descriptor_end; + + option_end = (void *)(option + 1) + be16_to_cpu(option->option_length); + descriptor_end = (void *)descriptor + be16_to_cpu(descriptor->total_length); + + return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end; +} + +static +void gve_parse_device_option(struct gve_priv *priv, + struct gve_device_descriptor *device_descriptor, + struct gve_device_option *option) +{ + u16 option_length = be16_to_cpu(option->option_length); + u16 option_id = be16_to_cpu(option->option_id); + + switch (option_id) { + case GVE_DEV_OPT_ID_RAW_ADDRESSING: + /* If the length or feature mask doesn't match, + * continue without enabling the feature. + */ + if (option_length != GVE_DEV_OPT_LEN_RAW_ADDRESSING || + option->feat_mask != cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING)) { + dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, "Raw Addressing", + GVE_DEV_OPT_LEN_RAW_ADDRESSING, + cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING), + option_length, option->feat_mask); + priv->raw_addressing = 0; + } else { + dev_info(&priv->pdev->dev, + "Raw addressing device option enabled.\n"); + priv->raw_addressing = 1; + } + break; + default: + /* If we don't recognize the option just continue + * without doing anything. + */ + dev_dbg(&priv->pdev->dev, "Unrecognized device option 0x%hx not enabled.\n", + option_id); + } +} + int gve_adminq_alloc(struct device *dev, struct gve_priv *priv) { priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE, @@ -318,8 +369,10 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index) { struct gve_tx_ring *tx = &priv->tx[queue_index]; union gve_adminq_command cmd; + u32 qpl_id; int err; + qpl_id = priv->raw_addressing ? 
GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE); cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) { @@ -328,7 +381,7 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index) .queue_resources_addr = cpu_to_be64(tx->q_resources_bus), .tx_ring_addr = cpu_to_be64(tx->bus), - .queue_page_list_id = cpu_to_be32(tx->tx_fifo.qpl->id), + .queue_page_list_id = cpu_to_be32(qpl_id), .ntfy_id = cpu_to_be32(tx->ntfy_id), }; @@ -357,8 +410,10 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index) { struct gve_rx_ring *rx = &priv->rx[queue_index]; union gve_adminq_command cmd; + u32 qpl_id; int err; + qpl_id = priv->raw_addressing ? GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE); cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) { @@ -369,7 +424,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index) .queue_resources_addr = cpu_to_be64(rx->q_resources_bus), .rx_desc_ring_addr = cpu_to_be64(rx->desc.bus), .rx_data_ring_addr = cpu_to_be64(rx->data.data_bus), - .queue_page_list_id = cpu_to_be32(rx->data.qpl->id), + .queue_page_list_id = cpu_to_be32(qpl_id), }; err = gve_adminq_issue_cmd(priv, &cmd); @@ -460,11 +515,14 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues) int gve_adminq_describe_device(struct gve_priv *priv) { struct gve_device_descriptor *descriptor; + struct gve_device_option *dev_opt; union gve_adminq_command cmd; dma_addr_t descriptor_bus; + u16 num_options; int err = 0; u8 *mac; u16 mtu; + int i; memset(&cmd, 0, sizeof(cmd)); descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE, @@ -511,13 +569,30 @@ int gve_adminq_describe_device(struct gve_priv *priv) mac = descriptor->mac; dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac); priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl); - priv->rx_pages_per_qpl = be16_to_cpu(descriptor->rx_pages_per_qpl); - if (priv->rx_pages_per_qpl < priv->rx_desc_cnt) { - dev_err(&priv->pdev->dev, "rx_pages_per_qpl cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n", - priv->rx_pages_per_qpl); - priv->rx_desc_cnt = priv->rx_pages_per_qpl; + priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl); + if (priv->rx_data_slot_cnt < priv->rx_desc_cnt) { + dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n", + priv->rx_data_slot_cnt); + priv->rx_desc_cnt = priv->rx_data_slot_cnt; } priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues); + dev_opt = (void *)(descriptor + 1); + + num_options = be16_to_cpu(descriptor->num_device_options); + for (i = 0; i < num_options; i++) { + struct gve_device_option *next_opt; + + next_opt = gve_get_next_option(descriptor, dev_opt); + if (!next_opt) { + dev_err(&priv->dev->dev, + "options exceed device_descriptor's total length.\n"); + err = -EINVAL; + goto free_device_descriptor; + } + + gve_parse_device_option(priv, descriptor, dev_opt); + dev_opt = next_opt; + } free_device_descriptor: dma_free_coherent(&priv->pdev->dev, sizeof(*descriptor), descriptor, diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h index 015796a20118..d320c2ffd87c 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.h +++ b/drivers/net/ethernet/google/gve/gve_adminq.h @@ -79,12 +79,17 @@ struct 
gve_device_descriptor { static_assert(sizeof(struct gve_device_descriptor) == 40); -struct device_option { - __be32 option_id; - __be32 option_length; +struct gve_device_option { + __be16 option_id; + __be16 option_length; + __be32 feat_mask; }; -static_assert(sizeof(struct device_option) == 8); +static_assert(sizeof(struct gve_device_option) == 8); + +#define GVE_DEV_OPT_ID_RAW_ADDRESSING 0x1 +#define GVE_DEV_OPT_LEN_RAW_ADDRESSING 0x0 +#define GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING 0x0 struct gve_adminq_configure_device_resources { __be64 counter_array; @@ -111,6 +116,8 @@ struct gve_adminq_unregister_page_list { static_assert(sizeof(struct gve_adminq_unregister_page_list) == 4); +#define GVE_RAW_ADDRESSING_QPL_ID 0xFFFFFFFF + struct gve_adminq_create_tx_queue { __be32 queue_id; __be32 reserved; diff --git a/drivers/net/ethernet/google/gve/gve_desc.h b/drivers/net/ethernet/google/gve/gve_desc.h index 54779871d52e..05ae6300e984 100644 --- a/drivers/net/ethernet/google/gve/gve_desc.h +++ b/drivers/net/ethernet/google/gve/gve_desc.h @@ -16,9 +16,11 @@ * Base addresses encoded in seg_addr are not assumed to be physical * addresses. The ring format assumes these come from some linear address * space. This could be physical memory, kernel virtual memory, user virtual - * memory. gVNIC uses lists of registered pages. Each queue is assumed - * to be associated with a single such linear address space to ensure a - * consistent meaning for seg_addrs posted to its rings. + * memory. + * If raw dma addressing is not supported then gVNIC uses lists of registered + * pages. Each queue is assumed to be associated with a single such linear + * address space to ensure a consistent meaning for seg_addrs posted to its + * rings. */ struct gve_tx_pkt_desc { @@ -72,12 +74,15 @@ struct gve_rx_desc { } __packed; static_assert(sizeof(struct gve_rx_desc) == 64); -/* As with the Tx ring format, the qpl_offset entries below are offsets into an - * ordered list of registered pages. +/* If the device supports raw dma addressing then the addr in data slot is + * the dma address of the buffer. + * If the device only supports registered segments then the addr is a byte + * offset into the registered segment (an ordered list of pages) where the + * buffer is. 
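+ *
+ * An illustrative decode of a slot under the two modes (sketch only,
+ * not part of this patch; GVE_DATA_SLOT_ADDR_PAGE_MASK is from gve.h):
+ *
+ *	raw:	dma = be64_to_cpu(slot->addr) & GVE_DATA_SLOT_ADDR_PAGE_MASK;
+ *	qpl:	off = be64_to_cpu(slot->qpl_offset);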
*/ -struct gve_rx_data_slot { - /* byte offset into the rx registered segment of this slot */ +union gve_rx_data_slot { __be64 qpl_offset; + __be64 addr; }; /* GVE Recive Packet Descriptor Seq No */ diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index 2fb197fd3daf..0901fa6853ca 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -51,6 +51,7 @@ static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = { static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = { "tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_bytes[%u]", "tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]", + "tx_dma_mapping_error[%u]", }; static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = { @@ -323,6 +324,7 @@ gve_get_ethtool_stats(struct net_device *netdev, data[i++] = tx->stop_queue; data[i++] = be32_to_cpu(gve_tx_load_event_counter(priv, tx)); + data[i++] = tx->dma_mapping_error; /* stats from NIC */ if (skip_nic_stats) { /* skip NIC tx stats */ diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 02e7d74779f4..7302498c6df3 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -677,6 +677,10 @@ static int gve_alloc_qpls(struct gve_priv *priv) int i, j; int err; + /* Raw addressing means no QPLs */ + if (priv->raw_addressing) + return 0; + priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL); if (!priv->qpls) return -ENOMEM; @@ -689,7 +693,7 @@ static int gve_alloc_qpls(struct gve_priv *priv) } for (; i < num_qpls; i++) { err = gve_alloc_queue_page_list(priv, i, - priv->rx_pages_per_qpl); + priv->rx_data_slot_cnt); if (err) goto free_qpls; } @@ -717,6 +721,10 @@ static void gve_free_qpls(struct gve_priv *priv) int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); int i; + /* Raw addressing means no QPLs */ + if (priv->raw_addressing) + return; + kvfree(priv->qpl_cfg.qpl_id_map); for (i = 0; i < num_qpls; i++) @@ -1077,6 +1085,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) if (skip_describe_device) goto setup_device; + priv->raw_addressing = false; /* Get the initial information we need from the device */ err = gve_adminq_describe_device(priv); if (err) { diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c index 008fa897a3e6..bf123fe524c4 100644 --- a/drivers/net/ethernet/google/gve/gve_rx.c +++ b/drivers/net/ethernet/google/gve/gve_rx.c @@ -16,12 +16,39 @@ static void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx) block->rx = NULL; } +static void gve_rx_free_buffer(struct device *dev, + struct gve_rx_slot_page_info *page_info, + union gve_rx_data_slot *data_slot) +{ + dma_addr_t dma = (dma_addr_t)(be64_to_cpu(data_slot->addr) & + GVE_DATA_SLOT_ADDR_PAGE_MASK); + + gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE); +} + +static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx) +{ + if (rx->data.raw_addressing) { + u32 slots = rx->mask + 1; + int i; + + for (i = 0; i < slots; i++) + gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i], + &rx->data.data_ring[i]); + } else { + gve_unassign_qpl(priv, rx->data.qpl->id); + rx->data.qpl = NULL; + } + kvfree(rx->data.page_info); + rx->data.page_info = NULL; +} + static void gve_rx_free_ring(struct gve_priv *priv, int idx) { struct gve_rx_ring *rx = &priv->rx[idx]; struct device *dev 
= &priv->pdev->dev; + u32 slots = rx->mask + 1; size_t bytes; - u32 slots; gve_rx_remove_from_block(priv, idx); @@ -33,11 +60,8 @@ static void gve_rx_free_ring(struct gve_priv *priv, int idx) rx->q_resources, rx->q_resources_bus); rx->q_resources = NULL; - gve_unassign_qpl(priv, rx->data.qpl->id); - rx->data.qpl = NULL; - kvfree(rx->data.page_info); + gve_rx_unfill_pages(priv, rx); - slots = rx->mask + 1; bytes = sizeof(*rx->data.data_ring) * slots; dma_free_coherent(dev, bytes, rx->data.data_ring, rx->data.data_bus); @@ -46,19 +70,35 @@ static void gve_rx_free_ring(struct gve_priv *priv, int idx) } static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info, - struct gve_rx_data_slot *slot, - dma_addr_t addr, struct page *page) + dma_addr_t addr, struct page *page, __be64 *slot_addr) { page_info->page = page; page_info->page_offset = 0; page_info->page_address = page_address(page); - slot->qpl_offset = cpu_to_be64(addr); + *slot_addr = cpu_to_be64(addr); +} + +static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev, + struct gve_rx_slot_page_info *page_info, + union gve_rx_data_slot *data_slot) +{ + struct page *page; + dma_addr_t dma; + int err; + + err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE); + if (err) + return err; + + gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr); + return 0; } static int gve_prefill_rx_pages(struct gve_rx_ring *rx) { struct gve_priv *priv = rx->gve; u32 slots; + int err; int i; /* Allocate one page per Rx queue slot. Each page is split into two @@ -71,17 +111,30 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx) if (!rx->data.page_info) return -ENOMEM; - rx->data.qpl = gve_assign_rx_qpl(priv); - + if (!rx->data.raw_addressing) + rx->data.qpl = gve_assign_rx_qpl(priv); for (i = 0; i < slots; i++) { - struct page *page = rx->data.qpl->pages[i]; - dma_addr_t addr = i * PAGE_SIZE; + if (!rx->data.raw_addressing) { + struct page *page = rx->data.qpl->pages[i]; + dma_addr_t addr = i * PAGE_SIZE; - gve_setup_rx_buffer(&rx->data.page_info[i], - &rx->data.data_ring[i], addr, page); + gve_setup_rx_buffer(&rx->data.page_info[i], addr, page, + &rx->data.data_ring[i].qpl_offset); + continue; + } + err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i], + &rx->data.data_ring[i]); + if (err) + goto alloc_err; } return slots; +alloc_err: + while (i--) + gve_rx_free_buffer(&priv->pdev->dev, + &rx->data.page_info[i], + &rx->data.data_ring[i]); + return err; } static void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx) @@ -110,8 +163,9 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx) rx->gve = priv; rx->q_num = idx; - slots = priv->rx_pages_per_qpl; + slots = priv->rx_data_slot_cnt; rx->mask = slots - 1; + rx->data.raw_addressing = priv->raw_addressing; /* alloc rx data ring */ bytes = sizeof(*rx->data.data_ring) * slots; @@ -156,8 +210,8 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx) err = -ENOMEM; goto abort_with_q_resources; } - rx->mask = slots - 1; rx->cnt = 0; + rx->db_threshold = priv->rx_desc_cnt / 2; rx->desc.seqno = 1; gve_rx_add_to_block(priv, idx); @@ -168,7 +222,7 @@ abort_with_q_resources: rx->q_resources, rx->q_resources_bus); rx->q_resources = NULL; abort_filled: - kvfree(rx->data.page_info); + gve_rx_unfill_pages(priv, rx); abort_with_slots: bytes = sizeof(*rx->data.data_ring) * slots; dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus); @@ -225,15 +279,14 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags) return 
PKT_HASH_TYPE_L2; } -static struct sk_buff *gve_rx_copy(struct gve_rx_ring *rx, - struct net_device *dev, +static struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi, struct gve_rx_slot_page_info *page_info, u16 len) { struct sk_buff *skb = napi_alloc_skb(napi, len); void *va = page_info->page_address + GVE_RX_PAD + - page_info->page_offset; + (page_info->page_offset ? PAGE_SIZE / 2 : 0); if (unlikely(!skb)) return NULL; @@ -244,15 +297,10 @@ static struct sk_buff *gve_rx_copy(struct gve_rx_ring *rx, skb->protocol = eth_type_trans(skb, dev); - u64_stats_update_begin(&rx->statss); - rx->rx_copied_pkt++; - u64_stats_update_end(&rx->statss); - return skb; } -static struct sk_buff *gve_rx_add_frags(struct net_device *dev, - struct napi_struct *napi, +static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi, struct gve_rx_slot_page_info *page_info, u16 len) { @@ -262,20 +310,92 @@ static struct sk_buff *gve_rx_add_frags(struct net_device *dev, return NULL; skb_add_rx_frag(skb, 0, page_info->page, - page_info->page_offset + + (page_info->page_offset ? PAGE_SIZE / 2 : 0) + GVE_RX_PAD, len, PAGE_SIZE / 2); return skb; } -static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, - struct gve_rx_data_slot *data_ring) +static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *slot_addr) +{ + const __be64 offset = cpu_to_be64(PAGE_SIZE / 2); + + /* "flip" to other packet buffer on this page */ + page_info->page_offset ^= 0x1; + *(slot_addr) ^= offset; +} + +static bool gve_rx_can_flip_buffers(struct net_device *netdev) +{ + return PAGE_SIZE == 4096 + ? netdev->mtu + GVE_RX_PAD + ETH_HLEN <= PAGE_SIZE / 2 : false; +} + +static int gve_rx_can_recycle_buffer(struct page *page) +{ + int pagecount = page_count(page); + + /* This page is not being used by any SKBs - reuse */ + if (pagecount == 1) + return 1; + /* This page is still being used by an SKB - we can't reuse */ + else if (pagecount >= 2) + return 0; + WARN(pagecount < 1, "Pagecount should never be < 1"); + return -1; +} + +static struct sk_buff * +gve_rx_raw_addressing(struct device *dev, struct net_device *netdev, + struct gve_rx_slot_page_info *page_info, u16 len, + struct napi_struct *napi, + union gve_rx_data_slot *data_slot) { - u64 addr = be64_to_cpu(data_ring->qpl_offset); + struct sk_buff *skb; + + skb = gve_rx_add_frags(napi, page_info, len); + if (!skb) + return NULL; - page_info->page_offset ^= PAGE_SIZE / 2; - addr ^= PAGE_SIZE / 2; - data_ring->qpl_offset = cpu_to_be64(addr); + /* Optimistically stop the kernel from freeing the page by increasing + * the page bias. We will check the refcount in refill to determine if + * we need to alloc a new page. + */ + get_page(page_info->page); + + return skb; +} + +static struct sk_buff * +gve_rx_qpl(struct device *dev, struct net_device *netdev, + struct gve_rx_ring *rx, struct gve_rx_slot_page_info *page_info, + u16 len, struct napi_struct *napi, + union gve_rx_data_slot *data_slot) +{ + struct sk_buff *skb; + + /* if raw_addressing mode is not enabled gvnic can only receive into + * registered segments. If the buffer can't be recycled, our only + * choice is to copy the data out of it so that we can return it to the + * device. + */ + if (page_info->can_flip) { + skb = gve_rx_add_frags(napi, page_info, len); + /* No point in recycling if we didn't get the skb */ + if (skb) { + /* Make sure that the page isn't freed. 
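+			 * The extra reference keeps the half-page alive
+			 * while the stack owns the frag;
+			 * gve_rx_can_recycle_buffer() later reads
+			 * page_count() to decide when it may be handed
+			 * out again.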
*/ + get_page(page_info->page); + gve_rx_flip_buff(page_info, &data_slot->qpl_offset); + } + } else { + skb = gve_rx_copy(netdev, napi, page_info, len); + if (skb) { + u64_stats_update_begin(&rx->statss); + rx->rx_copied_pkt++; + u64_stats_update_end(&rx->statss); + } + } + return skb; } static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc, @@ -285,8 +405,9 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc, struct gve_priv *priv = rx->gve; struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi; struct net_device *dev = priv->dev; - struct sk_buff *skb; - int pagecount; + union gve_rx_data_slot *data_slot; + struct sk_buff *skb = NULL; + dma_addr_t page_bus; u16 len; /* drop this packet */ @@ -294,71 +415,55 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc, u64_stats_update_begin(&rx->statss); rx->rx_desc_err_dropped_pkt++; u64_stats_update_end(&rx->statss); - return true; + return false; } len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD; page_info = &rx->data.page_info[idx]; - dma_sync_single_for_cpu(&priv->pdev->dev, rx->data.qpl->page_buses[idx], - PAGE_SIZE, DMA_FROM_DEVICE); - /* gvnic can only receive into registered segments. If the buffer - * can't be recycled, our only choice is to copy the data out of - * it so that we can return it to the device. - */ + data_slot = &rx->data.data_ring[idx]; + page_bus = (rx->data.raw_addressing) ? + be64_to_cpu(data_slot->addr) & GVE_DATA_SLOT_ADDR_PAGE_MASK : + rx->data.qpl->page_buses[idx]; + dma_sync_single_for_cpu(&priv->pdev->dev, page_bus, + PAGE_SIZE, DMA_FROM_DEVICE); - if (PAGE_SIZE == 4096) { - if (len <= priv->rx_copybreak) { - /* Just copy small packets */ - skb = gve_rx_copy(rx, dev, napi, page_info, len); - u64_stats_update_begin(&rx->statss); - rx->rx_copybreak_pkt++; - u64_stats_update_end(&rx->statss); - goto have_skb; - } - if (unlikely(!gve_can_recycle_pages(dev))) { - skb = gve_rx_copy(rx, dev, napi, page_info, len); - goto have_skb; - } - pagecount = page_count(page_info->page); - if (pagecount == 1) { - /* No part of this page is used by any SKBs; we attach - * the page fragment to a new SKB and pass it up the - * stack. - */ - skb = gve_rx_add_frags(dev, napi, page_info, len); - if (!skb) { - u64_stats_update_begin(&rx->statss); - rx->rx_skb_alloc_fail++; - u64_stats_update_end(&rx->statss); - return true; + if (len <= priv->rx_copybreak) { + /* Just copy small packets */ + skb = gve_rx_copy(dev, napi, page_info, len); + u64_stats_update_begin(&rx->statss); + rx->rx_copied_pkt++; + rx->rx_copybreak_pkt++; + u64_stats_update_end(&rx->statss); + } else { + u8 can_flip = gve_rx_can_flip_buffers(dev); + int recycle = 0; + + if (can_flip) { + recycle = gve_rx_can_recycle_buffer(page_info->page); + if (recycle < 0) { + if (!rx->data.raw_addressing) + gve_schedule_reset(priv); + return false; } - /* Make sure the kernel stack can't release the page */ - get_page(page_info->page); - /* "flip" to other packet buffer on this page */ - gve_rx_flip_buff(page_info, &rx->data.data_ring[idx]); - } else if (pagecount >= 2) { - /* We have previously passed the other half of this - * page up the stack, but it has not yet been freed. 
- */ - skb = gve_rx_copy(rx, dev, napi, page_info, len); + } + + page_info->can_flip = can_flip && recycle; + if (rx->data.raw_addressing) { + skb = gve_rx_raw_addressing(&priv->pdev->dev, dev, + page_info, len, napi, + data_slot); } else { - WARN(pagecount < 1, "Pagecount should never be < 1"); - return false; + skb = gve_rx_qpl(&priv->pdev->dev, dev, rx, + page_info, len, napi, data_slot); } - } else { - skb = gve_rx_copy(rx, dev, napi, page_info, len); } -have_skb: - /* We didn't manage to allocate an skb but we haven't had any - * reset worthy failures. - */ if (!skb) { u64_stats_update_begin(&rx->statss); rx->rx_skb_alloc_fail++; u64_stats_update_end(&rx->statss); - return true; + return false; } if (likely(feat & NETIF_F_RXCSUM)) { @@ -399,19 +504,73 @@ static bool gve_rx_work_pending(struct gve_rx_ring *rx) return (GVE_SEQNO(flags_seq) == rx->desc.seqno); } +static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx) +{ + int refill_target = rx->mask + 1; + u32 fill_cnt = rx->fill_cnt; + + while (fill_cnt - rx->cnt < refill_target) { + struct gve_rx_slot_page_info *page_info; + u32 idx = fill_cnt & rx->mask; + + page_info = &rx->data.page_info[idx]; + if (page_info->can_flip) { + /* The other half of the page is free because it was + * free when we processed the descriptor. Flip to it. + */ + union gve_rx_data_slot *data_slot = + &rx->data.data_ring[idx]; + + gve_rx_flip_buff(page_info, &data_slot->addr); + page_info->can_flip = 0; + } else { + /* It is possible that the networking stack has already + * finished processing all outstanding packets in the buffer + * and it can be reused. + * Flipping is unnecessary here - if the networking stack still + * owns half the page it is impossible to tell which half. Either + * the whole page is free or it needs to be replaced. 
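+			 * The page_count() test in
+			 * gve_rx_can_recycle_buffer() is that whole-page
+			 * check: a count of 1 means we are the sole owner
+			 * and may reuse the page; 2 or more means an skb
+			 * still holds a frag and the page must be replaced.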
+ */ + int recycle = gve_rx_can_recycle_buffer(page_info->page); + + if (recycle < 0) { + if (!rx->data.raw_addressing) + gve_schedule_reset(priv); + return false; + } + if (!recycle) { + /* We can't reuse the buffer - alloc a new one*/ + union gve_rx_data_slot *data_slot = + &rx->data.data_ring[idx]; + struct device *dev = &priv->pdev->dev; + + gve_rx_free_buffer(dev, page_info, data_slot); + page_info->page = NULL; + if (gve_rx_alloc_buffer(priv, dev, page_info, data_slot)) + break; + } + } + fill_cnt++; + } + rx->fill_cnt = fill_cnt; + return true; +} + bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, netdev_features_t feat) { struct gve_priv *priv = rx->gve; + u32 work_done = 0, packets = 0; struct gve_rx_desc *desc; u32 cnt = rx->cnt; u32 idx = cnt & rx->mask; - u32 work_done = 0; u64 bytes = 0; desc = rx->desc.desc_ring + idx; while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) && work_done < budget) { + bool dropped; + netif_info(priv, rx_status, priv->dev, "[%d] idx=%d desc=%p desc->flags_seq=0x%x\n", rx->q_num, idx, desc, desc->flags_seq); @@ -419,9 +578,11 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, "[%d] seqno=%d rx->desc.seqno=%d\n", rx->q_num, GVE_SEQNO(desc->flags_seq), rx->desc.seqno); - bytes += be16_to_cpu(desc->len) - GVE_RX_PAD; - if (!gve_rx(rx, desc, feat, idx)) - gve_schedule_reset(priv); + dropped = !gve_rx(rx, desc, feat, idx); + if (!dropped) { + bytes += be16_to_cpu(desc->len) - GVE_RX_PAD; + packets++; + } cnt++; idx = cnt & rx->mask; desc = rx->desc.desc_ring + idx; @@ -429,15 +590,34 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, work_done++; } - if (!work_done) + if (!work_done && rx->fill_cnt - cnt > rx->db_threshold) return false; u64_stats_update_begin(&rx->statss); - rx->rpackets += work_done; + rx->rpackets += packets; rx->rbytes += bytes; u64_stats_update_end(&rx->statss); rx->cnt = cnt; - rx->fill_cnt += work_done; + + /* restock ring slots */ + if (!rx->data.raw_addressing) { + /* In QPL mode buffs are refilled as the desc are processed */ + rx->fill_cnt += work_done; + } else if (rx->fill_cnt - cnt <= rx->db_threshold) { + /* In raw addressing mode buffs are only refilled if the avail + * falls below a threshold. + */ + if (!gve_rx_refill_buffers(priv, rx)) + return false; + + /* If we were not able to completely refill buffers, we'll want + * to schedule this queue for work again to refill buffers. 
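+		 * Returning true keeps this ring in the NAPI poll loop, so
+		 * the partial refill is retried on the next pass instead of
+		 * stalling the ring.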
+ */ + if (rx->fill_cnt - cnt <= rx->db_threshold) { + gve_rx_write_doorbell(priv, rx); + return true; + } + } gve_rx_write_doorbell(priv, rx); return gve_rx_work_pending(rx); diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c index d0244feb0301..6938f3a939d6 100644 --- a/drivers/net/ethernet/google/gve/gve_tx.c +++ b/drivers/net/ethernet/google/gve/gve_tx.c @@ -158,9 +158,11 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx) tx->q_resources, tx->q_resources_bus); tx->q_resources = NULL; - gve_tx_fifo_release(priv, &tx->tx_fifo); - gve_unassign_qpl(priv, tx->tx_fifo.qpl->id); - tx->tx_fifo.qpl = NULL; + if (!tx->raw_addressing) { + gve_tx_fifo_release(priv, &tx->tx_fifo); + gve_unassign_qpl(priv, tx->tx_fifo.qpl->id); + tx->tx_fifo.qpl = NULL; + } bytes = sizeof(*tx->desc) * slots; dma_free_coherent(hdev, bytes, tx->desc, tx->bus); @@ -206,11 +208,15 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx) if (!tx->desc) goto abort_with_info; - tx->tx_fifo.qpl = gve_assign_tx_qpl(priv); + tx->raw_addressing = priv->raw_addressing; + tx->dev = &priv->pdev->dev; + if (!tx->raw_addressing) { + tx->tx_fifo.qpl = gve_assign_tx_qpl(priv); - /* map Tx FIFO */ - if (gve_tx_fifo_init(priv, &tx->tx_fifo)) - goto abort_with_desc; + /* map Tx FIFO */ + if (gve_tx_fifo_init(priv, &tx->tx_fifo)) + goto abort_with_desc; + } tx->q_resources = dma_alloc_coherent(hdev, @@ -228,7 +234,8 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx) return 0; abort_with_fifo: - gve_tx_fifo_release(priv, &tx->tx_fifo); + if (!tx->raw_addressing) + gve_tx_fifo_release(priv, &tx->tx_fifo); abort_with_desc: dma_free_coherent(hdev, bytes, tx->desc, tx->bus); tx->desc = NULL; @@ -301,27 +308,47 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx, return bytes; } -/* The most descriptors we could need are 3 - 1 for the headers, 1 for - * the beginning of the payload at the end of the FIFO, and 1 if the - * payload wraps to the beginning of the FIFO. +/* The most descriptors we could need is MAX_SKB_FRAGS + 3 : 1 for each skb frag, + * +1 for the skb linear portion, +1 for when tcp hdr needs to be in separate descriptor, + * and +1 if the payload wraps to the beginning of the FIFO. */ -#define MAX_TX_DESC_NEEDED 3 +#define MAX_TX_DESC_NEEDED (MAX_SKB_FRAGS + 3) +static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info) +{ + if (info->skb) { + dma_unmap_single(dev, dma_unmap_addr(&info->buf, dma), + dma_unmap_len(&info->buf, len), + DMA_TO_DEVICE); + dma_unmap_len_set(&info->buf, len, 0); + } else { + dma_unmap_page(dev, dma_unmap_addr(&info->buf, dma), + dma_unmap_len(&info->buf, len), + DMA_TO_DEVICE); + dma_unmap_len_set(&info->buf, len, 0); + } +} /* Check if sufficient resources (descriptor ring space, FIFO space) are * available to transmit the given number of bytes. */ static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required) { - return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && - gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required)); + bool can_alloc = true; + + if (!tx->raw_addressing) + can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required); + + return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc); } /* Stops the queue if the skb cannot be transmitted. 
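 * With raw addressing there is no FIFO to reserve space in, so
 * bytes_required stays 0 and only descriptor ring room is checked.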
*/ static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb) { - int bytes_required; + int bytes_required = 0; + + if (!tx->raw_addressing) + bytes_required = gve_skb_fifo_bytes_required(tx, skb); - bytes_required = gve_skb_fifo_bytes_required(tx, skb); if (likely(gve_can_tx(tx, bytes_required))) return 0; @@ -395,17 +422,13 @@ static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses, { u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE; u64 first_page = iov_offset / PAGE_SIZE; - dma_addr_t dma; u64 page; - for (page = first_page; page <= last_page; page++) { - dma = page_buses[page]; - dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE); - } + for (page = first_page; page <= last_page; page++) + dma_sync_single_for_device(dev, page_buses[page], PAGE_SIZE, DMA_TO_DEVICE); } -static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb, - struct device *dev) +static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb) { int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset; union gve_tx_desc *pkt_desc, *seg_desc; @@ -447,7 +470,7 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb, skb_copy_bits(skb, 0, tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset, hlen); - gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses, + gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses, info->iov[hdr_nfrags - 1].iov_offset, info->iov[hdr_nfrags - 1].iov_len); copy_offset = hlen; @@ -463,7 +486,7 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb, skb_copy_bits(skb, copy_offset, tx->tx_fifo.base + info->iov[i].iov_offset, info->iov[i].iov_len); - gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses, + gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses, info->iov[i].iov_offset, info->iov[i].iov_len); copy_offset += info->iov[i].iov_len; @@ -472,6 +495,94 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb, return 1 + payload_nfrags; } +static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, + struct sk_buff *skb) +{ + const struct skb_shared_info *shinfo = skb_shinfo(skb); + int hlen, payload_nfrags, l4_hdr_offset; + union gve_tx_desc *pkt_desc, *seg_desc; + struct gve_tx_buffer_state *info; + bool is_gso = skb_is_gso(skb); + u32 idx = tx->req & tx->mask; + struct gve_tx_dma_buf *buf; + u64 addr; + u32 len; + int i; + + info = &tx->info[idx]; + pkt_desc = &tx->desc[idx]; + + l4_hdr_offset = skb_checksum_start_offset(skb); + /* If the skb is gso, then we want only up to the tcp header in the first segment + * to efficiently replicate on each segment otherwise we want the linear portion + * of the skb (which will contain the checksum because skb->csum_start and + * skb->csum_offset are given relative to skb->head) in the first segment. + */ + hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) : skb_headlen(skb); + len = skb_headlen(skb); + + info->skb = skb; + + addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx->dev, addr))) { + tx->dma_mapping_error++; + goto drop; + } + buf = &info->buf; + dma_unmap_len_set(buf, len, len); + dma_unmap_addr_set(buf, dma, addr); + + payload_nfrags = shinfo->nr_frags; + if (hlen < len) { + /* For gso the rest of the linear portion of the skb needs to + * be in its own descriptor. 
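+	 * Resulting descriptor layout for a GSO skb (illustrative):
+	 *	pkt_desc: the hlen header bytes
+	 *	seg_desc: skb_headlen() - hlen linear bytes (this branch)
+	 *	seg_desc: one per skb fragment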
+ */ + payload_nfrags++; + gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset, + 1 + payload_nfrags, hlen, addr); + + len -= hlen; + addr += hlen; + idx = (tx->req + 1) & tx->mask; + seg_desc = &tx->desc[idx]; + gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr); + } else { + gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset, + 1 + payload_nfrags, hlen, addr); + } + + for (i = 0; i < shinfo->nr_frags; i++) { + const skb_frag_t *frag = &shinfo->frags[i]; + + idx = (idx + 1) & tx->mask; + seg_desc = &tx->desc[idx]; + len = skb_frag_size(frag); + addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx->dev, addr))) { + tx->dma_mapping_error++; + goto unmap_drop; + } + buf = &tx->info[idx].buf; + tx->info[idx].skb = NULL; + dma_unmap_len_set(buf, len, len); + dma_unmap_addr_set(buf, dma, addr); + + gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr); + } + + return 1 + payload_nfrags; + +unmap_drop: + i += (payload_nfrags == shinfo->nr_frags ? 1 : 2); + while (i--) { + idx--; + gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]); + } +drop: + tx->dropped_pkt++; + return 0; +} + netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev) { struct gve_priv *priv = netdev_priv(dev); @@ -490,17 +601,26 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev) gve_tx_put_doorbell(priv, tx->q_resources, tx->req); return NETDEV_TX_BUSY; } - nsegs = gve_tx_add_skb(tx, skb, &priv->pdev->dev); - - netdev_tx_sent_queue(tx->netdev_txq, skb->len); - skb_tx_timestamp(skb); - - /* give packets to NIC */ - tx->req += nsegs; + if (tx->raw_addressing) + nsegs = gve_tx_add_skb_no_copy(priv, tx, skb); + else + nsegs = gve_tx_add_skb_copy(priv, tx, skb); + + /* If the packet is getting sent, we need to update the skb */ + if (nsegs) { + netdev_tx_sent_queue(tx->netdev_txq, skb->len); + skb_tx_timestamp(skb); + tx->req += nsegs; + } else { + dev_kfree_skb_any(skb); + } if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more()) return NETDEV_TX_OK; + /* Give packets to NIC. Even if this packet failed to send the doorbell + * might need to be rung because of xmit_more. 
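+	 * An earlier skb in this batch may have skipped its doorbell
+	 * because netdev_xmit_more() promised more traffic, so even a
+	 * dropped packet must not leave queued descriptors unposted.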
+ */ gve_tx_put_doorbell(priv, tx->q_resources, tx->req); return NETDEV_TX_OK; } @@ -525,24 +645,29 @@ static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx, info = &tx->info[idx]; skb = info->skb; + /* Unmap the buffer */ + if (tx->raw_addressing) + gve_tx_unmap_buf(tx->dev, info); + tx->done++; /* Mark as free */ if (skb) { info->skb = NULL; bytes += skb->len; pkts++; dev_consume_skb_any(skb); + if (tx->raw_addressing) + continue; /* FIFO free */ for (i = 0; i < ARRAY_SIZE(info->iov); i++) { - space_freed += info->iov[i].iov_len + - info->iov[i].iov_padding; + space_freed += info->iov[i].iov_len + info->iov[i].iov_padding; info->iov[i].iov_len = 0; info->iov[i].iov_padding = 0; } } - tx->done++; } - gve_tx_free_fifo(&tx->tx_fifo, space_freed); + if (!tx->raw_addressing) + gve_tx_free_fifo(&tx->tx_fifo, space_freed); u64_stats_update_begin(&tx->statss); tx->bytes_done += bytes; tx->pkt_done += pkts; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index a9aca8c24e90..173d6966c1a3 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -546,9 +546,9 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb) obj_args.integer.type = ACPI_TYPE_INTEGER; obj_args.integer.value = mac_cb->mac_id; - argv4.type = ACPI_TYPE_PACKAGE, - argv4.package.count = 1, - argv4.package.elements = &obj_args, + argv4.type = ACPI_TYPE_PACKAGE; + argv4.package.count = 1; + argv4.package.elements = &obj_args; obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev), &hns_dsaf_acpi_dsm_guid, 0, @@ -593,9 +593,9 @@ static int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt) obj_args.integer.type = ACPI_TYPE_INTEGER; obj_args.integer.value = mac_cb->mac_id; - argv4.type = ACPI_TYPE_PACKAGE, - argv4.package.count = 1, - argv4.package.elements = &obj_args, + argv4.type = ACPI_TYPE_PACKAGE; + argv4.package.count = 1; + argv4.package.elements = &obj_args; obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev), &hns_dsaf_acpi_dsm_guid, 0, diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 1ffe8fac702d..fb5e8842983c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -110,6 +110,7 @@ struct hclge_vf_to_pf_msg { u8 en_bc; u8 en_uc; u8 en_mc; + u8 en_limit_promisc; }; struct { u8 vector_id; diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 78b48861ff8b..a7daf6d4511e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -29,7 +29,9 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/pci.h> +#include <linux/pkt_sched.h> #include <linux/types.h> +#include <net/pkt_cls.h> #define HNAE3_MOD_VERSION "1.0" @@ -457,6 +459,12 @@ struct hnae3_ae_dev { * Configure the default MAC for specified VF * get_module_eeprom * Get the optical module eeprom info. 
+ * add_cls_flower + * Add clsflower rule + * del_cls_flower + * Delete clsflower rule + * cls_flower_active + * Check if any cls flower rule exist */ struct hnae3_ae_ops { int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev); @@ -634,6 +642,11 @@ struct hnae3_ae_ops { int (*get_module_eeprom)(struct hnae3_handle *handle, u32 offset, u32 len, u8 *data); bool (*get_cmdq_stat)(struct hnae3_handle *handle); + int (*add_cls_flower)(struct hnae3_handle *handle, + struct flow_cls_offload *cls_flower, int tc); + int (*del_cls_flower)(struct hnae3_handle *handle, + struct flow_cls_offload *cls_flower); + bool (*cls_flower_active)(struct hnae3_handle *handle); }; struct hnae3_dcb_ops { @@ -647,7 +660,8 @@ struct hnae3_dcb_ops { u8 (*getdcbx)(struct hnae3_handle *); u8 (*setdcbx)(struct hnae3_handle *, u8); - int (*setup_tc)(struct hnae3_handle *, u8, u8 *); + int (*setup_tc)(struct hnae3_handle *handle, + struct tc_mqprio_qopt_offload *mqprio_qopt); }; struct hnae3_ae_algo { @@ -659,15 +673,17 @@ struct hnae3_ae_algo { #define HNAE3_INT_NAME_LEN 32 #define HNAE3_ITR_COUNTDOWN_START 100 +#define HNAE3_MAX_TC 8 +#define HNAE3_MAX_USER_PRIO 8 struct hnae3_tc_info { - u16 tqp_offset; /* TQP offset from base TQP */ - u16 tqp_count; /* Total TQPs */ - u8 tc; /* TC index */ - bool enable; /* If this TC is enable or not */ + u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */ + u16 tqp_count[HNAE3_MAX_TC]; + u16 tqp_offset[HNAE3_MAX_TC]; + unsigned long tc_en; /* bitmap of TC enabled */ + u8 num_tc; /* Total number of enabled TCs */ + bool mqprio_active; }; -#define HNAE3_MAX_TC 8 -#define HNAE3_MAX_USER_PRIO 8 struct hnae3_knic_private_info { struct net_device *netdev; /* Set by KNIC client when init instance */ u16 rss_size; /* Allocated RSS queues */ @@ -676,9 +692,7 @@ struct hnae3_knic_private_info { u16 num_tx_desc; u16 num_rx_desc; - u8 num_tc; /* Total number of enabled TCs */ - u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */ - struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */ + struct hnae3_tc_info tc_info; u16 num_tqps; /* total number of TQPs in this handle */ struct hnae3_queue **tqp; /* array base of all TQPs in this instance */ @@ -719,6 +733,11 @@ struct hnae3_roce_private_info { #define HNAE3_UPE (HNAE3_USER_UPE | HNAE3_OVERFLOW_UPE) #define HNAE3_MPE (HNAE3_USER_MPE | HNAE3_OVERFLOW_MPE) +enum hnae3_pflag { + HNAE3_PFLAG_LIMIT_PROMISC, + HNAE3_PFLAG_MAX +}; + struct hnae3_handle { struct hnae3_client *client; struct pci_dev *pdev; @@ -741,6 +760,9 @@ struct hnae3_handle { /* Network interface message level enabled bits */ u32 msg_enable; + + unsigned long supported_pflags; + unsigned long priv_flags; }; #define hnae3_set_field(origin, mask, shift, val) \ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index cb26742e2ed8..9d4e9c053a8f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -385,7 +385,8 @@ static void hns3_dbg_dev_specs(struct hnae3_handle *h) dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); - dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc); + dev_info(priv->dev, "Total number of enabled TCs: %u\n", + kinfo->tc_info.num_tc); dev_info(priv->dev, "MAX INT QL: %u\n", dev_specs->int_ql_max); dev_info(priv->dev, "MAX INT GL: 
%u\n", dev_specs->max_int_gl); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 1798c0a04b0e..405e49033417 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -323,13 +323,14 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev) { struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_knic_private_info *kinfo = &h->kinfo; - unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; + unsigned int queue_size = kinfo->num_tqps; int i, ret; - if (kinfo->num_tc <= 1) { + if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) { netdev_reset_tc(netdev); } else { - ret = netdev_set_num_tc(netdev, kinfo->num_tc); + ret = netdev_set_num_tc(netdev, tc_info->num_tc); if (ret) { netdev_err(netdev, "netdev_set_num_tc fail, ret=%d!\n", ret); @@ -337,13 +338,11 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev) } for (i = 0; i < HNAE3_MAX_TC; i++) { - if (!kinfo->tc_info[i].enable) + if (!test_bit(i, &tc_info->tc_en)) continue; - netdev_set_tc_queue(netdev, - kinfo->tc_info[i].tc, - kinfo->tc_info[i].tqp_count, - kinfo->tc_info[i].tqp_offset); + netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i], + tc_info->tqp_offset[i]); } } @@ -369,7 +368,7 @@ static u16 hns3_get_max_available_channels(struct hnae3_handle *h) u16 alloc_tqps, max_rss_size, rss_size; h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); - rss_size = alloc_tqps / h->kinfo.num_tc; + rss_size = alloc_tqps / h->kinfo.tc_info.num_tc; return min_t(u16, rss_size, max_rss_size); } @@ -508,7 +507,7 @@ static int hns3_nic_net_open(struct net_device *netdev) kinfo = &h->kinfo; for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) - netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]); + netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]); if (h->ae_algo->ops->set_timer_task) h->ae_algo->ops->set_timer_task(priv->ae_handle, true); @@ -1006,6 +1005,7 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, struct sk_buff *skb) { struct hnae3_handle *handle = tx_ring->tqp->handle; + struct hnae3_ae_dev *ae_dev; struct vlan_ethhdr *vhdr; int rc; @@ -1013,10 +1013,13 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, skb_vlan_tag_present(skb))) return 0; - /* Since HW limitation, if port based insert VLAN enabled, only one VLAN - * header is allowed in skb, otherwise it will cause RAS error. + /* For HW limitation on HNAE3_DEVICE_VERSION_V2, if port based insert + * VLAN enabled, only one VLAN header is allowed in skb, otherwise it + * will cause RAS error. 
*/ + ae_dev = pci_get_drvdata(handle->pdev); if (unlikely(skb_vlan_tagged_multi(skb) && + ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE)) return -EINVAL; @@ -1665,6 +1668,13 @@ static int hns3_nic_set_features(struct net_device *netdev, h->ae_algo->ops->enable_fd(h, enable); } + if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) && + h->ae_algo->ops->cls_flower_active(h)) { + netdev_err(netdev, + "there are offloaded TC filters active, cannot disable HW TC offload"); + return -EINVAL; + } + netdev->features = features; return 0; } @@ -1790,7 +1800,6 @@ static void hns3_nic_get_stats64(struct net_device *netdev, static int hns3_setup_tc(struct net_device *netdev, void *type_data) { struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; - u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map; struct hnae3_knic_private_info *kinfo; u8 tc = mqprio_qopt->qopt.num_tc; u16 mode = mqprio_qopt->mode; @@ -1813,16 +1822,70 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data) netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc); return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? - kinfo->dcb_ops->setup_tc(h, tc ? tc : 1, prio_tc) : -EOPNOTSUPP; + kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP; +} + +static int hns3_setup_tc_cls_flower(struct hns3_nic_priv *priv, + struct flow_cls_offload *flow) +{ + int tc = tc_classid_to_hwtc(priv->netdev, flow->classid); + struct hnae3_handle *h = hns3_get_handle(priv->netdev); + + switch (flow->command) { + case FLOW_CLS_REPLACE: + if (h->ae_algo->ops->add_cls_flower) + return h->ae_algo->ops->add_cls_flower(h, flow, tc); + break; + case FLOW_CLS_DESTROY: + if (h->ae_algo->ops->del_cls_flower) + return h->ae_algo->ops->del_cls_flower(h, flow); + break; + default: + break; + } + + return -EOPNOTSUPP; +} + +static int hns3_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct hns3_nic_priv *priv = cb_priv; + + if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return hns3_setup_tc_cls_flower(priv, type_data); + default: + return -EOPNOTSUPP; + } } +static LIST_HEAD(hns3_block_cb_list); + static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { - if (type != TC_SETUP_QDISC_MQPRIO) + struct hns3_nic_priv *priv = netdev_priv(dev); + int ret; + + switch (type) { + case TC_SETUP_QDISC_MQPRIO: + ret = hns3_setup_tc(dev, type_data); + break; + case TC_SETUP_BLOCK: + ret = flow_block_cb_setup_simple(type_data, + &hns3_block_cb_list, + hns3_setup_tc_block_cb, + priv, priv, true); + break; + default: return -EOPNOTSUPP; + } - return hns3_setup_tc(dev, type_data); + return ret; } static int hns3_vlan_rx_add_vid(struct net_device *netdev, @@ -2419,6 +2482,11 @@ static void hns3_set_default_feature(struct net_device *netdev) netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; } + + if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) { + netdev->hw_features |= NETIF_F_HW_TC; + netdev->features |= NETIF_F_HW_TC; + } } static int hns3_alloc_buffer(struct hns3_enet_ring *ring, @@ -3976,21 +4044,20 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring) static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv) { struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; int i; for (i = 0; i < 
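For orientation, the two hooks above map onto the usual userspace flow. A command such as `tc qdisc add dev eth0 root mqprio num_tc 3 map 0 0 1 1 2 2 2 2 queues 4@0 4@4 8@8 hw 1 mode channel` reaches the driver as TC_SETUP_QDISC_MQPRIO, while `tc filter add dev eth0 protocol ip ingress flower dst_ip 192.168.1.100 skip_sw hw_tc 1` arrives as TC_SETUP_BLOCK with a FLOW_CLS_REPLACE command, and tc_classid_to_hwtc() recovers the target TC from the filter's classid. (Commands are illustrative; exact iproute2 syntax may vary by version.)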
HNAE3_MAX_TC; i++) { - struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; int j; - if (!tc_info->enable) + if (!test_bit(i, &tc_info->tc_en)) continue; - for (j = 0; j < tc_info->tqp_count; j++) { + for (j = 0; j < tc_info->tqp_count[i]; j++) { struct hnae3_queue *q; - q = priv->ring[tc_info->tqp_offset + j].tqp; - hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, - tc_info->tc); + q = priv->ring[tc_info->tqp_offset[i] + j].tqp; + hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i); } } } @@ -4117,7 +4184,8 @@ static void hns3_info_show(struct hns3_nic_priv *priv) dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); - dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc); + dev_info(priv->dev, "Total number of enabled TCs: %u\n", + kinfo->tc_info.num_tc); dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu); } @@ -4226,6 +4294,9 @@ static int hns3_client_init(struct hnae3_handle *handle) set_bit(HNS3_NIC_STATE_INITED, &priv->state); + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) + set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags); + if (netif_msg_drv(handle)) hns3_info_show(priv); @@ -4685,6 +4756,12 @@ int hns3_set_channels(struct net_device *netdev, if (ch->rx_count || ch->tx_count) return -EINVAL; + if (kinfo->tc_info.mqprio_active) { + dev_err(&netdev->dev, + "it's not allowed to set channels via ethtool when MQPRIO mode is on\n"); + return -EINVAL; + } + if (new_tqp_num > hns3_get_max_available_channels(h) || new_tqp_num < 1) { dev_err(&netdev->dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 3cca3c125c03..e2fc443fe92c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -18,6 +18,11 @@ struct hns3_sfp_type { u8 ext_type; }; +struct hns3_pflag_desc { + char name[ETH_GSTRING_LEN]; + void (*handler)(struct net_device *netdev, bool enable); +}; + /* tqp related stats */ #define HNS3_TQP_STAT(_string, _member) { \ .stats_string = _string, \ @@ -60,6 +65,8 @@ static const struct hns3_stats hns3_rxq_stats[] = { HNS3_TQP_STAT("non_reuse_pg", non_reuse_pg), }; +#define HNS3_PRIV_FLAGS_LEN ARRAY_SIZE(hns3_priv_flags) + #define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats) #define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT) @@ -395,6 +402,23 @@ static void hns3_self_test(struct net_device *ndev, netif_dbg(h, drv, ndev, "self test end\n"); } +static void hns3_update_limit_promisc_mode(struct net_device *netdev, + bool enable) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + + if (enable) + set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags); + else + clear_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags); + + hns3_request_update_promisc_mode(handle); +} + +static const struct hns3_pflag_desc hns3_priv_flags[HNAE3_PFLAG_MAX] = { + { "limit_promisc", hns3_update_limit_promisc_mode } +}; + static int hns3_get_sset_count(struct net_device *netdev, int stringset) { struct hnae3_handle *h = hns3_get_handle(netdev); @@ -411,6 +435,9 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset) case ETH_SS_TEST: return ops->get_sset_count(h, stringset); + case ETH_SS_PRIV_FLAGS: + return HNAE3_PFLAG_MAX; + default: return -EOPNOTSUPP; } @@ -464,6 +491,7 @@ static void hns3_get_strings(struct net_device 
*netdev, u32 stringset, u8 *data) struct hnae3_handle *h = hns3_get_handle(netdev); const struct hnae3_ae_ops *ops = h->ae_algo->ops; char *buff = (char *)data; + int i; if (!ops->get_strings) return; @@ -476,6 +504,13 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data) case ETH_SS_TEST: ops->get_strings(h, stringset, data); break; + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < HNS3_PRIV_FLAGS_LEN; i++) { + snprintf(buff, ETH_GSTRING_LEN, "%s", + hns3_priv_flags[i].name); + buff += ETH_GSTRING_LEN; + } + break; default: break; } @@ -1517,6 +1552,53 @@ static int hns3_get_module_eeprom(struct net_device *netdev, return ops->get_module_eeprom(handle, ee->offset, ee->len, data); } +static u32 hns3_get_priv_flags(struct net_device *netdev) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + + return handle->priv_flags; +} + +static int hns3_check_priv_flags(struct hnae3_handle *h, u32 changed) +{ + u32 i; + + for (i = 0; i < HNAE3_PFLAG_MAX; i++) + if ((changed & BIT(i)) && !test_bit(i, &h->supported_pflags)) { + netdev_err(h->netdev, "%s is unsupported\n", + hns3_priv_flags[i].name); + return -EOPNOTSUPP; + } + + return 0; +} + +static int hns3_set_priv_flags(struct net_device *netdev, u32 pflags) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + u32 changed = pflags ^ handle->priv_flags; + int ret; + u32 i; + + ret = hns3_check_priv_flags(handle, changed); + if (ret) + return ret; + + for (i = 0; i < HNAE3_PFLAG_MAX; i++) { + if (changed & BIT(i)) { + bool enable = !(handle->priv_flags & BIT(i)); + + if (enable) + handle->priv_flags |= BIT(i); + else + handle->priv_flags &= ~BIT(i); + hns3_priv_flags[i].handler(netdev, enable); + } + } + + return 0; +} + #define HNS3_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \ ETHTOOL_COALESCE_USE_ADAPTIVE | \ ETHTOOL_COALESCE_RX_USECS_HIGH | \ @@ -1547,6 +1629,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = { .get_link = hns3_get_link, .get_msglevel = hns3_get_msglevel, .set_msglevel = hns3_set_msglevel, + .get_priv_flags = hns3_get_priv_flags, + .set_priv_flags = hns3_set_priv_flags, }; static const struct ethtool_ops hns3_ethtool_ops = { @@ -1583,6 +1667,8 @@ static const struct ethtool_ops hns3_ethtool_ops = { .set_fecparam = hns3_set_fecparam, .get_module_info = hns3_get_module_info, .get_module_eeprom = hns3_get_module_eeprom, + .get_priv_flags = hns3_get_priv_flags, + .set_priv_flags = hns3_set_priv_flags, }; void hns3_ethtool_set_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 85986c7d71fa..b728be4737f8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -359,6 +359,8 @@ static void hclge_parse_capability(struct hclge_dev *hdev, set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps); if (hnae3_get_bit(caps, HCLGE_CAP_UDP_TUNNEL_CSUM_B)) set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps); + if (hnae3_get_bit(caps, HCLGE_CAP_FD_FORWARD_TC_B)) + set_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps); } static enum hclge_cmd_status diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 49cbd954f76b..edfadb5cb1c3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -518,6 +518,8 @@ struct hclge_pf_res_cmd { #define 
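The private-flags plumbing added above follows a common ethtool pattern: XOR the requested mask against the current one so that only flipped bits are validated and dispatched. A condensed sketch of that pattern (names reuse the driver's, but this is illustrative, not the exact code):

```c
static int sketch_set_priv_flags(struct hnae3_handle *h, u32 pflags)
{
	u32 changed = pflags ^ (u32)h->priv_flags;
	u32 i;

	/* reject before touching any state */
	for (i = 0; i < HNAE3_PFLAG_MAX; i++)
		if ((changed & BIT(i)) && !test_bit(i, &h->supported_pflags))
			return -EOPNOTSUPP;

	for (i = 0; i < HNAE3_PFLAG_MAX; i++) {
		if (!(changed & BIT(i)))
			continue;
		if (pflags & BIT(i))
			set_bit(i, &h->priv_flags);
		else
			clear_bit(i, &h->priv_flags);
		/* the per-flag handler runs here; for limit_promisc that
		 * means re-issuing the promiscuous-mode configuration */
	}
	return 0;
}
```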
HCLGE_CFG_SPEED_ABILITY_EXT_M GENMASK(15, 10) #define HCLGE_CFG_UMV_TBL_SPACE_S 16 #define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16) +#define HCLGE_CFG_PF_RSS_SIZE_S 0 +#define HCLGE_CFG_PF_RSS_SIZE_M GENMASK(3, 0) #define HCLGE_CFG_CMD_CNT 4 @@ -558,18 +560,23 @@ struct hclge_rss_input_tuple_cmd { }; #define HCLGE_RSS_CFG_TBL_SIZE 16 +#define HCLGE_RSS_CFG_TBL_SIZE_H 4 +#define HCLGE_RSS_CFG_TBL_BW_H 2U +#define HCLGE_RSS_CFG_TBL_BW_L 8U struct hclge_rss_indirection_table_cmd { __le16 start_table_index; __le16 rss_set_bitmap; - u8 rsv[4]; - u8 rss_result[HCLGE_RSS_CFG_TBL_SIZE]; + u8 rss_qid_h[HCLGE_RSS_CFG_TBL_SIZE_H]; + u8 rss_qid_l[HCLGE_RSS_CFG_TBL_SIZE]; }; #define HCLGE_RSS_TC_OFFSET_S 0 -#define HCLGE_RSS_TC_OFFSET_M GENMASK(9, 0) +#define HCLGE_RSS_TC_OFFSET_M GENMASK(10, 0) +#define HCLGE_RSS_TC_SIZE_MSB_B 11 #define HCLGE_RSS_TC_SIZE_S 12 #define HCLGE_RSS_TC_SIZE_M GENMASK(14, 12) +#define HCLGE_RSS_TC_SIZE_MSB_OFFSET 3 #define HCLGE_RSS_TC_VALID_B 15 struct hclge_rss_tc_mode_cmd { __le16 rss_tc_mode[HCLGE_MAX_TC_NUM]; @@ -583,23 +590,26 @@ struct hclge_link_status_cmd { u8 rsv[23]; }; -struct hclge_promisc_param { - u8 vf_id; - u8 enable; -}; +/* for DEVICE_VERSION_V1/2, reference to promisc cmd byte8 */ +#define HCLGE_PROMISC_EN_UC 1 +#define HCLGE_PROMISC_EN_MC 2 +#define HCLGE_PROMISC_EN_BC 3 +#define HCLGE_PROMISC_TX_EN 4 +#define HCLGE_PROMISC_RX_EN 5 + +/* for DEVICE_VERSION_V3, reference to promisc cmd byte10 */ +#define HCLGE_PROMISC_UC_RX_EN 2 +#define HCLGE_PROMISC_MC_RX_EN 3 +#define HCLGE_PROMISC_BC_RX_EN 4 +#define HCLGE_PROMISC_UC_TX_EN 5 +#define HCLGE_PROMISC_MC_TX_EN 6 +#define HCLGE_PROMISC_BC_TX_EN 7 -#define HCLGE_PROMISC_TX_EN_B BIT(4) -#define HCLGE_PROMISC_RX_EN_B BIT(5) -#define HCLGE_PROMISC_EN_B 1 -#define HCLGE_PROMISC_EN_ALL 0x7 -#define HCLGE_PROMISC_EN_UC 0x1 -#define HCLGE_PROMISC_EN_MC 0x2 -#define HCLGE_PROMISC_EN_BC 0x4 struct hclge_promisc_cfg_cmd { - u8 flag; + u8 promisc; u8 vf_id; - __le16 rsv0; - u8 rsv1[20]; + u8 extend_promisc; + u8 rsv0[21]; }; enum hclge_promisc_type { @@ -822,6 +832,7 @@ enum hclge_mac_vlan_cfg_sel { #define HCLGE_CFG_NIC_ROCE_SEL_B 4 #define HCLGE_ACCEPT_TAG2_B 5 #define HCLGE_ACCEPT_UNTAG2_B 6 +#define HCLGE_TAG_SHIFT_MODE_EN_B 7 #define HCLGE_VF_NUM_PER_BYTE 8 struct hclge_vport_vtag_tx_cfg_cmd { @@ -838,6 +849,8 @@ struct hclge_vport_vtag_tx_cfg_cmd { #define HCLGE_REM_TAG2_EN_B 1 #define HCLGE_SHOW_TAG1_EN_B 2 #define HCLGE_SHOW_TAG2_EN_B 3 +#define HCLGE_DISCARD_TAG1_EN_B 5 +#define HCLGE_DISCARD_TAG2_EN_B 6 struct hclge_vport_vtag_rx_cfg_cmd { u8 vport_vlan_cfg; u8 vf_offset; @@ -1045,6 +1058,9 @@ struct hclge_fd_tcam_config_3_cmd { #define HCLGE_FD_AD_WR_RULE_ID_B 0 #define HCLGE_FD_AD_RULE_ID_S 1 #define HCLGE_FD_AD_RULE_ID_M GENMASK(13, 1) +#define HCLGE_FD_AD_TC_OVRD_B 16 +#define HCLGE_FD_AD_TC_SIZE_S 17 +#define HCLGE_FD_AD_TC_SIZE_M GENMASK(20, 17) struct hclge_fd_ad_config_cmd { u8 stage; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index f990f6915226..e08d11b8ecf1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -397,32 +397,130 @@ static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode) return 0; } +static int hclge_mqprio_qopt_check(struct hclge_dev *hdev, + struct tc_mqprio_qopt_offload *mqprio_qopt) +{ + u16 queue_sum = 0; + int ret; + int i; + + if (!mqprio_qopt->qopt.num_tc) { + mqprio_qopt->qopt.num_tc = 1; + return 0; + } + + ret = 
hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc, + mqprio_qopt->qopt.prio_tc_map); + if (ret) + return ret; + + for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) { + if (!is_power_of_2(mqprio_qopt->qopt.count[i])) { + dev_err(&hdev->pdev->dev, + "qopt queue count must be power of 2\n"); + return -EINVAL; + } + + if (mqprio_qopt->qopt.count[i] > hdev->pf_rss_size_max) { + dev_err(&hdev->pdev->dev, + "qopt queue count should be no more than %u\n", + hdev->pf_rss_size_max); + return -EINVAL; + } + + if (mqprio_qopt->qopt.offset[i] != queue_sum) { + dev_err(&hdev->pdev->dev, + "qopt queue offset must start from 0, and be continuous\n"); + return -EINVAL; + } + + if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) { + dev_err(&hdev->pdev->dev, + "qopt tx_rate is not supported\n"); + return -EOPNOTSUPP; + } + + queue_sum = mqprio_qopt->qopt.offset[i]; + queue_sum += mqprio_qopt->qopt.count[i]; + } + if (hdev->vport[0].alloc_tqps < queue_sum) { + dev_err(&hdev->pdev->dev, + "qopt queue count sum should be no more than %u\n", + hdev->vport[0].alloc_tqps); + return -EINVAL; + } + + return 0; +} + +static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info, + struct tc_mqprio_qopt_offload *mqprio_qopt) +{ + int i; + + memset(tc_info, 0, sizeof(*tc_info)); + tc_info->num_tc = mqprio_qopt->qopt.num_tc; + memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map, + sizeof_field(struct hnae3_tc_info, prio_tc)); + memcpy(tc_info->tqp_count, mqprio_qopt->qopt.count, + sizeof_field(struct hnae3_tc_info, tqp_count)); + memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset, + sizeof_field(struct hnae3_tc_info, tqp_offset)); + + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) + set_bit(tc_info->prio_tc[i], &tc_info->tc_en); +} + +static int hclge_config_tc(struct hclge_dev *hdev, + struct hnae3_tc_info *tc_info) +{ + int i; + + hclge_tm_schd_info_update(hdev, tc_info->num_tc); + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) + hdev->tm_info.prio_tc[i] = tc_info->prio_tc[i]; + + return hclge_map_update(hdev); +} + /* Set up TC for hardware offloaded mqprio in channel mode */ -static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc) +static int hclge_setup_tc(struct hnae3_handle *h, + struct tc_mqprio_qopt_offload *mqprio_qopt) { struct hclge_vport *vport = hclge_get_vport(h); + struct hnae3_knic_private_info *kinfo; struct hclge_dev *hdev = vport->back; + struct hnae3_tc_info old_tc_info; + u8 tc = mqprio_qopt->qopt.num_tc; int ret; + /* if the client is unregistered, it's not allowed to change + * the mqprio configuration, which may cause the ring uninit to + * fail. 
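To make the checks above concrete, here is a qopt layout that passes hclge_mqprio_qopt_check() (values are illustrative): per-TC counts must be powers of two no larger than pf_rss_size_max, offsets must tile the queue range contiguously from 0, and the total may not exceed the vport's allocated TQPs. For example, matching `tc ... queues 4@0 4@4 8@8`:

```c
#include <linux/pkt_sched.h>

/* 3 TCs over 16 queues; offset[i] equals the sum of earlier counts */
struct tc_mqprio_qopt example_qopt = {
	.num_tc      = 3,
	.prio_tc_map = { 0, 0, 1, 1, 2, 2, 2, 2 },
	.count       = { 4, 4, 8 },	/* powers of 2, <= pf_rss_size_max */
	.offset      = { 0, 4, 8 },	/* contiguous from 0 */
};
```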
+ */ + if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) + return -EBUSY; + if (hdev->flag & HCLGE_FLAG_DCB_ENABLE) return -EINVAL; - ret = hclge_dcb_common_validate(hdev, tc, prio_tc); - if (ret) - return -EINVAL; + ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to check mqprio qopt params, ret = %d\n", ret); + return ret; + } ret = hclge_notify_down_uinit(hdev); if (ret) return ret; - hclge_tm_schd_info_update(hdev, tc); - hclge_tm_prio_tc_info_update(hdev, prio_tc); - - ret = hclge_tm_init_hw(hdev, false); - if (ret) - goto err_out; + kinfo = &vport->nic.kinfo; + memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info)); + hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt); + kinfo->tc_info.mqprio_active = tc > 0; - ret = hclge_client_setup_tc(hdev); + ret = hclge_config_tc(hdev, &kinfo->tc_info); if (ret) goto err_out; @@ -436,6 +534,12 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc) return hclge_notify_init_up(hdev); err_out: + /* roll-back */ + memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info)); + if (hclge_config_tc(hdev, &kinfo->tc_info)) + dev_err(&hdev->pdev->dev, + "failed to roll back tc configuration\n"); + hclge_notify_init_up(hdev); return ret; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index bedbc118c4a3..8f6dea5198cf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -1454,7 +1454,7 @@ static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev) dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id); - for (i = 0; i < kinfo->num_tc; i++) { + for (i = 0; i < kinfo->tc_info.num_tc; i++) { u16 qsid = vport->qs_offset + i; hclge_dbg_dump_qs_shaper_single(hdev, qsid); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h index a9066e6ff697..ca2ab6cf84d9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h @@ -35,8 +35,6 @@ #define HCLGE_DBG_DFX_SSU_2_OFFSET 12 -#pragma pack(1) - struct hclge_qos_pri_map_cmd { u8 pri0_tc : 4, pri1_tc : 4; @@ -85,8 +83,6 @@ struct hclge_dbg_reg_type_info { struct hclge_dbg_reg_common_msg reg_msg; }; -#pragma pack() - static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = { {false, "Reserved"}, {true, "BP_CPU_STATE"}, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index ca668a47121e..7a164115c845 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1285,9 +1285,9 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]), HCLGE_CFG_DEFAULT_SPEED_M, HCLGE_CFG_DEFAULT_SPEED_S); - cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), - HCLGE_CFG_RSS_SIZE_M, - HCLGE_CFG_RSS_SIZE_S); + cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_RSS_SIZE_M, + HCLGE_CFG_RSS_SIZE_S); for (i = 0; i < ETH_ALEN; i++) cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; @@ -1308,6 +1308,21 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) HCLGE_CFG_UMV_TBL_SPACE_S); if (!cfg->umv_space) 
cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF; + + cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]), + HCLGE_CFG_PF_RSS_SIZE_M, + HCLGE_CFG_PF_RSS_SIZE_S); + + /* HCLGE_CFG_PF_RSS_SIZE_M holds the log2 of the PF max rss size + * instead of the value itself, which is more flexible for future + * changes and expansions. A zero in this field means the PF has no + * separate limit and shares the VF max rss size field: + * HCLGE_CFG_RSS_SIZE_S. + */ + cfg->pf_rss_size_max = cfg->pf_rss_size_max ? + 1U << cfg->pf_rss_size_max : + cfg->vf_rss_size_max; } /* hclge_get_cfg: query the static parameter from flash @@ -1469,7 +1484,8 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->num_vmdq_vport = cfg.vmdq_vport_num; hdev->base_tqp_pid = 0; - hdev->rss_size_max = cfg.rss_size_max; + hdev->vf_rss_size_max = cfg.vf_rss_size_max; + hdev->pf_rss_size_max = cfg.pf_rss_size_max; hdev->rx_buf_len = cfg.rx_buf_len; ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); hdev->hw.mac.media_type = cfg.media_type; @@ -1652,7 +1668,7 @@ static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps) } } vport->alloc_tqps = alloced; - kinfo->rss_size = min_t(u16, hdev->rss_size_max, + kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max, vport->alloc_tqps / hdev->tm_info.num_tc); /* ensure one to one mapping between irq and queue at default */ @@ -4262,12 +4278,16 @@ static int hclge_set_rss_algo_key(struct hclge_dev *hdev, return 0; } -static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir) +static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir) { struct hclge_rss_indirection_table_cmd *req; struct hclge_desc desc; - int i, j; + u8 rss_msb_oft; + u8 rss_msb_val; int ret; + u16 qid; + int i; + u32 j; req = (struct hclge_rss_indirection_table_cmd *)desc.data; @@ -4278,11 +4298,15 @@ static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir) req->start_table_index = cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE); req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); - - for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) - req->rss_result[j] = - indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; - + for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) { + qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; + req->rss_qid_l[j] = qid & 0xff; + rss_msb_oft = + j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE; + rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) << + (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE); + req->rss_qid_h[rss_msb_oft] |= rss_msb_val; + } ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, @@ -4311,6 +4335,8 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M, HCLGE_RSS_TC_SIZE_S, tc_size[i]); + hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B, + tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1); hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M, HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); @@ -4601,21 +4627,58 @@ static int hclge_get_tc_size(struct hnae3_handle *handle) struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - return hdev->rss_size_max; + return hdev->pf_rss_size_max; } -int hclge_rss_init_hw(struct hclge_dev *hdev) +static int hclge_init_rss_tc_mode(struct hclge_dev *hdev) { + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; struct hclge_vport *vport = hdev->vport; - u8 
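The rewritten hclge_set_rss_indir_table() above widens indirection entries from u8 to u16 so queue ids can exceed 255: entry j's low byte goes into rss_qid_l[j], and its high bit is packed, two bit-positions per entry, into the rss_qid_h[] bytes. A worked example mirroring that loop (values are illustrative):

```c
u32 j   = 5;			/* entry index inside one descriptor */
u16 qid = 298;			/* 0x12a, needs more than 8 bits */
u8  lo  = qid & 0xff;					/* 0x2a -> rss_qid_l[5] */
u8  oft = j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;	/* = 1: second byte of rss_qid_h */
u8  val = ((qid >> HCLGE_RSS_CFG_TBL_BW_L) & 0x1) <<
	  (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);	/* high bit lands at bit 2 */
```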
*rss_indir = vport[0].rss_indirection_tbl; - u16 rss_size = vport[0].alloc_rss_size; u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; + u16 tc_valid[HCLGE_MAX_TC_NUM] = {0}; u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; + struct hnae3_tc_info *tc_info; + u16 roundup_size; + u16 rss_size; + int i; + + tc_info = &vport->nic.kinfo.tc_info; + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + rss_size = tc_info->tqp_count[i]; + tc_valid[i] = 0; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + /* tc_size set to hardware is the log2 of roundup power of two + * of rss_size, the actual queue size is limited by indirection + * table. + */ + if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size || + rss_size == 0) { + dev_err(&hdev->pdev->dev, + "Configure rss tc size failed, invalid TC_SIZE = %u\n", + rss_size); + return -EINVAL; + } + + roundup_size = roundup_pow_of_two(rss_size); + roundup_size = ilog2(roundup_size); + + tc_valid[i] = 1; + tc_size[i] = roundup_size; + tc_offset[i] = tc_info->tqp_offset[i]; + } + + return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); +} + +int hclge_rss_init_hw(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + u16 *rss_indir = vport[0].rss_indirection_tbl; u8 *key = vport[0].rss_hash_key; u8 hfunc = vport[0].rss_algo; - u16 tc_valid[HCLGE_MAX_TC_NUM]; - u16 roundup_size; - unsigned int i; int ret; ret = hclge_set_rss_indir_table(hdev, rss_indir); @@ -4630,32 +4693,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev) if (ret) return ret; - /* Each TC have the same queue size, and tc_size set to hardware is - * the log2 of roundup power of two of rss_size, the acutal queue - * size is limited by indirection table. - */ - if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { - dev_err(&hdev->pdev->dev, - "Configure rss tc size failed, invalid TC_SIZE = %u\n", - rss_size); - return -EINVAL; - } - - roundup_size = roundup_pow_of_two(rss_size); - roundup_size = ilog2(roundup_size); - - for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - tc_valid[i] = 0; - - if (!(hdev->hw_tc_map & BIT(i))) - continue; - - tc_valid[i] = 1; - tc_size[i] = roundup_size; - tc_offset[i] = rss_size * i; - } - - return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); + return hclge_init_rss_tc_mode(hdev); } void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) @@ -4826,61 +4864,56 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector, return ret; } -static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, - struct hclge_promisc_param *param) +static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id, + bool en_uc, bool en_mc, bool en_bc) { + struct hclge_vport *vport = &hdev->vport[vf_id]; + struct hnae3_handle *handle = &vport->nic; struct hclge_promisc_cfg_cmd *req; struct hclge_desc desc; + bool uc_tx_en = en_uc; + u8 promisc_cfg = 0; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); req = (struct hclge_promisc_cfg_cmd *)desc.data; - req->vf_id = param->vf_id; + req->vf_id = vf_id; - /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on - * pdev revision(0x20), new revision support them. The - * value of this two fields will not return error when driver - * send command to fireware in revision(0x20). - */ - req->flag = (param->enable << HCLGE_PROMISC_EN_B) | - HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B; + if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags)) + uc_tx_en = false; + + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 
1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0); + req->extend_promisc = promisc_cfg; + + /* to be compatible with DEVICE_VERSION_V1/2 */ + promisc_cfg = 0; + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1); + req->promisc = promisc_cfg; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) dev_err(&hdev->pdev->dev, - "failed to set vport %d promisc mode, ret = %d.\n", - param->vf_id, ret); + "failed to set vport %u promisc mode, ret = %d.\n", + vf_id, ret); return ret; } -static void hclge_promisc_param_init(struct hclge_promisc_param *param, - bool en_uc, bool en_mc, bool en_bc, - int vport_id) -{ - if (!param) - return; - - memset(param, 0, sizeof(struct hclge_promisc_param)); - if (en_uc) - param->enable = HCLGE_PROMISC_EN_UC; - if (en_mc) - param->enable |= HCLGE_PROMISC_EN_MC; - if (en_bc) - param->enable |= HCLGE_PROMISC_EN_BC; - param->vf_id = vport_id; -} - int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, bool en_mc_pmc, bool en_bc_pmc) { - struct hclge_dev *hdev = vport->back; - struct hclge_promisc_param param; - - hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc, - vport->vport_id); - return hclge_cmd_set_promisc_mode(hdev, ¶m); + return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id, + en_uc_pmc, en_mc_pmc, en_bc_pmc); } static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, @@ -5015,7 +5048,7 @@ static int hclge_init_fd_config(struct hclge_dev *hdev) } key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; - key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE, + key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE; key_cfg->inner_sipv6_word_en = LOW_2_WORDS; key_cfg->inner_dipv6_word_en = LOW_2_WORDS; key_cfg->outer_sipv6_word_en = 0; @@ -5092,6 +5125,7 @@ static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, struct hclge_fd_ad_data *action) { + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); struct hclge_fd_ad_config_cmd *req; struct hclge_desc desc; u64 ad_data = 0; @@ -5107,6 +5141,12 @@ static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, action->write_rule_id_to_bd); hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S, action->rule_id); + if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) { + hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B, + action->override_tc); + hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M, + HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size); + } ad_data <<= 32; hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, @@ -5350,16 +5390,22 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage, static int hclge_config_action(struct hclge_dev *hdev, u8 stage, struct hclge_fd_rule *rule) { + struct hclge_vport *vport = hdev->vport; + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; struct hclge_fd_ad_data ad_data; + 
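For V3 devices the promiscuous configuration moves to the extend_promisc byte, whose bits are the HCLGE_PROMISC_*_RX/TX_EN positions defined earlier; the old byte is still filled for V1/V2 compatibility. A worked example (illustrative values): unicast and broadcast promiscuous on, multicast off, limit_promisc clear:

```c
u8 extend_promisc = BIT(HCLGE_PROMISC_UC_RX_EN) |	/* bit 2 */
		    BIT(HCLGE_PROMISC_BC_RX_EN) |	/* bit 4 */
		    BIT(HCLGE_PROMISC_UC_TX_EN) |	/* bit 5 */
		    BIT(HCLGE_PROMISC_BC_TX_EN);	/* bit 7 -> 0xb4 */
```

With the limit_promisc private flag set, HCLGE_PROMISC_UC_TX_EN would stay clear, restricting unicast promiscuous handling to the RX direction.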
memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data)); ad_data.ad_id = rule->location; if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { ad_data.drop_packet = true; - ad_data.forward_to_direct_queue = false; - ad_data.queue_id = 0; + } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) { + ad_data.override_tc = true; + ad_data.queue_id = + kinfo->tc_info.tqp_offset[rule->cls_flower.tc]; + ad_data.tc_size = + ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]); } else { - ad_data.drop_packet = false; ad_data.forward_to_direct_queue = true; ad_data.queue_id = rule->queue_id; } @@ -5876,6 +5922,14 @@ clear_rule: return ret; } +static bool hclge_is_cls_flower_active(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE; +} + static int hclge_add_fd_entry(struct hnae3_handle *handle, struct ethtool_rxnfc *cmd) { @@ -5900,6 +5954,12 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, return -EOPNOTSUPP; } + if (hclge_is_cls_flower_active(handle)) { + dev_err(&hdev->pdev->dev, + "please delete all existing cls flower rules first\n"); + return -EINVAL; + } + fs = (struct ethtool_rx_flow_spec *)&cmd->fs; ret = hclge_fd_check_spec(hdev, fs, &unused); @@ -5930,7 +5990,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, return -EINVAL; } - action = HCLGE_FD_ACTION_ACCEPT_PACKET; + action = HCLGE_FD_ACTION_SELECT_QUEUE; q_index = ring; } @@ -5981,7 +6041,8 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle, if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) return -EINVAL; - if (!hclge_fd_rule_exist(hdev, fs->location)) { + if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num || + !hclge_fd_rule_exist(hdev, fs->location)) { dev_err(&hdev->pdev->dev, "Delete fail, rule %u is inexistent\n", fs->location); return -ENOENT; } @@ -6081,7 +6142,7 @@ static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - if (!hnae3_dev_fd_supported(hdev)) + if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle)) return -EOPNOTSUPP; cmd->rule_cnt = hdev->hclge_fd_rule_num; @@ -6424,7 +6485,8 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, * arfs should not work */ spin_lock_bh(&hdev->fd_rule_lock); - if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) { + if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE && + hdev->fd_active_type != HCLGE_FD_RULE_NONE) { spin_unlock_bh(&hdev->fd_rule_lock); return -EOPNOTSUPP; } @@ -6452,7 +6514,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, set_bit(bit_id, hdev->fd_bmap); rule->location = bit_id; - rule->flow_id = flow_id; + rule->arfs.flow_id = flow_id; rule->queue_id = queue_id; hclge_fd_build_arfs_rule(&new_tuples, rule); ret = hclge_fd_config_rule(hdev, rule); @@ -6496,7 +6558,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev) } hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { if (rps_may_expire_flow(handle->netdev, rule->queue_id, - rule->flow_id, rule->location)) { + rule->arfs.flow_id, rule->location)) { hlist_del_init(&rule->rule_node); hlist_add_head(&rule->rule_node, &del_list); hdev->hclge_fd_rule_num--; @@ -6525,6 +6587,286 @@ static void hclge_clear_arfs_rules(struct hnae3_handle *handle) #endif } +static void hclge_get_cls_key_basic(const struct flow_rule *flow, + struct 
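The new HCLGE_FD_ACTION_SELECT_TC branch above programs the action data with a base queue and a log2 range size rather than a single queue, presumably letting the hardware spread matches across the whole TC's queue range. Illustrative numbers for a rule steered to TC 2 when that TC owns 8 queues starting at queue 16:

```c
ad_data.override_tc = true;
ad_data.queue_id    = 16;	/* kinfo->tc_info.tqp_offset[2] */
ad_data.tc_size     = 3;	/* ilog2(kinfo->tc_info.tqp_count[2]) == ilog2(8) */
```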
hclge_fd_rule *rule) +{ + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + u16 ethtype_key, ethtype_mask; + + flow_rule_match_basic(flow, &match); + ethtype_key = ntohs(match.key->n_proto); + ethtype_mask = ntohs(match.mask->n_proto); + + if (ethtype_key == ETH_P_ALL) { + ethtype_key = 0; + ethtype_mask = 0; + } + rule->tuples.ether_proto = ethtype_key; + rule->tuples_mask.ether_proto = ethtype_mask; + rule->tuples.ip_proto = match.key->ip_proto; + rule->tuples_mask.ip_proto = match.mask->ip_proto; + } else { + rule->unused_tuple |= BIT(INNER_IP_PROTO); + rule->unused_tuple |= BIT(INNER_ETH_TYPE); + } +} + +static void hclge_get_cls_key_mac(const struct flow_rule *flow, + struct hclge_fd_rule *rule) +{ + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(flow, &match); + ether_addr_copy(rule->tuples.dst_mac, match.key->dst); + ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst); + ether_addr_copy(rule->tuples.src_mac, match.key->src); + ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src); + } else { + rule->unused_tuple |= BIT(INNER_DST_MAC); + rule->unused_tuple |= BIT(INNER_SRC_MAC); + } +} + +static void hclge_get_cls_key_vlan(const struct flow_rule *flow, + struct hclge_fd_rule *rule) +{ + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(flow, &match); + rule->tuples.vlan_tag1 = match.key->vlan_id | + (match.key->vlan_priority << VLAN_PRIO_SHIFT); + rule->tuples_mask.vlan_tag1 = match.mask->vlan_id | + (match.mask->vlan_priority << VLAN_PRIO_SHIFT); + } else { + rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST); + } +} + +static void hclge_get_cls_key_ip(const struct flow_rule *flow, + struct hclge_fd_rule *rule) +{ + u16 addr_type = 0; + + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_control(flow, &match); + addr_type = match.key->addr_type; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(flow, &match); + rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src); + rule->tuples_mask.src_ip[IPV4_INDEX] = + be32_to_cpu(match.mask->src); + rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst); + rule->tuples_mask.dst_ip[IPV4_INDEX] = + be32_to_cpu(match.mask->dst); + } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_match_ipv6_addrs match; + + flow_rule_match_ipv6_addrs(flow, &match); + be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.src_ip, + match.mask->src.s6_addr32, IPV6_SIZE); + be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.dst_ip, + match.mask->dst.s6_addr32, IPV6_SIZE); + } else { + rule->unused_tuple |= BIT(INNER_SRC_IP); + rule->unused_tuple |= BIT(INNER_DST_IP); + } +} + +static void hclge_get_cls_key_port(const struct flow_rule *flow, + struct hclge_fd_rule *rule) +{ + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_ports(flow, &match); + + rule->tuples.src_port = be16_to_cpu(match.key->src); + rule->tuples_mask.src_port = be16_to_cpu(match.mask->src); + rule->tuples.dst_port = be16_to_cpu(match.key->dst); + rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst); + } else { + rule->unused_tuple 
|= BIT(INNER_SRC_PORT); + rule->unused_tuple |= BIT(INNER_DST_PORT); + } +} + +static int hclge_parse_cls_flower(struct hclge_dev *hdev, + struct flow_cls_offload *cls_flower, + struct hclge_fd_rule *rule) +{ + struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower); + struct flow_dissector *dissector = flow->match.dissector; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS))) { + dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n", + dissector->used_keys); + return -EOPNOTSUPP; + } + + hclge_get_cls_key_basic(flow, rule); + hclge_get_cls_key_mac(flow, rule); + hclge_get_cls_key_vlan(flow, rule); + hclge_get_cls_key_ip(flow, rule); + hclge_get_cls_key_port(flow, rule); + + return 0; +} + +static int hclge_check_cls_flower(struct hclge_dev *hdev, + struct flow_cls_offload *cls_flower, int tc) +{ + u32 prio = cls_flower->common.prio; + + if (tc < 0 || tc > hdev->tc_max) { + dev_err(&hdev->pdev->dev, "invalid traffic class\n"); + return -EINVAL; + } + + if (prio == 0 || + prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { + dev_err(&hdev->pdev->dev, + "prio %u should be in range [1, %u]\n", + prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); + return -EINVAL; + } + + if (test_bit(prio - 1, hdev->fd_bmap)) { + dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio); + return -EINVAL; + } + return 0; +} + +static int hclge_add_cls_flower(struct hnae3_handle *handle, + struct flow_cls_offload *cls_flower, + int tc) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + int ret; + + if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) { + dev_err(&hdev->pdev->dev, + "please remove all existing fd rules via ethtool first\n"); + return -EINVAL; + } + + ret = hclge_check_cls_flower(hdev, cls_flower, tc); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to check cls flower params, ret = %d\n", ret); + return ret; + } + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + ret = hclge_parse_cls_flower(hdev, cls_flower, rule); + if (ret) + goto err; + + rule->action = HCLGE_FD_ACTION_SELECT_TC; + rule->cls_flower.tc = tc; + rule->location = cls_flower->common.prio - 1; + rule->vf_id = 0; + rule->cls_flower.cookie = cls_flower->cookie; + rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE; + + spin_lock_bh(&hdev->fd_rule_lock); + hclge_clear_arfs_rules(handle); + + ret = hclge_fd_config_rule(hdev, rule); + + spin_unlock_bh(&hdev->fd_rule_lock); + + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to add cls flower rule, ret = %d\n", ret); + goto err; + } + + return 0; +err: + kfree(rule); + return ret; +} + +static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev, + unsigned long cookie) +{ + struct hclge_fd_rule *rule; + struct hlist_node *node; + + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + if (rule->cls_flower.cookie == cookie) + return rule; + } + + return NULL; +} + +static int hclge_del_cls_flower(struct hnae3_handle *handle, + struct flow_cls_offload *cls_flower) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + int ret; + + spin_lock_bh(&hdev->fd_rule_lock); + + rule = hclge_find_cls_flower(hdev, cls_flower->cookie); + if (!rule) { + 
spin_unlock_bh(&hdev->fd_rule_lock); + return -EINVAL; + } + + ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location, + NULL, false); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to delete cls flower rule %u, ret = %d\n", + rule->location, ret); + spin_unlock_bh(&hdev->fd_rule_lock); + return ret; + } + + ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to delete cls flower rule %u in list, ret = %d\n", + rule->location, ret); + spin_unlock_bh(&hdev->fd_rule_lock); + return ret; + } + + spin_unlock_bh(&hdev->fd_rule_lock); + + return 0; +} + static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -8622,6 +8964,8 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) vcfg->insert_tag1_en ? 1 : 0); hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, vcfg->insert_tag2_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B, + vcfg->tag_shift_mode_en ? 1 : 0); hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; @@ -8659,6 +9003,10 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) vcfg->vlan1_vlan_prionly ? 1 : 0); hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, vcfg->vlan2_vlan_prionly ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B, + vcfg->strip_tag1_discard_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B, + vcfg->strip_tag2_discard_en ? 1 : 0); req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / @@ -8686,7 +9034,10 @@ static int hclge_vlan_offload_cfg(struct hclge_vport *vport, vport->txvlan_cfg.insert_tag1_en = false; vport->txvlan_cfg.default_tag1 = 0; } else { - vport->txvlan_cfg.accept_tag1 = false; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev); + + vport->txvlan_cfg.accept_tag1 = + ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3; vport->txvlan_cfg.insert_tag1_en = true; vport->txvlan_cfg.default_tag1 = vlan_tag; } @@ -8701,16 +9052,21 @@ static int hclge_vlan_offload_cfg(struct hclge_vport *vport, vport->txvlan_cfg.accept_untag2 = true; vport->txvlan_cfg.insert_tag2_en = false; vport->txvlan_cfg.default_tag2 = 0; + vport->txvlan_cfg.tag_shift_mode_en = true; if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { vport->rxvlan_cfg.strip_tag1_en = false; vport->rxvlan_cfg.strip_tag2_en = vport->rxvlan_cfg.rx_vlan_offload_en; + vport->rxvlan_cfg.strip_tag2_discard_en = false; } else { vport->rxvlan_cfg.strip_tag1_en = vport->rxvlan_cfg.rx_vlan_offload_en; vport->rxvlan_cfg.strip_tag2_en = true; + vport->rxvlan_cfg.strip_tag2_discard_en = true; } + + vport->rxvlan_cfg.strip_tag1_discard_en = false; vport->rxvlan_cfg.vlan1_vlan_prionly = false; vport->rxvlan_cfg.vlan2_vlan_prionly = false; @@ -9005,10 +9361,14 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { vport->rxvlan_cfg.strip_tag1_en = false; vport->rxvlan_cfg.strip_tag2_en = enable; + vport->rxvlan_cfg.strip_tag2_discard_en = false; } else { vport->rxvlan_cfg.strip_tag1_en = enable; vport->rxvlan_cfg.strip_tag2_en = true; + vport->rxvlan_cfg.strip_tag2_discard_en = true; } + + vport->rxvlan_cfg.strip_tag1_discard_en = false; vport->rxvlan_cfg.vlan1_vlan_prionly = false; vport->rxvlan_cfg.vlan2_vlan_prionly = 
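The net effect of the strip/discard bits on the RX side, derived from the two branches above ("discard" here meaning the stripped tag is not written back to the RX buffer descriptor, per the field comments in hclge_main.h):

```
port-base VLAN   strip_tag1_en        strip_tag2_en        strip_tag2_discard_en
disabled         false                = rx_vlan_offload_en false
enabled          = rx_vlan_offload_en true                 true
```

strip_tag1_discard_en stays false in both states, so an inner tag, when stripped, is still reported in the descriptor.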
false; vport->rxvlan_cfg.rx_vlan_offload_en = enable; @@ -9120,6 +9480,7 @@ static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport, static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, u16 vlan, u8 qos, __be16 proto) { + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; struct hclge_vlan_info vlan_info; @@ -9149,16 +9510,25 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, vlan_info.qos = qos; vlan_info.vlan_proto = ntohs(proto); - if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { - return hclge_update_port_base_vlan_cfg(vport, state, - &vlan_info); - } else { - ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0], - vport->vport_id, state, - vlan, qos, - ntohs(proto)); + ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to update port base vlan for vf %d, ret = %d\n", + vfid, ret); return ret; } + + /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based + * VLAN state. + */ + if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 && + test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + hclge_push_vf_port_base_vlan_info(&hdev->vport[0], + vport->vport_id, state, + vlan, qos, + ntohs(proto)); + + return 0; } static void hclge_clear_vf_vlan(struct hclge_dev *hdev) @@ -10671,12 +11041,10 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) static u32 hclge_get_max_channels(struct hnae3_handle *handle) { - struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - return min_t(u32, hdev->rss_size_max, - vport->alloc_tqps / kinfo->num_tc); + return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps); } static void hclge_get_channels(struct hnae3_handle *handle, @@ -10695,7 +11063,7 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, struct hclge_dev *hdev = vport->back; *alloc_tqps = vport->alloc_tqps; - *max_rss_size = hdev->rss_size_max; + *max_rss_size = hdev->pf_rss_size_max; } static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, @@ -10763,7 +11131,7 @@ out: dev_info(&hdev->pdev->dev, "Channels changed, rss_size from %u to %u, tqps from %u to %u", cur_rss_size, kinfo->rss_size, - cur_tqps, kinfo->rss_size * kinfo->num_tc); + cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); return ret; } @@ -11496,6 +11864,9 @@ static const struct hnae3_ae_ops hclge_ops = { .set_vf_mac = hclge_set_vf_mac, .get_module_eeprom = hclge_get_module_eeprom, .get_cmdq_stat = hclge_get_cmdq_stat, + .add_cls_flower = hclge_add_cls_flower, + .del_cls_flower = hclge_del_cls_flower, + .cls_flower_active = hclge_is_cls_flower_active, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index bd17685e4065..50a294dfaff5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -348,7 +348,8 @@ struct hclge_cfg { u8 tc_num; u16 tqp_desc_num; u16 rx_buf_len; - u16 rss_size_max; + u16 vf_rss_size_max; + u16 pf_rss_size_max; u8 phy_addr; u8 media_type; u8 mac_addr[ETH_ALEN]; @@ -564,6 +565,7 @@ enum HCLGE_FD_ACTIVE_RULE_TYPE { HCLGE_FD_RULE_NONE, HCLGE_FD_ARFS_ACTIVE, HCLGE_FD_EP_ACTIVE, + HCLGE_FD_TC_FLOWER_ACTIVE, }; enum HCLGE_FD_PACKET_TYPE 
{ @@ -572,8 +574,9 @@ enum HCLGE_FD_PACKET_TYPE { }; enum HCLGE_FD_ACTION { - HCLGE_FD_ACTION_ACCEPT_PACKET, + HCLGE_FD_ACTION_SELECT_QUEUE, HCLGE_FD_ACTION_DROP_PACKET, + HCLGE_FD_ACTION_SELECT_TC, }; struct hclge_fd_key_cfg { @@ -618,12 +621,20 @@ struct hclge_fd_rule { struct hclge_fd_rule_tuples tuples_mask; u32 unused_tuple; u32 flow_type; - u8 action; - u16 vf_id; + union { + struct { + unsigned long cookie; + u8 tc; + } cls_flower; + struct { + u16 flow_id; /* only used for arfs */ + } arfs; + }; u16 queue_id; + u16 vf_id; u16 location; - u16 flow_id; /* only used for arfs */ enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type; + u8 action; }; struct hclge_fd_ad_data { @@ -637,6 +648,8 @@ struct hclge_fd_ad_data { u8 write_rule_id_to_bd; u8 next_input_key; u16 rule_id; + u16 tc_size; + u8 override_tc; }; enum HCLGE_MAC_NODE_STATE { @@ -745,7 +758,8 @@ struct hclge_dev { u16 base_tqp_pid; /* Base task tqp physical id of this PF */ u16 alloc_rss_size; /* Allocated RSS task queue */ - u16 rss_size_max; /* HW defined max RSS task queue */ + u16 vf_rss_size_max; /* HW defined VF max RSS task queue */ + u16 pf_rss_size_max; /* HW defined PF max RSS task queue */ u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */ u16 num_alloc_vport; /* Num vports this driver supports */ @@ -850,15 +864,18 @@ struct hclge_tx_vtag_cfg { bool insert_tag2_en; /* Whether insert outer vlan tag */ u16 default_tag1; /* The default inner vlan tag to insert */ u16 default_tag2; /* The default outer vlan tag to insert */ + bool tag_shift_mode_en; }; /* VPort level vlan tag configuration for RX direction */ struct hclge_rx_vtag_cfg { - u8 rx_vlan_offload_en; /* Whether enable rx vlan offload */ - u8 strip_tag1_en; /* Whether strip inner vlan tag */ - u8 strip_tag2_en; /* Whether strip outer vlan tag */ - u8 vlan1_vlan_prionly; /* Inner VLAN Tag up to descriptor Enable */ - u8 vlan2_vlan_prionly; /* Outer VLAN Tag up to descriptor Enable */ + bool rx_vlan_offload_en; /* Whether enable rx vlan offload */ + bool strip_tag1_en; /* Whether strip inner vlan tag */ + bool strip_tag2_en; /* Whether strip outer vlan tag */ + bool vlan1_vlan_prionly; /* Inner vlan tag up to descriptor enable */ + bool vlan2_vlan_prionly; /* Outer vlan tag up to descriptor enable */ + bool strip_tag1_discard_en; /* Inner vlan tag discard for BD enable */ + bool strip_tag2_discard_en; /* Outer vlan tag discard for BD enable */ }; struct hclge_rss_tuple_cfg { @@ -903,7 +920,7 @@ struct hclge_vport { u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */ /* User configured lookup table entries */ - u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE]; + u16 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE]; int rss_algo; /* User configured hash algorithm */ /* User configured rss tuple sets */ struct hclge_rss_tuple_cfg rss_tuple_sets; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 3ab6db2588d3..754c09ada901 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -227,6 +227,7 @@ static int hclge_set_vf_promisc_mode(struct hclge_vport *vport, bool en_bc = req->msg.en_bc ? true : false; bool en_uc = req->msg.en_uc ? true : false; bool en_mc = req->msg.en_mc ? 
true : false; + struct hnae3_handle *handle = &vport->nic; int ret; if (!vport->vf_info.trusted) { @@ -234,6 +235,12 @@ static int hclge_set_vf_promisc_mode(struct hclge_vport *vport, en_mc = false; } + if (req->msg.en_limit_promisc) + set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags); + else + clear_bit(HNAE3_PFLAG_LIMIT_PROMISC, + &handle->priv_flags); + ret = hclge_set_vport_promisc_mode(vport, en_uc, en_mc, en_bc); vport->vf_info.promisc_enable = (en_uc || en_mc) ? 1 : 0; @@ -371,7 +378,16 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, status = hclge_update_port_base_vlan_cfg(vport, *state, vlan_info); } else if (msg_cmd->subcode == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) { - resp_msg->data[0] = vport->port_base_vlan_cfg.state; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev); + /* vf does not need to know about the port based VLAN state + * on device HNAE3_DEVICE_VERSION_V3. So always return disable + * on device HNAE3_DEVICE_VERSION_V3 if vf queries the port + * based VLAN state. + */ + resp_msg->data[0] = + ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3 ? + HNAE3_PORT_BASE_VLAN_DISABLE : + vport->port_base_vlan_cfg.state; resp_msg->len = sizeof(u8); } @@ -398,7 +414,7 @@ static void hclge_get_vf_tcinfo(struct hclge_vport *vport, struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; unsigned int i; - for (i = 0; i < kinfo->num_tc; i++) + for (i = 0; i < kinfo->tc_info.num_tc; i++) resp_msg->data[0] |= BIT(i); resp_msg->len = sizeof(u8); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index b1026cd1ba0a..82742a64f3b7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -565,7 +565,7 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate) HCLGE_SHAPER_BS_U_DEF, HCLGE_SHAPER_BS_S_DEF); - for (i = 0; i < kinfo->num_tc; i++) { + for (i = 0; i < kinfo->tc_info.num_tc; i++) { hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, false); @@ -589,23 +589,66 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate) return 0; } +static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; + struct hclge_dev *hdev = vport->back; + u16 max_rss_size = 0; + int i; + + if (!tc_info->mqprio_active) + return vport->alloc_tqps / tc_info->num_tc; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc) + continue; + if (max_rss_size < tc_info->tqp_count[i]) + max_rss_size = tc_info->tqp_count[i]; + } + + return max_rss_size; +} + +static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; + struct hclge_dev *hdev = vport->back; + int sum = 0; + int i; + + if (!tc_info->mqprio_active) + return kinfo->rss_size * tc_info->num_tc; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc) + sum += tc_info->tqp_count[i]; + } + + return sum; +} + static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) { struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; struct hclge_dev *hdev = vport->back; + u16 vport_max_rss_size; u16 max_rss_size; u8 i; /* TC configuration is shared by PF/VF in one port, only allow * one tc for VF for simplicity. 
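The two helpers above make the vport's TQP accounting mode-dependent; in mqprio mode the per-TC queue counts are user supplied and need not be uniform, so the total is a sum rather than a product:

```c
/* rss mode:    num_tqps = rss_size * num_tc        (uniform TCs)
 * mqprio mode: num_tqps = sum of tqp_count[i]      (per-TC user sizes)
 * e.g. mqprio counts {4, 4, 8} -> 16 TQPs, while rss mode with
 * rss_size 8 and 3 TCs would claim 24.
 */
```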
VF's vport_id is non zero. */ - kinfo->num_tc = vport->vport_id ? 1 : + kinfo->tc_info.num_tc = vport->vport_id ? 1 : min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc); vport->qs_offset = (vport->vport_id ? HNAE3_MAX_TC : 0) + (vport->vport_id ? (vport->vport_id - 1) : 0); - max_rss_size = min_t(u16, hdev->rss_size_max, - vport->alloc_tqps / kinfo->num_tc); + vport_max_rss_size = vport->vport_id ? hdev->vf_rss_size_max : + hdev->pf_rss_size_max; + max_rss_size = min_t(u16, vport_max_rss_size, + hclge_vport_get_max_rss_size(vport)); /* Set to user value, no larger than max_rss_size. */ if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && @@ -622,34 +665,36 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) if (!kinfo->req_rss_size) max_rss_size = min_t(u16, max_rss_size, (hdev->num_nic_msi - 1) / - kinfo->num_tc); + kinfo->tc_info.num_tc); /* Set to the maximum specification value (max_rss_size). */ kinfo->rss_size = max_rss_size; } - kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size; + kinfo->num_tqps = hclge_vport_get_tqp_num(vport); vport->dwrr = 100; /* 100 percent as init */ vport->alloc_rss_size = kinfo->rss_size; vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; + /* when enable mqprio, the tc_info has been updated. */ + if (kinfo->tc_info.mqprio_active) + return; + for (i = 0; i < HNAE3_MAX_TC; i++) { - if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) { - kinfo->tc_info[i].enable = true; - kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; - kinfo->tc_info[i].tqp_count = kinfo->rss_size; - kinfo->tc_info[i].tc = i; + if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) { + set_bit(i, &kinfo->tc_info.tc_en); + kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size; + kinfo->tc_info.tqp_count[i] = kinfo->rss_size; } else { /* Set to default queue if TC is disable */ - kinfo->tc_info[i].enable = false; - kinfo->tc_info[i].tqp_offset = 0; - kinfo->tc_info[i].tqp_count = 1; - kinfo->tc_info[i].tc = 0; + clear_bit(i, &kinfo->tc_info.tc_en); + kinfo->tc_info.tqp_offset[i] = 0; + kinfo->tc_info.tqp_count[i] = 1; } } - memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc, - sizeof_field(struct hnae3_knic_private_info, prio_tc)); + memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc, + sizeof_field(struct hnae3_tc_info, prio_tc)); } static void hclge_tm_vport_info_update(struct hclge_dev *hdev) @@ -854,15 +899,14 @@ static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev, struct hclge_vport *vport) { struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; struct hnae3_queue **tqp = kinfo->tqp; - struct hnae3_tc_info *v_tc_info; u32 i, j; int ret; - for (i = 0; i < kinfo->num_tc; i++) { - v_tc_info = &kinfo->tc_info[i]; - for (j = 0; j < v_tc_info->tqp_count; j++) { - struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j]; + for (i = 0; i < tc_info->num_tc; i++) { + for (j = 0; j < tc_info->tqp_count[i]; j++) { + struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j]; ret = hclge_tm_q_to_qs_map_cfg(hdev, hclge_get_queue_id(q), @@ -887,7 +931,7 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev) struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo; - for (i = 0; i < kinfo->num_tc; i++) { + for (i = 0; i < kinfo->tc_info.num_tc; i++) { ret = hclge_tm_qs_to_pri_map_cfg( hdev, vport[k].qs_offset + i, i); if (ret) @@ -1001,7 +1045,7 @@ static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport) u32 i; int ret; - for (i = 0; i < kinfo->num_tc; i++) { + for (i = 0; i < 
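The qs_offset expression above lays queue sets out so the PF owns one qset per possible TC and each single-TC VF gets exactly one qset after that block (HNAE3_MAX_TC is 8):

```c
/* PF  (vport_id 0): qs_offset = 0 -> qsets 0..7, one per TC
 * VF1 (vport_id 1): qs_offset = 8
 * VF2 (vport_id 2): qs_offset = 9, and so on
 */
u16 qs_offset = (vport_id ? HNAE3_MAX_TC : 0) +
		(vport_id ? (vport_id - 1) : 0);
```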
kinfo->tc_info.num_tc; i++) { ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit, HCLGE_SHAPER_LVL_QSET, &ir_para, max_tm_rate); @@ -1123,7 +1167,7 @@ static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport) return ret; /* Qset dwrr */ - for (i = 0; i < kinfo->num_tc; i++) { + for (i = 0; i < kinfo->tc_info.num_tc; i++) { ret = hclge_tm_qs_weight_cfg( hdev, vport->qs_offset + i, hdev->tm_info.pg_info[0].tc_dwrr[i]); @@ -1254,7 +1298,7 @@ static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport) if (ret) return ret; - for (i = 0; i < kinfo->num_tc; i++) { + for (i = 0; i < kinfo->tc_info.num_tc; i++) { u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode; ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i, @@ -1484,7 +1528,7 @@ void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) for (k = 0; k < hdev->num_alloc_vport; k++) { kinfo = &vport[k].nic.kinfo; - kinfo->prio_tc[i] = prio_tc[i]; + kinfo->tc_info.prio_tc[i] = prio_tc[i]; } } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 5b2f9a56f1d8..145757cb70f9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -14,6 +14,9 @@ #define HCLGEVF_RESET_MAX_FAIL_CNT 5 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); +static void hclgevf_task_schedule(struct hclgevf_dev *hdev, + unsigned long delay); + static struct hnae3_ae_algo ae_algovf; static struct workqueue_struct *hclgevf_wq; @@ -430,19 +433,20 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev) struct hnae3_knic_private_info *kinfo; u16 new_tqps = hdev->num_tqps; unsigned int i; + u8 num_tc = 0; kinfo = &nic->kinfo; - kinfo->num_tc = 0; kinfo->num_tx_desc = hdev->num_tx_desc; kinfo->num_rx_desc = hdev->num_rx_desc; kinfo->rx_buf_len = hdev->rx_buf_len; for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) if (hdev->hw_tc_map & BIT(i)) - kinfo->num_tc++; + num_tc++; - kinfo->rss_size - = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); - new_tqps = kinfo->rss_size * kinfo->num_tc; + num_tc = num_tc ? num_tc : 1; + kinfo->tc_info.num_tc = num_tc; + kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc); + new_tqps = kinfo->rss_size * num_tc; kinfo->num_tqps = min(new_tqps, hdev->num_tqps); kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, @@ -460,7 +464,7 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev) * and rss size with the actual vector numbers */ kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps); - kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc, + kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc, kinfo->rss_size); return 0; @@ -1146,6 +1150,7 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, bool en_uc_pmc, bool en_mc_pmc, bool en_bc_pmc) { + struct hnae3_handle *handle = &hdev->nic; struct hclge_vf_to_pf_msg send_msg; int ret; @@ -1154,6 +1159,8 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, send_msg.en_bc = en_bc_pmc ? 1 : 0; send_msg.en_uc = en_uc_pmc ? 1 : 0; send_msg.en_mc = en_mc_pmc ? 1 : 0; + send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC, + &handle->priv_flags) ? 
1 : 0; ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); if (ret) @@ -1180,6 +1187,7 @@ static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle) struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); + hclgevf_task_schedule(hdev, 0); } static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev) @@ -3353,7 +3361,7 @@ static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) struct hnae3_knic_private_info *kinfo = &nic->kinfo; return min_t(u32, hdev->rss_size_max, - hdev->num_tqps / kinfo->num_tc); + hdev->num_tqps / kinfo->tc_info.num_tc); } /** @@ -3396,7 +3404,7 @@ static void hclgevf_update_rss_size(struct hnae3_handle *handle, kinfo->req_rss_size = new_tqps_num; max_rss_size = min_t(u16, hdev->rss_size_max, - hdev->num_tqps / kinfo->num_tc); + hdev->num_tqps / kinfo->tc_info.num_tc); /* Use the user's configuration when it is not larger than * max_rss_size, otherwise, use the maximum specification value. @@ -3408,7 +3416,7 @@ static void hclgevf_update_rss_size(struct hnae3_handle *handle, (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) kinfo->rss_size = max_rss_size; - kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size; + kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size; } static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, @@ -3454,7 +3462,7 @@ out: dev_info(&hdev->pdev->dev, "Channels changed, rss_size from %u to %u, tqps from %u to %u", cur_rss_size, kinfo->rss_size, - cur_tqps, kinfo->rss_size * kinfo->num_tc); + cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); return ret; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 350225bbe0be..9a9b09401d01 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -313,13 +313,7 @@ static void free_rxqs(struct hinic_dev *nic_dev) static int hinic_configure_max_qnum(struct hinic_dev *nic_dev) { - int err; - - err = hinic_set_max_qnum(nic_dev, nic_dev->hwdev->nic_cap.max_qps); - if (err) - return err; - - return 0; + return hinic_set_max_qnum(nic_dev, nic_dev->hwdev->nic_cap.max_qps); } static int hinic_rss_init(struct hinic_dev *nic_dev) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index cf55c5ea07cb..a2191392ca4f 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -398,6 +398,8 @@ failure: dev_kfree_skb_any(pool->rx_buff[index].skb); pool->rx_buff[index].skb = NULL; } + adapter->replenish_add_buff_failure += ind_bufp->index; + atomic_add(buffers_added, &pool->available); ind_bufp->index = 0; if (lpar_rc == H_CLOSED || adapter->failover_pending) { /* Disable buffer pool replenishment and report carrier off if @@ -419,6 +421,8 @@ static void replenish_pools(struct ibmvnic_adapter *adapter) if (adapter->rx_pool[i].active) replenish_rx_pool(adapter, &adapter->rx_pool[i]); } + + netdev_dbg(adapter->netdev, "Replenished %d pools\n", i); } static void release_stats_buffers(struct ibmvnic_adapter *adapter) @@ -927,6 +931,7 @@ static int ibmvnic_login(struct net_device *netdev) __ibmvnic_set_mac(netdev, adapter->mac_addr); + netdev_dbg(netdev, "[S:%d] Login succeeded\n", adapter->state); return 0; } @@ -1358,6 +1363,10 @@ static int ibmvnic_close(struct net_device *netdev) struct ibmvnic_adapter *adapter = netdev_priv(netdev); int rc; + netdev_dbg(netdev, "[S:%d FOP:%d FRR:%d] Closing\n", + adapter->state, 
adapter->failover_pending, + adapter->force_reset_recovery); + /* If device failover is pending, just set device state and return. * Device operation will be handled by reset routine. */ @@ -2013,8 +2022,10 @@ static int do_reset(struct ibmvnic_adapter *adapter, struct net_device *netdev = adapter->netdev; int i, rc; - netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n", - rwi->reset_reason); + netdev_dbg(adapter->netdev, + "[S:%d FOP:%d] Reset reason %d, reset_state %d\n", + adapter->state, adapter->failover_pending, + rwi->reset_reason, reset_state); rtnl_lock(); /* @@ -2173,6 +2184,8 @@ out: adapter->state = reset_state; rtnl_unlock(); + netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Reset done, rc %d\n", + adapter->state, adapter->failover_pending, rc); return rc; } @@ -2242,6 +2255,8 @@ out: /* restore adapter state if reset failed */ if (rc) adapter->state = reset_state; + netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Hard reset done, rc %d\n", + adapter->state, adapter->failover_pending, rc); return rc; } @@ -2352,6 +2367,11 @@ static void __ibmvnic_reset(struct work_struct *work) } clear_bit_unlock(0, &adapter->resetting); + + netdev_dbg(adapter->netdev, + "[S:%d FRR:%d WFR:%d] Done processing resets\n", + adapter->state, adapter->force_reset_recovery, + adapter->wait_for_reset); } static void __ibmvnic_delayed_reset(struct work_struct *work) @@ -2397,7 +2417,8 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, list_for_each(entry, &adapter->rwi_list) { tmp = list_entry(entry, struct ibmvnic_rwi, list); if (tmp->reset_reason == reason) { - netdev_dbg(netdev, "Skipping matching reset\n"); + netdev_dbg(netdev, "Skipping matching reset, reason=%d\n", + reason); spin_unlock_irqrestore(&adapter->rwi_lock, flags); ret = EBUSY; goto err; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index b30f00891c03..128ab6898070 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6475,13 +6475,13 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter) /* Ungate PGCB clock */ mac_data = er32(FEXTNVM9); - mac_data |= BIT(28); + mac_data &= ~BIT(28); ew32(FEXTNVM9, mac_data); /* Enable K1 off to enable mPHY Power Gating */ mac_data = er32(FEXTNVM6); mac_data |= BIT(31); - ew32(FEXTNVM12, mac_data); + ew32(FEXTNVM6, mac_data); /* Enable mPHY power gating for any link and speed */ mac_data = er32(FEXTNVM8); @@ -6525,11 +6525,11 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter) /* Disable K1 off */ mac_data = er32(FEXTNVM6); mac_data &= ~BIT(31); - ew32(FEXTNVM12, mac_data); + ew32(FEXTNVM6, mac_data); /* Disable Ungate PGCB clock */ mac_data = er32(FEXTNVM9); - mac_data &= ~BIT(28); + mac_data |= BIT(28); ew32(FEXTNVM9, mac_data); /* Cancel not waking from dynamic diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 9f73cd7aee09..4aca637d4a23 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1861,6 +1861,7 @@ static inline bool i40e_page_is_reusable(struct page *page) * the adapter for another receive * * @rx_buffer: buffer containing the page + * @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call * * If page is reusable, rx_buffer->page_offset is adjusted to point to * an unused region in the page. 
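The i40e_txrx.c hunks on either side of this point (mirrored for ice and ixgbe later in this diff) close a premature-reuse window around XDP_REDIRECT: page_count() used to be read inside i40e_can_reuse_rx_page(), i.e. after xdp_do_redirect() may already have dropped a reference to the page, so the recycling decision could act on a stale count. The fix samples the refcount once in i40e_get_rx_buffer(), before the XDP program runs, and threads that snapshot down to i40e_put_rx_buffer(). A minimal kernel-style C sketch of the pattern follows; the struct and function names here are illustrative, not the driver's own:

    #include <linux/mm.h>

    struct rx_buf {
            struct page *page;
            unsigned int pagecnt_bias;      /* references the driver still holds */
    };

    /* Sample the page refcount before the XDP program can run and drop a
     * reference via xdp_do_redirect(). On systems whose pages are too
     * large to be split in half the recycle scheme differs and the
     * snapshot is unused.
     */
    static void rx_buf_snapshot_pgcnt(struct rx_buf *buf, int *pgcnt)
    {
    #if (PAGE_SIZE < 8192)
            *pgcnt = page_count(buf->page);
    #else
            *pgcnt = 0;
    #endif
    }

    /* Decide reuse against the snapshot rather than a fresh page_count()
     * read, so a reference dropped during the redirect cannot make a page
     * that is still in flight look exclusively owned.
     */
    static bool rx_buf_can_reuse(struct rx_buf *buf, int pgcnt)
    {
    #if (PAGE_SIZE < 8192)
            if (unlikely((pgcnt - buf->pagecnt_bias) > 1))
                    return false;
    #endif
            return true;
    }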
@@ -1883,7 +1884,8 @@ static inline bool i40e_page_is_reusable(struct page *page) * * In either case, if the page is reusable its refcount is increased. **/ -static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) +static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) { unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; struct page *page = rx_buffer->page; @@ -1894,7 +1896,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer) #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ - if (unlikely((page_count(page) - pagecnt_bias) > 1)) + if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) return false; #else #define I40E_LAST_OFFSET \ @@ -1953,16 +1955,24 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring, * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use * @rx_ring: rx descriptor ring to transact packets on * @size: size of buffer to add to skb + * @rx_buffer_pgcnt: buffer page refcount * * This function will pull an Rx buffer from the ring and synchronize it * for use by the CPU. */ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring, - const unsigned int size) + const unsigned int size, + int *rx_buffer_pgcnt) { struct i40e_rx_buffer *rx_buffer; rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); + *rx_buffer_pgcnt = +#if (PAGE_SIZE < 8192) + page_count(rx_buffer->page); +#else + 0; +#endif prefetch_page_address(rx_buffer->page); /* we are reusing so sync this buffer for CPU use */ @@ -2113,14 +2123,16 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, * i40e_put_rx_buffer - Clean up used buffer and either recycle or free * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: rx buffer to pull data from + * @rx_buffer_pgcnt: rx buffer page refcount pre xdp_do_redirect() call * * This function will clean up the contents of the rx_buffer. It will * either recycle the buffer or unmap it and free the associated resources. 
*/ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, - struct i40e_rx_buffer *rx_buffer) + struct i40e_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) { - if (i40e_can_reuse_rx_page(rx_buffer)) { + if (i40e_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { /* hand second half of page back to the ring */ i40e_reuse_rx_page(rx_ring, rx_buffer); } else { @@ -2347,6 +2359,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) while (likely(total_rx_packets < (unsigned int)budget)) { struct i40e_rx_buffer *rx_buffer; union i40e_rx_desc *rx_desc; + int rx_buffer_pgcnt; unsigned int size; u64 qword; @@ -2389,7 +2402,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) break; i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb); - rx_buffer = i40e_get_rx_buffer(rx_ring, size); + rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt); /* retrieve a buffer from the ring */ if (!skb) { @@ -2432,7 +2445,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) break; } - i40e_put_rx_buffer(rx_ring, rx_buffer); + i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); cleaned_count++; if (i40e_is_non_eop(rx_ring, rx_desc, skb)) diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index a0723831c4e4..56725356a17b 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -304,7 +304,6 @@ struct ice_vsi { u8 irqs_ready:1; u8 current_isup:1; /* Sync 'link up' logging */ u8 stat_offsets_loaded:1; - u8 vlan_ena:1; u16 num_vlan; /* queue information */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 7db5fd977367..6d7e7dd0ebe2 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -904,8 +904,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) /* Query the allocated resources for Tx scheduler */ status = ice_sched_query_res_alloc(hw); if (status) { - ice_debug(hw, ICE_DBG_SCHED, - "Failed to get scheduler allocated resources\n"); + ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n"); goto err_unroll_alloc; } @@ -925,7 +924,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw) ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL); devm_kfree(ice_hw_to_dev(hw), pcaps); if (status) - goto err_unroll_sched; + dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n", + status); /* Initialize port_info struct with link information */ status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL); @@ -1044,8 +1044,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw) } if (cnt == grst_timeout) { - ice_debug(hw, ICE_DBG_INIT, - "Global reset polling failed to complete.\n"); + ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n"); return ICE_ERR_RESET_FAILED; } @@ -1063,16 +1062,14 @@ enum ice_status ice_check_reset(struct ice_hw *hw) for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) { reg = rd32(hw, GLNVM_ULD) & uld_mask; if (reg == uld_mask) { - ice_debug(hw, ICE_DBG_INIT, - "Global reset processes done. %d\n", cnt); + ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt); break; } mdelay(10); } if (cnt == ICE_PF_RESET_WAIT_COUNT) { - ice_debug(hw, ICE_DBG_INIT, - "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n", + ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. 
GLNVM_ULD = 0x%x\n", reg); return ICE_ERR_RESET_FAILED; } @@ -1124,8 +1121,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw) } if (cnt == ICE_PF_RESET_WAIT_COUNT) { - ice_debug(hw, ICE_DBG_INIT, - "PF reset polling failed to complete.\n"); + ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n"); return ICE_ERR_RESET_FAILED; } @@ -1578,8 +1574,7 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, goto ice_acquire_res_exit; if (status) - ice_debug(hw, ICE_DBG_RES, - "resource %d acquire type %d failed.\n", res, access); + ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access); /* If necessary, poll until the current lock owner timeouts */ timeout = time_left; @@ -1602,11 +1597,9 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, ice_acquire_res_exit: if (status == ICE_ERR_AQ_NO_WORK) { if (access == ICE_RES_WRITE) - ice_debug(hw, ICE_DBG_RES, - "resource indicates no work to do.\n"); + ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); else - ice_debug(hw, ICE_DBG_RES, - "Warning: ICE_ERR_AQ_NO_WORK not expected\n"); + ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n"); } return status; } @@ -1792,66 +1785,53 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, switch (cap) { case ICE_AQC_CAPS_VALID_FUNCTIONS: caps->valid_functions = number; - ice_debug(hw, ICE_DBG_INIT, - "%s: valid_functions (bitmap) = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, caps->valid_functions); break; case ICE_AQC_CAPS_SRIOV: caps->sr_iov_1_1 = (number == 1); - ice_debug(hw, ICE_DBG_INIT, - "%s: sr_iov_1_1 = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, caps->sr_iov_1_1); break; case ICE_AQC_CAPS_DCB: caps->dcb = (number == 1); caps->active_tc_bitmap = logical_id; caps->maxtc = phys_id; - ice_debug(hw, ICE_DBG_INIT, - "%s: dcb = %d\n", prefix, caps->dcb); - ice_debug(hw, ICE_DBG_INIT, - "%s: active_tc_bitmap = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); + ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, caps->active_tc_bitmap); - ice_debug(hw, ICE_DBG_INIT, - "%s: maxtc = %d\n", prefix, caps->maxtc); + ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); break; case ICE_AQC_CAPS_RSS: caps->rss_table_size = number; caps->rss_table_entry_width = logical_id; - ice_debug(hw, ICE_DBG_INIT, - "%s: rss_table_size = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, caps->rss_table_size); - ice_debug(hw, ICE_DBG_INIT, - "%s: rss_table_entry_width = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, caps->rss_table_entry_width); break; case ICE_AQC_CAPS_RXQS: caps->num_rxq = number; caps->rxq_first_id = phys_id; - ice_debug(hw, ICE_DBG_INIT, - "%s: num_rxq = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, caps->num_rxq); - ice_debug(hw, ICE_DBG_INIT, - "%s: rxq_first_id = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, caps->rxq_first_id); break; case ICE_AQC_CAPS_TXQS: caps->num_txq = number; caps->txq_first_id = phys_id; - ice_debug(hw, ICE_DBG_INIT, - "%s: num_txq = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, caps->num_txq); - ice_debug(hw, ICE_DBG_INIT, - "%s: txq_first_id = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, caps->txq_first_id); 
break; case ICE_AQC_CAPS_MSIX: caps->num_msix_vectors = number; caps->msix_vector_first_id = phys_id; - ice_debug(hw, ICE_DBG_INIT, - "%s: num_msix_vectors = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, caps->num_msix_vectors); - ice_debug(hw, ICE_DBG_INIT, - "%s: msix_vector_first_id = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, caps->msix_vector_first_id); break; case ICE_AQC_CAPS_PENDING_NVM_VER: @@ -1904,8 +1884,7 @@ ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) if (hw->dev_caps.num_funcs > 4) { /* Max 4 TCs per port */ caps->maxtc = 4; - ice_debug(hw, ICE_DBG_INIT, - "reducing maxtc to %d (based on #ports)\n", + ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", caps->maxtc); } } @@ -1973,11 +1952,9 @@ ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p) GLQF_FD_SIZE_FD_BSIZE_S; func_p->fd_fltr_best_effort = val; - ice_debug(hw, ICE_DBG_INIT, - "func caps: fd_fltr_guar = %d\n", + ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n", func_p->fd_fltr_guar); - ice_debug(hw, ICE_DBG_INIT, - "func caps: fd_fltr_best_effort = %d\n", + ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n", func_p->fd_fltr_best_effort); } @@ -2026,8 +2003,7 @@ ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, default: /* Don't list common capabilities as unknown */ if (!found) - ice_debug(hw, ICE_DBG_INIT, - "func caps: unknown capability[%d]: 0x%x\n", + ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n", i, cap); break; } @@ -2160,8 +2136,7 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, default: /* Don't list common capabilities as unknown */ if (!found) - ice_debug(hw, ICE_DBG_INIT, - "dev caps: unknown capability[%d]: 0x%x\n", + ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n", i, cap); break; } @@ -2618,8 +2593,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, /* Ensure that only valid bits of cfg->caps can be turned on. */ if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { - ice_debug(hw, ICE_DBG_PHY, - "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", + ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", cfg->caps); cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; @@ -3067,8 +3041,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) status = ice_update_link_info(pi); if (status) - ice_debug(pi->hw, ICE_DBG_LINK, - "get link status error, status = %d\n", + ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n", status); } @@ -3793,8 +3766,7 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, * of the endianness of the machine. */ if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { - ice_debug(hw, ICE_DBG_QCTX, - "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n", + ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... 
skipping write\n", f, ce_info[f].width, ce_info[f].size_of); continue; } @@ -4261,10 +4233,6 @@ ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, */ bool ice_fw_supports_link_override(struct ice_hw *hw) { - /* Currently, only supported for E810 devices */ - if (hw->mac_type != ICE_MAC_E810) - return false; - if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) { if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN) return true; @@ -4296,8 +4264,7 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, ICE_SR_LINK_DEFAULT_OVERRIDE_PTR); if (status) { - ice_debug(hw, ICE_DBG_INIT, - "Failed to read link override TLV.\n"); + ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n"); return status; } @@ -4308,8 +4275,7 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, /* link options first */ status = ice_read_sr_word(hw, tlv_start, &buf); if (status) { - ice_debug(hw, ICE_DBG_INIT, - "Failed to read override link options.\n"); + ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); return status; } ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M; @@ -4320,8 +4286,7 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET; status = ice_read_sr_word(hw, offset, &buf); if (status) { - ice_debug(hw, ICE_DBG_INIT, - "Failed to read override phy config.\n"); + ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n"); return status; } ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M; @@ -4331,8 +4296,7 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { status = ice_read_sr_word(hw, (offset + i), &buf); if (status) { - ice_debug(hw, ICE_DBG_INIT, - "Failed to read override link options.\n"); + ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); return status; } /* shift 16 bits at a time to fill 64 bits */ @@ -4345,8 +4309,7 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { status = ice_read_sr_word(hw, (offset + i), &buf); if (status) { - ice_debug(hw, ICE_DBG_INIT, - "Failed to read override link options.\n"); + ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); return status; } /* shift 16 bits at a time to fill 64 bits */ diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 1f46a7828be8..4db12d1f5808 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -717,8 +717,7 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) if (status != ICE_ERR_AQ_FW_CRITICAL) break; - ice_debug(hw, ICE_DBG_AQ_MSG, - "Retry Admin Queue init due to FW critical error\n"); + ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n"); ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); msleep(ICE_CTL_Q_ADMIN_INIT_MSEC); } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT); @@ -813,8 +812,7 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) details = ICE_CTL_Q_DETAILS(*sq, ntc); while (rd32(hw, cq->sq.head) != ntc) { - ice_debug(hw, ICE_DBG_AQ_MSG, - "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head)); + ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head)); memset(desc, 0, sizeof(*desc)); memset(details, 0, sizeof(*details)); 
ntc++; @@ -852,8 +850,7 @@ static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len) len = le16_to_cpu(cq_desc->datalen); - ice_debug(hw, ICE_DBG_AQ_DESC, - "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", + ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", le16_to_cpu(cq_desc->opcode), le16_to_cpu(cq_desc->flags), le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval)); @@ -925,8 +922,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, cq->sq_last_status = ICE_AQ_RC_OK; if (!cq->sq.count) { - ice_debug(hw, ICE_DBG_AQ_MSG, - "Control Send queue not initialized.\n"); + ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n"); status = ICE_ERR_AQ_EMPTY; goto sq_send_command_error; } @@ -938,8 +934,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (buf) { if (buf_size > cq->sq_buf_size) { - ice_debug(hw, ICE_DBG_AQ_MSG, - "Invalid buffer size for Control Send queue: %d.\n", + ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n", buf_size); status = ICE_ERR_INVAL_SIZE; goto sq_send_command_error; @@ -952,8 +947,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, val = rd32(hw, cq->sq.head); if (val >= cq->num_sq_entries) { - ice_debug(hw, ICE_DBG_AQ_MSG, - "head overrun at %d in the Control Send Queue ring\n", + ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n", val); status = ICE_ERR_AQ_EMPTY; goto sq_send_command_error; @@ -971,8 +965,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, * called in a separate thread in case of asynchronous completions. */ if (ice_clean_sq(hw, cq) == 0) { - ice_debug(hw, ICE_DBG_AQ_MSG, - "Error: Control Send Queue is full.\n"); + ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n"); status = ICE_ERR_AQ_FULL; goto sq_send_command_error; } @@ -1000,8 +993,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, } /* Debug desc and buffer */ - ice_debug(hw, ICE_DBG_AQ_DESC, - "ATQ: Control Send queue desc and buffer:\n"); + ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n"); ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size); @@ -1026,8 +1018,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, u16 copy_size = le16_to_cpu(desc->datalen); if (copy_size > buf_size) { - ice_debug(hw, ICE_DBG_AQ_MSG, - "Return len %d > than buf len %d\n", + ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n", copy_size, buf_size); status = ICE_ERR_AQ_ERROR; } else { @@ -1036,8 +1027,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, } retval = le16_to_cpu(desc->retval); if (retval) { - ice_debug(hw, ICE_DBG_AQ_MSG, - "Control Send Queue command 0x%04X completed with error 0x%X\n", + ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n", le16_to_cpu(desc->opcode), retval); @@ -1050,8 +1040,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, cq->sq_last_status = (enum ice_aq_err)retval; } - ice_debug(hw, ICE_DBG_AQ_MSG, - "ATQ: desc and buffer writeback:\n"); + ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n"); ice_debug_cq(hw, (void *)desc, buf, buf_size); @@ -1067,8 +1056,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n"); status = ICE_ERR_AQ_FW_CRITICAL; } else { - ice_debug(hw, ICE_DBG_AQ_MSG, - 
"Control Send Queue Writeback timeout.\n"); + ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n"); status = ICE_ERR_AQ_TIMEOUT; } } @@ -1124,8 +1112,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, mutex_lock(&cq->rq_lock); if (!cq->rq.count) { - ice_debug(hw, ICE_DBG_AQ_MSG, - "Control Receive queue not initialized.\n"); + ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n"); ret_code = ICE_ERR_AQ_EMPTY; goto clean_rq_elem_err; } @@ -1147,8 +1134,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, flags = le16_to_cpu(desc->flags); if (flags & ICE_AQ_FLAG_ERR) { ret_code = ICE_ERR_AQ_ERROR; - ice_debug(hw, ICE_DBG_AQ_MSG, - "Control Receive Queue Event 0x%04X received with error 0x%X\n", + ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n", le16_to_cpu(desc->opcode), cq->rq_last_status); } diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 9095b4d274ad..f5e81b555353 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -709,8 +709,7 @@ ice_acquire_global_cfg_lock(struct ice_hw *hw, if (!status) mutex_lock(&ice_global_cfg_lock_sw); else if (status == ICE_ERR_AQ_NO_WORK) - ice_debug(hw, ICE_DBG_PKG, - "Global config lock: No work to do\n"); + ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n"); return status; } @@ -909,8 +908,7 @@ ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) last, &offset, &info, NULL); if (status) { - ice_debug(hw, ICE_DBG_PKG, - "Update pkg failed: err %d off %d inf %d\n", + ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n", status, offset, info); break; } @@ -988,8 +986,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) /* Save AQ status from download package */ hw->pkg_dwnld_status = hw->adminq.sq_last_status; if (status) { - ice_debug(hw, ICE_DBG_PKG, - "Pkg download failed: err %d off %d inf %d\n", + ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", status, offset, info); break; @@ -1083,8 +1080,7 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft, meta_seg->pkg_name); } else { - ice_debug(hw, ICE_DBG_INIT, - "Did not find metadata segment in driver package\n"); + ice_debug(hw, ICE_DBG_INIT, "Did not find metadata segment in driver package\n"); return ICE_ERR_CFG; } @@ -1101,8 +1097,7 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) seg_hdr->seg_format_ver.draft, seg_hdr->seg_id); } else { - ice_debug(hw, ICE_DBG_INIT, - "Did not find ice segment in driver package\n"); + ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n"); return ICE_ERR_CFG; } @@ -1318,8 +1313,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, (*seg)->hdr.seg_format_ver.minor > pkg->pkg_info[i].ver.minor) { status = ICE_ERR_FW_DDP_MISMATCH; - ice_debug(hw, ICE_DBG_INIT, - "OS package is not compatible with NVM.\n"); + ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n"); } /* done processing NVM package so break */ break; @@ -1387,8 +1381,7 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) ice_init_pkg_hints(hw, seg); status = ice_download_pkg(hw, seg); if (status == ICE_ERR_AQ_NO_WORK) { - ice_debug(hw, ICE_DBG_INIT, - "package previously loaded - no work.\n"); + ice_debug(hw, 
ICE_DBG_INIT, "package previously loaded - no work.\n"); status = 0; } @@ -3261,8 +3254,7 @@ ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl) if (ent->profile_cookie == hdl) return true; - ice_debug(hw, ICE_DBG_INIT, - "Characteristic list for VSI group %d not found.\n", + ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n", vsig); return false; } diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c index eadc85aee389..89a0cef20506 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -708,57 +708,64 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, struct ice_flow_seg_info *segs, u8 segs_cnt, struct ice_flow_prof **prof) { - struct ice_flow_prof_params params; + struct ice_flow_prof_params *params; enum ice_status status; u8 i; if (!prof) return ICE_ERR_BAD_PTR; - memset(¶ms, 0, sizeof(params)); - params.prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params.prof), - GFP_KERNEL); - if (!params.prof) + params = kzalloc(sizeof(*params), GFP_KERNEL); + if (!params) return ICE_ERR_NO_MEMORY; + params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof), + GFP_KERNEL); + if (!params->prof) { + status = ICE_ERR_NO_MEMORY; + goto free_params; + } + /* initialize extraction sequence to all invalid (0xff) */ for (i = 0; i < ICE_MAX_FV_WORDS; i++) { - params.es[i].prot_id = ICE_PROT_INVALID; - params.es[i].off = ICE_FV_OFFSET_INVAL; + params->es[i].prot_id = ICE_PROT_INVALID; + params->es[i].off = ICE_FV_OFFSET_INVAL; } - params.blk = blk; - params.prof->id = prof_id; - params.prof->dir = dir; - params.prof->segs_cnt = segs_cnt; + params->blk = blk; + params->prof->id = prof_id; + params->prof->dir = dir; + params->prof->segs_cnt = segs_cnt; /* Make a copy of the segments that need to be persistent in the flow * profile instance */ for (i = 0; i < segs_cnt; i++) - memcpy(¶ms.prof->segs[i], &segs[i], sizeof(*segs)); + memcpy(¶ms->prof->segs[i], &segs[i], sizeof(*segs)); - status = ice_flow_proc_segs(hw, ¶ms); + status = ice_flow_proc_segs(hw, params); if (status) { - ice_debug(hw, ICE_DBG_FLOW, - "Error processing a flow's packet segments\n"); + ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n"); goto out; } /* Add a HW profile for this flow profile */ - status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes, params.es); + status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes, + params->es); if (status) { ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n"); goto out; } - INIT_LIST_HEAD(¶ms.prof->entries); - mutex_init(¶ms.prof->entries_lock); - *prof = params.prof; + INIT_LIST_HEAD(¶ms->prof->entries); + mutex_init(¶ms->prof->entries_lock); + *prof = params->prof; out: if (status) - devm_kfree(ice_hw_to_dev(hw), params.prof); + devm_kfree(ice_hw_to_dev(hw), params->prof); +free_params: + kfree(params); return status; } @@ -827,8 +834,7 @@ ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk, if (!status) set_bit(vsi_handle, prof->vsis); else - ice_debug(hw, ICE_DBG_FLOW, - "HW profile add failed, %d\n", + ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n", status); } @@ -859,8 +865,7 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk, if (!status) clear_bit(vsi_handle, prof->vsis); else - ice_debug(hw, ICE_DBG_FLOW, - "HW profile remove failed, %d\n", + ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n", status); } diff --git 
a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 2dea4d0e9415..c52b9bb0e3ab 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -224,7 +224,7 @@ static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc) if (vsi->type != ICE_VSI_PF) return 0; - if (vsi->vlan_ena) { + if (vsi->num_vlan > 1) { status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m, set_promisc); } else { @@ -326,7 +326,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) /* check for changes in promiscuous modes */ if (changed_flags & IFF_ALLMULTI) { if (vsi->current_netdev_flags & IFF_ALLMULTI) { - if (vsi->vlan_ena) + if (vsi->num_vlan > 1) promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; else promisc_m = ICE_MCAST_PROMISC_BITS; @@ -340,7 +340,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) } } else { /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */ - if (vsi->vlan_ena) + if (vsi->num_vlan > 1) promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; else promisc_m = ICE_MCAST_PROMISC_BITS; @@ -667,7 +667,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) speed = "100 M"; break; default: - speed = "Unknown"; + speed = "Unknown "; break; } @@ -3116,10 +3116,8 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, * packets aren't pruned by the device's internal switch on Rx */ ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI); - if (!ret) { - vsi->vlan_ena = true; + if (!ret) set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); - } return ret; } @@ -3158,7 +3156,6 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi)) ret = ice_cfg_vlan_pruning(vsi, false, false); - vsi->vlan_ena = false; set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); return ret; } diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 5903a36763de..f729cd0c6224 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -55,7 +55,7 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, * * Reads a portion of the NVM, as a flat memory space. This function correctly * breaks read requests across Shadow RAM sectors and ensures that no single - * read request exceeds the maximum 4Kb read for a single AdminQ command. + * read request exceeds the maximum 4KB read for a single AdminQ command. * * Returns a status code on failure. Note that the data pointer may be * partially updated if some reads succeed before a failure. @@ -73,18 +73,17 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, /* Verify the length of the read if this is for the Shadow RAM */ if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) { - ice_debug(hw, ICE_DBG_NVM, - "NVM error: requested offset is beyond Shadow RAM limit\n"); + ice_debug(hw, ICE_DBG_NVM, "NVM error: requested offset is beyond Shadow RAM limit\n"); return ICE_ERR_PARAM; } do { u32 read_size, sector_offset; - /* ice_aq_read_nvm cannot read more than 4Kb at a time. + /* ice_aq_read_nvm cannot read more than 4KB at a time. * Additionally, a read from the Shadow RAM may not cross over * a sector boundary. Conveniently, the sector size is also - * 4Kb. + * 4KB. 
*/ sector_offset = offset % ICE_AQ_MAX_BUF_LEN; read_size = min_t(u32, ICE_AQ_MAX_BUF_LEN - sector_offset, @@ -196,7 +195,7 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) * Shadow RAM sector restrictions necessary when reading from the NVM. */ status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes, - (u8 *)&data_local, true); + (__force u8 *)&data_local, true); if (status) return status; @@ -397,8 +396,7 @@ static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw) status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len, ICE_SR_BOOT_CFG_PTR); if (status) { - ice_debug(hw, ICE_DBG_INIT, - "Failed to read Boot Configuration Block TLV.\n"); + ice_debug(hw, ICE_DBG_INIT, "Failed to read Boot Configuration Block TLV.\n"); return status; } @@ -406,8 +404,7 @@ static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw) * (Combo Image Version High and Combo Image Version Low) */ if (boot_cfg_tlv_len < 2) { - ice_debug(hw, ICE_DBG_INIT, - "Invalid Boot Configuration Block TLV size.\n"); + ice_debug(hw, ICE_DBG_INIT, "Invalid Boot Configuration Block TLV size.\n"); return ICE_ERR_INVAL_SIZE; } @@ -542,14 +539,12 @@ static enum ice_status ice_discover_flash_size(struct ice_hw *hw) status = ice_read_flat_nvm(hw, offset, &len, &data, false); if (status == ICE_ERR_AQ_ERROR && hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) { - ice_debug(hw, ICE_DBG_NVM, - "%s: New upper bound of %u bytes\n", + ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n", __func__, offset); status = 0; max_size = offset; } else if (!status) { - ice_debug(hw, ICE_DBG_NVM, - "%s: New lower bound of %u bytes\n", + ice_debug(hw, ICE_DBG_NVM, "%s: New lower bound of %u bytes\n", __func__, offset); min_size = offset; } else { @@ -558,8 +553,7 @@ static enum ice_status ice_discover_flash_size(struct ice_hw *hw) } } - ice_debug(hw, ICE_DBG_NVM, - "Predicted flash size is %u bytes\n", max_size); + ice_debug(hw, ICE_DBG_NVM, "Predicted flash size is %u bytes\n", max_size); hw->nvm.flash_size = max_size; @@ -600,15 +594,13 @@ enum ice_status ice_init_nvm(struct ice_hw *hw) } else { /* Blank programming mode */ nvm->blank_nvm_mode = true; - ice_debug(hw, ICE_DBG_NVM, - "NVM init error: unsupported blank mode.\n"); + ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n"); return ICE_ERR_NVM_BLANK_MODE; } status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver); if (status) { - ice_debug(hw, ICE_DBG_INIT, - "Failed to read DEV starter version.\n"); + ice_debug(hw, ICE_DBG_INIT, "Failed to read DEV starter version.\n"); return status; } nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT; @@ -629,37 +621,10 @@ enum ice_status ice_init_nvm(struct ice_hw *hw) status = ice_discover_flash_size(hw); if (status) { - ice_debug(hw, ICE_DBG_NVM, - "NVM init error: failed to discover flash size.\n"); + ice_debug(hw, ICE_DBG_NVM, "NVM init error: failed to discover flash size.\n"); return status; } - switch (hw->device_id) { - /* the following devices do not have boot_cfg_tlv yet */ - case ICE_DEV_ID_E823C_BACKPLANE: - case ICE_DEV_ID_E823C_QSFP: - case ICE_DEV_ID_E823C_SFP: - case ICE_DEV_ID_E823C_10G_BASE_T: - case ICE_DEV_ID_E823C_SGMII: - case ICE_DEV_ID_E822C_BACKPLANE: - case ICE_DEV_ID_E822C_QSFP: - case ICE_DEV_ID_E822C_10G_BASE_T: - case ICE_DEV_ID_E822C_SGMII: - case ICE_DEV_ID_E822C_SFP: - case ICE_DEV_ID_E822L_BACKPLANE: - case ICE_DEV_ID_E822L_SFP: - case ICE_DEV_ID_E822L_10G_BASE_T: - case ICE_DEV_ID_E822L_SGMII: - case ICE_DEV_ID_E823L_BACKPLANE: 
- case ICE_DEV_ID_E823L_SFP: - case ICE_DEV_ID_E823L_10G_BASE_T: - case ICE_DEV_ID_E823L_1GBE: - case ICE_DEV_ID_E823L_QSFP: - return status; - default: - break; - } - status = ice_get_orom_ver_info(hw); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n"); diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 44a228530253..f0912e44d4ad 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -164,8 +164,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, parent = ice_sched_find_node_by_teid(pi->root, le32_to_cpu(info->parent_teid)); if (!parent) { - ice_debug(hw, ICE_DBG_SCHED, - "Parent Node not found for parent_teid=0x%x\n", + ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n", le32_to_cpu(info->parent_teid)); return ICE_ERR_PARAM; } @@ -704,8 +703,7 @@ static void ice_sched_clear_rl_prof(struct ice_port_info *pi) rl_prof_elem->prof_id_ref = 0; status = ice_sched_del_rl_profile(hw, rl_prof_elem); if (status) { - ice_debug(hw, ICE_DBG_SCHED, - "Remove rl profile failed\n"); + ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n"); /* On error, free mem required */ list_del(&rl_prof_elem->list_entry); devm_kfree(ice_hw_to_dev(hw), rl_prof_elem); @@ -863,8 +861,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, for (i = 0; i < num_nodes; i++) { status = ice_sched_add_node(pi, layer, &buf->generic[i]); if (status) { - ice_debug(hw, ICE_DBG_SCHED, - "add nodes in SW DB failed status =%d\n", + ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n", status); break; } @@ -872,8 +869,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, teid = le32_to_cpu(buf->generic[i].node_teid); new_node = ice_sched_find_node_by_teid(parent, teid); if (!new_node) { - ice_debug(hw, ICE_DBG_SCHED, - "Node is missing for teid =%d\n", teid); + ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid); break; } @@ -1830,8 +1826,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) continue; if (ice_sched_is_leaf_node_present(vsi_node)) { - ice_debug(pi->hw, ICE_DBG_SCHED, - "VSI has leaf nodes in TC %d\n", i); + ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i); status = ICE_ERR_IN_USE; goto exit_sched_rm_vsi_cfg; } @@ -1896,8 +1891,7 @@ static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi) list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp, &pi->rl_prof_list[ln], list_entry) { if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem)) - ice_debug(pi->hw, ICE_DBG_SCHED, - "Removed rl profile\n"); + ice_debug(pi->hw, ICE_DBG_SCHED, "Removed rl profile\n"); } } } @@ -2441,8 +2435,7 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, /* Remove old profile ID from database */ status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem); if (status && status != ICE_ERR_IN_USE) - ice_debug(pi->hw, ICE_DBG_SCHED, - "Remove rl profile failed\n"); + ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n"); break; } if (status == ICE_ERR_IN_USE) diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index c3a6c41385ee..c33612132ddf 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -537,8 +537,7 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type, pi->dflt_rx_vsi_num 
= ICE_DFLT_VSI_INVAL; break; default: - ice_debug(pi->hw, ICE_DBG_SW, - "incorrect VSI/port type received\n"); + ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n"); break; } } @@ -1476,8 +1475,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, tmp_fltr_info.vsi_handle = rem_vsi_handle; status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info); if (status) { - ice_debug(hw, ICE_DBG_SW, - "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n", + ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n", tmp_fltr_info.fwd_id.hw_vsi_id, status); return status; } @@ -1493,8 +1491,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, /* Remove the VSI list since it is no longer used */ status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type); if (status) { - ice_debug(hw, ICE_DBG_SW, - "Failed to remove VSI list %d, error %d\n", + ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n", vsi_list_id, status); return status; } @@ -1853,8 +1850,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) */ if (v_list_itr->vsi_count > 1 && v_list_itr->vsi_list_info->ref_cnt > 1) { - ice_debug(hw, ICE_DBG_SW, - "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n"); + ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n"); status = ICE_ERR_CFG; goto exit; } @@ -2740,8 +2736,7 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, ice_aqc_opc_free_res, NULL); if (status) - ice_debug(hw, ICE_DBG_SW, - "counter resource could not be freed\n"); + ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n"); kfree(buf); return status; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 77d5eae6b4c2..a2d0aad8cfdd 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -762,13 +762,15 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size) /** * ice_can_reuse_rx_page - Determine if page can be reused for another Rx * @rx_buf: buffer containing the page + * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call * * If page is reusable, we have a green light for calling ice_reuse_rx_page, * which will assign the current buffer to the buffer that next_to_alloc is * pointing to; otherwise, the DMA mapping needs to be destroyed and * page freed */ -static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf) +static bool +ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt) { unsigned int pagecnt_bias = rx_buf->pagecnt_bias; struct page *page = rx_buf->page; @@ -779,7 +781,7 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf) #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ - if (unlikely((page_count(page) - pagecnt_bias) > 1)) + if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1)) return false; #else #define ICE_LAST_OFFSET \ @@ -864,17 +866,24 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf) * @rx_ring: Rx descriptor ring to transact packets on * @skb: skb to be used * @size: size of buffer to add to skb + * @rx_buf_pgcnt: rx_buf page refcount * * This function will pull an Rx buffer from the ring and synchronize it * for use by the CPU. 
*/ static struct ice_rx_buf * ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb, - const unsigned int size) + const unsigned int size, int *rx_buf_pgcnt) { struct ice_rx_buf *rx_buf; rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; + *rx_buf_pgcnt = +#if (PAGE_SIZE < 8192) + page_count(rx_buf->page); +#else + 0; +#endif prefetchw(rx_buf->page); *skb = rx_buf->skb; @@ -1006,12 +1015,15 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, * ice_put_rx_buf - Clean up used buffer and either recycle or free * @rx_ring: Rx descriptor ring to transact packets on * @rx_buf: Rx buffer to pull data from + * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect() * * This function will update next_to_clean and then clean up the contents * of the rx_buf. It will either recycle the buffer or unmap it and free * the associated resources. */ -static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) +static void +ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, + int rx_buf_pgcnt) { u16 ntc = rx_ring->next_to_clean + 1; @@ -1022,7 +1034,7 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) if (!rx_buf) return; - if (ice_can_reuse_rx_page(rx_buf)) { + if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) { /* hand second half of page back to the ring */ ice_reuse_rx_page(rx_ring, rx_buf); } else { @@ -1097,6 +1109,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) struct sk_buff *skb; unsigned int size; u16 stat_err_bits; + int rx_buf_pgcnt; u16 vlan_tag = 0; u8 rx_ptype; @@ -1119,7 +1132,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) dma_rmb(); if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) { - ice_put_rx_buf(rx_ring, NULL); + ice_put_rx_buf(rx_ring, NULL, 0); cleaned_count++; continue; } @@ -1128,7 +1141,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) ICE_RX_FLX_DESC_PKT_LEN_M; /* retrieve a buffer from the ring */ - rx_buf = ice_get_rx_buf(rx_ring, &skb, size); + rx_buf = ice_get_rx_buf(rx_ring, &skb, size, &rx_buf_pgcnt); if (!size) { xdp.data = NULL; @@ -1168,7 +1181,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) total_rx_pkts++; cleaned_count++; - ice_put_rx_buf(rx_ring, rx_buf); + ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); continue; construct_skb: if (skb) { @@ -1187,7 +1200,7 @@ construct_skb: break; } - ice_put_rx_buf(rx_ring, rx_buf); + ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt); cleaned_count++; /* skip if it is NOP desc */ diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 0286d2fceee4..aaa954aae574 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -138,6 +138,8 @@ struct vf_mac_filter { /* this is the size past which hardware will drop packets when setting LPE=0 */ #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 +#define IGB_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) + /* Supported Rx Buffer Sizes */ #define IGB_RXBUFFER_256 256 #define IGB_RXBUFFER_1536 1536 @@ -247,6 +249,9 @@ enum igb_tx_flags { #define IGB_SFF_ADDRESSING_MODE 0x4 #define IGB_SFF_8472_UNSUP 0x00 +/* TX resources are shared between XDP and netstack + * and we need to tag the buffer type to distinguish them + */ enum igb_tx_buf_type { IGB_TYPE_SKB = 0, IGB_TYPE_XDP, diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 6a4ef4934fcf..03f78fdb0dcd 100644 --- 
a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2824,20 +2824,25 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, } } -static int igb_xdp_setup(struct net_device *dev, struct bpf_prog *prog) +static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf) { - int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD; struct igb_adapter *adapter = netdev_priv(dev); + struct bpf_prog *prog = bpf->prog, *old_prog; bool running = netif_running(dev); - struct bpf_prog *old_prog; bool need_reset; /* verify igb ring attributes are sufficient for XDP */ for (i = 0; i < adapter->num_rx_queues; i++) { struct igb_ring *ring = adapter->rx_ring[i]; - if (frame_size > igb_rx_bufsz(ring)) + if (frame_size > igb_rx_bufsz(ring)) { + NL_SET_ERR_MSG_MOD(bpf->extack, + "The RX buffer size is too small for the frame size"); + netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n", + igb_rx_bufsz(ring), frame_size); return -EINVAL; + } } old_prog = xchg(&adapter->xdp_prog, prog); @@ -2869,7 +2874,7 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: - return igb_xdp_setup(dev, xdp->prog); + return igb_xdp_setup(dev, xdp); default: return -EINVAL; } @@ -2910,10 +2915,12 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp) */ tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL; if (unlikely(!tx_ring)) - return -ENXIO; + return IGB_XDP_CONSUMED; nq = txring_txq(tx_ring); __netif_tx_lock(nq, cpu); + /* Avoid transmit queue timeout since we share it with the slow path */ + nq->trans_start = jiffies; ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf); __netif_tx_unlock(nq); @@ -2946,6 +2953,9 @@ static int igb_xdp_xmit(struct net_device *dev, int n, nq = txring_txq(tx_ring); __netif_tx_lock(nq, cpu); + /* Avoid transmit queue timeout since we share it with the slow path */ + nq->trans_start = jiffies; + for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; int err; @@ -3950,8 +3960,7 @@ static int igb_sw_init(struct igb_adapter *adapter) /* set default work limits */ adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; - adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + - VLAN_HLEN; + adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD; adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; spin_lock_init(&adapter->nfc_lock); @@ -6491,7 +6500,7 @@ static void igb_get_stats64(struct net_device *netdev, static int igb_change_mtu(struct net_device *netdev, int new_mtu) { struct igb_adapter *adapter = netdev_priv(netdev); - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD; if (adapter->xdp_prog) { int i; @@ -6500,7 +6509,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) struct igb_ring *ring = adapter->rx_ring[i]; if (max_frame > igb_rx_bufsz(ring)) { - netdev_warn(adapter->netdev, "Requested MTU size is not supported with XDP\n"); + netdev_warn(adapter->netdev, + "Requested MTU size is not supported with XDP. 
Max frame size is %d\n", + max_frame); return -EINVAL; } } @@ -8351,6 +8362,7 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start); #endif + unsigned int metasize = xdp->data - xdp->data_meta; struct sk_buff *skb; /* prefetch first cache line of first page */ @@ -8365,6 +8377,9 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, skb_reserve(skb, xdp->data - xdp->data_hard_start); __skb_put(skb, xdp->data_end - xdp->data); + if (metasize) + skb_metadata_set(skb, metasize); + /* pull timestamp out of packet data */ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb); @@ -8771,7 +8786,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) rx_ring->skb = skb; if (xdp_xmit & IGB_XDP_REDIR) - xdp_do_flush_map(); + xdp_do_flush(); if (xdp_xmit & IGB_XDP_TX) { struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter); diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index fd37d2c203af..d0700d48ecf9 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -213,6 +213,7 @@ static s32 igc_get_invariants_base(struct igc_hw *hw) case IGC_DEV_ID_I220_V: case IGC_DEV_ID_I225_K: case IGC_DEV_ID_I225_K2: + case IGC_DEV_ID_I226_K: case IGC_DEV_ID_I225_LMVP: case IGC_DEV_ID_I225_IT: case IGC_DEV_ID_I226_LM: diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index 55dae7c4703f..9da5f83ce456 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -23,6 +23,7 @@ #define IGC_DEV_ID_I225_K 0x3100 #define IGC_DEV_ID_I225_K2 0x3101 #define IGC_DEV_ID_I225_LMVP 0x5502 +#define IGC_DEV_ID_I226_K 0x5504 #define IGC_DEV_ID_I225_IT 0x0D9F #define IGC_DEV_ID_I226_LM 0x125B #define IGC_DEV_ID_I226_V 0x125C diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index b673ac1199bb..afd6a62da29d 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -45,6 +45,7 @@ static const struct pci_device_id igc_pci_tbl[] = { { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base }, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 50e6b8b6ba7b..393d1c2cd853 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1945,7 +1945,8 @@ static inline bool ixgbe_page_is_reserved(struct page *page) return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); } -static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer) +static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer, + int rx_buffer_pgcnt) { unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; struct page *page = rx_buffer->page; @@ -1956,7 +1957,7 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer) #if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ - if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) + 
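[Editor's note] The igb_build_skb() hunk above makes XDP metadata survive into the skb: whatever an XDP program stashed between data_meta and data is mirrored to the stack via skb_metadata_set(). A hypothetical BPF counterpart showing what is being propagated (the program name and stored value are illustrative, built with libbpf):

        #include <linux/bpf.h>
        #include <bpf/bpf_helpers.h>

        SEC("xdp")
        int xdp_store_mark(struct xdp_md *ctx)
        {
                void *data;
                __u32 *meta;

                /* grow the metadata area by 4 bytes, in front of the packet */
                if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
                        return XDP_PASS;

                data = (void *)(long)ctx->data;
                meta = (void *)(long)ctx->data_meta;
                if ((void *)(meta + 1) > data)  /* verifier bounds check */
                        return XDP_PASS;

                *meta = 0x42;   /* readable later, e.g. by a TC classifier */
                return XDP_PASS;
        }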
if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) return false; #else /* The last offset is a bit aggressive in that we assume the @@ -2021,11 +2022,18 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff **skb, - const unsigned int size) + const unsigned int size, + int *rx_buffer_pgcnt) { struct ixgbe_rx_buffer *rx_buffer; rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + *rx_buffer_pgcnt = +#if (PAGE_SIZE < 8192) + page_count(rx_buffer->page); +#else + 0; +#endif prefetchw(rx_buffer->page); *skb = rx_buffer->skb; @@ -2055,9 +2063,10 @@ skip_sync: static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, - struct sk_buff *skb) + struct sk_buff *skb, + int rx_buffer_pgcnt) { - if (ixgbe_can_reuse_rx_page(rx_buffer)) { + if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { /* hand second half of page back to the ring */ ixgbe_reuse_rx_page(rx_ring, rx_buffer); } else { @@ -2303,6 +2312,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *rx_buffer; struct sk_buff *skb; + int rx_buffer_pgcnt; unsigned int size; /* return some buffers to hardware, one at a time is too slow */ @@ -2322,7 +2332,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, */ dma_rmb(); - rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size); + rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt); /* retrieve a buffer from the ring */ if (!skb) { @@ -2367,7 +2377,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, break; } - ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb); + ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt); cleaned_count++; /* place incomplete frames back on ring for completion */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c index 67471cb2b129..24c2bfdfec4e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c @@ -497,18 +497,14 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block) int rvu_npa_init(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; - int blkaddr, err; + int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); if (blkaddr < 0) return 0; /* Initialize admin queue */ - err = npa_aq_init(rvu, &hw->block[blkaddr]); - if (err) - return err; - - return 0; + return npa_aq_init(rvu, &hw->block[blkaddr]); } void rvu_npa_freemem(struct rvu *rvu) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c index 7bcf5246350f..56390a664517 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c @@ -12,7 +12,6 @@ static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, ptp_info); struct ptp_req *req; - int err; if (!ptp->nic) return -ENODEV; @@ -24,11 +23,7 @@ static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) req->op = PTP_OP_ADJFINE; req->scaled_ppm = scaled_ppm; - err = otx2_sync_mbox_msg(&ptp->nic->mbox); - if (err) - return err; - - return 0; + return otx2_sync_mbox_msg(&ptp->nic->mbox); } static u64 ptp_cc_read(const struct cyclecounter *cc) diff --git 
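[Editor's note] The rvu_npa.c and otx2_ptp.c hunks above, and the prestera/mtk/mlx4 hunks that follow, are all instances of the same mechanical cleanup, shown here in the abstract (some_init is a stand-in):

        -       err = some_init(...);
        -       if (err)
        -               return err;
        -
        -       return 0;
        +       return some_init(...);

When the callee's status is returned unchanged as the function's last action, the temporary and the branch add nothing; behavior is identical.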
a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index 0f20e0788cce..25dd903a3e92 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -93,15 +93,10 @@ static int prestera_port_open(struct net_device *dev) static int prestera_port_close(struct net_device *dev) { struct prestera_port *port = netdev_priv(dev); - int err; netif_stop_queue(dev); - err = prestera_hw_port_state_set(port, false); - if (err) - return err; - - return 0; + return prestera_hw_port_state_set(port, false); } static netdev_tx_t prestera_port_xmit(struct sk_buff *skb, @@ -318,8 +313,10 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id) goto err_port_init; } - if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX) + if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX) { + err = -EINVAL; goto err_port_init; + } /* firmware requires that port's MAC address consist of the first * 5 bytes of the base MAC address diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c index 0fe97155dd8f..6bc9f2487384 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_path.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c @@ -241,17 +241,13 @@ out: int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id) { - int err, path; + int path; path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_SGMII : MTK_ETH_PATH_GMAC2_SGMII; /* Setup proper MUXes along the path */ - err = mtk_eth_mux_setup(eth, path); - if (err) - return err; - - return 0; + return mtk_eth_mux_setup(eth, path); } int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id) diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 3b8576b9c2f9..f7053a74e6a8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -462,19 +462,14 @@ EXPORT_SYMBOL_GPL(mlx4_cq_free); int mlx4_init_cq_table(struct mlx4_dev *dev) { struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table; - int err; spin_lock_init(&cq_table->lock); INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC); if (mlx4_is_slave(dev)) return 0; - err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs, - dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0); - if (err) - return err; - - return 0; + return mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs, + dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0); } void mlx4_cleanup_cq_table(struct mlx4_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 157f7eef92f1..32aad4d32b88 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1378,8 +1378,10 @@ static void mlx4_en_tx_timeout(struct net_device *dev, unsigned int txqueue) tx_ring->cons, tx_ring->prod); priv->port_stats.tx_timeout++; - en_dbg(DRV, priv, "Scheduling watchdog\n"); - queue_work(mdev->workqueue, &priv->watchdog_task); + if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) { + en_dbg(DRV, priv, "Scheduling port restart\n"); + queue_work(mdev->workqueue, &priv->restart_task); + } } @@ -1733,6 +1735,7 @@ int mlx4_en_start_port(struct net_device *dev) mlx4_en_deactivate_cq(priv, cq); goto tx_err; } + clear_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &tx_ring->state); if (t != TX_XDP) { tx_ring->tx_queue = netdev_get_tx_queue(dev, i); tx_ring->recycle_ring = NULL; @@ -1829,6 +1832,7 @@ int 
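[Editor's note] The mlx4_en changes beginning here replace unconditional watchdog scheduling with a single-shot idiom that every error site repeats; isolated for clarity:

        /* test_and_set_bit() returns the previous bit value, so only the
         * first error path to observe the failure queues the restart work;
         * mlx4_en_start_port() clears the bit once the port is up again.
         */
        if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
                queue_work(mdev->workqueue, &priv->restart_task);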
mlx4_en_start_port(struct net_device *dev) local_bh_enable(); } + clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state); netif_tx_start_all_queues(dev); netif_device_attach(dev); @@ -1999,7 +2003,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) static void mlx4_en_restart(struct work_struct *work) { struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, - watchdog_task); + restart_task); struct mlx4_en_dev *mdev = priv->mdev; struct net_device *dev = priv->dev; @@ -2376,7 +2380,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) if (netif_running(dev)) { mutex_lock(&mdev->state_lock); if (!mdev->device_up) { - /* NIC is probably restarting - let watchdog task reset + /* NIC is probably restarting - let restart task reset * the port */ en_dbg(DRV, priv, "Change MTU called with card down!?\n"); } else { @@ -2385,7 +2389,9 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) if (err) { en_err(priv, "Failed restarting port:%d\n", priv->port); - queue_work(mdev->workqueue, &priv->watchdog_task); + if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, + &priv->state)) + queue_work(mdev->workqueue, &priv->restart_task); } } mutex_unlock(&mdev->state_lock); @@ -2791,7 +2797,8 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) if (err) { en_err(priv, "Failed starting port %d for XDP change\n", priv->port); - queue_work(mdev->workqueue, &priv->watchdog_task); + if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) + queue_work(mdev->workqueue, &priv->restart_task); } } @@ -3164,7 +3171,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); spin_lock_init(&priv->stats_lock); INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); - INIT_WORK(&priv->watchdog_task, mlx4_en_restart); + INIT_WORK(&priv->restart_task, mlx4_en_restart); INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index b15ec32758a3..31b74bddb7cd 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -392,6 +392,35 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) return cnt; } +static void mlx4_en_handle_err_cqe(struct mlx4_en_priv *priv, struct mlx4_err_cqe *err_cqe, + u16 cqe_index, struct mlx4_en_tx_ring *ring) +{ + struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_tx_info *tx_info; + struct mlx4_en_tx_desc *tx_desc; + u16 wqe_index; + int desc_size; + + en_err(priv, "CQE error - cqn 0x%x, ci 0x%x, vendor syndrome: 0x%x syndrome: 0x%x\n", + ring->sp_cqn, cqe_index, err_cqe->vendor_err_syndrome, err_cqe->syndrome); + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, sizeof(*err_cqe), + false); + + wqe_index = be16_to_cpu(err_cqe->wqe_index) & ring->size_mask; + tx_info = &ring->tx_info[wqe_index]; + desc_size = tx_info->nr_txbb << LOG_TXBB_SIZE; + en_err(priv, "Related WQE - qpn 0x%x, wqe index 0x%x, wqe size 0x%x\n", ring->qpn, + wqe_index, desc_size); + tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE); + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, tx_desc, desc_size, false); + + if (test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) + return; + + en_err(priv, "Scheduling port restart\n"); + 
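+       /* Editorial note on the dump math above, assuming this driver's
+        * LOG_TXBB_SIZE = 6 (one TX basic block = 1 << 6 = 64 bytes):
+        * wqe_index is masked to the ring size, desc_size = nr_txbb * 64
+        * (nr_txbb = 4 dumps 256 bytes), and tx_desc sits at byte offset
+        * wqe_index * 64, so the hex dump covers exactly the failing WQE
+        * rather than the whole ring.
+        */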
queue_work(mdev->workqueue, &priv->restart_task); +} + int mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int napi_budget) { @@ -438,13 +467,10 @@ int mlx4_en_process_tx_cq(struct net_device *dev, dma_rmb(); if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == - MLX4_CQE_OPCODE_ERROR)) { - struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe; - - en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n", - cqe_err->vendor_err_syndrome, - cqe_err->syndrome); - } + MLX4_CQE_OPCODE_ERROR)) + if (!test_and_set_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &ring->state)) + mlx4_en_handle_err_cqe(priv, (struct mlx4_err_cqe *)cqe, index, + ring); /* Skip over last polled CQE */ new_index = be16_to_cpu(cqe->wqe_index) & size_mask; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 1c50d0f22199..17f2b1919378 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -251,6 +251,10 @@ struct mlx4_en_page_cache { } buf[MLX4_EN_CACHE_SIZE]; }; +enum { + MLX4_EN_TX_RING_STATE_RECOVERING, +}; + struct mlx4_en_priv; struct mlx4_en_tx_ring { @@ -297,6 +301,7 @@ struct mlx4_en_tx_ring { * Only queue_stopped might be used if BQL is not properly working. */ unsigned long queue_stopped; + unsigned long state; struct mlx4_hwq_resources sp_wqres; struct mlx4_qp sp_qp; struct mlx4_qp_context sp_context; @@ -510,6 +515,10 @@ struct mlx4_en_stats_bitmap { struct mutex mutex; /* for mutual access to stats bitmap */ }; +enum { + MLX4_EN_STATE_FLAG_RESTARTING, +}; + struct mlx4_en_priv { struct mlx4_en_dev *mdev; struct mlx4_en_port_profile *prof; @@ -575,7 +584,7 @@ struct mlx4_en_priv { struct mlx4_en_cq *rx_cq[MAX_RX_RINGS]; struct mlx4_qp drop_qp; struct work_struct rx_mode_task; - struct work_struct watchdog_task; + struct work_struct restart_task; struct work_struct linkstate_task; struct delayed_work stats_task; struct delayed_work service_task; @@ -620,6 +629,7 @@ struct mlx4_en_priv { u32 pflags; u8 rss_key[MLX4_EN_RSS_KEY_SIZE]; u8 rss_hash_fn; + unsigned long state; }; enum mlx4_en_wol { diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c index cbe4d9746ddf..dd890f5d7b72 100644 --- a/drivers/net/ethernet/mellanox/mlx4/srq.c +++ b/drivers/net/ethernet/mellanox/mlx4/srq.c @@ -272,19 +272,14 @@ EXPORT_SYMBOL_GPL(mlx4_srq_query); int mlx4_init_srq_table(struct mlx4_dev *dev) { struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; - int err; spin_lock_init(&srq_table->lock); INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC); if (mlx4_is_slave(dev)) return 0; - err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs, - dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0); - if (err) - return err; - - return 0; + return mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs, + dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0); } void mlx4_cleanup_srq_table(struct mlx4_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 99f1ec3b2575..6e4d7bb7fea2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -6,6 +6,7 @@ config MLX5_CORE tristate "Mellanox 5th generation network adapters (ConnectX series) core driver" depends on PCI + select AUXILIARY_BUS select NET_DEVLINK depends on VXLAN || !VXLAN depends on MLXFW || !MLXFW @@ -198,6 +199,7 @@ config MLX5_EN_TLS config 
MLX5_SW_STEERING bool "Mellanox Technologies software-managed steering" depends on MLX5_CORE_EN && MLX5_ESWITCH + select CRC32 default y help Build support for software-managed steering in the NIC. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 83a67ca43a41..77961643d5a9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -25,7 +25,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \ en_selftest.o en/port.o en/monitor_stats.o en/health.o \ en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \ - en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o + en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o # # Netdev extra diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 1972ddd12704..b051417ede67 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -31,313 +31,484 @@ */ #include <linux/mlx5/driver.h> +#include <linux/mlx5/eswitch.h> +#include <linux/mlx5/mlx5_ifc_vdpa.h> #include "mlx5_core.h" -static LIST_HEAD(intf_list); -static LIST_HEAD(mlx5_dev_list); /* intf dev list mutex */ static DEFINE_MUTEX(mlx5_intf_mutex); +static DEFINE_IDA(mlx5_adev_ida); -struct mlx5_device_context { - struct list_head list; - struct mlx5_interface *intf; - void *context; - unsigned long state; -}; +static bool is_eth_rep_supported(struct mlx5_core_dev *dev) +{ + if (!IS_ENABLED(CONFIG_MLX5_ESWITCH)) + return false; -enum { - MLX5_INTERFACE_ADDED, - MLX5_INTERFACE_ATTACHED, -}; + if (!MLX5_ESWITCH_MANAGER(dev)) + return false; + if (!is_mdev_switchdev_mode(dev)) + return false; -void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) + return true; +} + +static bool is_eth_supported(struct mlx5_core_dev *dev) { - struct mlx5_device_context *dev_ctx; - struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); + if (!IS_ENABLED(CONFIG_MLX5_CORE_EN)) + return false; - if (!mlx5_lag_intf_add(intf, priv)) - return; + if (is_eth_rep_supported(dev)) + return false; - dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL); - if (!dev_ctx) - return; + if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return false; - dev_ctx->intf = intf; + if (!MLX5_CAP_GEN(dev, eth_net_offloads)) { + mlx5_core_warn(dev, "Missing eth_net_offloads capability\n"); + return false; + } - dev_ctx->context = intf->add(dev); - if (dev_ctx->context) { - set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); - if (intf->attach) - set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); + if (!MLX5_CAP_GEN(dev, nic_flow_table)) { + mlx5_core_warn(dev, "Missing nic_flow_table capability\n"); + return false; + } - spin_lock_irq(&priv->ctx_lock); - list_add_tail(&dev_ctx->list, &priv->ctx_list); - spin_unlock_irq(&priv->ctx_lock); + if (!MLX5_CAP_ETH(dev, csum_cap)) { + mlx5_core_warn(dev, "Missing csum_cap capability\n"); + return false; } - if (!dev_ctx->context) - kfree(dev_ctx); + if (!MLX5_CAP_ETH(dev, max_lso_cap)) { + mlx5_core_warn(dev, "Missing max_lso_cap capability\n"); + return false; + } + + if (!MLX5_CAP_ETH(dev, vlan_cap)) { + mlx5_core_warn(dev, "Missing vlan_cap capability\n"); + return false; + } + + if (!MLX5_CAP_ETH(dev, rss_ind_tbl_cap)) { + mlx5_core_warn(dev, "Missing rss_ind_tbl_cap capability\n"); + return false; + } + + if 
(MLX5_CAP_FLOWTABLE(dev, + flow_table_properties_nic_receive.max_ft_level) < 3) { + mlx5_core_warn(dev, "max_ft_level < 3\n"); + return false; + } + + if (!MLX5_CAP_ETH(dev, self_lb_en_modifiable)) + mlx5_core_warn(dev, "Self loop back prevention is not supported\n"); + if (!MLX5_CAP_GEN(dev, cq_moderation)) + mlx5_core_warn(dev, "CQ moderation is not supported\n"); + + return true; } -static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf, - struct mlx5_priv *priv) +static bool is_vnet_supported(struct mlx5_core_dev *dev) { - struct mlx5_device_context *dev_ctx; + if (!IS_ENABLED(CONFIG_MLX5_VDPA_NET)) + return false; - list_for_each_entry(dev_ctx, &priv->ctx_list, list) - if (dev_ctx->intf == intf) - return dev_ctx; - return NULL; + if (mlx5_core_is_pf(dev)) + return false; + + if (!(MLX5_CAP_GEN_64(dev, general_obj_types) & + MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q)) + return false; + + if (!(MLX5_CAP_DEV_VDPA_EMULATION(dev, event_mode) & + MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE)) + return false; + + if (!MLX5_CAP_DEV_VDPA_EMULATION(dev, eth_frame_offload_type)) + return false; + + return true; } -void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv) +static bool is_ib_rep_supported(struct mlx5_core_dev *dev) { - struct mlx5_device_context *dev_ctx; - struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); + if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) + return false; - dev_ctx = mlx5_get_device(intf, priv); - if (!dev_ctx) - return; + if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV) + return false; - spin_lock_irq(&priv->ctx_lock); - list_del(&dev_ctx->list); - spin_unlock_irq(&priv->ctx_lock); + if (!is_eth_rep_supported(dev)) + return false; - if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) - intf->remove(dev, dev_ctx->context); + if (!MLX5_ESWITCH_MANAGER(dev)) + return false; - kfree(dev_ctx); + if (!is_mdev_switchdev_mode(dev)) + return false; + + if (mlx5_core_mp_enabled(dev)) + return false; + + return true; } -static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv) +static bool is_mp_supported(struct mlx5_core_dev *dev) { - struct mlx5_device_context *dev_ctx; - struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); - - dev_ctx = mlx5_get_device(intf, priv); - if (!dev_ctx) - return; - - if (intf->attach) { - if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)) - return; - if (intf->attach(dev, dev_ctx->context)) - return; - set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); - } else { - if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) - return; - dev_ctx->context = intf->add(dev); - if (!dev_ctx->context) - return; - set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); - } + if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) + return false; + + if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV) + return false; + + if (is_ib_rep_supported(dev)) + return false; + + if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return false; + + if (!mlx5_core_is_mp_slave(dev)) + return false; + + return true; } -void mlx5_attach_device(struct mlx5_core_dev *dev) +static bool is_ib_supported(struct mlx5_core_dev *dev) { - struct mlx5_priv *priv = &dev->priv; - struct mlx5_interface *intf; + if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) + return false; - mutex_lock(&mlx5_intf_mutex); - list_for_each_entry(intf, &intf_list, list) - mlx5_attach_interface(intf, priv); - mutex_unlock(&mlx5_intf_mutex); + if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV) + return false; + 
+ if (is_ib_rep_supported(dev)) + return false; + + if (is_mp_supported(dev)) + return false; + + return true; +} + +enum { + MLX5_INTERFACE_PROTOCOL_ETH_REP, + MLX5_INTERFACE_PROTOCOL_ETH, + + MLX5_INTERFACE_PROTOCOL_IB_REP, + MLX5_INTERFACE_PROTOCOL_MPIB, + MLX5_INTERFACE_PROTOCOL_IB, + + MLX5_INTERFACE_PROTOCOL_VNET, +}; + +static const struct mlx5_adev_device { + const char *suffix; + bool (*is_supported)(struct mlx5_core_dev *dev); +} mlx5_adev_devices[] = { + [MLX5_INTERFACE_PROTOCOL_VNET] = { .suffix = "vnet", + .is_supported = &is_vnet_supported }, + [MLX5_INTERFACE_PROTOCOL_IB] = { .suffix = "rdma", + .is_supported = &is_ib_supported }, + [MLX5_INTERFACE_PROTOCOL_ETH] = { .suffix = "eth", + .is_supported = &is_eth_supported }, + [MLX5_INTERFACE_PROTOCOL_ETH_REP] = { .suffix = "eth-rep", + .is_supported = &is_eth_rep_supported }, + [MLX5_INTERFACE_PROTOCOL_IB_REP] = { .suffix = "rdma-rep", + .is_supported = &is_ib_rep_supported }, + [MLX5_INTERFACE_PROTOCOL_MPIB] = { .suffix = "multiport", + .is_supported = &is_mp_supported }, +}; + +int mlx5_adev_idx_alloc(void) +{ + return ida_alloc(&mlx5_adev_ida, GFP_KERNEL); } -static void mlx5_detach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv) +void mlx5_adev_idx_free(int idx) { - struct mlx5_device_context *dev_ctx; - struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); - - dev_ctx = mlx5_get_device(intf, priv); - if (!dev_ctx) - return; - - if (intf->detach) { - if (!test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)) - return; - intf->detach(dev, dev_ctx->context); - clear_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); - } else { - if (!test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) - return; - intf->remove(dev, dev_ctx->context); - clear_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); - } + ida_free(&mlx5_adev_ida, idx); } -void mlx5_detach_device(struct mlx5_core_dev *dev) +int mlx5_adev_init(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; - struct mlx5_interface *intf; - mutex_lock(&mlx5_intf_mutex); - list_for_each_entry(intf, &intf_list, list) - mlx5_detach_interface(intf, priv); - mutex_unlock(&mlx5_intf_mutex); + priv->adev = kcalloc(ARRAY_SIZE(mlx5_adev_devices), + sizeof(struct mlx5_adev *), GFP_KERNEL); + if (!priv->adev) + return -ENOMEM; + + return 0; } -bool mlx5_device_registered(struct mlx5_core_dev *dev) +void mlx5_adev_cleanup(struct mlx5_core_dev *dev) { - struct mlx5_priv *priv; - bool found = false; + struct mlx5_priv *priv = &dev->priv; - mutex_lock(&mlx5_intf_mutex); - list_for_each_entry(priv, &mlx5_dev_list, dev_list) - if (priv == &dev->priv) - found = true; - mutex_unlock(&mlx5_intf_mutex); + kfree(priv->adev); +} + +static void adev_release(struct device *dev) +{ + struct mlx5_adev *mlx5_adev = + container_of(dev, struct mlx5_adev, adev.dev); + struct mlx5_priv *priv = &mlx5_adev->mdev->priv; + int idx = mlx5_adev->idx; - return found; + kfree(mlx5_adev); + priv->adev[idx] = NULL; } -void mlx5_register_device(struct mlx5_core_dev *dev) +static struct mlx5_adev *add_adev(struct mlx5_core_dev *dev, int idx) +{ + const char *suffix = mlx5_adev_devices[idx].suffix; + struct auxiliary_device *adev; + struct mlx5_adev *madev; + int ret; + + madev = kzalloc(sizeof(*madev), GFP_KERNEL); + if (!madev) + return ERR_PTR(-ENOMEM); + + adev = &madev->adev; + adev->id = dev->priv.adev_idx; + adev->name = suffix; + adev->dev.parent = dev->device; + adev->dev.release = adev_release; + madev->mdev = dev; + madev->idx = idx; + + ret = auxiliary_device_init(adev); + if (ret) 
{ + kfree(madev); + return ERR_PTR(ret); + } + + ret = auxiliary_device_add(adev); + if (ret) { + auxiliary_device_uninit(adev); + return ERR_PTR(ret); + } + return madev; +} + +static void del_adev(struct auxiliary_device *adev) +{ + auxiliary_device_delete(adev); + auxiliary_device_uninit(adev); +} + +int mlx5_attach_device(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; - struct mlx5_interface *intf; + struct auxiliary_device *adev; + struct auxiliary_driver *adrv; + int ret = 0, i; mutex_lock(&mlx5_intf_mutex); - list_add_tail(&priv->dev_list, &mlx5_dev_list); - list_for_each_entry(intf, &intf_list, list) - mlx5_add_device(intf, priv); + for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) { + if (!priv->adev[i]) { + bool is_supported = false; + + if (mlx5_adev_devices[i].is_supported) + is_supported = mlx5_adev_devices[i].is_supported(dev); + + if (!is_supported) + continue; + + priv->adev[i] = add_adev(dev, i); + if (IS_ERR(priv->adev[i])) { + ret = PTR_ERR(priv->adev[i]); + priv->adev[i] = NULL; + } + } else { + adev = &priv->adev[i]->adev; + adrv = to_auxiliary_drv(adev->dev.driver); + + if (adrv->resume) + ret = adrv->resume(adev); + } + if (ret) { + mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n", + i, mlx5_adev_devices[i].suffix); + + break; + } + } mutex_unlock(&mlx5_intf_mutex); + return ret; } -void mlx5_unregister_device(struct mlx5_core_dev *dev) +void mlx5_detach_device(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; - struct mlx5_interface *intf; + struct auxiliary_device *adev; + struct auxiliary_driver *adrv; + pm_message_t pm = {}; + int i; mutex_lock(&mlx5_intf_mutex); - list_for_each_entry_reverse(intf, &intf_list, list) - mlx5_remove_device(intf, priv); - list_del(&priv->dev_list); + for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) { + if (!priv->adev[i]) + continue; + + adev = &priv->adev[i]->adev; + adrv = to_auxiliary_drv(adev->dev.driver); + + if (adrv->suspend) { + adrv->suspend(adev, pm); + continue; + } + + del_adev(&priv->adev[i]->adev); + priv->adev[i] = NULL; + } mutex_unlock(&mlx5_intf_mutex); } -int mlx5_register_interface(struct mlx5_interface *intf) +int mlx5_register_device(struct mlx5_core_dev *dev) { - struct mlx5_priv *priv; - - if (!intf->add || !intf->remove) - return -EINVAL; + int ret; mutex_lock(&mlx5_intf_mutex); - list_add_tail(&intf->list, &intf_list); - list_for_each_entry(priv, &mlx5_dev_list, dev_list) - mlx5_add_device(intf, priv); + dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV; + ret = mlx5_rescan_drivers_locked(dev); mutex_unlock(&mlx5_intf_mutex); + if (ret) + mlx5_unregister_device(dev); - return 0; + return ret; } -EXPORT_SYMBOL(mlx5_register_interface); -void mlx5_unregister_interface(struct mlx5_interface *intf) +void mlx5_unregister_device(struct mlx5_core_dev *dev) { - struct mlx5_priv *priv; - mutex_lock(&mlx5_intf_mutex); - list_for_each_entry(priv, &mlx5_dev_list, dev_list) - mlx5_remove_device(intf, priv); - list_del(&intf->list); + dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV; + mlx5_rescan_drivers_locked(dev); mutex_unlock(&mlx5_intf_mutex); } -EXPORT_SYMBOL(mlx5_unregister_interface); -/* Must be called with intf_mutex held */ -static bool mlx5_has_added_dev_by_protocol(struct mlx5_core_dev *mdev, int protocol) +static int add_drivers(struct mlx5_core_dev *dev) { - struct mlx5_device_context *dev_ctx; - struct mlx5_interface *intf; - bool found = false; - - list_for_each_entry(intf, &intf_list, list) { - if (intf->protocol == protocol) { - dev_ctx = 
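[Editor's note] add_adev() above is the device half of the auxiliary-bus contract; the driver half lives in another module. A hypothetical consumer for the "eth" entry, using only the stock auxiliary bus API (mlx5e_probe/mlx5e_remove and the binding are placeholders, not the real conversion). The match string is the registering module's KBUILD_MODNAME plus the table suffix, so the device registers on the bus as mlx5_core.eth.<adev_idx>:

        static const struct auxiliary_device_id mlx5e_id_table[] = {
                { .name = "mlx5_core.eth" },
                {},
        };
        MODULE_DEVICE_TABLE(auxiliary, mlx5e_id_table);

        static int mlx5e_probe(struct auxiliary_device *adev,
                               const struct auxiliary_device_id *id)
        {
                struct mlx5_adev *madev = container_of(adev, struct mlx5_adev, adev);
                struct mlx5_core_dev *mdev = madev->mdev; /* parent's shared object */

                /* ... build the netdev on top of mdev ... */
                return 0;
        }

        static int mlx5e_remove(struct auxiliary_device *adev)
        {
                /* ... teardown ... */
                return 0;
        }

        static struct auxiliary_driver mlx5e_driver = {
                .name = "eth",
                .probe = mlx5e_probe,
                .remove = mlx5e_remove,
                .id_table = mlx5e_id_table,
        };
        /* registered from module init with auxiliary_driver_register() */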
mlx5_get_device(intf, &mdev->priv); - if (dev_ctx && test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) - found = true; - break; + struct mlx5_priv *priv = &dev->priv; + int i, ret = 0; + + for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) { + bool is_supported = false; + + if (priv->adev[i]) + continue; + + if (mlx5_adev_devices[i].is_supported) + is_supported = mlx5_adev_devices[i].is_supported(dev); + + if (!is_supported) + continue; + + priv->adev[i] = add_adev(dev, i); + if (IS_ERR(priv->adev[i])) { + mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n", + i, mlx5_adev_devices[i].suffix); + /* We continue to rescan drivers and leave to the caller + * to make decision if to release everything or continue. + */ + ret = PTR_ERR(priv->adev[i]); + priv->adev[i] = NULL; } } - - return found; + return ret; } -void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol) +static void delete_drivers(struct mlx5_core_dev *dev) { - mutex_lock(&mlx5_intf_mutex); - if (mlx5_has_added_dev_by_protocol(mdev, protocol)) { - mlx5_remove_dev_by_protocol(mdev, protocol); - mlx5_add_dev_by_protocol(mdev, protocol); + struct mlx5_priv *priv = &dev->priv; + bool delete_all; + int i; + + delete_all = priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV; + + for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) { + bool is_supported = false; + + if (!priv->adev[i]) + continue; + + if (mlx5_adev_devices[i].is_supported && !delete_all) + is_supported = mlx5_adev_devices[i].is_supported(dev); + + if (is_supported) + continue; + + del_adev(&priv->adev[i]->adev); + priv->adev[i] = NULL; } - mutex_unlock(&mlx5_intf_mutex); } -/* Must be called with intf_mutex held */ -void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol) +/* This function is used after mlx5_core_dev is reconfigured. + */ +int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev) { - struct mlx5_interface *intf; + struct mlx5_priv *priv = &dev->priv; - list_for_each_entry(intf, &intf_list, list) - if (intf->protocol == protocol) { - mlx5_add_device(intf, &dev->priv); - break; - } -} + lockdep_assert_held(&mlx5_intf_mutex); -/* Must be called with intf_mutex held */ -void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol) -{ - struct mlx5_interface *intf; + delete_drivers(dev); + if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV) + return 0; - list_for_each_entry(intf, &intf_list, list) - if (intf->protocol == protocol) { - mlx5_remove_device(intf, &dev->priv); - break; - } + return add_drivers(dev); } -static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev) +static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev) { return (u32)((pci_domain_nr(dev->pdev->bus) << 16) | (dev->pdev->bus->number << 8) | PCI_SLOT(dev->pdev->devfn)); } -/* Must be called with intf_mutex held */ +static int next_phys_dev(struct device *dev, const void *data) +{ + struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev); + struct mlx5_core_dev *mdev = madev->mdev; + const struct mlx5_core_dev *curr = data; + + if (!mlx5_core_is_pf(mdev)) + return 0; + + if (mdev == curr) + return 0; + + if (mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr)) + return 0; + + return 1; +} + +/* This function is called with two flows: + * 1. During initialization of mlx5_core_dev and we don't need to lock it. + * 2. During LAG configure stage and caller holds &mlx5_intf_mutex. 
+ */ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) { - struct mlx5_core_dev *res = NULL; - struct mlx5_core_dev *tmp_dev; - struct mlx5_priv *priv; - u32 pci_id; + struct auxiliary_device *adev; + struct mlx5_adev *madev; if (!mlx5_core_is_pf(dev)) return NULL; - pci_id = mlx5_gen_pci_id(dev); - list_for_each_entry(priv, &mlx5_dev_list, dev_list) { - tmp_dev = container_of(priv, struct mlx5_core_dev, priv); - if (!mlx5_core_is_pf(tmp_dev)) - continue; - - if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) { - res = tmp_dev; - break; - } - } + adev = auxiliary_find_device(NULL, dev, &next_phys_dev); + if (!adev) + return NULL; - return res; + madev = container_of(adev, struct mlx5_adev, adev); + put_device(&adev->dev); + return madev->mdev; } - void mlx5_dev_list_lock(void) { mutex_lock(&mlx5_intf_mutex); } - void mlx5_dev_list_unlock(void) { mutex_unlock(&mlx5_intf_mutex); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index e2ed341648e4..3261d0dc1104 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -43,7 +43,7 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, u32 running_fw, stored_fw; int err; - err = devlink_info_driver_name_put(req, DRIVER_NAME); + err = devlink_info_driver_name_put(req, KBUILD_MODNAME); if (err) return err; @@ -212,7 +212,7 @@ static int mlx5_devlink_fs_mode_validate(struct devlink *devlink, u32 id, u8 eswitch_mode; bool smfs_cap; - eswitch_mode = mlx5_eswitch_mode(dev->priv.eswitch); + eswitch_mode = mlx5_eswitch_mode(dev); smfs_cap = mlx5_fs_dr_is_supported(dev); if (!smfs_cap) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 2f05b0f9de01..a1a81cfeb607 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -227,6 +227,7 @@ enum mlx5e_priv_flag { MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, MLX5E_PFLAG_XDP_TX_MPWQE, MLX5E_PFLAG_SKB_TX_MPWQE, + MLX5E_PFLAG_TX_PORT_TS, MLX5E_NUM_PFLAGS, /* Keep last */ }; @@ -282,10 +283,12 @@ struct mlx5e_cq { u16 event_ctr; struct napi_struct *napi; struct mlx5_core_cq mcq; - struct mlx5e_channel *channel; + struct mlx5e_ch_stats *ch_stats; /* control */ + struct net_device *netdev; struct mlx5_core_dev *mdev; + struct mlx5e_priv *priv; struct mlx5_wq_ctrl wq_ctrl; } ____cacheline_aligned_in_smp; @@ -329,6 +332,15 @@ struct mlx5e_tx_mpwqe { u8 inline_on; }; +struct mlx5e_skb_fifo { + struct sk_buff **fifo; + u16 *pc; + u16 *cc; + u16 mask; +}; + +struct mlx5e_ptpsq; + struct mlx5e_txqsq { /* data path */ @@ -349,11 +361,10 @@ struct mlx5e_txqsq { /* read only */ struct mlx5_wq_cyc wq; u32 dma_fifo_mask; - u16 skb_fifo_mask; struct mlx5e_sq_stats *stats; struct { struct mlx5e_sq_dma *dma_fifo; - struct sk_buff **skb_fifo; + struct mlx5e_skb_fifo skb_fifo; struct mlx5e_tx_wqe_info *wqe_info; } db; void __iomem *uar_map; @@ -367,14 +378,17 @@ struct mlx5e_txqsq { unsigned int hw_mtu; struct hwtstamp_config *tstamp; struct mlx5_clock *clock; + struct net_device *netdev; + struct mlx5_core_dev *mdev; + struct mlx5e_priv *priv; /* control path */ struct mlx5_wq_ctrl wq_ctrl; - struct mlx5e_channel *channel; int ch_ix; int txq_ix; u32 rate_limit; struct work_struct recover_work; + struct mlx5e_ptpsq *ptpsq; } ____cacheline_aligned_in_smp; struct mlx5e_dma_info { @@ -593,7 +607,6 @@ struct mlx5e_rq { u8 map_dir; /* dma map 
direction */ } buff; - struct mlx5e_channel *channel; struct device *pdev; struct net_device *netdev; struct mlx5e_rq_stats *stats; @@ -602,6 +615,8 @@ struct mlx5e_rq { struct mlx5e_page_cache page_cache; struct hwtstamp_config *tstamp; struct mlx5_clock *clock; + struct mlx5e_icosq *icosq; + struct mlx5e_priv *priv; mlx5e_fp_handle_rx_cqe handle_rx_cqe; mlx5e_fp_post_rx_wqes post_wqes; @@ -681,8 +696,11 @@ struct mlx5e_channel { int cpu; }; +struct mlx5e_port_ptp; + struct mlx5e_channels { struct mlx5e_channel **c; + struct mlx5e_port_ptp *port_ptp; unsigned int num; struct mlx5e_params params; }; @@ -697,6 +715,12 @@ struct mlx5e_channel_stats { struct mlx5e_xdpsq_stats xsksq; } ____cacheline_aligned_in_smp; +struct mlx5e_port_ptp_stats { + struct mlx5e_ch_stats ch; + struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC]; + struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC]; +} ____cacheline_aligned_in_smp; + enum { MLX5E_STATE_OPENED, MLX5E_STATE_DESTROYING, @@ -766,8 +790,10 @@ struct mlx5e_scratchpad { struct mlx5e_priv { /* priv data path fields - start */ - struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC]; + /* +1 for port ptp ts */ + struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC]; int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC]; + int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC]; #ifdef CONFIG_MLX5_CORE_EN_DCB struct mlx5e_dcbx_dp dcbx_dp; #endif @@ -802,12 +828,15 @@ struct mlx5e_priv { struct net_device *netdev; struct mlx5e_stats stats; struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS]; + struct mlx5e_port_ptp_stats port_ptp_stats; u16 max_nch; u8 max_opened_tc; + bool port_ptp_opened; struct hwtstamp_config tstamp; u16 q_counter; u16 drop_rq_q_counter; struct notifier_block events_nb; + int num_tc_x_num_ch; struct udp_tunnel_nic_info nic_info; #ifdef CONFIG_MLX5_CORE_EN_DCB @@ -923,9 +952,17 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_xdpsq *sq, bool is_redirect); void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq); +struct mlx5e_create_cq_param { + struct napi_struct *napi; + struct mlx5e_ch_stats *ch_stats; + int node; + int ix; +}; + struct mlx5e_cq_param; -int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder, - struct mlx5e_cq_param *param, struct mlx5e_cq *cq); +int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder, + struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp, + struct mlx5e_cq *cq); void mlx5e_close_cq(struct mlx5e_cq *cq); int mlx5e_open_locked(struct net_device *netdev); @@ -974,7 +1011,17 @@ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq); int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, struct mlx5e_modify_sq_param *p); void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq); +void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq); +void mlx5e_free_txqsq(struct mlx5e_txqsq *sq); void mlx5e_tx_disable_queue(struct netdev_queue *txq); +int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa); +void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq); +struct mlx5e_create_sq_param; +int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev, + struct mlx5e_sq_param *param, + struct mlx5e_create_sq_param *csp, + u32 *sqn); +void mlx5e_tx_err_cqe_work(struct work_struct *recover_work); static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index dc744702aee4..5749557749b0 100644 --- 
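[Editor's note] struct mlx5e_skb_fifo above carries only pointers to externally owned producer/consumer counters plus a power-of-two mask; the PTP code further down pushes on transmit and pops on the port-timestamp completion. Access helpers sketched from how that code uses them (the real ones live in the datapath headers):

        static inline void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo,
                                               struct sk_buff *skb)
        {
                fifo->fifo[(*fifo->pc)++ & fifo->mask] = skb;
        }

        static inline struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo)
        {
                return fifo->fifo[(*fifo->cc)++ & fifo->mask];
        }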
a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -287,8 +287,7 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv); int mlx5e_create_flow_steering(struct mlx5e_priv *priv); void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); -bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type); -bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev); +u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt); #endif /* __MLX5E_FLOW_STEER_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c index 69a05da0e3e3..718f8c0a4f6b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c @@ -37,13 +37,12 @@ int mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg) int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) { - struct mlx5e_priv *priv = cq->channel->priv; u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {}; u8 hw_status; void *cqc; int err; - err = mlx5_core_query_cq(priv->mdev, &cq->mcq, out); + err = mlx5_core_query_cq(cq->mdev, &cq->mcq, out); if (err) return err; @@ -158,10 +157,8 @@ void mlx5e_health_channels_update(struct mlx5e_priv *priv) DEVLINK_HEALTH_REPORTER_STATE_HEALTHY); } -int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn) +int mlx5e_health_sq_to_ready(struct mlx5_core_dev *mdev, struct net_device *dev, u32 sqn) { - struct mlx5_core_dev *mdev = channel->mdev; - struct net_device *dev = channel->netdev; struct mlx5e_modify_sq_param msp = {}; int err; @@ -206,21 +203,22 @@ out: return err; } -int mlx5e_health_channel_eq_recover(struct mlx5_eq_comp *eq, struct mlx5e_channel *channel) +int mlx5e_health_channel_eq_recover(struct net_device *dev, struct mlx5_eq_comp *eq, + struct mlx5e_ch_stats *stats) { u32 eqe_count; - netdev_err(channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n", + netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n", eq->core.eqn, eq->core.cons_index, eq->core.irqn); eqe_count = mlx5_eq_poll_irq_disabled(eq); if (!eqe_count) return -EIO; - netdev_err(channel->netdev, "Recovered %d eqes on EQ 0x%x\n", + netdev_err(dev, "Recovered %d eqes on EQ 0x%x\n", eqe_count, eq->core.eqn); - channel->stats->eq_rearm++; + stats->eq_rearm++; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h index b9aadddfd000..018262d0164b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h @@ -7,8 +7,6 @@ #include "en.h" #include "diag/rsc_dump.h" -#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND) - static inline bool cqe_syndrome_needs_recover(u8 syndrome) { return syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR || @@ -42,8 +40,9 @@ struct mlx5e_err_ctx { void *ctx; }; -int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn); -int mlx5e_health_channel_eq_recover(struct mlx5_eq_comp *eq, struct mlx5e_channel *channel); +int mlx5e_health_sq_to_ready(struct mlx5_core_dev *mdev, struct net_device *dev, u32 sqn); +int mlx5e_health_channel_eq_recover(struct net_device *dev, struct mlx5_eq_comp *eq, + struct mlx5e_ch_stats *stats); int mlx5e_health_recover_channels(struct mlx5e_priv *priv); int mlx5e_health_report(struct mlx5e_priv *priv, struct devlink_health_reporter *reporter, char *err_str, diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h index 187007ad3349..807147d97a0f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h @@ -41,6 +41,15 @@ struct mlx5e_channel_param { struct mlx5e_sq_param async_icosq; }; +struct mlx5e_create_sq_param { + struct mlx5_wq_ctrl *wq_ctrl; + u32 cqn; + u32 ts_cqe_to_dest_cqn; + u32 tisn; + u8 tis_lst_sz; + u8 min_inline_mode; +}; + static inline bool mlx5e_qid_get_ch_if_in_group(struct mlx5e_params *params, u16 qid, enum mlx5e_rq_group group, @@ -102,6 +111,7 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, /* Build queue parameters */ +void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c); void mlx5e_build_rq_param(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c new file mode 100644 index 000000000000..351118985a57 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -0,0 +1,529 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2020 Mellanox Technologies + +#include "en/ptp.h" +#include "en/txrx.h" +#include "lib/clock.h" + +struct mlx5e_skb_cb_hwtstamp { + ktime_t cqe_hwtstamp; + ktime_t port_hwtstamp; +}; + +void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb) +{ + memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp)); +} + +static struct mlx5e_skb_cb_hwtstamp *mlx5e_skb_cb_get_hwts(struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct mlx5e_skb_cb_hwtstamp) > sizeof(skb->cb)); + return (struct mlx5e_skb_cb_hwtstamp *)skb->cb; +} + +static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb, + struct mlx5e_ptp_cq_stats *cq_stats) +{ + struct skb_shared_hwtstamps hwts = {}; + ktime_t diff; + + diff = abs(mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp - + mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp); + + /* Maximal allowed diff is 1 / 128 second */ + if (diff > (NSEC_PER_SEC >> 7)) { + cq_stats->abort++; + cq_stats->abort_abs_diff_ns += diff; + return; + } + + hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp; + skb_tstamp_tx(skb, &hwts); +} + +void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type, + ktime_t hwtstamp, + struct mlx5e_ptp_cq_stats *cq_stats) +{ + switch (hwtstamp_type) { + case (MLX5E_SKB_CB_CQE_HWTSTAMP): + mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp = hwtstamp; + break; + case (MLX5E_SKB_CB_PORT_HWTSTAMP): + mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp = hwtstamp; + break; + } + + /* If both CQEs arrive, check and report the port tstamp, and clear skb cb as + * skb soon to be released. 
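+ * (Editorial note: NSEC_PER_SEC >> 7 used above is 1,000,000,000 / 128
+ * = 7,812,500 ns, i.e. ~7.8 ms; a CQE/port timestamp pair further apart
+ * than that is counted in abort/abort_abs_diff_ns and never reported to
+ * the socket.)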
+ */ + if (!mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp || + !mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp) + return; + + mlx5e_skb_cb_hwtstamp_tx(skb, cq_stats); + memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp)); +} + +static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq, + struct mlx5_cqe64 *cqe, + int budget) +{ + struct sk_buff *skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo); + ktime_t hwtstamp; + + if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { + ptpsq->cq_stats->err_cqe++; + goto out; + } + + hwtstamp = mlx5_timecounter_cyc2time(ptpsq->txqsq.clock, get_cqe_ts(cqe)); + mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP, + hwtstamp, ptpsq->cq_stats); + ptpsq->cq_stats->cqe++; + +out: + napi_consume_skb(skb, budget); +} + +static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget) +{ + struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq); + struct mlx5_cqwq *cqwq = &cq->wq; + struct mlx5_cqe64 *cqe; + int work_done = 0; + + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state))) + return false; + + cqe = mlx5_cqwq_get_cqe(cqwq); + if (!cqe) + return false; + + do { + mlx5_cqwq_pop(cqwq); + + mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget); + } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq))); + + mlx5_cqwq_update_db_record(cqwq); + + /* ensure cq space is freed before enabling more cqes */ + wmb(); + + return work_done == budget; +} + +static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget) +{ + struct mlx5e_port_ptp *c = container_of(napi, struct mlx5e_port_ptp, + napi); + struct mlx5e_ch_stats *ch_stats = c->stats; + bool busy = false; + int work_done = 0; + int i; + + rcu_read_lock(); + + ch_stats->poll++; + + for (i = 0; i < c->num_tc; i++) { + busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget); + busy |= mlx5e_ptp_poll_ts_cq(&c->ptpsq[i].ts_cq, budget); + } + + if (busy) { + work_done = budget; + goto out; + } + + if (unlikely(!napi_complete_done(napi, work_done))) + goto out; + + ch_stats->arm++; + + for (i = 0; i < c->num_tc; i++) { + mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq); + mlx5e_cq_arm(&c->ptpsq[i].ts_cq); + } + +out: + rcu_read_unlock(); + + return work_done; +} + +static int mlx5e_ptp_alloc_txqsq(struct mlx5e_port_ptp *c, int txq_ix, + struct mlx5e_params *params, + struct mlx5e_sq_param *param, + struct mlx5e_txqsq *sq, int tc, + struct mlx5e_ptpsq *ptpsq) +{ + void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); + struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_wq_cyc *wq = &sq->wq; + int err; + int node; + + sq->pdev = c->pdev; + sq->tstamp = c->tstamp; + sq->clock = &mdev->clock; + sq->mkey_be = c->mkey_be; + sq->netdev = c->netdev; + sq->priv = c->priv; + sq->mdev = mdev; + sq->ch_ix = c->ix; + sq->txq_ix = txq_ix; + sq->uar_map = mdev->mlx5e_res.bfreg.map; + sq->min_inline_mode = params->tx_min_inline_mode; + sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + sq->stats = &c->priv->port_ptp_stats.sq[tc]; + sq->ptpsq = ptpsq; + INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work); + if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert)) + set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state); + sq->stop_room = param->stop_room; + + node = dev_to_node(mlx5_core_dma_dev(mdev)); + + param->wq.db_numa_node = node; + err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); + if (err) + return err; + wq->db = &wq->db[MLX5_SND_DBR]; + + err = mlx5e_alloc_txqsq_db(sq, node); + if (err) + goto err_sq_wq_destroy; + + return 0; + +err_sq_wq_destroy: + mlx5_wq_destroy(&sq->wq_ctrl); + + return err; +} 
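[Editor's note] For the flow handled by mlx5e_ptp_handle_ts_cqe() above to work, the transmit path must have parked the skb first. A sketch of that side, with an assumed helper name; the extra reference matters because both the ordinary TX completion and the port-timestamp completion touch the skb:

        static void mlx5e_ptpsq_track_skb(struct mlx5e_ptpsq *ptpsq,
                                          struct sk_buff *skb)
        {
                mlx5e_skb_cb_hwtstamp_init(skb);        /* zero both timestamps */
                mlx5e_skb_fifo_push(&ptpsq->skb_fifo, skb);
                skb_get(skb);   /* FIFO holds its own reference until the
                                 * port-timestamp CQE pops and consumes it */
        }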
+ +static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn) +{ + mlx5_core_destroy_sq(mdev, sqn); +} + +static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa) +{ + int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq); + + ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)), + GFP_KERNEL, numa); + if (!ptpsq->skb_fifo.fifo) + return -ENOMEM; + + ptpsq->skb_fifo.pc = &ptpsq->skb_fifo_pc; + ptpsq->skb_fifo.cc = &ptpsq->skb_fifo_cc; + ptpsq->skb_fifo.mask = wq_sz - 1; + + return 0; +} + +static void mlx5e_ptp_drain_skb_fifo(struct mlx5e_skb_fifo *skb_fifo) +{ + while (*skb_fifo->pc != *skb_fifo->cc) { + struct sk_buff *skb = mlx5e_skb_fifo_pop(skb_fifo); + + dev_kfree_skb_any(skb); + } +} + +static void mlx5e_ptp_free_traffic_db(struct mlx5e_skb_fifo *skb_fifo) +{ + mlx5e_ptp_drain_skb_fifo(skb_fifo); + kvfree(skb_fifo->fifo); +} + +static int mlx5e_ptp_open_txqsq(struct mlx5e_port_ptp *c, u32 tisn, + int txq_ix, struct mlx5e_ptp_params *cparams, + int tc, struct mlx5e_ptpsq *ptpsq) +{ + struct mlx5e_sq_param *sqp = &cparams->txq_sq_param; + struct mlx5e_txqsq *txqsq = &ptpsq->txqsq; + struct mlx5e_create_sq_param csp = {}; + int err; + + err = mlx5e_ptp_alloc_txqsq(c, txq_ix, &cparams->params, sqp, + txqsq, tc, ptpsq); + if (err) + return err; + + csp.tisn = tisn; + csp.tis_lst_sz = 1; + csp.cqn = txqsq->cq.mcq.cqn; + csp.wq_ctrl = &txqsq->wq_ctrl; + csp.min_inline_mode = txqsq->min_inline_mode; + csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn; + + err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, &txqsq->sqn); + if (err) + goto err_free_txqsq; + + err = mlx5e_ptp_alloc_traffic_db(ptpsq, + dev_to_node(mlx5_core_dma_dev(c->mdev))); + if (err) + goto err_free_txqsq; + + return 0; + +err_free_txqsq: + mlx5e_free_txqsq(txqsq); + + return err; +} + +static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq) +{ + struct mlx5e_txqsq *sq = &ptpsq->txqsq; + struct mlx5_core_dev *mdev = sq->mdev; + + mlx5e_ptp_free_traffic_db(&ptpsq->skb_fifo); + cancel_work_sync(&sq->recover_work); + mlx5e_ptp_destroy_sq(mdev, sq->sqn); + mlx5e_free_txqsq_descs(sq); + mlx5e_free_txqsq(sq); +} + +static int mlx5e_ptp_open_txqsqs(struct mlx5e_port_ptp *c, + struct mlx5e_ptp_params *cparams) +{ + struct mlx5e_params *params = &cparams->params; + int ix_base; + int err; + int tc; + + ix_base = params->num_tc * params->num_channels; + + for (tc = 0; tc < params->num_tc; tc++) { + int txq_ix = ix_base + tc; + + err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, + cparams, tc, &c->ptpsq[tc]); + if (err) + goto close_txqsq; + } + + return 0; + +close_txqsq: + for (--tc; tc >= 0; tc--) + mlx5e_ptp_close_txqsq(&c->ptpsq[tc]); + + return err; +} + +static void mlx5e_ptp_close_txqsqs(struct mlx5e_port_ptp *c) +{ + int tc; + + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_ptp_close_txqsq(&c->ptpsq[tc]); +} + +static int mlx5e_ptp_open_cqs(struct mlx5e_port_ptp *c, + struct mlx5e_ptp_params *cparams) +{ + struct mlx5e_params *params = &cparams->params; + struct mlx5e_create_cq_param ccp = {}; + struct dim_cq_moder ptp_moder = {}; + struct mlx5e_cq_param *cq_param; + int err; + int tc; + + ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev)); + ccp.ch_stats = c->stats; + ccp.napi = &c->napi; + ccp.ix = c->ix; + + cq_param = &cparams->txq_sq_param.cqp; + + for (tc = 0; tc < params->num_tc; tc++) { + struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq; + + err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq); + if (err) + goto out_err_txqsq_cq; + } + + for (tc 
= 0; tc < params->num_tc; tc++) { + struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq; + struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc]; + + err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq); + if (err) + goto out_err_ts_cq; + + ptpsq->cq_stats = &c->priv->port_ptp_stats.cq[tc]; + } + + return 0; + +out_err_ts_cq: + for (--tc; tc >= 0; tc--) + mlx5e_close_cq(&c->ptpsq[tc].ts_cq); + tc = params->num_tc; +out_err_txqsq_cq: + for (--tc; tc >= 0; tc--) + mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq); + + return err; +} + +static void mlx5e_ptp_close_cqs(struct mlx5e_port_ptp *c) +{ + int tc; + + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_close_cq(&c->ptpsq[tc].ts_cq); + + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq); +} + +static void mlx5e_ptp_build_sq_param(struct mlx5e_priv *priv, + struct mlx5e_params *params, + struct mlx5e_sq_param *param) +{ + void *sqc = param->sqc; + void *wq; + + mlx5e_build_sq_param_common(priv, param); + + wq = MLX5_ADDR_OF(sqc, sqc, wq); + MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); + param->stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); + mlx5e_build_tx_cq_param(priv, params, ¶m->cqp); +} + +static void mlx5e_ptp_build_params(struct mlx5e_port_ptp *c, + struct mlx5e_ptp_params *cparams, + struct mlx5e_params *orig) +{ + struct mlx5e_params *params = &cparams->params; + + params->tx_min_inline_mode = orig->tx_min_inline_mode; + params->num_channels = orig->num_channels; + params->hard_mtu = orig->hard_mtu; + params->sw_mtu = orig->sw_mtu; + params->num_tc = orig->num_tc; + + /* SQ */ + params->log_sq_size = orig->log_sq_size; + + mlx5e_ptp_build_sq_param(c->priv, params, &cparams->txq_sq_param); +} + +static int mlx5e_ptp_open_queues(struct mlx5e_port_ptp *c, + struct mlx5e_ptp_params *cparams) +{ + int err; + + err = mlx5e_ptp_open_cqs(c, cparams); + if (err) + return err; + + napi_enable(&c->napi); + + err = mlx5e_ptp_open_txqsqs(c, cparams); + if (err) + goto disable_napi; + + return 0; + +disable_napi: + napi_disable(&c->napi); + mlx5e_ptp_close_cqs(c); + + return err; +} + +static void mlx5e_ptp_close_queues(struct mlx5e_port_ptp *c) +{ + mlx5e_ptp_close_txqsqs(c); + napi_disable(&c->napi); + mlx5e_ptp_close_cqs(c); +} + +int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, + u8 lag_port, struct mlx5e_port_ptp **cp) +{ + struct net_device *netdev = priv->netdev; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_ptp_params *cparams; + struct mlx5e_port_ptp *c; + unsigned int irq; + int err; + int eqn; + + err = mlx5_vector2eqn(priv->mdev, 0, &eqn, &irq); + if (err) + return err; + + c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev))); + cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL); + if (!c || !cparams) + return -ENOMEM; + + c->priv = priv; + c->mdev = priv->mdev; + c->tstamp = &priv->tstamp; + c->ix = 0; + c->pdev = mlx5_core_dma_dev(priv->mdev); + c->netdev = priv->netdev; + c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); + c->num_tc = params->num_tc; + c->stats = &priv->port_ptp_stats.ch; + c->irq_desc = irq_to_desc(irq); + c->lag_port = lag_port; + + netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll, 64); + + mlx5e_ptp_build_params(c, cparams, params); + + err = mlx5e_ptp_open_queues(c, cparams); + if (unlikely(err)) + goto err_napi_del; + + *cp = c; + + kvfree(cparams); + + return 0; + +err_napi_del: + netif_napi_del(&c->napi); + + kvfree(cparams); + kvfree(c); + return err; +} + +void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c) +{ + 
mlx5e_ptp_close_queues(c); + netif_napi_del(&c->napi); + + kvfree(c); +} + +void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c) +{ + int tc; + + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq); +} + +void mlx5e_ptp_deactivate_channel(struct mlx5e_port_ptp *c) +{ + int tc; + + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h new file mode 100644 index 000000000000..28aa5ae118f4 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies. */ + +#ifndef __MLX5_EN_PTP_H__ +#define __MLX5_EN_PTP_H__ + +#include "en.h" +#include "en/params.h" +#include "en_stats.h" + +struct mlx5e_ptpsq { + struct mlx5e_txqsq txqsq; + struct mlx5e_cq ts_cq; + u16 skb_fifo_cc; + u16 skb_fifo_pc; + struct mlx5e_skb_fifo skb_fifo; + struct mlx5e_ptp_cq_stats *cq_stats; +}; + +struct mlx5e_port_ptp { + /* data path */ + struct mlx5e_ptpsq ptpsq[MLX5E_MAX_NUM_TC]; + struct napi_struct napi; + struct device *pdev; + struct net_device *netdev; + __be32 mkey_be; + u8 num_tc; + u8 lag_port; + + /* data path - accessed per napi poll */ + struct irq_desc *irq_desc; + struct mlx5e_ch_stats *stats; + + /* control */ + struct mlx5e_priv *priv; + struct mlx5_core_dev *mdev; + struct hwtstamp_config *tstamp; + DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES); + int ix; +}; + +struct mlx5e_ptp_params { + struct mlx5e_params params; + struct mlx5e_sq_param txq_sq_param; +}; + +int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, + u8 lag_port, struct mlx5e_port_ptp **cp); +void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c); +void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c); +void mlx5e_ptp_deactivate_channel(struct mlx5e_port_ptp *c); + +enum { + MLX5E_SKB_CB_CQE_HWTSTAMP = BIT(0), + MLX5E_SKB_CB_PORT_HWTSTAMP = BIT(1), +}; + +void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type, + ktime_t hwtstamp, + struct mlx5e_ptp_cq_stats *cq_stats); + +void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb); +#endif /* __MLX5_EN_PTP_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index 9913647a1faf..d80bbd17e5f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -87,7 +87,7 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx) /* At this point, both the rq and the icosq are disabled */ - err = mlx5e_health_sq_to_ready(icosq->channel, icosq->sqn); + err = mlx5e_health_sq_to_ready(mdev, dev, icosq->sqn); if (err) goto out; @@ -146,17 +146,16 @@ out: static int mlx5e_rx_reporter_timeout_recover(void *ctx) { - struct mlx5e_icosq *icosq; struct mlx5_eq_comp *eq; struct mlx5e_rq *rq; int err; rq = ctx; - icosq = &rq->channel->icosq; eq = rq->cq.mcq.eq; - err = mlx5e_health_channel_eq_recover(eq, rq->channel); - if (err) - clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state); + + err = mlx5e_health_channel_eq_recover(rq->netdev, eq, rq->cq.ch_stats); + if (err && rq->icosq) + clear_bit(MLX5E_SQ_STATE_ENABLED, &rq->icosq->state); return err; } @@ -233,21 +232,13 @@ static int mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state, static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq 
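/*
 * Editor's note on the new ptp.h above: a PTP skb completes twice, once
 * on the regular txqsq CQ and once on the dedicated ts_cq that carries
 * the port timestamp, and the MLX5E_SKB_CB_*_HWTSTAMP bits let the two
 * completions arrive in either order. A plausible handler shape
 * (sketch only; the cb field names here are assumptions, not the
 * driver's):
 *
 *	cb->state |= hwtstamp_type;
 *	if (hwtstamp_type == MLX5E_SKB_CB_PORT_HWTSTAMP)
 *		cb->hwtstamp = hwtstamp;
 *	if (cb->state == (MLX5E_SKB_CB_CQE_HWTSTAMP |
 *			  MLX5E_SKB_CB_PORT_HWTSTAMP))
 *		// both seen: report the port time via skb_tstamp_tx()
 *
 * The skb_fifo_pc/skb_fifo_cc pair indexes a ring of skbs still waiting
 * for their port timestamp; the generic fifo helpers appear in the
 * txrx.h hunk further down.
 */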
*rq, struct devlink_fmsg *fmsg) { - struct mlx5e_priv *priv = rq->channel->priv; - struct mlx5e_icosq *icosq; - u8 icosq_hw_state; u16 wqe_counter; int wqes_sz; u8 hw_state; u16 wq_head; int err; - icosq = &rq->channel->icosq; - err = mlx5e_query_rq_state(priv->mdev, rq->rqn, &hw_state); - if (err) - return err; - - err = mlx5_core_query_sq_state(priv->mdev, icosq->sqn, &icosq_hw_state); + err = mlx5e_query_rq_state(rq->mdev, rq->rqn, &hw_state); if (err) return err; @@ -259,7 +250,7 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, if (err) return err; - err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->channel->ix); + err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix); if (err) return err; @@ -295,9 +286,18 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, if (err) return err; - err = mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg); - if (err) - return err; + if (rq->icosq) { + struct mlx5e_icosq *icosq = rq->icosq; + u8 icosq_hw_state; + + err = mlx5_core_query_sq_state(rq->mdev, icosq->sqn, &icosq_hw_state); + if (err) + return err; + + err = mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg); + if (err) + return err; + } err = devlink_fmsg_obj_nest_end(fmsg); if (err) @@ -557,25 +557,29 @@ static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter, void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq) { - struct mlx5e_icosq *icosq = &rq->channel->icosq; - struct mlx5e_priv *priv = rq->channel->priv; + char icosq_str[MLX5E_REPORTER_PER_Q_MAX_LEN] = {}; char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; + struct mlx5e_icosq *icosq = rq->icosq; + struct mlx5e_priv *priv = rq->priv; struct mlx5e_err_ctx err_ctx = {}; err_ctx.ctx = rq; err_ctx.recover = mlx5e_rx_reporter_timeout_recover; err_ctx.dump = mlx5e_rx_reporter_dump_rq; + + if (icosq) + snprintf(icosq_str, sizeof(icosq_str), "ICOSQ: 0x%x, ", icosq->sqn); snprintf(err_str, sizeof(err_str), - "RX timeout on channel: %d, ICOSQ: 0x%x RQ: 0x%x, CQ: 0x%x", - icosq->channel->ix, icosq->sqn, rq->rqn, rq->cq.mcq.cqn); + "RX timeout on channel: %d, %sRQ: 0x%x, CQ: 0x%x", + rq->ix, icosq_str, rq->rqn, rq->cq.mcq.cqn); mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx); } void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq) { - struct mlx5e_priv *priv = rq->channel->priv; char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; + struct mlx5e_priv *priv = rq->priv; struct mlx5e_err_ctx err_ctx = {}; err_ctx.ctx = rq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 8be6eaa3eeb1..d7275c84313e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019 Mellanox Technologies. */ #include "health.h" +#include "en/ptp.h" static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) { @@ -15,7 +16,7 @@ static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) msleep(20); } - netdev_err(sq->channel->netdev, + netdev_err(sq->netdev, "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n", sq->sqn, sq->cc, sq->pc); @@ -41,8 +42,8 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx) int err; sq = ctx; - mdev = sq->channel->mdev; - dev = sq->channel->netdev; + mdev = sq->mdev; + dev = sq->netdev; if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) return 0; @@ -68,7 +69,7 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx) * pending WQEs. 
SQ can safely reset the SQ. */ - err = mlx5e_health_sq_to_ready(sq->channel, sq->sqn); + err = mlx5e_health_sq_to_ready(mdev, dev, sq->sqn); if (err) goto out; @@ -99,8 +100,8 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx) to_ctx = ctx; sq = to_ctx->sq; eq = sq->cq.mcq.eq; - priv = sq->channel->priv; - err = mlx5e_health_channel_eq_recover(eq, sq->channel); + priv = sq->priv; + err = mlx5e_health_channel_eq_recover(sq->netdev, eq, sq->cq.ch_stats); if (!err) { to_ctx->status = 0; /* this sq recovered */ return err; @@ -141,11 +142,11 @@ static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter, } static int -mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, - struct mlx5e_txqsq *sq, int tc) +mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg, + struct mlx5e_txqsq *sq, int tc) { - struct mlx5e_priv *priv = sq->channel->priv; bool stopped = netif_xmit_stopped(sq->txq); + struct mlx5e_priv *priv = sq->priv; u8 state; int err; @@ -153,14 +154,6 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, if (err) return err; - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "tc", tc); if (err) return err; @@ -193,7 +186,24 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, if (err) return err; - err = mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg); + return mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg); +} + +static int +mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, + struct mlx5e_txqsq *sq, int tc) +{ + int err; + + err = devlink_fmsg_obj_nest_start(fmsg); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix); + if (err) + return err; + + err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, sq, tc); if (err) return err; @@ -204,49 +214,147 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, return 0; } -static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, - struct devlink_fmsg *fmsg, - struct netlink_ext_ack *extack) +static int +mlx5e_tx_reporter_build_diagnose_output_ptpsq(struct devlink_fmsg *fmsg, + struct mlx5e_ptpsq *ptpsq, int tc) { - struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); - struct mlx5e_txqsq *generic_sq = priv->txq2sq[0]; - u32 sq_stride, sq_sz; + int err; - int i, tc, err = 0; + err = devlink_fmsg_obj_nest_start(fmsg); + if (err) + return err; - mutex_lock(&priv->state_lock); + err = devlink_fmsg_string_pair_put(fmsg, "channel", "ptp"); + if (err) + return err; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) - goto unlock; + err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, &ptpsq->txqsq, tc); + if (err) + return err; - sq_sz = mlx5_wq_cyc_get_size(&generic_sq->wq); - sq_stride = MLX5_SEND_WQE_BB; + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS"); + if (err) + return err; - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config"); + err = mlx5e_health_cq_diag_fmsg(&ptpsq->ts_cq, fmsg); if (err) - goto unlock; + return err; + + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); + if (err) + return err; + + err = devlink_fmsg_obj_nest_end(fmsg); + if (err) + return err; + + return 0; +} + +static int +mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg, + struct mlx5e_txqsq *txqsq) +{ + u32 sq_stride, sq_sz; + int err; err = 
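/*
 * Editor's note: the devlink fmsg calls used throughout these reporter
 * hunks build a nested key/value document, so every *_nest_start() must
 * be balanced by a matching *_nest_end(), and each step can fail --
 * hence the repetitive "if (err) return err;" after every call. The
 * shape of one diagnose object, reduced to its skeleton:
 *
 *	err = devlink_fmsg_obj_nest_start(fmsg);
 *	if (err)
 *		return err;
 *	err = devlink_fmsg_u32_pair_put(fmsg, "tc", tc);
 *	if (err)
 *		return err;
 *	// ...more pairs for sqn, HW state, stopped...
 *	return devlink_fmsg_obj_nest_end(fmsg);
 *
 * Splitting out ..._sq_common() lets the regular and PTP SQs share the
 * pairs while each wrapper owns its own nesting.
 */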
mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ"); if (err) - goto unlock; + return err; + + sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq); + sq_stride = MLX5_SEND_WQE_BB; err = devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride); if (err) - goto unlock; + return err; err = devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz); if (err) - goto unlock; + return err; - err = mlx5e_health_cq_common_diag_fmsg(&generic_sq->cq, fmsg); + err = mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg); if (err) - goto unlock; + return err; + + return mlx5e_health_fmsg_named_obj_nest_end(fmsg); +} + +static int +mlx5e_tx_reporter_diagnose_generic_tx_port_ts(struct devlink_fmsg *fmsg, + struct mlx5e_ptpsq *ptpsq) +{ + int err; + + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS"); + if (err) + return err; + + err = mlx5e_health_cq_common_diag_fmsg(&ptpsq->ts_cq, fmsg); + if (err) + return err; + + return mlx5e_health_fmsg_named_obj_nest_end(fmsg); +} + +static int +mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg) +{ + struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); + struct mlx5e_txqsq *generic_sq = priv->txq2sq[0]; + struct mlx5e_ptpsq *generic_ptpsq; + int err; + + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config"); + if (err) + return err; + + err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, generic_sq); + if (err) + return err; + + generic_ptpsq = priv->channels.port_ptp ? + &priv->channels.port_ptp->ptpsq[0] : + NULL; + if (!generic_ptpsq) + goto out; + + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP"); + if (err) + return err; + + err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, &generic_ptpsq->txqsq); + if (err) + return err; + + err = mlx5e_tx_reporter_diagnose_generic_tx_port_ts(fmsg, generic_ptpsq); + if (err) + return err; err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); if (err) + return err; + +out: + return mlx5e_health_fmsg_named_obj_nest_end(fmsg); +} + +static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, + struct netlink_ext_ack *extack) +{ + struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); + struct mlx5e_port_ptp *ptp_ch = priv->channels.port_ptp; + + int i, tc, err = 0; + + mutex_lock(&priv->state_lock); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) goto unlock; - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); + err = mlx5e_tx_reporter_diagnose_common_config(reporter, fmsg); if (err) goto unlock; @@ -265,6 +373,19 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, goto unlock; } } + + if (!ptp_ch) + goto close_sqs_nest; + + for (tc = 0; tc < priv->channels.params.num_tc; tc++) { + err = mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg, + &ptp_ch->ptpsq[tc], + tc); + if (err) + goto unlock; + } + +close_sqs_nest: err = devlink_fmsg_arr_pair_nest_end(fmsg); if (err) goto unlock; @@ -338,6 +459,7 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg) { + struct mlx5e_port_ptp *ptp_ch = priv->channels.port_ptp; struct mlx5_rsc_key key = {}; int i, tc, err; @@ -373,6 +495,17 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, return err; } } + + if (ptp_ch) { + for (tc = 0; tc < priv->channels.params.num_tc; tc++) { + struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq; + + err = mlx5e_health_queue_dump(priv, fmsg, 
sq->sqn, "PTP SQ"); + if (err) + return err; + } + } + return devlink_fmsg_arr_pair_nest_end(fmsg); } @@ -396,8 +529,8 @@ static int mlx5e_tx_reporter_dump(struct devlink_health_reporter *reporter, void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq) { - struct mlx5e_priv *priv = sq->channel->priv; char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; + struct mlx5e_priv *priv = sq->priv; struct mlx5e_err_ctx err_ctx = {}; err_ctx.ctx = sq; @@ -410,9 +543,9 @@ void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq) int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq) { - struct mlx5e_priv *priv = sq->channel->priv; char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; struct mlx5e_tx_timeout_ctx to_ctx = {}; + struct mlx5e_priv *priv = sq->priv; struct mlx5e_err_ctx err_ctx = {}; to_ctx.sq = sq; @@ -421,7 +554,7 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq) err_ctx.dump = mlx5e_tx_reporter_dump_sq; snprintf(err_str, sizeof(err_str), "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u", - sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc, + sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc, jiffies_to_usecs(jiffies - sq->txq->trans_start)); mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 07ee1d236ab3..7943eb30b837 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -24,6 +24,8 @@ #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start)) +#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND) + enum mlx5e_icosq_wqe_type { MLX5E_ICOSQ_WQE_NOP, MLX5E_ICOSQ_WQE_UMR_RX, @@ -250,21 +252,24 @@ mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size, dma->type = map_type; } -static inline struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_txqsq *sq, u16 i) +static inline +struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_skb_fifo *fifo, u16 i) { - return &sq->db.skb_fifo[i & sq->skb_fifo_mask]; + return &fifo->fifo[i & fifo->mask]; } -static inline void mlx5e_skb_fifo_push(struct mlx5e_txqsq *sq, struct sk_buff *skb) +static inline +void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo, struct sk_buff *skb) { - struct sk_buff **skb_item = mlx5e_skb_fifo_get(sq, sq->skb_fifo_pc++); + struct sk_buff **skb_item = mlx5e_skb_fifo_get(fifo, (*fifo->pc)++); *skb_item = skb; } -static inline struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_txqsq *sq) +static inline +struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo) { - return *mlx5e_skb_fifo_get(sq, sq->skb_fifo_cc++); + return *mlx5e_skb_fifo_get(fifo, (*fifo->cc)++); } static inline void @@ -308,7 +313,7 @@ static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn, ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1); - netdev_err(cq->channel->netdev, + netdev_err(cq->netdev, "Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n", cq->mcq.cqn, ci, qn, get_cqe_opcode((struct mlx5_cqe64 *)err_cqe), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index be3465ba38ca..d87c345878d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -49,8 +49,11 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_channel *c) { struct 
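/*
 * Editor's note on the skb fifo rework in txrx.h above: the ring uses
 * free-running u16 producer/consumer counters with a power-of-two mask,
 * so occupancy is simply (pc - cc) and wrap-around needs no modulo.
 * Hiding pc/cc behind pointers lets the same helpers serve the regular
 * txqsq and the new PTP SQ, which keep the counters in different
 * structs. The idiom, stand-alone:
 *
 *	struct fifo { struct sk_buff **buf; u16 *pc, *cc; u16 mask; };
 *
 *	static struct sk_buff **slot(struct fifo *f, u16 i)
 *	{
 *		return &f->buf[i & f->mask];  // mask = pow2 size - 1
 *	}
 *	// push: *slot(f, (*f->pc)++) = skb;
 *	// pop:  skb = *slot(f, (*f->cc)++);
 */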
mlx5e_channel_param *cparam; + struct mlx5e_create_cq_param ccp; int err; + mlx5e_build_create_cq_param(&ccp, c); + if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev)) return -EINVAL; @@ -60,7 +63,8 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, mlx5e_build_xsk_cparam(priv, params, xsk, cparam); - err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rq.cqp, &c->xskrq.cq); + err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp, + &c->xskrq.cq); if (unlikely(err)) goto err_free_cparam; @@ -68,7 +72,8 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, if (unlikely(err)) goto err_close_rx_cq; - err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &c->xsksq.cq); + err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp, + &c->xsksq.cq); if (unlikely(err)) goto err_close_rq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index f51c04284e4d..2b51d3222ca1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -276,7 +276,7 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, if (WARN_ON_ONCE(tls_ctx->netdev != netdev)) goto err_out; - if (mlx5_accel_is_ktls_tx(sq->channel->mdev)) + if (mlx5_accel_is_ktls_tx(sq->mdev)) return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state); /* FPGA */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 42e61dc28ead..d9076d543104 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -41,9 +41,7 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, { struct mlx5_core_dev *mdev = priv->mdev; - strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, DRIVER_VERSION, - sizeof(drvinfo->version)); + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%04d (%.16s)", fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev), @@ -1946,6 +1944,38 @@ static int set_pflag_skb_tx_mpwqe(struct net_device *netdev, bool enable) return set_pflag_tx_mpwqe_common(netdev, MLX5E_PFLAG_SKB_TX_MPWQE, enable); } +static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_channels new_channels = {}; + int err; + + if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn)) + return -EOPNOTSUPP; + + new_channels.params = priv->channels.params; + MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_TX_PORT_TS, enable); + /* No need to verify SQ stop room as + * ptpsq.txqsq.stop_room <= generic_sq->stop_room, and both + * has the same log_sq_size. 
+ */ + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + priv->channels.params = new_channels.params; + err = mlx5e_num_channels_changed(priv); + goto out; + } + + err = mlx5e_safe_switch_channels(priv, &new_channels, + mlx5e_num_channels_changed_ctx, NULL); +out: + if (!err) + priv->port_ptp_opened = true; + + return err; +} + static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = { { "rx_cqe_moder", set_pflag_rx_cqe_based_moder }, { "tx_cqe_moder", set_pflag_tx_cqe_based_moder }, @@ -1954,6 +1984,7 @@ static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = { { "rx_no_csum_complete", set_pflag_rx_no_csum_complete }, { "xdp_tx_mpwqe", set_pflag_xdp_tx_mpwqe }, { "skb_tx_mpwqe", set_pflag_skb_tx_mpwqe }, + { "tx_port_ts", set_pflag_tx_port_ts }, }; static int mlx5e_handle_pflag(struct net_device *netdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 1f48f99c0997..fa8149f6eb08 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -772,25 +772,31 @@ static struct mlx5e_etype_proto ttc_tunnel_rules[] = { }; -bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type) +u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt) +{ + return ttc_tunnel_rules[tt].proto; +} + +static bool mlx5e_tunnel_proto_supported_rx(struct mlx5_core_dev *mdev, u8 proto_type) { switch (proto_type) { case IPPROTO_GRE: return MLX5_CAP_ETH(mdev, tunnel_stateless_gre); case IPPROTO_IPIP: case IPPROTO_IPV6: - return MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip); + return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) || + MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_rx)); default: return false; } } -bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev) +static bool mlx5e_tunnel_any_rx_proto_supported(struct mlx5_core_dev *mdev) { int tt; for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { - if (mlx5e_tunnel_proto_supported(mdev, ttc_tunnel_rules[tt].proto)) + if (mlx5e_tunnel_proto_supported_rx(mdev, ttc_tunnel_rules[tt].proto)) return true; } return false; @@ -798,7 +804,7 @@ bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev) bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) { - return (mlx5e_any_tunnel_proto_supported(mdev) && + return (mlx5e_tunnel_any_rx_proto_supported(mdev) && MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version)); } @@ -899,8 +905,8 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv, dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest.ft = params->inner_ttc->ft.t; for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { - if (!mlx5e_tunnel_proto_supported(priv->mdev, - ttc_tunnel_rules[tt].proto)) + if (!mlx5e_tunnel_proto_supported_rx(priv->mdev, + ttc_tunnel_rules[tt].proto)) continue; trules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, ttc_tunnel_rules[tt].etype, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 427fc376fe1a..03831650f655 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -64,6 +64,7 @@ #include "en/hv_vhca_stats.h" #include "en/devlink.h" #include "lib/mlx5.h" +#include "en/ptp.h" bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) { @@ -412,9 +413,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->wq_type = params->rq_wq_type; rq->pdev = c->pdev; 
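/*
 * Editor's note: this assignment block is the heart of the refactor --
 * the RQ stops keeping a back-pointer to its owning channel and instead
 * copies the few things it actually uses (priv, netdev, icosq, ix,
 * mdev). That is what lets the drop RQ, which has no channel at all,
 * share the same helpers: see mlx5e_free_rq() below losing its
 * "drop_rq has neither channel nor xdp_prog" special case, with the
 * drop RQ getting a dedicated mlx5e_free_drop_rq() later in the patch.
 * The dependency change in one line:
 *
 *	// before: rq->channel->priv->state_lock  (channel NULL for drop RQ)
 *	// after:  rq->priv->state_lock           (always valid)
 */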
rq->netdev = c->netdev; + rq->priv = c->priv; rq->tstamp = c->tstamp; rq->clock = &mdev->clock; - rq->channel = c; + rq->icosq = &c->icosq; rq->ix = c->ix; rq->mdev = mdev; rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); @@ -613,14 +615,11 @@ err_rq_xdp_prog: static void mlx5e_free_rq(struct mlx5e_rq *rq) { - struct mlx5e_channel *c = rq->channel; - struct bpf_prog *old_prog = NULL; + struct bpf_prog *old_prog; int i; - /* drop_rq has neither channel nor xdp_prog. */ - if (c) - old_prog = rcu_dereference_protected(rq->xdp_prog, - lockdep_is_held(&c->priv->state_lock)); + old_prog = rcu_dereference_protected(rq->xdp_prog, + lockdep_is_held(&rq->priv->state_lock)); if (old_prog) bpf_prog_put(old_prog); @@ -720,9 +719,7 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state) static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable) { - struct mlx5e_channel *c = rq->channel; - struct mlx5e_priv *priv = c->priv; - struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_core_dev *mdev = rq->mdev; void *in; void *rqc; @@ -751,8 +748,7 @@ static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable) static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd) { - struct mlx5e_channel *c = rq->channel; - struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_core_dev *mdev = rq->mdev; void *in; void *rqc; int inlen; @@ -786,7 +782,6 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq) int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time) { unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time); - struct mlx5e_channel *c = rq->channel; u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq)); @@ -797,8 +792,8 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time) msleep(20); } while (time_before(jiffies, exp_time)); - netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n", - c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes); + netdev_warn(rq->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n", + rq->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes); mlx5e_reporter_rx_timeout(rq); return -ETIMEDOUT; @@ -913,7 +908,7 @@ err_free_rq: void mlx5e_activate_rq(struct mlx5e_rq *rq) { set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); - mlx5e_trigger_irq(&rq->channel->icosq); + mlx5e_trigger_irq(rq->icosq); } void mlx5e_deactivate_rq(struct mlx5e_rq *rq) @@ -925,7 +920,7 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq) void mlx5e_close_rq(struct mlx5e_rq *rq) { cancel_work_sync(&rq->dim.work); - cancel_work_sync(&rq->channel->icosq.recover_work); + cancel_work_sync(&rq->icosq->recover_work); cancel_work_sync(&rq->recover_work); mlx5e_destroy_rq(rq); mlx5e_free_rx_descs(rq); @@ -1089,14 +1084,14 @@ static void mlx5e_free_icosq(struct mlx5e_icosq *sq) mlx5_wq_destroy(&sq->wq_ctrl); } -static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq) +void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq) { kvfree(sq->db.wqe_info); - kvfree(sq->db.skb_fifo); + kvfree(sq->db.skb_fifo.fifo); kvfree(sq->db.dma_fifo); } -static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) +int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) { int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS; @@ -1104,24 +1099,26 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) sq->db.dma_fifo = kvzalloc_node(array_size(df_sz, sizeof(*sq->db.dma_fifo)), GFP_KERNEL, numa); - sq->db.skb_fifo = 
kvzalloc_node(array_size(df_sz, - sizeof(*sq->db.skb_fifo)), + sq->db.skb_fifo.fifo = kvzalloc_node(array_size(df_sz, + sizeof(*sq->db.skb_fifo.fifo)), GFP_KERNEL, numa); sq->db.wqe_info = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.wqe_info)), GFP_KERNEL, numa); - if (!sq->db.dma_fifo || !sq->db.skb_fifo || !sq->db.wqe_info) { + if (!sq->db.dma_fifo || !sq->db.skb_fifo.fifo || !sq->db.wqe_info) { mlx5e_free_txqsq_db(sq); return -ENOMEM; } sq->dma_fifo_mask = df_sz - 1; - sq->skb_fifo_mask = df_sz - 1; + + sq->db.skb_fifo.pc = &sq->skb_fifo_pc; + sq->db.skb_fifo.cc = &sq->skb_fifo_cc; + sq->db.skb_fifo.mask = df_sz - 1; return 0; } -static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work); static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, int txq_ix, struct mlx5e_params *params, @@ -1138,7 +1135,9 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, sq->tstamp = c->tstamp; sq->clock = &mdev->clock; sq->mkey_be = c->mkey_be; - sq->channel = c; + sq->netdev = c->netdev; + sq->mdev = c->mdev; + sq->priv = c->priv; sq->ch_ix = c->ix; sq->txq_ix = txq_ix; sq->uar_map = mdev->mlx5e_res.bfreg.map; @@ -1177,20 +1176,12 @@ err_sq_wq_destroy: return err; } -static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq) +void mlx5e_free_txqsq(struct mlx5e_txqsq *sq) { mlx5e_free_txqsq_db(sq); mlx5_wq_destroy(&sq->wq_ctrl); } -struct mlx5e_create_sq_param { - struct mlx5_wq_ctrl *wq_ctrl; - u32 cqn; - u32 tisn; - u8 tis_lst_sz; - u8 min_inline_mode; -}; - static int mlx5e_create_sq(struct mlx5_core_dev *mdev, struct mlx5e_sq_param *param, struct mlx5e_create_sq_param *csp, @@ -1215,6 +1206,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev, MLX5_SET(sqc, sqc, tis_lst_sz, csp->tis_lst_sz); MLX5_SET(sqc, sqc, tis_num_0, csp->tisn); MLX5_SET(sqc, sqc, cqn, csp->cqn); + MLX5_SET(sqc, sqc, ts_cqe_to_dest_cqn, csp->ts_cqe_to_dest_cqn); if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode); @@ -1272,10 +1264,10 @@ static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn) mlx5_core_destroy_sq(mdev, sqn); } -static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev, - struct mlx5e_sq_param *param, - struct mlx5e_create_sq_param *csp, - u32 *sqn) +int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev, + struct mlx5e_sq_param *param, + struct mlx5e_create_sq_param *csp, + u32 *sqn) { struct mlx5e_modify_sq_param msp = {0}; int err; @@ -1338,7 +1330,7 @@ err_free_txqsq: void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) { - sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix); + sq->txq = netdev_get_tx_queue(sq->netdev, sq->txq_ix); set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); netdev_tx_reset_queue(sq->txq); netif_tx_start_queue(sq->txq); @@ -1351,7 +1343,7 @@ void mlx5e_tx_disable_queue(struct netdev_queue *txq) __netif_tx_unlock_bh(txq); } -static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) +void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) { struct mlx5_wq_cyc *wq = &sq->wq; @@ -1376,8 +1368,7 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) { - struct mlx5e_channel *c = sq->channel; - struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_core_dev *mdev = sq->mdev; struct mlx5_rate_limit rl = {0}; cancel_work_sync(&sq->dim.work); @@ -1391,7 +1382,7 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) mlx5e_free_txqsq(sq); } -static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work) +void 
mlx5e_tx_err_cqe_work(struct work_struct *recover_work) { struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq, recover_work); @@ -1518,10 +1509,11 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq) mlx5e_free_xdpsq(sq); } -static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev, +static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv, struct mlx5e_cq_param *param, struct mlx5e_cq *cq) { + struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_core_cq *mcq = &cq->mcq; int eqn_not_used; unsigned int irqn; @@ -1554,25 +1546,27 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev, } cq->mdev = mdev; + cq->netdev = priv->netdev; + cq->priv = priv; return 0; } -static int mlx5e_alloc_cq(struct mlx5e_channel *c, +static int mlx5e_alloc_cq(struct mlx5e_priv *priv, struct mlx5e_cq_param *param, + struct mlx5e_create_cq_param *ccp, struct mlx5e_cq *cq) { - struct mlx5_core_dev *mdev = c->priv->mdev; int err; - param->wq.buf_numa_node = cpu_to_node(c->cpu); - param->wq.db_numa_node = cpu_to_node(c->cpu); - param->eq_ix = c->ix; + param->wq.buf_numa_node = ccp->node; + param->wq.db_numa_node = ccp->node; + param->eq_ix = ccp->ix; - err = mlx5e_alloc_cq_common(mdev, param, cq); + err = mlx5e_alloc_cq_common(priv, param, cq); - cq->napi = &c->napi; - cq->channel = c; + cq->napi = ccp->napi; + cq->ch_stats = ccp->ch_stats; return err; } @@ -1636,13 +1630,14 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq) mlx5_core_destroy_cq(cq->mdev, &cq->mcq); } -int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder, - struct mlx5e_cq_param *param, struct mlx5e_cq *cq) +int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder, + struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp, + struct mlx5e_cq *cq) { - struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_core_dev *mdev = priv->mdev; int err; - err = mlx5e_alloc_cq(c, param, cq); + err = mlx5e_alloc_cq(priv, param, ccp, cq); if (err) return err; @@ -1668,14 +1663,15 @@ void mlx5e_close_cq(struct mlx5e_cq *cq) static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, struct mlx5e_params *params, + struct mlx5e_create_cq_param *ccp, struct mlx5e_channel_param *cparam) { int err; int tc; for (tc = 0; tc < c->num_tc; tc++) { - err = mlx5e_open_cq(c, params->tx_cq_moderation, - &cparam->txq_sq.cqp, &c->sq[tc].cq); + err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->txq_sq.cqp, + ccp, &c->sq[tc].cq); if (err) goto err_close_tx_cqs; } @@ -1810,35 +1806,52 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return err; } +void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c) +{ + *ccp = (struct mlx5e_create_cq_param) { + .napi = &c->napi, + .ch_stats = c->stats, + .node = cpu_to_node(c->cpu), + .ix = c->ix, + }; +} + static int mlx5e_open_queues(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_channel_param *cparam) { struct dim_cq_moder icocq_moder = {0, 0}; + struct mlx5e_create_cq_param ccp; int err; - err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq.cqp, &c->async_icosq.cq); + mlx5e_build_create_cq_param(&ccp, c); + + err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp, + &c->async_icosq.cq); if (err) return err; - err = mlx5e_open_cq(c, icocq_moder, &cparam->async_icosq.cqp, &c->icosq.cq); + err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp, + &c->icosq.cq); if (err) goto err_close_async_icosq_cq; - err = mlx5e_open_tx_cqs(c, params, cparam); + err = 
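/*
 * Editor's note: mlx5e_build_create_cq_param() just above uses a C99
 * compound literal, so assigning it both fills the named members and
 * zeroes everything else in one statement. Bundling napi/ch_stats/node/
 * ix this way is what frees mlx5e_open_cq() from needing a full
 * struct mlx5e_channel -- the PTP pseudo-channel builds its own ccp.
 * The idiom in miniature:
 *
 *	*ccp = (struct mlx5e_create_cq_param) {
 *		.napi = &c->napi,
 *		.node = cpu_to_node(c->cpu),
 *	};  // unnamed members are zero-initialized
 */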
mlx5e_open_tx_cqs(c, params, &ccp, cparam); if (err) goto err_close_icosq_cq; - err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &c->xdpsq.cq); + err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp, + &c->xdpsq.cq); if (err) goto err_close_tx_cqs; - err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rq.cqp, &c->rq.cq); + err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp, + &c->rq.cq); if (err) goto err_close_xdp_tx_cqs; - err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation, - &cparam->xdp_sq.cqp, &c->rq_xdpsq.cq) : 0; + err = c->xdp ? mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, + &ccp, &c->rq_xdpsq.cq) : 0; if (err) goto err_close_rx_cq; @@ -2361,6 +2374,13 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, goto err_close_channels; } + if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS)) { + err = mlx5e_port_ptp_open(priv, &chs->params, chs->c[0]->lag_port, + &chs->port_ptp); + if (err) + goto err_close_channels; + } + mlx5e_health_channels_update(priv); kvfree(cparam); return 0; @@ -2382,6 +2402,9 @@ static void mlx5e_activate_channels(struct mlx5e_channels *chs) for (i = 0; i < chs->num; i++) mlx5e_activate_channel(chs->c[i]); + + if (chs->port_ptp) + mlx5e_ptp_activate_channel(chs->port_ptp); } #define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */ @@ -2408,6 +2431,9 @@ static void mlx5e_deactivate_channels(struct mlx5e_channels *chs) { int i; + if (chs->port_ptp) + mlx5e_ptp_deactivate_channel(chs->port_ptp); + for (i = 0; i < chs->num; i++) mlx5e_deactivate_channel(chs->c[i]); } @@ -2416,6 +2442,9 @@ void mlx5e_close_channels(struct mlx5e_channels *chs) { int i; + if (chs->port_ptp) + mlx5e_port_ptp_close(chs->port_ptp); + for (i = 0; i < chs->num; i++) mlx5e_close_channel(chs->c[i]); @@ -2901,6 +2930,8 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv) nch = priv->channels.params.num_channels; ntc = priv->channels.params.num_tc; num_txqs = nch * ntc; + if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS)) + num_txqs += ntc; num_rxqs = nch * priv->profile->rq_groups; mlx5e_netdev_set_tcs(netdev, nch, ntc); @@ -2974,14 +3005,13 @@ MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_num_channels_changed); static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) { - int i, ch; + int i, ch, tc, num_tc; ch = priv->channels.num; + num_tc = priv->channels.params.num_tc; for (i = 0; i < ch; i++) { - int tc; - - for (tc = 0; tc < priv->channels.params.num_tc; tc++) { + for (tc = 0; tc < num_tc; tc++) { struct mlx5e_channel *c = priv->channels.c[i]; struct mlx5e_txqsq *sq = &c->sq[tc]; @@ -2989,10 +3019,29 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) priv->channel_tc2realtxq[i][tc] = i + tc * ch; } } + + if (!priv->channels.port_ptp) + return; + + for (tc = 0; tc < num_tc; tc++) { + struct mlx5e_port_ptp *c = priv->channels.port_ptp; + struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq; + + priv->txq2sq[sq->txq_ix] = sq; + priv->port_ptp_tc2realtxq[tc] = priv->num_tc_x_num_ch + tc; + } +} + +static void mlx5e_update_num_tc_x_num_ch(struct mlx5e_priv *priv) +{ + /* Sync with mlx5e_select_queue. 
*/ + WRITE_ONCE(priv->num_tc_x_num_ch, + priv->channels.params.num_tc * priv->channels.num); } void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) { + mlx5e_update_num_tc_x_num_ch(priv); mlx5e_build_txq_maps(priv); mlx5e_activate_channels(&priv->channels); mlx5e_xdp_tx_enable(priv); @@ -3112,7 +3161,7 @@ static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev, mlx5_set_port_admin_status(mdev, state); - if (!MLX5_ESWITCH_MANAGER(mdev) || mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS) + if (mlx5_eswitch_mode(mdev) != MLX5_ESWITCH_LEGACY) return; if (state == MLX5_PORT_UP) @@ -3196,6 +3245,11 @@ int mlx5e_close(struct net_device *netdev) return err; } +static void mlx5e_free_drop_rq(struct mlx5e_rq *rq) +{ + mlx5_wq_destroy(&rq->wq_ctrl); +} + static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq, struct mlx5e_rq_param *param) @@ -3219,14 +3273,16 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev, return 0; } -static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev, +static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv, struct mlx5e_cq *cq, struct mlx5e_cq_param *param) { + struct mlx5_core_dev *mdev = priv->mdev; + param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); - return mlx5e_alloc_cq_common(mdev, param, cq); + return mlx5e_alloc_cq_common(priv, param, cq); } int mlx5e_open_drop_rq(struct mlx5e_priv *priv, @@ -3240,7 +3296,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv, mlx5e_build_drop_rq_param(priv, &rq_param); - err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param); + err = mlx5e_alloc_drop_cq(priv, cq, &cq_param); if (err) return err; @@ -3263,7 +3319,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv, return 0; err_free_rq: - mlx5e_free_rq(drop_rq); + mlx5e_free_drop_rq(drop_rq); err_destroy_cq: mlx5e_destroy_cq(cq); @@ -3277,7 +3333,7 @@ err_free_cq: void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq) { mlx5e_destroy_rq(drop_rq); - mlx5e_free_rq(drop_rq); + mlx5e_free_drop_rq(drop_rq); mlx5e_destroy_cq(&drop_rq->cq); mlx5e_free_cq(&drop_rq->cq); } @@ -4231,6 +4287,20 @@ int mlx5e_get_vf_stats(struct net_device *dev, } #endif +static bool mlx5e_tunnel_proto_supported_tx(struct mlx5_core_dev *mdev, u8 proto_type) +{ + switch (proto_type) { + case IPPROTO_GRE: + return MLX5_CAP_ETH(mdev, tunnel_stateless_gre); + case IPPROTO_IPIP: + case IPPROTO_IPV6: + return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) || + MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_tx)); + default: + return false; + } +} + static bool mlx5e_gre_tunnel_inner_proto_offload_supported(struct mlx5_core_dev *mdev, struct sk_buff *skb) { @@ -4273,7 +4343,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, break; case IPPROTO_IPIP: case IPPROTO_IPV6: - if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP)) + if (mlx5e_tunnel_proto_supported_tx(priv->mdev, IPPROTO_IPIP)) return features; break; case IPPROTO_UDP: @@ -4322,6 +4392,7 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) { struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, tx_timeout_work); + struct net_device *netdev = priv->netdev; int i; rtnl_lock(); @@ -4330,9 +4401,9 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) goto unlock; - for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) { + for (i = 0; i < netdev->real_num_tx_queues; i++) { struct netdev_queue *dev_queue = - 
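/*
 * Editor's note: the WRITE_ONCE() above pairs with a READ_ONCE() in the
 * queue-selection fast path (the in-tree comment names
 * mlx5e_select_queue), letting the reader pick up num_tc_x_num_ch
 * without the state lock while stopping the compiler from tearing or
 * caching the access. Typical pairing (sketch):
 *
 *	// updater, under priv->state_lock
 *	WRITE_ONCE(priv->num_tc_x_num_ch, num_tc * num_ch);
 *	// lockless reader
 *	n = READ_ONCE(priv->num_tc_x_num_ch);
 *
 * The tx-timeout loop here switches to netdev->real_num_tx_queues for
 * the same reason: the PTP SQs live past the nch * ntc range.
 */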
netdev_get_tx_queue(priv->netdev, i); + netdev_get_tx_queue(netdev, i); struct mlx5e_txqsq *sq = priv->txq2sq[i]; if (!netif_xmit_stopped(dev_queue)) @@ -4392,7 +4463,7 @@ static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog struct bpf_prog *old_prog; old_prog = rcu_replace_pointer(rq->xdp_prog, prog, - lockdep_is_held(&rq->channel->priv->state_lock)); + lockdep_is_held(&rq->priv->state_lock)); if (old_prog) bpf_prog_put(old_prog); } @@ -4577,31 +4648,6 @@ const struct net_device_ops mlx5e_netdev_ops = { .ndo_get_devlink_port = mlx5e_get_devlink_port, }; -static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) -{ - if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) - return -EOPNOTSUPP; - if (!MLX5_CAP_GEN(mdev, eth_net_offloads) || - !MLX5_CAP_GEN(mdev, nic_flow_table) || - !MLX5_CAP_ETH(mdev, csum_cap) || - !MLX5_CAP_ETH(mdev, max_lso_cap) || - !MLX5_CAP_ETH(mdev, vlan_cap) || - !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) || - MLX5_CAP_FLOWTABLE(mdev, - flow_table_properties_nic_receive.max_ft_level) - < 3) { - mlx5_core_warn(mdev, - "Not creating net device, some required device capabilities are missing\n"); - return -EOPNOTSUPP; - } - if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable)) - mlx5_core_warn(mdev, "Self loop back prevention is not supported\n"); - if (!MLX5_CAP_GEN(mdev, cq_moderation)) - mlx5_core_warn(mdev, "CQ moderation is not supported\n"); - - return 0; -} - void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, int num_channels) { @@ -4857,6 +4903,17 @@ void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv) priv->netdev->udp_tunnel_nic_info = &priv->nic_info; } +static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev) +{ + int tt; + + for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { + if (mlx5e_tunnel_proto_supported_tx(mdev, mlx5e_get_proto_by_tunnel_type(tt))) + return true; + } + return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)); +} + static void mlx5e_build_nic_netdev(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -4902,8 +4959,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) mlx5e_vxlan_set_netdev_info(priv); - if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) || - mlx5e_any_tunnel_proto_supported(mdev)) { + if (mlx5e_tunnel_any_tx_proto_supported(mdev)) { netdev->hw_enc_features |= NETIF_F_HW_CSUM; netdev->hw_enc_features |= NETIF_F_TSO; netdev->hw_enc_features |= NETIF_F_TSO6; @@ -4920,7 +4976,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) NETIF_F_GSO_UDP_TUNNEL_CSUM; } - if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) { + if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) { netdev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM; netdev->hw_enc_features |= NETIF_F_GSO_GRE | @@ -4929,7 +4985,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) NETIF_F_GSO_GRE_CSUM; } - if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_IPIP)) { + if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) { netdev->hw_features |= NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_IPXIP6; netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 | @@ -5314,10 +5370,14 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, void *ppriv) { struct net_device *netdev; + unsigned int ptp_txqs = 0; int err; + if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn)) + ptp_txqs = profile->max_tc; + netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), - nch * profile->max_tc, + nch * profile->max_tc + ptp_txqs, 
nch * profile->rq_groups); if (!netdev) { mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); @@ -5421,13 +5481,12 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv) free_netdev(netdev); } -/* mlx5e_attach and mlx5e_detach scope should be only creating/destroying - * hardware contexts and to connect it to the current netdev. - */ -static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv) +static int mlx5e_resume(struct auxiliary_device *adev) { - struct mlx5e_priv *priv = vpriv; + struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev); + struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev); struct net_device *netdev = priv->netdev; + struct mlx5_core_dev *mdev = edev->mdev; int err; if (netif_device_present(netdev)) @@ -5446,109 +5505,111 @@ static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv) return 0; } -static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv) +static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state) { - struct mlx5e_priv *priv = vpriv; + struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev); struct net_device *netdev = priv->netdev; - -#ifdef CONFIG_MLX5_ESWITCH - if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev) - return; -#endif + struct mlx5_core_dev *mdev = priv->mdev; if (!netif_device_present(netdev)) - return; + return -ENODEV; mlx5e_detach_netdev(priv); mlx5e_destroy_mdev_resources(mdev); + return 0; } -static void *mlx5e_add(struct mlx5_core_dev *mdev) +static int mlx5e_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) { + struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev); + struct mlx5_core_dev *mdev = edev->mdev; struct net_device *netdev; + pm_message_t state = {}; void *priv; int err; int nch; - err = mlx5e_check_required_hca_cap(mdev); - if (err) - return NULL; - -#ifdef CONFIG_MLX5_ESWITCH - if (MLX5_ESWITCH_MANAGER(mdev) && - mlx5_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) { - mlx5e_rep_register_vport_reps(mdev); - return mdev; - } -#endif - nch = mlx5e_get_max_num_channels(mdev); netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, NULL); if (!netdev) { mlx5_core_err(mdev, "mlx5e_create_netdev failed\n"); - return NULL; + return -ENOMEM; } dev_net_set(netdev, mlx5_core_net(mdev)); priv = netdev_priv(netdev); + dev_set_drvdata(&adev->dev, priv); - err = mlx5e_attach(mdev, priv); + err = mlx5e_resume(adev); if (err) { - mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err); + mlx5_core_err(mdev, "mlx5e_resume failed, %d\n", err); goto err_destroy_netdev; } err = register_netdev(netdev); if (err) { mlx5_core_err(mdev, "register_netdev failed, %d\n", err); - goto err_detach; + goto err_resume; } mlx5e_devlink_port_type_eth_set(priv); mlx5e_dcbnl_init_app(priv); - return priv; + return 0; -err_detach: - mlx5e_detach(mdev, priv); +err_resume: + mlx5e_suspend(adev, state); err_destroy_netdev: mlx5e_destroy_netdev(priv); - return NULL; + return err; } -static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv) +static void mlx5e_remove(struct auxiliary_device *adev) { - struct mlx5e_priv *priv; + struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev); + pm_message_t state = {}; -#ifdef CONFIG_MLX5_ESWITCH - if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev) { - mlx5e_rep_unregister_vport_reps(mdev); - return; - } -#endif - priv = vpriv; mlx5e_dcbnl_delete_app(priv); unregister_netdev(priv->netdev); - mlx5e_detach(mdev, vpriv); + mlx5e_suspend(adev, state); mlx5e_destroy_netdev(priv); } -static struct mlx5_interface 
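/*
 * Editor's note: the mlx5_interface registration being deleted here is
 * replaced by a standard auxiliary bus driver (declared just below):
 * probe/remove bind to the "mlx5_core.eth" device the core driver
 * publishes, and suspend/resume absorb the old attach/detach semantics.
 * The minimal shape of such a driver, as a sketch:
 *
 *	static const struct auxiliary_device_id id_table[] = {
 *		{ .name = MLX5_ADEV_NAME ".eth" },
 *		{}
 *	};
 *	static struct auxiliary_driver drv = {
 *		.name = "eth",        // suffix only; the bus adds modname
 *		.probe = eth_probe,   // hypothetical callbacks
 *		.remove = eth_remove,
 *		.id_table = id_table,
 *	};
 *	// module init: return auxiliary_driver_register(&drv);
 */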
mlx5e_interface = { - .add = mlx5e_add, - .remove = mlx5e_remove, - .attach = mlx5e_attach, - .detach = mlx5e_detach, - .protocol = MLX5_INTERFACE_PROTOCOL_ETH, +static const struct auxiliary_device_id mlx5e_id_table[] = { + { .name = MLX5_ADEV_NAME ".eth", }, + {}, +}; + +MODULE_DEVICE_TABLE(auxiliary, mlx5e_id_table); + +static struct auxiliary_driver mlx5e_driver = { + .name = "eth", + .probe = mlx5e_probe, + .remove = mlx5e_remove, + .suspend = mlx5e_suspend, + .resume = mlx5e_resume, + .id_table = mlx5e_id_table, }; -void mlx5e_init(void) +int mlx5e_init(void) { + int ret; + mlx5e_ipsec_build_inverse_table(); mlx5e_build_ptys2ethtool_map(); - mlx5_register_interface(&mlx5e_interface); + ret = mlx5e_rep_init(); + if (ret) + return ret; + + ret = auxiliary_driver_register(&mlx5e_driver); + if (ret) + mlx5e_rep_cleanup(); + return ret; } void mlx5e_cleanup(void) { - mlx5_unregister_interface(&mlx5e_interface); + auxiliary_driver_unregister(&mlx5e_driver); + mlx5e_rep_cleanup(); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 67247c33b9fd..989c70c1eda3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -64,7 +64,6 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev, strlcpy(drvinfo->driver, mlx5e_rep_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%04d (%.16s)", fw_rev_maj(mdev), fw_rev_min(mdev), @@ -1316,16 +1315,48 @@ static const struct mlx5_eswitch_rep_ops rep_ops = { .get_proto_dev = mlx5e_vport_rep_get_proto_dev }; -void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev) +static int mlx5e_rep_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) { - struct mlx5_eswitch *esw = mdev->priv.eswitch; + struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev); + struct mlx5_core_dev *mdev = edev->mdev; + struct mlx5_eswitch *esw; + esw = mdev->priv.eswitch; mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH); + return 0; } -void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev) +static void mlx5e_rep_remove(struct auxiliary_device *adev) { - struct mlx5_eswitch *esw = mdev->priv.eswitch; + struct mlx5_adev *vdev = container_of(adev, struct mlx5_adev, adev); + struct mlx5_core_dev *mdev = vdev->mdev; + struct mlx5_eswitch *esw; + esw = mdev->priv.eswitch; mlx5_eswitch_unregister_vport_reps(esw, REP_ETH); } + +static const struct auxiliary_device_id mlx5e_rep_id_table[] = { + { .name = MLX5_ADEV_NAME ".eth-rep", }, + {}, +}; + +MODULE_DEVICE_TABLE(auxiliary, mlx5e_rep_id_table); + +static struct auxiliary_driver mlx5e_rep_driver = { + .name = "eth-rep", + .probe = mlx5e_rep_probe, + .remove = mlx5e_rep_remove, + .id_table = mlx5e_rep_id_table, +}; + +int mlx5e_rep_init(void) +{ + return auxiliary_driver_register(&mlx5e_rep_driver); +} + +void mlx5e_rep_cleanup(void) +{ + auxiliary_driver_unregister(&mlx5e_rep_driver); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index 8932c387d46a..988195ab1c54 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -203,8 +203,8 @@ struct mlx5e_rep_sq { struct list_head list; }; -void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev); -void mlx5e_rep_unregister_vport_reps(struct 
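/*
 * Editor's note: mlx5e_init() above now stacks two registrations and
 * uses the usual ordering rule -- register A, then B, and on B's
 * failure unregister A; mlx5e_cleanup() tears the pair down in reverse.
 * Generic shape (sketch):
 *
 *	err = register_a();
 *	if (err)
 *		return err;
 *	err = register_b();
 *	if (err)
 *		unregister_a();  // unwind, propagate the error
 *	return err;
 *
 * The CONFIG_MLX5_ESWITCH=n stubs below keep that sequence compiling
 * when the rep driver is configured out.
 */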
mlx5_core_dev *mdev); +int mlx5e_rep_init(void); +void mlx5e_rep_cleanup(void); int mlx5e_rep_bond_init(struct mlx5e_rep_priv *rpriv); void mlx5e_rep_bond_cleanup(struct mlx5e_rep_priv *rpriv); int mlx5e_rep_bond_enslave(struct mlx5_eswitch *esw, struct net_device *netdev, @@ -232,6 +232,8 @@ static inline bool mlx5e_eswitch_rep(struct net_device *netdev) static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; } static inline int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) { return 0; } static inline void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) {} +static inline int mlx5e_rep_init(void) { return 0; }; +static inline void mlx5e_rep_cleanup(void) {}; #endif static inline bool mlx5e_is_vport_rep(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 6628a0197b4e..7f5851c61218 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -52,7 +52,6 @@ #include "en/xsk/rx.h" #include "en/health.h" #include "en/params.h" -#include "en/txrx.h" static struct sk_buff * mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, @@ -503,7 +502,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0]; - struct mlx5e_icosq *sq = &rq->channel->icosq; + struct mlx5e_icosq *sq = rq->icosq; struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5e_umr_wqe *umr_wqe; u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1); @@ -670,13 +669,13 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) sqcc += wi->num_wqebbs; if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { - netdev_WARN_ONCE(cq->channel->netdev, + netdev_WARN_ONCE(cq->netdev, "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe)); mlx5e_dump_error_cqe(&sq->cq, sq->sqn, (struct mlx5_err_cqe *)cqe); if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) - queue_work(cq->channel->priv->wq, &sq->recover_work); + queue_work(cq->priv->wq, &sq->recover_work); break; } @@ -697,7 +696,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) break; #endif default: - netdev_WARN_ONCE(cq->channel->netdev, + netdev_WARN_ONCE(cq->netdev, "Bad WQE type in ICOSQ WQE info: 0x%x\n", wi->wqe_type); } @@ -713,9 +712,9 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) { - struct mlx5e_icosq *sq = &rq->channel->icosq; struct mlx5_wq_ll *wq = &rq->mpwqe.wq; u8 umr_completed = rq->mpwqe.umr_completed; + struct mlx5e_icosq *sq = rq->icosq; int alloc_err = 0; u8 missing, i; u16 head; @@ -1218,11 +1217,12 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe; + struct mlx5e_priv *priv = rq->priv; if (cqe_syndrome_needs_recover(err_cqe->syndrome) && !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) { mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe); - queue_work(rq->channel->priv->wq, &rq->recover_work); + queue_work(priv->wq, &rq->recover_work); } } @@ -1771,8 +1771,9 @@ wq_free_wqe: int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk) { + struct net_device *netdev = rq->netdev; struct mlx5_core_dev *mdev = rq->mdev; - struct mlx5e_channel *c = rq->channel; + struct mlx5e_priv *priv = rq->priv; switch 
(rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: @@ -1784,15 +1785,15 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool rq->post_wqes = mlx5e_post_rx_mpwqes; rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; - rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe_mpwqe; + rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe; #ifdef CONFIG_MLX5_EN_IPSEC if (MLX5_IPSEC_DEV(mdev)) { - netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n"); + netdev_err(netdev, "MPWQE RQ with IPSec offload not supported\n"); return -EINVAL; } #endif if (!rq->handle_rx_cqe) { - netdev_err(c->netdev, "RX handler of MPWQE RQ is not set\n"); + netdev_err(netdev, "RX handler of MPWQE RQ is not set\n"); return -EINVAL; } break; @@ -1807,13 +1808,13 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool #ifdef CONFIG_MLX5_EN_IPSEC if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) && - c->priv->ipsec) + priv->ipsec) rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe; else #endif - rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe; + rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe; if (!rq->handle_rx_cqe) { - netdev_err(c->netdev, "RX handler of RQ is not set\n"); + netdev_err(netdev, "RX handler of RQ is not set\n"); return -EINVAL; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 78f6a6f0a7e0..2cf2042b37c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -248,6 +248,178 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw) return idx; } +static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s, + struct mlx5e_xdpsq_stats *xdpsq_red_stats) +{ + s->tx_xdp_xmit += xdpsq_red_stats->xmit; + s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe; + s->tx_xdp_inlnw += xdpsq_red_stats->inlnw; + s->tx_xdp_nops += xdpsq_red_stats->nops; + s->tx_xdp_full += xdpsq_red_stats->full; + s->tx_xdp_err += xdpsq_red_stats->err; + s->tx_xdp_cqes += xdpsq_red_stats->cqes; +} + +static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s, + struct mlx5e_xdpsq_stats *xdpsq_stats) +{ + s->rx_xdp_tx_xmit += xdpsq_stats->xmit; + s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe; + s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw; + s->rx_xdp_tx_nops += xdpsq_stats->nops; + s->rx_xdp_tx_full += xdpsq_stats->full; + s->rx_xdp_tx_err += xdpsq_stats->err; + s->rx_xdp_tx_cqe += xdpsq_stats->cqes; +} + +static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s, + struct mlx5e_xdpsq_stats *xsksq_stats) +{ + s->tx_xsk_xmit += xsksq_stats->xmit; + s->tx_xsk_mpwqe += xsksq_stats->mpwqe; + s->tx_xsk_inlnw += xsksq_stats->inlnw; + s->tx_xsk_full += xsksq_stats->full; + s->tx_xsk_err += xsksq_stats->err; + s->tx_xsk_cqes += xsksq_stats->cqes; +} + +static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s, + struct mlx5e_rq_stats *xskrq_stats) +{ + s->rx_xsk_packets += xskrq_stats->packets; + s->rx_xsk_bytes += xskrq_stats->bytes; + s->rx_xsk_csum_complete += xskrq_stats->csum_complete; + s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary; + s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner; + s->rx_xsk_csum_none += xskrq_stats->csum_none; + s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark; + s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets; + s->rx_xsk_xdp_drop += 
xskrq_stats->xdp_drop; + s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect; + s->rx_xsk_wqe_err += xskrq_stats->wqe_err; + s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes; + s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides; + s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop; + s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err; + s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks; + s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts; + s->rx_xsk_congst_umr += xskrq_stats->congst_umr; + s->rx_xsk_arfs_err += xskrq_stats->arfs_err; +} + +static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s, + struct mlx5e_rq_stats *rq_stats) +{ + s->rx_packets += rq_stats->packets; + s->rx_bytes += rq_stats->bytes; + s->rx_lro_packets += rq_stats->lro_packets; + s->rx_lro_bytes += rq_stats->lro_bytes; + s->rx_ecn_mark += rq_stats->ecn_mark; + s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; + s->rx_csum_none += rq_stats->csum_none; + s->rx_csum_complete += rq_stats->csum_complete; + s->rx_csum_complete_tail += rq_stats->csum_complete_tail; + s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow; + s->rx_csum_unnecessary += rq_stats->csum_unnecessary; + s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; + s->rx_xdp_drop += rq_stats->xdp_drop; + s->rx_xdp_redirect += rq_stats->xdp_redirect; + s->rx_wqe_err += rq_stats->wqe_err; + s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes; + s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides; + s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop; + s->rx_buff_alloc_err += rq_stats->buff_alloc_err; + s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks; + s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts; + s->rx_cache_reuse += rq_stats->cache_reuse; + s->rx_cache_full += rq_stats->cache_full; + s->rx_cache_empty += rq_stats->cache_empty; + s->rx_cache_busy += rq_stats->cache_busy; + s->rx_cache_waive += rq_stats->cache_waive; + s->rx_congst_umr += rq_stats->congst_umr; + s->rx_arfs_err += rq_stats->arfs_err; + s->rx_recover += rq_stats->recover; +#ifdef CONFIG_MLX5_EN_TLS + s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets; + s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes; + s->rx_tls_ctx += rq_stats->tls_ctx; + s->rx_tls_del += rq_stats->tls_del; + s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt; + s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start; + s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end; + s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip; + s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok; + s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip; + s->rx_tls_err += rq_stats->tls_err; +#endif +} + +static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s, + struct mlx5e_ch_stats *ch_stats) +{ + s->ch_events += ch_stats->events; + s->ch_poll += ch_stats->poll; + s->ch_arm += ch_stats->arm; + s->ch_aff_change += ch_stats->aff_change; + s->ch_force_irq += ch_stats->force_irq; + s->ch_eq_rearm += ch_stats->eq_rearm; +} + +static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s, + struct mlx5e_sq_stats *sq_stats) +{ + s->tx_packets += sq_stats->packets; + s->tx_bytes += sq_stats->bytes; + s->tx_tso_packets += sq_stats->tso_packets; + s->tx_tso_bytes += sq_stats->tso_bytes; + s->tx_tso_inner_packets += sq_stats->tso_inner_packets; + s->tx_tso_inner_bytes += 
sq_stats->tso_inner_bytes; + s->tx_added_vlan_packets += sq_stats->added_vlan_packets; + s->tx_nop += sq_stats->nop; + s->tx_mpwqe_blks += sq_stats->mpwqe_blks; + s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts; + s->tx_queue_stopped += sq_stats->stopped; + s->tx_queue_wake += sq_stats->wake; + s->tx_queue_dropped += sq_stats->dropped; + s->tx_cqe_err += sq_stats->cqe_err; + s->tx_recover += sq_stats->recover; + s->tx_xmit_more += sq_stats->xmit_more; + s->tx_csum_partial_inner += sq_stats->csum_partial_inner; + s->tx_csum_none += sq_stats->csum_none; + s->tx_csum_partial += sq_stats->csum_partial; +#ifdef CONFIG_MLX5_EN_TLS + s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets; + s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes; + s->tx_tls_ctx += sq_stats->tls_ctx; + s->tx_tls_ooo += sq_stats->tls_ooo; + s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes; + s->tx_tls_dump_packets += sq_stats->tls_dump_packets; + s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes; + s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data; + s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data; + s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req; +#endif + s->tx_cqes += sq_stats->cqes; +} + +static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv, + struct mlx5e_sw_stats *s) +{ + int i; + + if (!priv->port_ptp_opened) + return; + + mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->port_ptp_stats.ch); + + for (i = 0; i < priv->max_opened_tc; i++) { + mlx5e_stats_grp_sw_update_stats_sq(s, &priv->port_ptp_stats.sq[i]); + + /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */ + barrier(); + } +} + static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) { struct mlx5e_sw_stats *s = &priv->stats.sw; @@ -258,144 +430,25 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) for (i = 0; i < priv->max_nch; i++) { struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i]; - struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq; - struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq; - struct mlx5e_xdpsq_stats *xsksq_stats = &channel_stats->xsksq; - struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq; - struct mlx5e_rq_stats *rq_stats = &channel_stats->rq; - struct mlx5e_ch_stats *ch_stats = &channel_stats->ch; int j; - s->rx_packets += rq_stats->packets; - s->rx_bytes += rq_stats->bytes; - s->rx_lro_packets += rq_stats->lro_packets; - s->rx_lro_bytes += rq_stats->lro_bytes; - s->rx_ecn_mark += rq_stats->ecn_mark; - s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; - s->rx_csum_none += rq_stats->csum_none; - s->rx_csum_complete += rq_stats->csum_complete; - s->rx_csum_complete_tail += rq_stats->csum_complete_tail; - s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow; - s->rx_csum_unnecessary += rq_stats->csum_unnecessary; - s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; - s->rx_xdp_drop += rq_stats->xdp_drop; - s->rx_xdp_redirect += rq_stats->xdp_redirect; - s->rx_xdp_tx_xmit += xdpsq_stats->xmit; - s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe; - s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw; - s->rx_xdp_tx_nops += xdpsq_stats->nops; - s->rx_xdp_tx_full += xdpsq_stats->full; - s->rx_xdp_tx_err += xdpsq_stats->err; - s->rx_xdp_tx_cqe += xdpsq_stats->cqes; - s->rx_wqe_err += rq_stats->wqe_err; - s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes; - s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides; - s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop; - 
s->rx_buff_alloc_err += rq_stats->buff_alloc_err; - s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks; - s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts; - s->rx_cache_reuse += rq_stats->cache_reuse; - s->rx_cache_full += rq_stats->cache_full; - s->rx_cache_empty += rq_stats->cache_empty; - s->rx_cache_busy += rq_stats->cache_busy; - s->rx_cache_waive += rq_stats->cache_waive; - s->rx_congst_umr += rq_stats->congst_umr; - s->rx_arfs_err += rq_stats->arfs_err; - s->rx_recover += rq_stats->recover; -#ifdef CONFIG_MLX5_EN_TLS - s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets; - s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes; - s->rx_tls_ctx += rq_stats->tls_ctx; - s->rx_tls_del += rq_stats->tls_del; - s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt; - s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start; - s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end; - s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip; - s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok; - s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip; - s->rx_tls_err += rq_stats->tls_err; -#endif - s->ch_events += ch_stats->events; - s->ch_poll += ch_stats->poll; - s->ch_arm += ch_stats->arm; - s->ch_aff_change += ch_stats->aff_change; - s->ch_force_irq += ch_stats->force_irq; - s->ch_eq_rearm += ch_stats->eq_rearm; + mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq); + mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq); + mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch); /* xdp redirect */ - s->tx_xdp_xmit += xdpsq_red_stats->xmit; - s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe; - s->tx_xdp_inlnw += xdpsq_red_stats->inlnw; - s->tx_xdp_nops += xdpsq_red_stats->nops; - s->tx_xdp_full += xdpsq_red_stats->full; - s->tx_xdp_err += xdpsq_red_stats->err; - s->tx_xdp_cqes += xdpsq_red_stats->cqes; + mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq); /* AF_XDP zero-copy */ - s->rx_xsk_packets += xskrq_stats->packets; - s->rx_xsk_bytes += xskrq_stats->bytes; - s->rx_xsk_csum_complete += xskrq_stats->csum_complete; - s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary; - s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner; - s->rx_xsk_csum_none += xskrq_stats->csum_none; - s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark; - s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets; - s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop; - s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect; - s->rx_xsk_wqe_err += xskrq_stats->wqe_err; - s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes; - s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides; - s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop; - s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err; - s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks; - s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts; - s->rx_xsk_congst_umr += xskrq_stats->congst_umr; - s->rx_xsk_arfs_err += xskrq_stats->arfs_err; - s->tx_xsk_xmit += xsksq_stats->xmit; - s->tx_xsk_mpwqe += xsksq_stats->mpwqe; - s->tx_xsk_inlnw += xsksq_stats->inlnw; - s->tx_xsk_full += xsksq_stats->full; - s->tx_xsk_err += xsksq_stats->err; - s->tx_xsk_cqes += xsksq_stats->cqes; + mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq); + mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq); for (j = 0; j < priv->max_opened_tc; j++) { - struct mlx5e_sq_stats *sq_stats = 
&channel_stats->sq[j]; - - s->tx_packets += sq_stats->packets; - s->tx_bytes += sq_stats->bytes; - s->tx_tso_packets += sq_stats->tso_packets; - s->tx_tso_bytes += sq_stats->tso_bytes; - s->tx_tso_inner_packets += sq_stats->tso_inner_packets; - s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes; - s->tx_added_vlan_packets += sq_stats->added_vlan_packets; - s->tx_nop += sq_stats->nop; - s->tx_mpwqe_blks += sq_stats->mpwqe_blks; - s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts; - s->tx_queue_stopped += sq_stats->stopped; - s->tx_queue_wake += sq_stats->wake; - s->tx_queue_dropped += sq_stats->dropped; - s->tx_cqe_err += sq_stats->cqe_err; - s->tx_recover += sq_stats->recover; - s->tx_xmit_more += sq_stats->xmit_more; - s->tx_csum_partial_inner += sq_stats->csum_partial_inner; - s->tx_csum_none += sq_stats->csum_none; - s->tx_csum_partial += sq_stats->csum_partial; -#ifdef CONFIG_MLX5_EN_TLS - s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets; - s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes; - s->tx_tls_ctx += sq_stats->tls_ctx; - s->tx_tls_ooo += sq_stats->tls_ooo; - s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes; - s->tx_tls_dump_packets += sq_stats->tls_dump_packets; - s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes; - s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data; - s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data; - s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req; -#endif - s->tx_cqes += sq_stats->cqes; + mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]); /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */ barrier(); } } + mlx5e_stats_grp_sw_update_stats_ptp(priv, s); } static const struct counter_desc q_stats_desc[] = { @@ -1656,6 +1709,37 @@ static const struct counter_desc ch_stats_desc[] = { { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) }, }; +static const struct counter_desc ptp_sq_stats_desc[] = { + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) }, +}; + +static const struct counter_desc ptp_ch_stats_desc[] = { + { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) }, + { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) }, + { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) }, + { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) }, +}; + +static const struct counter_desc ptp_cq_stats_desc[] = { + { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) }, + { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) }, + { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) }, + { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) }, +}; + #define NUM_RQ_STATS 
ARRAY_SIZE(rq_stats_desc) #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) #define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc) @@ -1663,6 +1747,69 @@ static const struct counter_desc ch_stats_desc[] = { #define NUM_XSKRQ_STATS ARRAY_SIZE(xskrq_stats_desc) #define NUM_XSKSQ_STATS ARRAY_SIZE(xsksq_stats_desc) #define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc) +#define NUM_PTP_SQ_STATS ARRAY_SIZE(ptp_sq_stats_desc) +#define NUM_PTP_CH_STATS ARRAY_SIZE(ptp_ch_stats_desc) +#define NUM_PTP_CQ_STATS ARRAY_SIZE(ptp_cq_stats_desc) + +static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp) +{ + return priv->port_ptp_opened ? + NUM_PTP_CH_STATS + + ((NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc) : + 0; +} + +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp) +{ + int i, tc; + + if (!priv->port_ptp_opened) + return idx; + + for (i = 0; i < NUM_PTP_CH_STATS; i++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + ptp_ch_stats_desc[i].format); + + for (tc = 0; tc < priv->max_opened_tc; tc++) + for (i = 0; i < NUM_PTP_SQ_STATS; i++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + ptp_sq_stats_desc[i].format, tc); + + for (tc = 0; tc < priv->max_opened_tc; tc++) + for (i = 0; i < NUM_PTP_CQ_STATS; i++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + ptp_cq_stats_desc[i].format, tc); + return idx; +} + +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp) +{ + int i, tc; + + if (!priv->port_ptp_opened) + return idx; + + for (i = 0; i < NUM_PTP_CH_STATS; i++) + data[idx++] = + MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.ch, + ptp_ch_stats_desc, i); + + for (tc = 0; tc < priv->max_opened_tc; tc++) + for (i = 0; i < NUM_PTP_SQ_STATS; i++) + data[idx++] = + MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.sq[tc], + ptp_sq_stats_desc, i); + + for (tc = 0; tc < priv->max_opened_tc; tc++) + for (i = 0; i < NUM_PTP_CQ_STATS; i++) + data[idx++] = + MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.cq[tc], + ptp_cq_stats_desc, i); + + return idx; +} + +static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; } static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels) { @@ -1784,6 +1931,7 @@ MLX5E_DEFINE_STATS_GRP(channels, 0); MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0); MLX5E_DEFINE_STATS_GRP(eth_ext, 0); static MLX5E_DEFINE_STATS_GRP(tls, 0); +static MLX5E_DEFINE_STATS_GRP(ptp, 0); /* The stats groups order is opposite to the update_stats() order calls */ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = { @@ -1806,6 +1954,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = { &MLX5E_STATS_GRP(tls), &MLX5E_STATS_GRP(channels), &MLX5E_STATS_GRP(per_port_buff_congest), + &MLX5E_STATS_GRP(ptp), }; unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 162daaadb0d8..e41fc11f2ce7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -51,6 +51,10 @@ #define MLX5E_DECLARE_XSKSQ_STAT(type, fld) "tx%d_xsk_"#fld, offsetof(type, fld) #define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld) +#define MLX5E_DECLARE_PTP_TX_STAT(type, fld) "ptp_tx%d_"#fld, offsetof(type, fld) +#define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld) +#define MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld) + struct counter_desc { char format[ETH_GSTRING_LEN]; size_t offset; /* Byte offset */ @@ -398,6 +402,13 @@ struct mlx5e_ch_stats { u64 eq_rearm; }; +struct mlx5e_ptp_cq_stats { + u64 cqe; + u64 err_cqe; + u64 
abort; + u64 abort_abs_diff_ns; +}; + struct mlx5e_stats { struct mlx5e_sw_stats sw; struct mlx5e_qcounter_stats qcnt; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index ce710f22b1ff..4cdf834fa74a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -271,8 +271,6 @@ mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev, return 0; } -#define esw_offloads_mode(esw) (mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS) - static struct mlx5_tc_ct_priv * get_ct_priv(struct mlx5e_priv *priv) { @@ -280,7 +278,7 @@ get_ct_priv(struct mlx5e_priv *priv) struct mlx5_rep_uplink_priv *uplink_priv; struct mlx5e_rep_priv *uplink_rpriv; - if (esw_offloads_mode(esw)) { + if (is_mdev_switchdev_mode(priv->mdev)) { uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); uplink_priv = &uplink_rpriv->uplink_priv; @@ -297,7 +295,7 @@ mlx5_tc_rule_insert(struct mlx5e_priv *priv, { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - if (esw_offloads_mode(esw)) + if (is_mdev_switchdev_mode(priv->mdev)) return mlx5_eswitch_add_offloaded_rule(esw, spec, attr); return mlx5e_add_offloaded_nic_rule(priv, spec, attr); @@ -310,7 +308,7 @@ mlx5_tc_rule_delete(struct mlx5e_priv *priv, { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - if (esw_offloads_mode(esw)) { + if (is_mdev_switchdev_mode(priv->mdev)) { mlx5_eswitch_del_offloaded_rule(esw, rule, attr); return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index d97203cf6a00..e47e2a0059d0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -32,6 +32,7 @@ #include <linux/tcp.h> #include <linux/if_vlan.h> +#include <linux/ptp_classify.h> #include <net/geneve.h> #include <net/dsfield.h> #include "en.h" @@ -39,6 +40,7 @@ #include "ipoib/ipoib.h" #include "en_accel/en_accel.h" #include "lib/clock.h" +#include "en/ptp.h" static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma) { @@ -66,14 +68,73 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb } #endif +static bool mlx5e_use_ptpsq(struct sk_buff *skb) +{ + struct flow_keys fk; + + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) + return false; + + if (fk.basic.n_proto == htons(ETH_P_1588)) + return true; + + if (fk.basic.n_proto != htons(ETH_P_IP) && + fk.basic.n_proto != htons(ETH_P_IPV6)) + return false; + + return (fk.basic.ip_proto == IPPROTO_UDP && + fk.ports.dst == htons(PTP_EV_PORT)); +} + +static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int up = 0; + + if (!netdev_get_num_tc(dev)) + goto return_txq; + +#ifdef CONFIG_MLX5_CORE_EN_DCB + if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP) + up = mlx5e_get_dscp_up(priv, skb); + else +#endif + if (skb_vlan_tag_present(skb)) + up = skb_vlan_tag_get_prio(skb); + +return_txq: + return priv->port_ptp_tc2realtxq[up]; +} + u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { - int txq_ix = netdev_pick_tx(dev, skb, NULL); struct mlx5e_priv *priv = netdev_priv(dev); + int txq_ix; int up = 0; int ch_ix; + if (unlikely(priv->channels.port_ptp)) { + int num_tc_x_num_ch; + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + mlx5e_use_ptpsq(skb)) + return mlx5e_select_ptpsq(dev, skb); + + /* Sync with mlx5e_update_num_tc_x_num_ch - avoid 
refetching. */ + num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch); + + txq_ix = netdev_pick_tx(dev, skb, NULL); + /* Fix netdev_pick_tx() not to choose ptp_channel txqs. + * If they are selected, switch to regular queues. + * Driver to select these queues only at mlx5e_select_ptpsq(). + */ + if (unlikely(txq_ix >= num_tc_x_num_ch)) + txq_ix %= num_tc_x_num_ch; + } else { + txq_ix = netdev_pick_tx(dev, skb, NULL); + } + if (!netdev_get_num_tc(dev)) return txq_ix; @@ -402,6 +463,12 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, mlx5e_tx_check_stop(sq); + if (unlikely(sq->ptpsq)) { + mlx5e_skb_cb_hwtstamp_init(skb); + mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb); + skb_get(skb); + } + send_doorbell = __netdev_tx_sent_queue(sq->txq, attr->num_bytes, xmit_more); if (send_doorbell) mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg); @@ -579,7 +646,7 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb, goto err_unmap; mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE); - mlx5e_skb_fifo_push(sq, skb); + mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb); mlx5e_tx_mpwqe_add_dseg(sq, &txd); @@ -707,7 +774,11 @@ static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb, u64 ts = get_cqe_ts(cqe); hwts.hwtstamp = mlx5_timecounter_cyc2time(sq->clock, ts); - skb_tstamp_tx(skb, &hwts); + if (sq->ptpsq) + mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP, + hwts.hwtstamp, sq->ptpsq->cq_stats); + else + skb_tstamp_tx(skb, &hwts); } napi_consume_skb(skb, napi_budget); @@ -719,7 +790,7 @@ static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_t int i; for (i = 0; i < wi->num_fifo_pkts; i++) { - struct sk_buff *skb = mlx5e_skb_fifo_pop(sq); + struct sk_buff *skb = mlx5e_skb_fifo_pop(&sq->db.skb_fifo); mlx5e_consume_skb(sq, skb, cqe, napi_budget); } @@ -805,8 +876,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) mlx5e_dump_error_cqe(&sq->cq, sq->sqn, (struct mlx5_err_cqe *)cqe); mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs); - queue_work(cq->channel->priv->wq, - &sq->recover_work); + queue_work(cq->priv->wq, &sq->recover_work); } stats->cqe_err++; } @@ -840,7 +910,7 @@ static void mlx5e_tx_wi_kfree_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_ int i; for (i = 0; i < wi->num_fifo_pkts; i++) - dev_kfree_skb_any(mlx5e_skb_fifo_pop(sq)); + dev_kfree_skb_any(mlx5e_skb_fifo_pop(&sq->db.skb_fifo)); } void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index d5868670f8a5..1ec3d62f026d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -221,14 +221,13 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe) napi_schedule(cq->napi); cq->event_ctr++; - cq->channel->stats->events++; + cq->ch_stats->events++; } void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event) { struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); - struct mlx5e_channel *c = cq->channel; - struct net_device *netdev = c->netdev; + struct net_device *netdev = cq->netdev; netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n", __func__, mcq->cqn, event); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 4ea5d6ddf56a..fc0afa03d407 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -136,7 
+136,7 @@ static int mlx5_eq_comp_int(struct notifier_block *nb, eqe = next_eqe_sw(eq); if (!eqe) - goto out; + return 0; do { struct mlx5_core_cq *cq; @@ -161,8 +161,6 @@ static int mlx5_eq_comp_int(struct notifier_block *nb, ++eq->cons_index; } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq))); - -out: eq_update_ci(eq, 1); if (cqn != -1) @@ -250,9 +248,9 @@ static int mlx5_eq_async_int(struct notifier_block *nb, ++eq->cons_index; } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq))); + eq_update_ci(eq, 1); out: - eq_update_ci(eq, 1); mlx5_eq_async_int_unlock(eq_async, recovery, &flags); return unlikely(recovery) ? num_eqes : 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c index d46f8b225ebe..2b85d4777303 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c @@ -101,7 +101,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw, vport->egress.acl = esw_acl_table_create(esw, vport->vport, MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size); - if (IS_ERR_OR_NULL(vport->egress.acl)) { + if (IS_ERR(vport->egress.acl)) { err = PTR_ERR(vport->egress.acl); vport->egress.acl = NULL; goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c index c3faae67e4d6..4c74e2690d57 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c @@ -173,7 +173,7 @@ int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport table_size++; vport->egress.acl = esw_acl_table_create(esw, vport->vport, MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size); - if (IS_ERR_OR_NULL(vport->egress.acl)) { + if (IS_ERR(vport->egress.acl)) { err = PTR_ERR(vport->egress.acl); vport->egress.acl = NULL; return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c index b68976b378b8..d64fad2823e7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c @@ -180,7 +180,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, vport->ingress.acl = esw_acl_table_create(esw, vport->vport, MLX5_FLOW_NAMESPACE_ESW_INGRESS, table_size); - if (IS_ERR_OR_NULL(vport->ingress.acl)) { + if (IS_ERR(vport->ingress.acl)) { err = PTR_ERR(vport->ingress.acl); vport->ingress.acl = NULL; return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c index 4e55d7225a26..548c005ea633 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c @@ -258,7 +258,7 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, vport->ingress.acl = esw_acl_table_create(esw, vport->vport, MLX5_FLOW_NAMESPACE_ESW_INGRESS, num_ftes); - if (IS_ERR_OR_NULL(vport->ingress.acl)) { + if (IS_ERR(vport->ingress.acl)) { err = PTR_ERR(vport->ingress.acl); vport->ingress.acl = NULL; return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 7f8c4a957f72..da901e364656 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1641,8 +1641,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs) if (mode == MLX5_ESWITCH_LEGACY) { err = esw_legacy_enable(esw); } else { - mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); - mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); + mlx5_rescan_drivers(esw->dev); err = esw_offloads_enable(esw); } @@ -1660,10 +1659,9 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs) abort: esw->mode = MLX5_ESWITCH_NONE; - if (mode == MLX5_ESWITCH_OFFLOADS) { - mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); - mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); - } + if (mode == MLX5_ESWITCH_OFFLOADS) + mlx5_rescan_drivers(esw->dev); + esw_destroy_tsar(esw); return err; } @@ -1724,10 +1722,9 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf) mlx5_lag_update(esw->dev); - if (old_mode == MLX5_ESWITCH_OFFLOADS) { - mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); - mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); - } + if (old_mode == MLX5_ESWITCH_OFFLOADS) + mlx5_rescan_drivers(esw->dev); + esw_destroy_tsar(esw); if (clear_vf) @@ -2466,8 +2463,10 @@ free_out: return err; } -u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw) +u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev) { + struct mlx5_eswitch *esw = dev->priv.eswitch; + return ESW_ALLOWED(esw) ? esw->mode : MLX5_ESWITCH_NONE; } EXPORT_SYMBOL_GPL(mlx5_eswitch_mode); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index c9c2962ad49f..2f6a0ae20650 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -484,7 +484,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, } } dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest[i].ft = fwd_fdb, + dest[i].ft = fwd_fdb; i++; mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr); @@ -1680,7 +1680,6 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw) goto out_free; } - memset(flow_group_in, 0, inlen); match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); misc = MLX5_ADDR_OF(fte_match_param, match_criteria, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index cac8f085b16d..97d96fc38a65 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -39,7 +39,7 @@ static void mlx5i_get_drvinfo(struct net_device *dev, struct mlx5e_priv *priv = mlx5i_epriv(dev); mlx5e_ethtool_get_drvinfo(priv, drvinfo); - strlcpy(drvinfo->driver, DRIVER_NAME "[ib_ipoib]", + strlcpy(drvinfo->driver, KBUILD_MODNAME "[ib_ipoib]", sizeof(drvinfo->driver)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 33081b24f10a..f3d45ef082cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -243,24 +243,30 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) #endif } -static void mlx5_lag_add_ib_devices(struct mlx5_lag *ldev) +static void mlx5_lag_add_devices(struct mlx5_lag *ldev) { int i; - for (i = 0; i < MLX5_MAX_PORTS; i++) - if (ldev->pf[i].dev) - mlx5_add_dev_by_protocol(ldev->pf[i].dev, - MLX5_INTERFACE_PROTOCOL_IB); + for 
(i = 0; i < MLX5_MAX_PORTS; i++) { + if (!ldev->pf[i].dev) + continue; + + ldev->pf[i].dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; + mlx5_rescan_drivers_locked(ldev->pf[i].dev); + } } -static void mlx5_lag_remove_ib_devices(struct mlx5_lag *ldev) +static void mlx5_lag_remove_devices(struct mlx5_lag *ldev) { int i; - for (i = 0; i < MLX5_MAX_PORTS; i++) - if (ldev->pf[i].dev) - mlx5_remove_dev_by_protocol(ldev->pf[i].dev, - MLX5_INTERFACE_PROTOCOL_IB); + for (i = 0; i < MLX5_MAX_PORTS; i++) { + if (!ldev->pf[i].dev) + continue; + + ldev->pf[i].dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; + mlx5_rescan_drivers_locked(ldev->pf[i].dev); + } } static void mlx5_do_bond(struct mlx5_lag *ldev) @@ -290,20 +296,21 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) #endif if (roce_lag) - mlx5_lag_remove_ib_devices(ldev); + mlx5_lag_remove_devices(ldev); err = mlx5_activate_lag(ldev, &tracker, roce_lag ? MLX5_LAG_FLAG_ROCE : MLX5_LAG_FLAG_SRIOV); if (err) { if (roce_lag) - mlx5_lag_add_ib_devices(ldev); + mlx5_lag_add_devices(ldev); return; } if (roce_lag) { - mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB); + dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; + mlx5_rescan_drivers_locked(dev0); mlx5_nic_vport_enable_roce(dev1); } } else if (do_bond && __mlx5_lag_is_active(ldev)) { @@ -312,7 +319,8 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) roce_lag = __mlx5_lag_is_roce(ldev); if (roce_lag) { - mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB); + dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; + mlx5_rescan_drivers_locked(dev0); mlx5_nic_vport_disable_roce(dev1); } @@ -321,7 +329,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) return; if (roce_lag) - mlx5_lag_add_ib_devices(ldev); + mlx5_lag_add_devices(ldev); } } @@ -596,6 +604,8 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev) if (err) mlx5_core_err(dev, "Failed to init multipath lag err=%d\n", err); + + return; } /* Must be called with intf_mutex held */ @@ -739,24 +749,6 @@ unlock: } EXPORT_SYMBOL(mlx5_lag_get_slave_port); -bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv) -{ - struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, - priv); - struct mlx5_lag *ldev; - - if (intf->protocol != MLX5_INTERFACE_PROTOCOL_IB) - return true; - - ldev = mlx5_lag_dev_get(dev); - if (!ldev || !__mlx5_lag_is_roce(ldev) || - ldev->pf[MLX5_LAG_P1].dev == dev) - return true; - - /* If bonded, we do not add an IB device for PF1. 
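 *
 * A hedged aside, not part of the patch: with mlx5_lag_intf_add() removed,
 * the same "no IB device on PF1 while bonded" policy is expressed by
 * toggling a priv flag and rescanning the auxiliary bus, as the lag.c hunks
 * above do. Minimal sketch of the new pattern, using only names that appear
 * in this diff (mlx5_rescan_drivers_locked() expects the device-list lock
 * to be held, per the mlx5_rescan_drivers() wrapper added below):
 *
 *	dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
 *	mlx5_rescan_drivers_locked(dev);	unbinds the IB auxiliary device
 *
 *	dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
 *	mlx5_rescan_drivers_locked(dev);	rebinds it after LAG teardown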
*/ - return false; -} - int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, u64 *values, int num_counters, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index d86f06f14cd3..c08315b51fd3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -50,6 +50,7 @@ #ifdef CONFIG_RFS_ACCEL #include <linux/cpu_rmap.h> #endif +#include <linux/version.h> #include <net/devlink.h> #include "mlx5_core.h" #include "lib/eq.h" @@ -76,7 +77,6 @@ MODULE_AUTHOR("Eli Cohen <[email protected]>"); MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver"); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_VERSION(DRIVER_VERSION); unsigned int mlx5_core_debug_mask; module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644); @@ -227,13 +227,16 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev) strncat(string, ",", remaining_size); remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); - strncat(string, DRIVER_NAME, remaining_size); + strncat(string, KBUILD_MODNAME, remaining_size); remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); strncat(string, ",", remaining_size); remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); - strncat(string, DRIVER_VERSION, remaining_size); + + snprintf(string + strlen(string), remaining_size, "%u.%u.%u", + (u8)((LINUX_VERSION_CODE >> 16) & 0xff), (u8)((LINUX_VERSION_CODE >> 8) & 0xff), + (u16)(LINUX_VERSION_CODE & 0xffff)); /*Send the command*/ MLX5_SET(set_driver_version_in, in, opcode, @@ -309,7 +312,7 @@ static int request_bar(struct pci_dev *pdev) return -ENODEV; } - err = pci_request_regions(pdev, DRIVER_NAME); + err = pci_request_regions(pdev, KBUILD_MODNAME); if (err) dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); @@ -1219,14 +1222,21 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) err = mlx5_devlink_register(priv_to_devlink(dev), dev->device); if (err) goto err_devlink_reg; - mlx5_register_device(dev); + + err = mlx5_register_device(dev); } else { - mlx5_attach_device(dev); + err = mlx5_attach_device(dev); } + if (err) + goto err_register; + mutex_unlock(&dev->intf_state_mutex); return 0; +err_register: + if (boot) + mlx5_devlink_unregister(priv_to_devlink(dev)); err_devlink_reg: clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); mlx5_unload(dev); @@ -1303,8 +1313,14 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) if (err) goto err_pagealloc_init; + err = mlx5_adev_init(dev); + if (err) + goto err_adev_init; + return 0; +err_adev_init: + mlx5_pagealloc_cleanup(dev); err_pagealloc_init: mlx5_health_cleanup(dev); err_health_init: @@ -1321,6 +1337,7 @@ static void mlx5_mdev_uninit(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; + mlx5_adev_cleanup(dev); mlx5_pagealloc_cleanup(dev); mlx5_health_cleanup(dev); debugfs_remove_recursive(dev->priv.dbg_root); @@ -1331,7 +1348,6 @@ static void mlx5_mdev_uninit(struct mlx5_core_dev *dev) mutex_destroy(&dev->intf_state_mutex); } -#define MLX5_IB_MOD "mlx5_ib" static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct mlx5_core_dev *dev; @@ -1351,6 +1367,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) dev->coredev_type = id->driver_data & MLX5_PCI_DEV_IS_VF ? 
MLX5_COREDEV_VF : MLX5_COREDEV_PF; + dev->priv.adev_idx = mlx5_adev_idx_alloc(); + if (dev->priv.adev_idx < 0) + return dev->priv.adev_idx; + err = mlx5_mdev_init(dev, prof_sel); if (err) goto mdev_init_err; @@ -1369,8 +1389,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) goto err_load_one; } - request_module_nowait(MLX5_IB_MOD); - err = mlx5_crdump_enable(dev); if (err) dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err); @@ -1384,6 +1402,7 @@ err_load_one: pci_init_err: mlx5_mdev_uninit(dev); mdev_init_err: + mlx5_adev_idx_free(dev->priv.adev_idx); mlx5_devlink_free(devlink); return err; @@ -1400,6 +1419,7 @@ static void remove_one(struct pci_dev *pdev) mlx5_unload_one(dev, true); mlx5_pci_close(dev); mlx5_mdev_uninit(dev); + mlx5_adev_idx_free(dev->priv.adev_idx); mlx5_devlink_free(devlink); } @@ -1614,7 +1634,7 @@ void mlx5_recover_device(struct mlx5_core_dev *dev) } static struct pci_driver mlx5_core_driver = { - .name = DRIVER_NAME, + .name = KBUILD_MODNAME, .id_table = mlx5_core_pci_table, .probe = init_one, .remove = remove_one, @@ -1640,6 +1660,9 @@ static int __init init(void) { int err; + WARN_ONCE(strcmp(MLX5_ADEV_NAME, KBUILD_MODNAME), + "mlx5_core name not in sync with kernel module name"); + get_random_bytes(&sw_owner_id, sizeof(sw_owner_id)); mlx5_core_verify_params(); @@ -1651,7 +1674,11 @@ static int __init init(void) goto err_debug; #ifdef CONFIG_MLX5_CORE_EN - mlx5e_init(); + err = mlx5e_init(); + if (err) { + pci_unregister_driver(&mlx5_core_driver); + goto err_debug; + } #endif return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 9d00efa9e6bc..0a0302ce7144 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -42,9 +42,6 @@ #include <linux/mlx5/fs.h> #include <linux/mlx5/driver.h> -#define DRIVER_NAME "mlx5_core" -#define DRIVER_VERSION "5.0-0" - extern uint mlx5_core_debug_mask; #define mlx5_core_dbg(__dev, format, ...) 
\ @@ -185,22 +182,20 @@ void mlx5_events_cleanup(struct mlx5_core_dev *dev); void mlx5_events_start(struct mlx5_core_dev *dev); void mlx5_events_stop(struct mlx5_core_dev *dev); -void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv); -void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv); -void mlx5_attach_device(struct mlx5_core_dev *dev); +int mlx5_adev_idx_alloc(void); +void mlx5_adev_idx_free(int idx); +void mlx5_adev_cleanup(struct mlx5_core_dev *dev); +int mlx5_adev_init(struct mlx5_core_dev *dev); + +int mlx5_attach_device(struct mlx5_core_dev *dev); void mlx5_detach_device(struct mlx5_core_dev *dev); -bool mlx5_device_registered(struct mlx5_core_dev *dev); -void mlx5_register_device(struct mlx5_core_dev *dev); +int mlx5_register_device(struct mlx5_core_dev *dev); void mlx5_unregister_device(struct mlx5_core_dev *dev); -void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol); -void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol); struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev); void mlx5_dev_list_lock(void); void mlx5_dev_list_unlock(void); int mlx5_dev_list_trylock(void); -bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv); - int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size); int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size); int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode); @@ -219,7 +214,7 @@ int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw, int mlx5_fw_version_query(struct mlx5_core_dev *dev, u32 *running_ver, u32 *stored_ver); -void mlx5e_init(void); +int mlx5e_init(void); void mlx5e_cleanup(void); static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev) @@ -239,7 +234,17 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev) MLX5_CAP_GEN(dev, lag_master); } -void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol); +int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev); +static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev) +{ + int ret; + + mlx5_dev_list_lock(); + ret = mlx5_rescan_drivers_locked(dev); + mlx5_dev_list_unlock(); + return ret; +} + void mlx5_lag_update(struct mlx5_core_dev *dev); enum { diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 1077ed2046fe..2a89b3261f00 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -581,6 +581,13 @@ mlxsw_reg_sfd_uc_tunnel_pack(char *payload, int rec_index, mlxsw_reg_sfd_uc_tunnel_protocol_set(payload, rec_index, proto); } +enum mlxsw_reg_tunnel_port { + MLXSW_REG_TUNNEL_PORT_NVE, + MLXSW_REG_TUNNEL_PORT_VPLS, + MLXSW_REG_TUNNEL_PORT_FLEX_TUNNEL0, + MLXSW_REG_TUNNEL_PORT_FLEX_TUNNEL1, +}; + /* SFN - Switch FDB Notification Register * ------------------------------------------- * The switch provides notifications on newly learned FDB entries and @@ -738,13 +745,6 @@ MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_protocol, MLXSW_REG_SFN_BASE_LEN, 27, MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_uip_lsb, MLXSW_REG_SFN_BASE_LEN, 0, 24, MLXSW_REG_SFN_REC_LEN, 0x0C, false); -enum mlxsw_reg_sfn_tunnel_port { - MLXSW_REG_SFN_TUNNEL_PORT_NVE, - MLXSW_REG_SFN_TUNNEL_PORT_VPLS, - MLXSW_REG_SFN_TUNNEL_FLEX_TUNNEL0, - MLXSW_REG_SFN_TUNNEL_FLEX_TUNNEL1, -}; - /* reg_sfn_uc_tunnel_port * Tunnel port. * Reserved on Spectrum. 
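 *
 * A hedged aside, not part of the patch: this removal, together with the
 * tnumt and tnpc hunks below, replaces three per-register copies of the
 * tunnel-port enum (SFN/TNUMT/TNPC) with the single mlxsw_reg_tunnel_port
 * added above. A minimal caller sketch using only helpers visible in this
 * diff:
 *
 *	char tnpc_pl[MLXSW_REG_TNPC_LEN];
 *
 *	mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TUNNEL_PORT_NVE, true);
 *	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnpc), tnpc_pl);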
@@ -821,8 +821,16 @@ static inline void mlxsw_reg_spms_vid_pack(char *payload, u16 vid, MLXSW_REG_DEFINE(spvid, MLXSW_REG_SPVID_ID, MLXSW_REG_SPVID_LEN); +/* reg_spvid_tport + * Port is tunnel port. + * Reserved when SwitchX/-2 or Spectrum-1. + * Access: Index + */ +MLXSW_ITEM32(reg, spvid, tport, 0x00, 24, 1); + /* reg_spvid_local_port - * Local port number. + * When tport = 0: Local port number. Not supported for CPU port. + * When tport = 1: Tunnel port. * Access: Index */ MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8); @@ -1693,6 +1701,109 @@ static inline void mlxsw_reg_svfa_pack(char *payload, u8 local_port, mlxsw_reg_svfa_vid_set(payload, vid); } +/* SPVTR - Switch Port VLAN Stacking Register + * ------------------------------------------ + * The Switch Port VLAN Stacking register configures the VLAN mode of the port + * to enable VLAN stacking. + */ +#define MLXSW_REG_SPVTR_ID 0x201D +#define MLXSW_REG_SPVTR_LEN 0x10 + +MLXSW_REG_DEFINE(spvtr, MLXSW_REG_SPVTR_ID, MLXSW_REG_SPVTR_LEN); + +/* reg_spvtr_tport + * Port is tunnel port. + * Access: Index + * + * Note: Reserved when SwitchX/-2 or Spectrum-1. + */ +MLXSW_ITEM32(reg, spvtr, tport, 0x00, 24, 1); + +/* reg_spvtr_local_port + * When tport = 0: local port number (Not supported from/to CPU). + * When tport = 1: tunnel port. + * Access: Index + */ +MLXSW_ITEM32(reg, spvtr, local_port, 0x00, 16, 8); + +/* reg_spvtr_ippe + * Ingress Port Prio Mode Update Enable. + * When set, the Port Prio Mode is updated with the provided ipprio_mode field. + * Reserved on Get operations. + * Access: OP + */ +MLXSW_ITEM32(reg, spvtr, ippe, 0x04, 31, 1); + +/* reg_spvtr_ipve + * Ingress Port VID Mode Update Enable. + * When set, the Ingress Port VID Mode is updated with the provided ipvid_mode + * field. + * Reserved on Get operations. + * Access: OP + */ +MLXSW_ITEM32(reg, spvtr, ipve, 0x04, 30, 1); + +/* reg_spvtr_epve + * Egress Port VID Mode Update Enable. + * When set, the Egress Port VID Mode is updated with the provided epvid_mode + * field. + * Access: OP + */ +MLXSW_ITEM32(reg, spvtr, epve, 0x04, 29, 1); + +/* reg_spvtr_ipprio_mode + * Ingress Port Priority Mode. + * This controls the PCP and DEI of the new outer VLAN + * Note: for SwitchX/-2 the DEI is not affected. + * 0: use port default PCP and DEI (configured by QPDPC). + * 1: use C-VLAN PCP and DEI. + * Has no effect when ipvid_mode = 0. + * Reserved when tport = 1. + * Access: RW + */ +MLXSW_ITEM32(reg, spvtr, ipprio_mode, 0x04, 20, 4); + +enum mlxsw_reg_spvtr_ipvid_mode { + /* IEEE Compliant PVID (default) */ + MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID, + /* Push VLAN (for VLAN stacking, except prio tagged packets) */ + MLXSW_REG_SPVTR_IPVID_MODE_PUSH_VLAN_FOR_UNTAGGED_PACKET, + /* Always push VLAN (also for prio tagged packets) */ + MLXSW_REG_SPVTR_IPVID_MODE_ALWAYS_PUSH_VLAN, +}; + +/* reg_spvtr_ipvid_mode + * Ingress Port VLAN-ID Mode. + * For Spectrum family, this affects the values of SPVM.i + * Access: RW + */ +MLXSW_ITEM32(reg, spvtr, ipvid_mode, 0x04, 16, 4); + +enum mlxsw_reg_spvtr_epvid_mode { + /* IEEE Compliant VLAN membership */ + MLXSW_REG_SPVTR_EPVID_MODE_IEEE_COMPLIANT_VLAN_MEMBERSHIP, + /* Pop VLAN (for VLAN stacking) */ + MLXSW_REG_SPVTR_EPVID_MODE_POP_VLAN, +}; + +/* reg_spvtr_epvid_mode + * Egress Port VLAN-ID Mode. + * For Spectrum family, this affects the values of SPVM.e,u,pt. 
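 *
 * A hedged aside, not part of the patch: spectrum_nve_vxlan.c below arms
 * SPVTR for the NVE tunnel port so that packets decapsulated from the
 * tunnel get a VLAN pushed on ingress (the pack helper also sets ipve to
 * latch the ingress VID mode). Sketch taken from the hunks below:
 *
 *	char spvtr_pl[MLXSW_REG_SPVTR_LEN];
 *
 *	mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
 *			     MLXSW_REG_SPVTR_IPVID_MODE_ALWAYS_PUSH_VLAN);
 *	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);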
+ * Access: WO + */ +MLXSW_ITEM32(reg, spvtr, epvid_mode, 0x04, 0, 4); + +static inline void mlxsw_reg_spvtr_pack(char *payload, bool tport, + u8 local_port, + enum mlxsw_reg_spvtr_ipvid_mode ipvid_mode) +{ + MLXSW_REG_ZERO(spvtr, payload); + mlxsw_reg_spvtr_tport_set(payload, tport); + mlxsw_reg_spvtr_local_port_set(payload, local_port); + mlxsw_reg_spvtr_ipvid_mode_set(payload, ipvid_mode); + mlxsw_reg_spvtr_ipve_set(payload, true); +} + /* SVPE - Switch Virtual-Port Enabling Register * -------------------------------------------- * Enables port virtualization. @@ -10507,13 +10618,6 @@ enum mlxsw_reg_tnumt_record_type { */ MLXSW_ITEM32(reg, tnumt, record_type, 0x00, 28, 4); -enum mlxsw_reg_tnumt_tunnel_port { - MLXSW_REG_TNUMT_TUNNEL_PORT_NVE, - MLXSW_REG_TNUMT_TUNNEL_PORT_VPLS, - MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL0, - MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL1, -}; - /* reg_tnumt_tunnel_port * Tunnel port. * Access: RW @@ -10561,7 +10665,7 @@ MLXSW_ITEM32_INDEXED(reg, tnumt, udip_ptr, 0x0C, 0, 24, 0x04, 0x00, false); static inline void mlxsw_reg_tnumt_pack(char *payload, enum mlxsw_reg_tnumt_record_type type, - enum mlxsw_reg_tnumt_tunnel_port tport, + enum mlxsw_reg_tunnel_port tport, u32 underlay_mc_ptr, bool vnext, u32 next_underlay_mc_ptr, u8 record_size) @@ -10725,13 +10829,6 @@ static inline void mlxsw_reg_tndem_pack(char *payload, u8 underlay_ecn, MLXSW_REG_DEFINE(tnpc, MLXSW_REG_TNPC_ID, MLXSW_REG_TNPC_LEN); -enum mlxsw_reg_tnpc_tunnel_port { - MLXSW_REG_TNPC_TUNNEL_PORT_NVE, - MLXSW_REG_TNPC_TUNNEL_PORT_VPLS, - MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL0, - MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL1, -}; - /* reg_tnpc_tunnel_port * Tunnel port. * Access: Index @@ -10751,7 +10848,7 @@ MLXSW_ITEM32(reg, tnpc, learn_enable_v6, 0x04, 1, 1); MLXSW_ITEM32(reg, tnpc, learn_enable_v4, 0x04, 0, 1); static inline void mlxsw_reg_tnpc_pack(char *payload, - enum mlxsw_reg_tnpc_tunnel_port tport, + enum mlxsw_reg_tunnel_port tport, bool learn_enable) { MLXSW_REG_ZERO(tnpc, payload); @@ -11320,6 +11417,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(slcor), MLXSW_REG(spmlr), MLXSW_REG(svfa), + MLXSW_REG(spvtr), MLXSW_REG(svpe), MLXSW_REG(sfmr), MLXSW_REG(spvmlr), diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 963eb0b1d9dd..df8175cd44ab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -384,7 +384,7 @@ int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, return err; } -static int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type) +int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type) { switch (ethtype) { case ETH_P_8021Q: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 6092243a69cb..a6956cfc9cb1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -584,6 +584,7 @@ int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable); int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, bool learn_enable); +int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type); int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, u16 ethtype); struct mlxsw_sp_port_vlan * @@ -1202,6 +1203,7 @@ struct mlxsw_sp_nve_params { enum mlxsw_sp_nve_type type; __be32 vni; 
const struct net_device *dev; + u16 ethertype; }; extern const struct mlxsw_sp_nve_ops *mlxsw_sp1_nve_ops_arr[]; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c index 54d3e7dcd303..e5ec595593f4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c @@ -368,7 +368,7 @@ mlxsw_sp_nve_mc_record_refresh(struct mlxsw_sp_nve_mc_record *mc_record) next_valid = true; } - mlxsw_reg_tnumt_pack(tnumt_pl, type, MLXSW_REG_TNUMT_TUNNEL_PORT_NVE, + mlxsw_reg_tnumt_pack(tnumt_pl, type, MLXSW_REG_TUNNEL_PORT_NVE, mc_record->kvdl_index, next_valid, next_kvdl_index, mc_record->num_entries); @@ -798,11 +798,11 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid, ops = nve->nve_ops_arr[params->type]; - if (!ops->can_offload(nve, params->dev, extack)) + if (!ops->can_offload(nve, params, extack)) return -EINVAL; memset(&config, 0, sizeof(config)); - ops->nve_config(nve, params->dev, &config); + ops->nve_config(nve, params, &config); if (nve->num_nve_tunnels && memcmp(&config, &nve->config, sizeof(config))) { NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration"); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h index 12f664f42f21..2796d3659979 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h @@ -18,6 +18,7 @@ struct mlxsw_sp_nve_config { u32 ul_tb_id; enum mlxsw_sp_l3proto ul_proto; union mlxsw_sp_l3addr ul_sip; + u16 ethertype; }; struct mlxsw_sp_nve { @@ -35,10 +36,10 @@ struct mlxsw_sp_nve { struct mlxsw_sp_nve_ops { enum mlxsw_sp_nve_type type; bool (*can_offload)(const struct mlxsw_sp_nve *nve, - const struct net_device *dev, + const struct mlxsw_sp_nve_params *params, struct netlink_ext_ack *extack); void (*nve_config)(const struct mlxsw_sp_nve *nve, - const struct net_device *dev, + const struct mlxsw_sp_nve_params *params, struct mlxsw_sp_nve_config *config); int (*init)(struct mlxsw_sp_nve *nve, const struct mlxsw_sp_nve_config *config); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c index 05517c7feaa5..3e2bb22e9ca6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c @@ -22,10 +22,10 @@ VXLAN_F_LEARN) static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, - const struct net_device *dev, + const struct mlxsw_sp_nve_params *params, struct netlink_ext_ack *extack) { - struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_dev *vxlan = netdev_priv(params->dev); struct vxlan_config *cfg = &vxlan->cfg; if (cfg->saddr.sa.sa_family != AF_INET) { @@ -86,11 +86,23 @@ static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, return true; } +static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, + const struct mlxsw_sp_nve_params *params, + struct netlink_ext_ack *extack) +{ + if (params->ethertype == ETH_P_8021AD) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: 802.1ad bridge is not supported with VxLAN"); + return false; + } + + return mlxsw_sp_nve_vxlan_can_offload(nve, params, extack); +} + static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve, - const struct net_device *dev, + const struct mlxsw_sp_nve_params *params, struct mlxsw_sp_nve_config *config) { - struct 
vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_dev *vxlan = netdev_priv(params->dev); struct vxlan_config *cfg = &vxlan->cfg; config->type = MLXSW_SP_NVE_TYPE_VXLAN; @@ -101,6 +113,7 @@ static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve, config->ul_proto = MLXSW_SP_L3_PROTO_IPV4; config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr; config->udp_dport = cfg->dst_port; + config->ethertype = params->ethertype; } static int __mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp, @@ -286,7 +299,7 @@ mlxsw_sp_nve_vxlan_clear_offload(const struct net_device *nve_dev, __be32 vni) const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = { .type = MLXSW_SP_NVE_TYPE_VXLAN, - .can_offload = mlxsw_sp_nve_vxlan_can_offload, + .can_offload = mlxsw_sp1_nve_vxlan_can_offload, .nve_config = mlxsw_sp_nve_vxlan_config, .init = mlxsw_sp1_nve_vxlan_init, .fini = mlxsw_sp1_nve_vxlan_fini, @@ -299,16 +312,35 @@ static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp, { char tnpc_pl[MLXSW_REG_TNPC_LEN]; - mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TNPC_TUNNEL_PORT_NVE, + mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TUNNEL_PORT_NVE, learning_en); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnpc), tnpc_pl); } static int +mlxsw_sp2_nve_decap_ethertype_set(struct mlxsw_sp *mlxsw_sp, u16 ethertype) +{ + char spvid_pl[MLXSW_REG_SPVID_LEN] = {}; + u8 sver_type; + int err; + + mlxsw_reg_spvid_tport_set(spvid_pl, true); + mlxsw_reg_spvid_local_port_set(spvid_pl, + MLXSW_REG_TUNNEL_PORT_NVE); + err = mlxsw_sp_ethtype_to_sver_type(ethertype, &sver_type); + if (err) + return err; + + mlxsw_reg_spvid_et_vlan_set(spvid_pl, sver_type); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); +} + +static int mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_nve_config *config) { char tngcr_pl[MLXSW_REG_TNGCR_LEN]; + char spvtr_pl[MLXSW_REG_SPVTR_LEN]; u16 ul_rif_index; int err; @@ -329,8 +361,25 @@ mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp, if (err) goto err_tngcr_write; + mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE, + MLXSW_REG_SPVTR_IPVID_MODE_ALWAYS_PUSH_VLAN); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl); + if (err) + goto err_spvtr_write; + + err = mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp, config->ethertype); + if (err) + goto err_decap_ethertype_set; + return 0; +err_decap_ethertype_set: + mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE, + MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl); +err_spvtr_write: + mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl); err_tngcr_write: mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false); err_vxlan_learning_set: @@ -340,8 +389,14 @@ err_vxlan_learning_set: static void mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp) { + char spvtr_pl[MLXSW_REG_SPVTR_LEN]; char tngcr_pl[MLXSW_REG_TNGCR_LEN]; + /* Set default EtherType */ + mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp, ETH_P_8021Q); + mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE, + MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl); mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl); mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false); diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 9c4e17607e6a..cea42f6ed89b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -2053,9 +2053,10 @@ mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device, } static int -mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, - const struct net_device *vxlan_dev, u16 vid, - struct netlink_ext_ack *extack) +mlxsw_sp_bridge_vlan_aware_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, + const struct net_device *vxlan_dev, + u16 vid, u16 ethertype, + struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); struct vxlan_dev *vxlan = netdev_priv(vxlan_dev); @@ -2063,6 +2064,7 @@ mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, .type = MLXSW_SP_NVE_TYPE_VXLAN, .vni = vxlan->cfg.vni, .dev = vxlan_dev, + .ethertype = ethertype, }; struct mlxsw_sp_fid *fid; int err; @@ -2101,6 +2103,15 @@ err_vni_exists: return err; } +static int +mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, + const struct net_device *vxlan_dev, u16 vid, + struct netlink_ext_ack *extack) +{ + return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev, + vid, ETH_P_8021Q, extack); +} + static struct net_device * mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid) { @@ -2231,6 +2242,7 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, .type = MLXSW_SP_NVE_TYPE_VXLAN, .vni = vxlan->cfg.vni, .dev = vxlan_dev, + .ethertype = ETH_P_8021Q, }; struct mlxsw_sp_fid *fid; int err; @@ -2335,8 +2347,8 @@ mlxsw_sp_bridge_8021ad_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, const struct net_device *vxlan_dev, u16 vid, struct netlink_ext_ack *extack) { - NL_SET_ERR_MSG_MOD(extack, "VXLAN is not supported with 802.1ad"); - return -EOPNOTSUPP; + return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev, + vid, ETH_P_8021AD, extack); } static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021ad_ops = { @@ -3308,8 +3320,8 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp, if (!fid) { if (!flag_untagged || !flag_pvid) return 0; - return mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, - vxlan_dev, vid, extack); + return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, + vid, extack); } /* Second case: FID is associated with the VNI and the VLAN associated @@ -3348,16 +3360,14 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp, if (!flag_untagged) return 0; - err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid, - extack); + err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, extack); if (err) goto err_vxlan_join; return 0; err_vxlan_join: - mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, old_vid, - NULL); + bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, old_vid, NULL); return err; } diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c index d65872172229..6fc7483aea03 100644 --- a/drivers/net/ethernet/micrel/ks8851_common.c +++ b/drivers/net/ethernet/micrel/ks8851_common.c @@ -1112,7 +1112,7 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev, /* setup mii state */ ks->mii.dev = netdev; - ks->mii.phy_id = 1, + ks->mii.phy_id = 1; ks->mii.phy_id_mask = 
1; ks->mii.reg_num_mask = 0xf; ks->mii.mdio_read = ks8851_phy_read; diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig index 31f9a82dc113..d0f6dfe0dcf3 100644 --- a/drivers/net/ethernet/microchip/Kconfig +++ b/drivers/net/ethernet/microchip/Kconfig @@ -47,6 +47,7 @@ config LAN743X depends on PCI select PHYLIB select CRC16 + select CRC32 help Support for the Microchip LAN743x PCI Express Gigabit Ethernet chip diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 2632fe2d2448..abea8dd2b0cb 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1551,10 +1551,11 @@ int ocelot_init(struct ocelot *ocelot) SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING); /* Setup flooding PGIDs */ - ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) | - ANA_FLOODING_FLD_BROADCAST(PGID_MC) | - ANA_FLOODING_FLD_UNICAST(PGID_UC), - ANA_FLOODING, 0); + for (i = 0; i < ocelot->num_flooding_pgids; i++) + ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) | + ANA_FLOODING_FLD_BROADCAST(PGID_MC) | + ANA_FLOODING_FLD_UNICAST(PGID_UC), + ANA_FLOODING, i); ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) | ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) | ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) | diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index dc00772950e5..1e7729421a82 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -1254,6 +1254,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) } ocelot->num_phys_ports = of_get_child_count(ports); + ocelot->num_flooding_pgids = 1; ocelot->vcap = vsc7514_vcap_props; ocelot->inj_prefix = OCELOT_TAG_PREFIX_NONE; diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig index d8b99d6a0356..b82758d5beed 100644 --- a/drivers/net/ethernet/netronome/Kconfig +++ b/drivers/net/ethernet/netronome/Kconfig @@ -22,6 +22,7 @@ config NFP depends on VXLAN || VXLAN=n depends on TLS && TLS_DEVICE || TLS_DEVICE=n select NET_DEVLINK + select CRC32 help This driver supports the Netronome(R) NFP4000/NFP6000 based cards working as a advanced Ethernet NIC. 
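The ks8851 hunk above, like the davinci_mdio, ipa_qmi, thunderbolt and cdc-phonet hunks later in this series, replaces commas that had crept in where statement-terminating semicolons were intended. The original code was still functionally correct, because the comma operator evaluates both operands left to right; a minimal standalone sketch of why it compiled and worked (hypothetical struct, not the driver code):

	/* Hypothetical sketch: the comma operator makes these three lines a
	 * single expression statement, but all three assignments still run
	 * in order, so the bug was latent rather than live. */
	struct mii_if_info_sketch {
		int phy_id;
		int phy_id_mask;
		int reg_num_mask;
	};

	static void mii_setup(struct mii_if_info_sketch *mii)
	{
		mii->phy_id = 1,         /* comma: expression continues... */
		mii->phy_id_mask = 1,    /* ...still continues...          */
		mii->reg_num_mask = 0xf; /* semicolon finally ends it      */
	}

The cleanup removes a trap for later edits: with commas, the three lines form one statement, so lifting a line out or inserting code between them would silently change meaning.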
It works with both diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c index 9b32ae46011c..84d66d138c3d 100644 --- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c +++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c @@ -492,7 +492,7 @@ int nfp_net_tls_rx_resync_req(struct net_device *netdev, goto err_cnt_ign; } - switch (iph->version) { + switch (ipv6h->version) { case 4: sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo, iph->saddr, th->source, iph->daddr, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index b4acf2f41e84..f21fb573ea3e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -3562,9 +3562,6 @@ static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf) struct nfp_net_dp *dp; int err; - if (!xdp_attachment_flags_ok(&nn->xdp, bpf)) - return -EBUSY; - if (!prog == !nn->dp.xdp_prog) { WRITE_ONCE(nn->dp.xdp_prog, prog); xdp_attachment_setup(&nn->xdp, bpf); @@ -3593,9 +3590,6 @@ static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf) { int err; - if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf)) - return -EBUSY; - err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack); if (err) return err; diff --git a/drivers/net/ethernet/nxp/Kconfig b/drivers/net/ethernet/nxp/Kconfig index ee83a71c2509..c84997db828c 100644 --- a/drivers/net/ethernet/nxp/Kconfig +++ b/drivers/net/ethernet/nxp/Kconfig @@ -3,6 +3,7 @@ config LPC_ENET tristate "NXP ethernet MAC on LPC devices" depends on ARCH_LPC32XX || COMPILE_TEST select PHYLIB + select CRC32 help Say Y or M here if you want to use the NXP ethernet MAC included on some NXP LPC devices. 
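The nfp crypto/tls.c hunk above switches on ipv6h->version even though the packet may be IPv4. This is safe because both IPv4 and IPv6 place the 4-bit version field in the high nibble of the first header byte, so reading it through either header pointer yields the same value. A small self-contained userspace sketch (assumed demo values, not the driver code):

	#include <stdint.h>
	#include <stdio.h>

	/* The IP version lives in the high nibble of byte 0 for both
	 * protocols, so it can be read before deciding which header
	 * structure actually applies. */
	static unsigned int ip_version(const uint8_t *l3)
	{
		return l3[0] >> 4;	/* 4 for IPv4, 6 for IPv6 */
	}

	int main(void)
	{
		uint8_t v4[20] = { 0x45 };	/* IPv4: version 4, IHL 5 */
		uint8_t v6[40] = { 0x60 };	/* IPv6: version 6        */

		printf("%u %u\n", ip_version(v4), ip_version(v6)); /* "4 6" */
		return 0;
	}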
You can safely enable this option for LPC32xx diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index 35c72d4a78b3..0832bedcb3b4 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -738,16 +738,11 @@ static int ionic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) { struct ionic_lif *lif = netdev_priv(netdev); - int err; if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - err = ionic_lif_rss_config(lif, lif->rss_types, key, indir); - if (err) - return err; - - return 0; + return ionic_lif_rss_config(lif, lif->rss_types, key, indir); } static int ionic_set_tunable(struct net_device *dev, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 30e52f969759..dd03be3fc82a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -2112,7 +2112,6 @@ static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev) { struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; - int retval; netif_device_detach(netdev); qlcnic_cancel_idc_work(adapter); @@ -2125,11 +2124,7 @@ static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev) qlcnic_83xx_disable_mbx_intr(adapter); cancel_delayed_work_sync(&adapter->idc_aen_work); - retval = pci_save_state(pdev); - if (retval) - return retval; - - return 0; + return pci_save_state(pdev); } static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter) diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index fcdecddb2812..8d51b0cb545c 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -26,7 +26,7 @@ static int rmnet_is_real_dev_registered(const struct net_device *real_dev) } /* Needs rtnl lock */ -static struct rmnet_port* +struct rmnet_port* rmnet_get_port_rtnl(const struct net_device *real_dev) { return rtnl_dereference(real_dev->rx_handler_data); @@ -253,7 +253,10 @@ static int rmnet_config_notify_cb(struct notifier_block *nb, netdev_dbg(real_dev, "Kernel unregister\n"); rmnet_force_unassociate_device(real_dev); break; - + case NETDEV_CHANGEMTU: + if (rmnet_vnd_validate_real_dev_mtu(real_dev)) + return NOTIFY_BAD; + break; default: break; } @@ -329,9 +332,17 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], if (data[IFLA_RMNET_FLAGS]) { struct ifla_rmnet_flags *flags; + u32 old_data_format; + old_data_format = port->data_format; flags = nla_data(data[IFLA_RMNET_FLAGS]); port->data_format = flags->flags & flags->mask; + + if (rmnet_vnd_update_dev_mtu(port, real_dev)) { + port->data_format = old_data_format; + NL_SET_ERR_MSG_MOD(extack, "Invalid MTU on real dev"); + return -EINVAL; + } } return 0; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index be515982d628..8d8d4690a074 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -73,4 +73,6 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, struct netlink_ext_ack *extack); int rmnet_del_bridge(struct net_device *rmnet_dev, struct net_device *slave_dev); +struct rmnet_port* +rmnet_get_port_rtnl(const struct 
net_device *real_dev); #endif /* _RMNET_CONFIG_H_ */ diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index ca1535ebb6e7..41fbd2ceeede 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -59,9 +59,30 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } +static int rmnet_vnd_headroom(struct rmnet_port *port) +{ + u32 headroom; + + headroom = sizeof(struct rmnet_map_header); + + if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) + headroom += sizeof(struct rmnet_map_ul_csum_header); + + return headroom; +} + static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu) { - if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE) + struct rmnet_priv *priv = netdev_priv(rmnet_dev); + struct rmnet_port *port; + u32 headroom; + + port = rmnet_get_port_rtnl(priv->real_dev); + + headroom = rmnet_vnd_headroom(port); + + if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE || + new_mtu > (priv->real_dev->mtu - headroom)) return -EINVAL; rmnet_dev->mtu = new_mtu; @@ -230,6 +251,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, { struct rmnet_priv *priv = netdev_priv(rmnet_dev); + u32 headroom; int rc; if (rmnet_get_endpoint(port, id)) { @@ -243,6 +265,13 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, priv->real_dev = real_dev; + headroom = rmnet_vnd_headroom(port); + + if (rmnet_vnd_change_mtu(rmnet_dev, real_dev->mtu - headroom)) { + NL_SET_ERR_MSG_MOD(extack, "Invalid MTU on real dev"); + return -EINVAL; + } + rc = register_netdevice(rmnet_dev); if (!rc) { ep->egress_dev = rmnet_dev; @@ -284,3 +313,45 @@ int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable) return 0; } + +int rmnet_vnd_validate_real_dev_mtu(struct net_device *real_dev) +{ + struct hlist_node *tmp_ep; + struct rmnet_endpoint *ep; + struct rmnet_port *port; + unsigned long bkt_ep; + u32 headroom; + + port = rmnet_get_port_rtnl(real_dev); + + headroom = rmnet_vnd_headroom(port); + + hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) { + if (ep->egress_dev->mtu > (real_dev->mtu - headroom)) + return -1; + } + + return 0; +} + +int rmnet_vnd_update_dev_mtu(struct rmnet_port *port, + struct net_device *real_dev) +{ + struct hlist_node *tmp_ep; + struct rmnet_endpoint *ep; + unsigned long bkt_ep; + u32 headroom; + + headroom = rmnet_vnd_headroom(port); + + hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) { + if (ep->egress_dev->mtu <= (real_dev->mtu - headroom)) + continue; + + if (rmnet_vnd_change_mtu(ep->egress_dev, + real_dev->mtu - headroom)) + return -1; + } + + return 0; +}
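The rmnet changes above all enforce a single invariant, applied in rmnet_vnd_change_mtu(), rmnet_vnd_newlink(), the IFLA_RMNET_FLAGS changelink path and the NETDEV_CHANGEMTU notifier: every muxed virtual device's MTU must leave room on the real device for the MAP encapsulation. A standalone sketch of the headroom arithmetic (header sizes assumed for illustration; the driver derives them with sizeof() on its MAP header structs):

	#include <stdbool.h>
	#include <stdio.h>

	#define MAP_HDR_LEN	4  /* assumed: sizeof(struct rmnet_map_header) */
	#define UL_CSUM_HDR_LEN	4  /* assumed: sizeof(struct rmnet_map_ul_csum_header) */

	/* Egress headroom grows when RMNET_FLAGS_EGRESS_MAP_CKSUMV4 is set. */
	static int rmnet_headroom(bool egress_csum)
	{
		return MAP_HDR_LEN + (egress_csum ? UL_CSUM_HDR_LEN : 0);
	}

	int main(void)
	{
		int real_mtu = 1500;

		/* Largest MTU any muxed device may use over this real device. */
		printf("max vnd mtu: %d\n", real_mtu - rmnet_headroom(true));
		return 0;
	}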
\ No newline at end of file diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h index 4967f3461ed1..dc3a4443ef0a 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h @@ -18,4 +18,7 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port, void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev); void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev); void rmnet_vnd_setup(struct net_device *dev); +int rmnet_vnd_validate_real_dev_mtu(struct net_device *real_dev); +int rmnet_vnd_update_dev_mtu(struct rmnet_port *port, + struct net_device *real_dev); #endif /* _RMNET_VND_H_ */ diff --git a/drivers/net/ethernet/rocker/Kconfig b/drivers/net/ethernet/rocker/Kconfig index 99e1290e0307..2318811ff75a 100644 --- a/drivers/net/ethernet/rocker/Kconfig +++ b/drivers/net/ethernet/rocker/Kconfig @@ -19,6 +19,7 @@ if NET_VENDOR_ROCKER config ROCKER tristate "Rocker switch driver (EXPERIMENTAL)" depends on PCI && NET_SWITCHDEV && BRIDGE + select CRC32 help This driver supports Rocker switch device. diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c index efef5476a577..223f69da7e95 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c @@ -246,13 +246,7 @@ static int imx_dwmac_probe(struct platform_device *pdev) goto err_parse_dt; } - ret = dma_set_mask_and_coherent(&pdev->dev, - DMA_BIT_MASK(dwmac->ops->addr_width)); - if (ret) { - dev_err(&pdev->dev, "DMA mask set failed\n"); - goto err_dma_mask; - } - + plat_dat->addr64 = dwmac->ops->addr_width; plat_dat->init = imx_dwmac_init; plat_dat->exit = imx_dwmac_exit; plat_dat->fix_mac_speed = imx_dwmac_fix_speed; @@ -272,7 +266,6 @@ static int imx_dwmac_probe(struct platform_device *pdev) err_dwmac_init: err_drv_probe: imx_dwmac_exit(pdev, plat_dat->bsp_priv); -err_dma_mask: err_parse_dt: err_match_data: stmmac_remove_config_dt(pdev, plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index dc0b8b6d180d..459ae715b33d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -30,7 +30,6 @@ #define PRG_ETH0_EXT_RMII_MODE 4 /* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */ -#define PRG_ETH0_CLK_M250_SEL_SHIFT 4 #define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4) /* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where 8ns are exactly one @@ -155,8 +154,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) return -ENOMEM; clk_configs->m250_mux.reg = dwmac->regs + PRG_ETH0; - clk_configs->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT; - clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK; + clk_configs->m250_mux.shift = __ffs(PRG_ETH0_CLK_M250_SEL_MASK); + clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK >> + clk_configs->m250_mux.shift; clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parents, ARRAY_SIZE(mux_parents), &clk_mux_ops, &clk_configs->m250_mux.hw); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c index 6e30d7eb4983..0b4ee2dbb691 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -22,7 +22,7 @@ int dwmac4_dma_reset(void __iomem *ioaddr) return readl_poll_timeout(ioaddr + 
DMA_BUS_MODE, value, !(value & DMA_BUS_MODE_SFT_RESET), - 10000, 100000); + 10000, 1000000); } void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c index 67ba67ed0cb9..03e79a677c8b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c @@ -305,17 +305,13 @@ int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, static int dwmac5_rxp_disable(void __iomem *ioaddr) { u32 val; - int ret; val = readl(ioaddr + MTL_OPERATION_MODE); val &= ~MTL_FRPE; writel(val, ioaddr + MTL_OPERATION_MODE); - ret = readl_poll_timeout(ioaddr + MTL_RXP_CONTROL_STATUS, val, + return readl_poll_timeout(ioaddr + MTL_RXP_CONTROL_STATUS, val, val & RXPI, 1, 10000); - if (ret) - return ret; - return 0; } static void dwmac5_rxp_enable(void __iomem *ioaddr) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8c1ac75901ce..5b1c12ff98c0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1558,6 +1558,19 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) } /** + * stmmac_free_tx_skbufs - free TX skb buffers + * @priv: private structure + */ +static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) +{ + u32 tx_queue_cnt = priv->plat->tx_queues_to_use; + u32 queue; + + for (queue = 0; queue < tx_queue_cnt; queue++) + dma_free_tx_skbufs(priv, queue); +} + +/** * free_dma_rx_desc_resources - free RX dma desc resources * @priv: private structure */ @@ -2925,9 +2938,6 @@ static int stmmac_release(struct net_device *dev) struct stmmac_priv *priv = netdev_priv(dev); u32 chan; - if (priv->eee_enabled) - del_timer_sync(&priv->eee_ctrl_timer); - if (device_may_wakeup(priv->device)) phylink_speed_down(priv->phylink, false); /* Stop and disconnect the PHY */ @@ -2946,6 +2956,11 @@ static int stmmac_release(struct net_device *dev) if (priv->lpi_irq > 0) free_irq(priv->lpi_irq, dev); + if (priv->eee_enabled) { + priv->tx_path_in_lpi_mode = false; + del_timer_sync(&priv->eee_ctrl_timer); + } + /* Stop TX/RX DMA and clear the descriptors */ stmmac_stop_all_dma(priv); @@ -4960,6 +4975,14 @@ int stmmac_dvr_probe(struct device *device, dev_info(priv->device, "SPH feature enabled\n"); } + /* The current IP register MAC_HW_Feature1[ADDR64] only define + * 32/40/64 bit width, but some SOC support others like i.MX8MP + * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64]. + * So overwrite dma_cap.addr64 according to HW real design. 
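+ * For example, in the i.MX8MP case named above the glue driver sets
+ * plat->addr64 = 34, so the dma_set_mask_and_coherent() call below
+ * runs with DMA_BIT_MASK(34) rather than the 40 bits reported by
+ * MAC_HW_Feature1[ADDR64].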
+ */ + if (priv->plat->addr64) + priv->dma_cap.addr64 = priv->plat->addr64; + if (priv->dma_cap.addr64) { ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(priv->dma_cap.addr64)); @@ -5172,6 +5195,11 @@ int stmmac_suspend(struct device *dev) for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) hrtimer_cancel(&priv->tx_queue[chan].txtimer); + if (priv->eee_enabled) { + priv->tx_path_in_lpi_mode = false; + del_timer_sync(&priv->eee_ctrl_timer); + } + /* Stop TX/RX DMA */ stmmac_stop_all_dma(priv); @@ -5277,11 +5305,20 @@ int stmmac_resume(struct device *dev) return ret; } + if (!device_may_wakeup(priv->device) || !priv->plat->pmt) { + rtnl_lock(); + phylink_start(priv->phylink); + /* We may have called phylink_speed_down before */ + phylink_speed_up(priv->phylink); + rtnl_unlock(); + } + rtnl_lock(); mutex_lock(&priv->lock); stmmac_reset_queues_param(priv); + stmmac_free_tx_skbufs(priv); stmmac_clear_descriptors(priv); stmmac_hw_setup(ndev, false); @@ -5295,14 +5332,6 @@ int stmmac_resume(struct device *dev) mutex_unlock(&priv->lock); rtnl_unlock(); - if (!device_may_wakeup(priv->device) || !priv->plat->pmt) { - rtnl_lock(); - phylink_start(priv->phylink); - /* We may have called phylink_speed_down before */ - phylink_speed_up(priv->phylink); - rtnl_unlock(); - } - phylink_mac_change(priv->phylink, true); netif_device_attach(ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index b2a707e2ef43..d64116e0543e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -365,6 +365,9 @@ int stmmac_mdio_register(struct net_device *ndev) new_bus->name = "stmmac"; + if (priv->plat->has_gmac4) + new_bus->probe_capabilities = MDIOBUS_C22_C45; + if (priv->plat->has_xgmac) { new_bus->read = &stmmac_xgmac2_mdio_read; new_bus->write = &stmmac_xgmac2_mdio_write; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index cc27d660a818..f5bed4d26e80 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -209,17 +209,11 @@ err_unfill: static int tc_delete_knode(struct stmmac_priv *priv, struct tc_cls_u32_offload *cls) { - int ret; - /* Set entry and fragments as not used */ tc_unfill_entry(priv, cls); - ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries, - priv->tc_entries_max); - if (ret) - return ret; - - return 0; + return stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries, + priv->tc_entries_max); } static int tc_setup_cls_u32(struct stmmac_priv *priv, diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index 6dd73bd0f458..99f44563e10f 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -1265,9 +1265,6 @@ static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf) if (!priv->xdpi.prog && !prog) return 0; - if (!xdp_attachment_flags_ok(&priv->xdpi, bpf)) - return -EBUSY; - WRITE_ONCE(priv->xdp_prog, prog); xdp_attachment_setup(&priv->xdpi, bpf); diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 702fdc393da0..cfff3d48807a 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -381,9 +381,9 @@ static int davinci_mdio_probe(struct platform_device *pdev) } data->bus->name = dev_name(dev); - data->bus->read = davinci_mdio_read, - data->bus->write = 
davinci_mdio_write, - data->bus->reset = davinci_mdio_reset, + data->bus->read = davinci_mdio_read; + data->bus->write = davinci_mdio_write; + data->bus->reset = davinci_mdio_reset; data->bus->parent = dev; data->bus->priv = data; diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 60c199fcb91e..030185301014 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1351,7 +1351,6 @@ static int temac_probe(struct platform_device *pdev) struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np; struct temac_local *lp; struct net_device *ndev; - struct resource *res; const void *addr; __be32 *p; bool little_endian; @@ -1500,13 +1499,11 @@ static int temac_probe(struct platform_device *pdev) of_node_put(dma_np); } else if (pdata) { /* 2nd memory resource specifies DMA registers */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - lp->sdma_regs = devm_ioremap(&pdev->dev, res->start, - resource_size(res)); - if (!lp->sdma_regs) { + lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(lp->sdma_regs)) { dev_err(&pdev->dev, "could not map DMA registers\n"); - return -ENOMEM; + return PTR_ERR(lp->sdma_regs); } if (pdata->dma_little_endian) { lp->dma_in = temac_dma_in32_le; diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 627c33333866..5523f069b9a5 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -258,21 +258,11 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, skb_dst_set(skb, &tun_dst->dst); /* Ignore packet loops (and multicast echo) */ - if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) - goto rx_error; - - switch (skb_protocol(skb, true)) { - case htons(ETH_P_IP): - if (pskb_may_pull(skb, sizeof(struct iphdr))) - goto rx_error; - break; - case htons(ETH_P_IPV6): - if (pskb_may_pull(skb, sizeof(struct ipv6hdr))) - goto rx_error; - break; - default: - goto rx_error; + if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) { + geneve->dev->stats.rx_errors++; + goto drop; } + oiph = skb_network_header(skb); skb_reset_network_header(skb); @@ -309,8 +299,6 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, dev_sw_netstats_rx_add(geneve->dev, len); return; -rx_error: - geneve->dev->stats.rx_errors++; drop: /* Consume bad packet */ kfree_skb(skb); diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c index e8599bb948c0..6c3ed5b17b80 100644 --- a/drivers/net/ipa/gsi_trans.c +++ b/drivers/net/ipa/gsi_trans.c @@ -156,6 +156,9 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool, /* The allocator will give us a power-of-2 number of pages. But we * can't guarantee that, so request it. That way we won't waste any * memory that would be available beyond the required space. + * + * Note that gsi_trans_pool_exit_dma() assumes the total allocated + * size is exactly (count * size). 
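+ * (dma_free_coherent() must be passed the same size that was used
+ * for the allocation, so the exit path below reconstructs it as
+ * pool->count * pool->size instead of freeing pool->size alone.)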
*/ total_size = get_order(total_size) << PAGE_SHIFT; @@ -175,7 +178,9 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool, void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool) { - dma_free_coherent(dev, pool->size, pool->base, pool->addr); + size_t total_size = pool->count * pool->size; + + dma_free_coherent(dev, total_size, pool->base, pool->addr); memset(pool, 0, sizeof(*pool)); } diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c index d2c3f273c233..2fc64483f275 100644 --- a/drivers/net/ipa/ipa_qmi.c +++ b/drivers/net/ipa/ipa_qmi.c @@ -413,7 +413,7 @@ static void ipa_client_init_driver_work(struct work_struct *work) int ret; ipa_qmi = container_of(work, struct ipa_qmi, init_driver_work); - qmi = &ipa_qmi->client_handle, + qmi = &ipa_qmi->client_handle; ipa = container_of(ipa_qmi, struct ipa, qmi); dev = &ipa->pdev->dev; diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index 2e90512f3bbe..90aafb56f140 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -63,15 +63,20 @@ static int nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn) { struct nsim_bpf_bound_prog *state; + int ret = 0; state = env->prog->aux->offload->dev_priv; if (state->nsim_dev->bpf_bind_verifier_delay && !insn_idx) msleep(state->nsim_dev->bpf_bind_verifier_delay); - if (insn_idx == env->prog->len - 1) + if (insn_idx == env->prog->len - 1) { pr_vlog(env, "Hello from netdevsim!\n"); - return 0; + if (!state->nsim_dev->bpf_bind_verifier_accept) + ret = -EOPNOTSUPP; + } + + return ret; } static int nsim_bpf_finalize(struct bpf_verifier_env *env) @@ -190,9 +195,6 @@ nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf, { int err; - if (!xdp_attachment_flags_ok(xdp, bpf)) - return -EBUSY; - if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) { NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS"); return -EOPNOTSUPP; @@ -598,6 +600,9 @@ int nsim_bpf_dev_init(struct nsim_dev *nsim_dev) &nsim_dev->bpf_bind_accept); debugfs_create_u32("bpf_bind_verifier_delay", 0600, nsim_dev->ddir, &nsim_dev->bpf_bind_verifier_delay); + nsim_dev->bpf_bind_verifier_accept = true; + debugfs_create_bool("bpf_bind_verifier_accept", 0600, nsim_dev->ddir, + &nsim_dev->bpf_bind_verifier_accept); return 0; } diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 19b1e6ef5573..48163c5f2ec9 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -197,6 +197,7 @@ struct nsim_dev { struct dentry *take_snapshot; struct bpf_offload_dev *bpf_dev; bool bpf_bind_accept; + bool bpf_bind_verifier_accept; u32 bpf_bind_verifier_delay; struct dentry *ddir_bpf_bound_progs; u32 prog_id_gen; diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 58014feedf6c..20b91f5dfc6e 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -334,14 +334,13 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, } /* If we haven't discovered any modes that this module supports, try - * the encoding and bitrate to determine supported modes. Some BiDi - * modules (eg, 1310nm/1550nm) are not 1000BASE-BX compliant due to - * the differing wavelengths, so do not set any transceiver bits. + * the bitrate to determine supported modes. Some BiDi modules (eg, + * 1310nm/1550nm) are not 1000BASE-BX compliant due to the differing + * wavelengths, so do not set any transceiver bits. 
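+ * (Modules such as the rebranded V2801F handled by the quirks in
+ * sfp.c link at 1000BASE-X rates without reporting 8B10B in the
+ * encoding byte, hence only the bitrate window is checked here.)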
*/ if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS)) { - /* If the encoding and bit rate allows 1000baseX */ - if (id->base.encoding == SFF8024_ENCODING_8B10B && br_nom && - br_min <= 1300 && br_max >= 1200) + /* If the bit rate allows 1000baseX */ + if (br_nom && br_min <= 1300 && br_max >= 1200) phylink_set(modes, 1000baseX_Full); } diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 34aa196b7465..91d74c1a920a 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -219,6 +219,7 @@ struct sfp { struct sfp_bus *sfp_bus; struct phy_device *mod_phy; const struct sff_data *type; + size_t i2c_block_size; u32 max_power_mW; unsigned int (*get_state)(struct sfp *); @@ -335,10 +336,19 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf, size_t len) { struct i2c_msg msgs[2]; - u8 bus_addr = a2 ? 0x51 : 0x50; + size_t block_size; size_t this_len; + u8 bus_addr; int ret; + if (a2) { + block_size = 16; + bus_addr = 0x51; + } else { + block_size = sfp->i2c_block_size; + bus_addr = 0x50; + } + msgs[0].addr = bus_addr; msgs[0].flags = 0; msgs[0].len = 1; @@ -350,8 +360,8 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf, while (len) { this_len = len; - if (this_len > 16) - this_len = 16; + if (this_len > block_size) + this_len = block_size; msgs[1].len = this_len; @@ -1632,6 +1642,28 @@ static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable) return 0; } +/* Some modules (Nokia 3FE46541AA) lock up if byte 0x51 is read as a + * single read. Switch back to reading 16 byte blocks unless we have + * a CarlitoxxPro module (rebranded VSOL V2801F). Even more annoyingly, + * some VSOL V2801F have the vendor name changed to OEM. + */ +static int sfp_quirk_i2c_block_size(const struct sfp_eeprom_base *base) +{ + if (!memcmp(base->vendor_name, "VSOL ", 16)) + return 1; + if (!memcmp(base->vendor_name, "OEM ", 16) && + !memcmp(base->vendor_pn, "V2801F ", 16)) + return 1; + + /* Some modules can't cope with long reads */ + return 16; +} + +static void sfp_quirks_base(struct sfp *sfp, const struct sfp_eeprom_base *base) +{ + sfp->i2c_block_size = sfp_quirk_i2c_block_size(base); +} + static int sfp_cotsworks_fixup_check(struct sfp *sfp, struct sfp_eeprom_id *id) { u8 check; @@ -1673,14 +1705,20 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report) u8 check; int ret; - ret = sfp_read(sfp, false, 0, &id, sizeof(id)); + /* Some modules (CarlitoxxPro CPGOS03-0490) do not support multibyte + * reads from the EEPROM, so start by reading the base identifying + * information one byte at a time. 
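+ * Once the base ID has been read and checked, sfp_quirks_base()
+ * below may raise the block size again before the extended ID is
+ * read.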
+ */ + sfp->i2c_block_size = 1; + + ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base)); if (ret < 0) { if (report) dev_err(sfp->dev, "failed to read EEPROM: %d\n", ret); return -EAGAIN; } - if (ret != sizeof(id)) { + if (ret != sizeof(id.base)) { dev_err(sfp->dev, "EEPROM short read: %d\n", ret); return -EAGAIN; } @@ -1719,6 +1757,21 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report) } } + /* Apply any early module-specific quirks */ + sfp_quirks_base(sfp, &id.base); + + ret = sfp_read(sfp, false, SFP_CC_BASE + 1, &id.ext, sizeof(id.ext)); + if (ret < 0) { + if (report) + dev_err(sfp->dev, "failed to read EEPROM: %d\n", ret); + return -EAGAIN; + } + + if (ret != sizeof(id.ext)) { + dev_err(sfp->dev, "EEPROM short read: %d\n", ret); + return -EAGAIN; + } + check = sfp_check(&id.ext, sizeof(id.ext) - 1); if (check != id.ext.cc_ext) { if (cotsworks) { diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 7d005896a0f9..09c27f7773f9 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -174,7 +174,8 @@ struct channel { struct ppp *ppp; /* ppp unit we're connected to */ struct net *chan_net; /* the net channel belongs to */ struct list_head clist; /* link in list of channels per unit */ - rwlock_t upl; /* protects `ppp' */ + rwlock_t upl; /* protects `ppp' and 'bridge' */ + struct channel __rcu *bridge; /* "bridged" ppp channel */ #ifdef CONFIG_PPP_MULTILINK u8 avail; /* flag used in multilink stuff */ u8 had_frag; /* >= 1 fragments have been sent */ @@ -606,6 +607,83 @@ static struct bpf_prog *compat_ppp_get_filter(struct sock_fprog32 __user *p) #endif #endif +/* Bridge one PPP channel to another. + * When two channels are bridged, ppp_input on one channel is redirected to + * the other's ops->start_xmit handler. + * In order to safely bridge channels we must reject channels which are already + * part of a bridge instance, or which form part of an existing unit. + * Once successfully bridged, each channel holds a reference on the other + * to prevent it being freed while the bridge is extant. + */ +static int ppp_bridge_channels(struct channel *pch, struct channel *pchb) +{ + write_lock_bh(&pch->upl); + if (pch->ppp || + rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl))) { + write_unlock_bh(&pch->upl); + return -EALREADY; + } + rcu_assign_pointer(pch->bridge, pchb); + write_unlock_bh(&pch->upl); + + write_lock_bh(&pchb->upl); + if (pchb->ppp || + rcu_dereference_protected(pchb->bridge, lockdep_is_held(&pchb->upl))) { + write_unlock_bh(&pchb->upl); + goto err_unset; + } + rcu_assign_pointer(pchb->bridge, pch); + write_unlock_bh(&pchb->upl); + + refcount_inc(&pch->file.refcnt); + refcount_inc(&pchb->file.refcnt); + + return 0; + +err_unset: + write_lock_bh(&pch->upl); + RCU_INIT_POINTER(pch->bridge, NULL); + write_unlock_bh(&pch->upl); + synchronize_rcu(); + return -EALREADY; +} + +static int ppp_unbridge_channels(struct channel *pch) +{ + struct channel *pchb, *pchbb; + + write_lock_bh(&pch->upl); + pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl)); + if (!pchb) { + write_unlock_bh(&pch->upl); + return -EINVAL; + } + RCU_INIT_POINTER(pch->bridge, NULL); + write_unlock_bh(&pch->upl); + + /* Only modify pchb if phcb->bridge points back to pch. + * If not, it implies that there has been a race unbridging (and possibly + * even rebridging) pchb. We should leave pchb alone to avoid either a + * refcount underflow, or breaking another established bridge instance. 
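+ * Example: thread A unbridges pch while thread B concurrently
+ * unbridges pchb and rebridges it to a third channel; by the time A
+ * acquires pchb->upl, pchb->bridge no longer points at pch and must
+ * be left untouched.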
+ */ + write_lock_bh(&pchb->upl); + pchbb = rcu_dereference_protected(pchb->bridge, lockdep_is_held(&pchb->upl)); + if (pchbb == pch) + RCU_INIT_POINTER(pchb->bridge, NULL); + write_unlock_bh(&pchb->upl); + + synchronize_rcu(); + + if (pchbb == pch) + if (refcount_dec_and_test(&pch->file.refcnt)) + ppp_destroy_channel(pch); + + if (refcount_dec_and_test(&pchb->file.refcnt)) + ppp_destroy_channel(pchb); + + return 0; +} + static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct ppp_file *pf; @@ -641,8 +719,9 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } if (pf->kind == CHANNEL) { - struct channel *pch; + struct channel *pch, *pchb; struct ppp_channel *chan; + struct ppp_net *pn; pch = PF_TO_CHANNEL(pf); @@ -657,6 +736,31 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) err = ppp_disconnect_channel(pch); break; + case PPPIOCBRIDGECHAN: + if (get_user(unit, p)) + break; + err = -ENXIO; + pn = ppp_pernet(current->nsproxy->net_ns); + spin_lock_bh(&pn->all_channels_lock); + pchb = ppp_find_channel(pn, unit); + /* Hold a reference to prevent pchb being freed while + * we establish the bridge. + */ + if (pchb) + refcount_inc(&pchb->file.refcnt); + spin_unlock_bh(&pn->all_channels_lock); + if (!pchb) + break; + err = ppp_bridge_channels(pch, pchb); + /* Drop earlier refcount now bridge establishment is complete */ + if (refcount_dec_and_test(&pchb->file.refcnt)) + ppp_destroy_channel(pchb); + break; + + case PPPIOCUNBRIDGECHAN: + err = ppp_unbridge_channels(pch); + break; + default: down_read(&pch->chan_sem); chan = pch->chan; @@ -2089,6 +2193,40 @@ static bool ppp_decompress_proto(struct sk_buff *skb) return pskb_may_pull(skb, 2); } +/* Attempt to handle a frame via. a bridged channel, if one exists. + * If the channel is bridged, the frame is consumed by the bridge. + * If not, the caller must handle the frame by normal recv mechanisms. + * Returns true if the frame is consumed, false otherwise. + */ +static bool ppp_channel_bridge_input(struct channel *pch, struct sk_buff *skb) +{ + struct channel *pchb; + + rcu_read_lock(); + pchb = rcu_dereference(pch->bridge); + if (!pchb) + goto out_rcu; + + spin_lock(&pchb->downl); + if (!pchb->chan) { + /* channel got unregistered */ + kfree_skb(skb); + goto outl; + } + + skb_scrub_packet(skb, !net_eq(pch->chan_net, pchb->chan_net)); + if (!pchb->chan->ops->start_xmit(pchb->chan, skb)) + kfree_skb(skb); + +outl: + spin_unlock(&pchb->downl); +out_rcu: + rcu_read_unlock(); + + /* If pchb is set then we've consumed the packet */ + return !!pchb; +} + void ppp_input(struct ppp_channel *chan, struct sk_buff *skb) { @@ -2100,6 +2238,10 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb) return; } + /* If the channel is bridged, transmit via. 
bridge */ + if (ppp_channel_bridge_input(pch, skb)) + return; + read_lock_bh(&pch->upl); if (!ppp_decompress_proto(skb)) { kfree_skb(skb); @@ -2796,8 +2938,11 @@ ppp_unregister_channel(struct ppp_channel *chan) list_del(&pch->list); spin_unlock_bh(&pn->all_channels_lock); + ppp_unbridge_channels(pch); + pch->file.dead = 1; wake_up_interruptible(&pch->file.rwait); + if (refcount_dec_and_test(&pch->file.refcnt)) ppp_destroy_channel(pch); } @@ -3270,7 +3415,8 @@ ppp_connect_channel(struct channel *pch, int unit) goto out; write_lock_bh(&pch->upl); ret = -EINVAL; - if (pch->ppp) + if (pch->ppp || + rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl))) goto outl; ppp_lock(ppp); diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index 3160443ef3b9..ae83d66195a5 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -1241,7 +1241,7 @@ static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id) dev->max_mtu = TBNET_MAX_MTU - ETH_HLEN; net->handler.uuid = &tbnet_svc_uuid; - net->handler.callback = tbnet_handle_packet, + net->handler.callback = tbnet_handle_packet; net->handler.data = net; tb_register_protocol_handler(&net->handler); diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index dba847f28096..02e6bbb17b15 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -275,7 +275,7 @@ static const struct net_device_ops usbpn_ops = { static void usbpn_setup(struct net_device *dev) { dev->features = 0; - dev->netdev_ops = &usbpn_ops, + dev->netdev_ops = &usbpn_ops; dev->header_ops = &phonet_header_ops; dev->type = ARPHRD_PHONET; dev->flags = IFF_POINTOPOINT | IFF_NOARP; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 259d5cbacf2c..6d9130859c55 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1237,6 +1237,61 @@ static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook, return skb; } +static int vrf_prepare_mac_header(struct sk_buff *skb, + struct net_device *vrf_dev, u16 proto) +{ + struct ethhdr *eth; + int err; + + /* in general, we do not know if there is enough space in the head of + * the packet for hosting the mac header. + */ + err = skb_cow_head(skb, LL_RESERVED_SPACE(vrf_dev)); + if (unlikely(err)) + /* no space in the skb head */ + return -ENOBUFS; + + __skb_push(skb, ETH_HLEN); + eth = (struct ethhdr *)skb->data; + + skb_reset_mac_header(skb); + + /* we set the ethernet destination and the source addresses to the + * address of the VRF device. + */ + ether_addr_copy(eth->h_dest, vrf_dev->dev_addr); + ether_addr_copy(eth->h_source, vrf_dev->dev_addr); + eth->h_proto = htons(proto); + + /* the destination address of the Ethernet frame corresponds to the + * address set on the VRF interface; therefore, the packet is intended + * to be processed locally. + */ + skb->protocol = eth->h_proto; + skb->pkt_type = PACKET_HOST; + + skb_postpush_rcsum(skb, skb->data, ETH_HLEN); + + skb_pull_inline(skb, ETH_HLEN); + + return 0; +} + +/* prepare and add the mac header to the packet if it was not set previously. + * In this way, packet sniffers such as tcpdump can parse the packet correctly. + * If the mac header was already set, the original mac header is left + * untouched and the function returns immediately. 
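+ * Returns 0 on success (including when the mac header was already
+ * set) and -ENOBUFS when there was no room in the skb head for the
+ * header.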
+ */ +static int vrf_add_mac_header_if_unset(struct sk_buff *skb, + struct net_device *vrf_dev, + u16 proto) +{ + if (skb_mac_header_was_set(skb)) + return 0; + + return vrf_prepare_mac_header(skb, vrf_dev, proto); +} + #if IS_ENABLED(CONFIG_IPV6) /* neighbor handling is done with actual device; do not want * to flip skb->dev for those ndisc packets. This really fails @@ -1310,72 +1365,23 @@ static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev, skb_dst_set(skb, &rt6->dst); } -static int vrf_prepare_mac_header(struct sk_buff *skb, - struct net_device *vrf_dev, u16 proto) -{ - struct ethhdr *eth; - int err; - - /* in general, we do not know if there is enough space in the head of - * the packet for hosting the mac header. - */ - err = skb_cow_head(skb, LL_RESERVED_SPACE(vrf_dev)); - if (unlikely(err)) - /* no space in the skb head */ - return -ENOBUFS; - - __skb_push(skb, ETH_HLEN); - eth = (struct ethhdr *)skb->data; - - skb_reset_mac_header(skb); - - /* we set the ethernet destination and the source addresses to the - * address of the VRF device. - */ - ether_addr_copy(eth->h_dest, vrf_dev->dev_addr); - ether_addr_copy(eth->h_source, vrf_dev->dev_addr); - eth->h_proto = htons(proto); - - /* the destination address of the Ethernet frame corresponds to the - * address set on the VRF interface; therefore, the packet is intended - * to be processed locally. - */ - skb->protocol = eth->h_proto; - skb->pkt_type = PACKET_HOST; - - skb_postpush_rcsum(skb, skb->data, ETH_HLEN); - - skb_pull_inline(skb, ETH_HLEN); - - return 0; -} - -/* prepare and add the mac header to the packet if it was not set previously. - * In this way, packet sniffers such as tcpdump can parse the packet correctly. - * If the mac header was already set, the original mac header is left - * untouched and the function returns immediately. - */ -static int vrf_add_mac_header_if_unset(struct sk_buff *skb, - struct net_device *vrf_dev, - u16 proto) -{ - if (skb_mac_header_was_set(skb)) - return 0; - - return vrf_prepare_mac_header(skb, vrf_dev, proto); -} - static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, struct sk_buff *skb) { int orig_iif = skb->skb_iif; bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr); bool is_ndisc = ipv6_ndisc_frame(skb); + bool is_ll_src; /* loopback, multicast & non-ND link-local traffic; do not push through - * packet taps again. Reset pkt_type for upper layers to process skb + * packet taps again. Reset pkt_type for upper layers to process skb. 
+ * for packets with lladdr src, however, skip so that the dst can be + * determine at input using original ifindex in the case that daddr + * needs strict */ - if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) { + is_ll_src = ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL; + if (skb->pkt_type == PACKET_LOOPBACK || + (need_strict && !is_ndisc && !is_ll_src)) { skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; IP6CB(skb)->flags |= IP6SKB_L3SLAVE; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index e1e44d68ac4b..a8ad710629e6 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2477,7 +2477,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk, &fl6, NULL); - if (unlikely(IS_ERR(ndst))) { + if (IS_ERR(ndst)) { netdev_dbg(dev, "no route to %pI6\n", daddr); return ERR_PTR(-ENETUNREACH); } diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index f52b9fed0593..bb164805804e 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -77,7 +77,6 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb) } skb_push(skb, 1); - skb_reset_network_header(skb); ptr = skb->data; *ptr = X25_IFACE_DATA; @@ -118,7 +117,6 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev) switch (skb->data[0]) { case X25_IFACE_DATA: /* Data to be transmitted */ skb_pull(skb, 1); - skb_reset_network_header(skb); if ((result = lapb_data_request(dev, skb)) != LAPB_OK) dev_kfree_skb(skb); return NETDEV_TX_OK; diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index b6be2454b8bd..605fe555e157 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -55,6 +55,9 @@ struct lapbethdev { static LIST_HEAD(lapbeth_devices); +static void lapbeth_connected(struct net_device *dev, int reason); +static void lapbeth_disconnected(struct net_device *dev, int reason); + /* ------------------------------------------------------------------------ */ /* @@ -167,11 +170,17 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb, case X25_IFACE_DATA: break; case X25_IFACE_CONNECT: - if ((err = lapb_connect_request(dev)) != LAPB_OK) + err = lapb_connect_request(dev); + if (err == LAPB_CONNECTED) + lapbeth_connected(dev, LAPB_OK); + else if (err != LAPB_OK) pr_err("lapb_connect_request error: %d\n", err); goto drop; case X25_IFACE_DISCONNECT: - if ((err = lapb_disconnect_request(dev)) != LAPB_OK) + err = lapb_disconnect_request(dev); + if (err == LAPB_NOTCONNECTED) + lapbeth_disconnected(dev, LAPB_OK); + else if (err != LAPB_OK) pr_err("lapb_disconnect_request err: %d\n", err); fallthrough; default: diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c index e1bdde105f24..42f1f610ac2c 100644 --- a/drivers/nfc/s3fwrn5/i2c.c +++ b/drivers/nfc/s3fwrn5/i2c.c @@ -179,6 +179,8 @@ static int s3fwrn5_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct s3fwrn5_i2c_phy *phy; + struct irq_data *irq_data; + unsigned long irqflags; int ret; phy = devm_kzalloc(&client->dev, sizeof(*phy), GFP_KERNEL); @@ -212,8 +214,11 @@ static int s3fwrn5_i2c_probe(struct i2c_client *client, if (ret < 0) return ret; + irq_data = irq_get_irq_data(client->irq); + irqflags = irqd_get_trigger_type(irq_data) | IRQF_ONESHOT; + ret = devm_request_threaded_irq(&client->dev, phy->i2c_dev->irq, NULL, - s3fwrn5_i2c_irq_thread_fn, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + s3fwrn5_i2c_irq_thread_fn, irqflags, 
S3FWRN5_I2C_DRIVER_NAME, phy); if (ret) s3fwrn5_remove(phy->common.ndev); diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c index d6b849552a1e..9c65d560d48f 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c @@ -286,14 +286,76 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function, static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr) { /* - * The signal type is GPIO if the signal name has "GPI" as a prefix. - * strncmp (rather than strcmp) is used to implement the prefix - * requirement. + * We need to differentiate between GPIO and non-GPIO signals to + * implement the gpio_request_enable() interface. For better or worse + * the ASPEED pinctrl driver uses the expression names to determine + * whether an expression will mux a pin for GPIO. * - * expr->signal might look like "GPIOB1" in the GPIO case. - * expr->signal might look like "GPIT0" in the GPI case. + * Generally we have the following - A GPIO such as B1 has: + * + * - expr->signal set to "GPIOB1" + * - expr->function set to "GPIOB1" + * + * Using this fact we can determine whether the provided expression is + * a GPIO expression by testing the signal name for the string prefix + * "GPIO". + * + * However, some GPIOs are input-only, and the ASPEED datasheets name + * them differently. An input-only GPIO such as T0 has: + * + * - expr->signal set to "GPIT0" + * - expr->function set to "GPIT0" + * + * It's tempting to generalise the prefix test from "GPIO" to "GPI" to + * account for both GPIOs and GPIs, but in doing so we run aground on + * another feature: + * + * Some pins in the ASPEED BMC SoCs have a "pass-through" GPIO + * function where the input state of one pin is replicated as the + * output state of another (as if they were shorted together - a mux + * configuration that is typically enabled by hardware strapping). + * This feature allows the BMC to pass e.g. power button state through + * to the host while the BMC is yet to boot, but take control of the + * button state once the BMC has booted by muxing each pin as a + * separate, pin-specific GPIO. + * + * Conceptually this pass-through mode is a form of GPIO and is named + * as such in the datasheets, e.g. "GPID0". This naming similarity + * trips us up with the simple GPI-prefixed-signal-name scheme + * discussed above, as the pass-through configuration is not what we + * want when muxing a pin as GPIO for the GPIO subsystem. + * + * On e.g. the AST2400, a pass-through function "GPID0" is grouped on + * balls A18 and D16, where we have: + * + * For ball A18: + * - expr->signal set to "GPID0IN" + * - expr->function set to "GPID0" + * + * For ball D16: + * - expr->signal set to "GPID0OUT" + * - expr->function set to "GPID0" + * + * By contrast, the pin-specific GPIO expressions for the same pins are + * as follows: + * + * For ball A18: + * - expr->signal looks like "GPIOD0" + * - expr->function looks like "GPIOD0" + * + * For ball D16: + * - expr->signal looks like "GPIOD1" + * - expr->function looks like "GPIOD1" + * + * Testing both the signal _and_ function names gives us the means + * differentiate the pass-through GPIO pinmux configuration from the + * pin-specific configuration that the GPIO subsystem is after: An + * expression is a pin-specific (non-pass-through) GPIO configuration + * if the signal prefix is "GPI" and the signal name matches the + * function name. 
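+ * In terms of the examples above: "GPIOB1"/"GPIOB1" and
+ * "GPIT0"/"GPIT0" both satisfy the test, while the pass-through
+ * "GPID0IN"/"GPID0" pair fails it because the signal and function
+ * names differ.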
*/ - return strncmp(expr->signal, "GPI", 3) == 0; + return !strncmp(expr->signal, "GPI", 3) && + !strcmp(expr->signal, expr->function); } static bool aspeed_gpio_in_exprs(const struct aspeed_sig_expr **exprs) diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h index f86739e800c3..dba5875ff276 100644 --- a/drivers/pinctrl/aspeed/pinmux-aspeed.h +++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h @@ -452,10 +452,11 @@ struct aspeed_sig_desc { * evaluation of the descriptors. * * @signal: The signal name for the priority level on the pin. If the signal - * type is GPIO, then the signal name must begin with the string - * "GPIO", e.g. GPIOA0, GPIOT4 etc. + * type is GPIO, then the signal name must begin with the + * prefix "GPI", e.g. GPIOA0, GPIT0 etc. * @function: The name of the function the signal participates in for the - * associated expression + * associated expression. For pin-specific GPIO, the function + * name must match the signal name. * @ndescs: The number of signal descriptors in the expression * @descs: Pointer to an array of signal descriptors that comprise the * function expression diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index d49aab3cfbaa..394a421a19d5 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -1049,7 +1049,6 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, break; case PIN_CONFIG_INPUT_DEBOUNCE: debounce = readl(db_reg); - debounce &= ~BYT_DEBOUNCE_PULSE_MASK; if (arg) conf |= BYT_DEBOUNCE_EN; @@ -1058,24 +1057,31 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, switch (arg) { case 375: + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; debounce |= BYT_DEBOUNCE_PULSE_375US; break; case 750: + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; debounce |= BYT_DEBOUNCE_PULSE_750US; break; case 1500: + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; debounce |= BYT_DEBOUNCE_PULSE_1500US; break; case 3000: + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; debounce |= BYT_DEBOUNCE_PULSE_3MS; break; case 6000: + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; debounce |= BYT_DEBOUNCE_PULSE_6MS; break; case 12000: + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; debounce |= BYT_DEBOUNCE_PULSE_12MS; break; case 24000: + debounce &= ~BYT_DEBOUNCE_PULSE_MASK; debounce |= BYT_DEBOUNCE_PULSE_24MS; break; default: diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 1c10ab184783..b6ef1911c1dd 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -442,8 +442,8 @@ static void intel_gpio_set_gpio_mode(void __iomem *padcfg0) value |= PADCFG0_PMODE_GPIO; /* Disable input and output buffers */ - value &= ~PADCFG0_GPIORXDIS; - value &= ~PADCFG0_GPIOTXDIS; + value |= PADCFG0_GPIORXDIS; + value |= PADCFG0_GPIOTXDIS; /* Disable SCI/SMI/NMI generation */ value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI); diff --git a/drivers/pinctrl/intel/pinctrl-jasperlake.c b/drivers/pinctrl/intel/pinctrl-jasperlake.c index 9bd0e8e6310c..ec435b7ab392 100644 --- a/drivers/pinctrl/intel/pinctrl-jasperlake.c +++ b/drivers/pinctrl/intel/pinctrl-jasperlake.c @@ -16,7 +16,7 @@ #define JSL_PAD_OWN 0x020 #define JSL_PADCFGLOCK 0x080 -#define JSL_HOSTSW_OWN 0x0b0 +#define JSL_HOSTSW_OWN 0x0c0 #define JSL_GPI_IS 0x100 #define JSL_GPI_IE 0x120 @@ -65,252 +65,263 @@ static const struct pinctrl_pin_desc jsl_pins[] = { PINCTRL_PIN(17, "EMMC_CLK"), PINCTRL_PIN(18, "EMMC_RESETB"), PINCTRL_PIN(19, "A4WP_PRESENT"), + /* 
SPI */ + PINCTRL_PIN(20, "SPI0_IO_2"), + PINCTRL_PIN(21, "SPI0_IO_3"), + PINCTRL_PIN(22, "SPI0_MOSI_IO_0"), + PINCTRL_PIN(23, "SPI0_MISO_IO_1"), + PINCTRL_PIN(24, "SPI0_TPM_CSB"), + PINCTRL_PIN(25, "SPI0_FLASH_0_CSB"), + PINCTRL_PIN(26, "SPI0_FLASH_1_CSB"), + PINCTRL_PIN(27, "SPI0_CLK"), + PINCTRL_PIN(28, "SPI0_CLK_LOOPBK"), /* GPP_B */ - PINCTRL_PIN(20, "CORE_VID_0"), - PINCTRL_PIN(21, "CORE_VID_1"), - PINCTRL_PIN(22, "VRALERTB"), - PINCTRL_PIN(23, "CPU_GP_2"), - PINCTRL_PIN(24, "CPU_GP_3"), - PINCTRL_PIN(25, "SRCCLKREQB_0"), - PINCTRL_PIN(26, "SRCCLKREQB_1"), - PINCTRL_PIN(27, "SRCCLKREQB_2"), - PINCTRL_PIN(28, "SRCCLKREQB_3"), - PINCTRL_PIN(29, "SRCCLKREQB_4"), - PINCTRL_PIN(30, "SRCCLKREQB_5"), - PINCTRL_PIN(31, "PMCALERTB"), - PINCTRL_PIN(32, "SLP_S0B"), - PINCTRL_PIN(33, "PLTRSTB"), - PINCTRL_PIN(34, "SPKR"), - PINCTRL_PIN(35, "GSPI0_CS0B"), - PINCTRL_PIN(36, "GSPI0_CLK"), - PINCTRL_PIN(37, "GSPI0_MISO"), - PINCTRL_PIN(38, "GSPI0_MOSI"), - PINCTRL_PIN(39, "GSPI1_CS0B"), - PINCTRL_PIN(40, "GSPI1_CLK"), - PINCTRL_PIN(41, "GSPI1_MISO"), - PINCTRL_PIN(42, "GSPI1_MOSI"), - PINCTRL_PIN(43, "DDSP_HPD_A"), - PINCTRL_PIN(44, "GSPI0_CLK_LOOPBK"), - PINCTRL_PIN(45, "GSPI1_CLK_LOOPBK"), + PINCTRL_PIN(29, "CORE_VID_0"), + PINCTRL_PIN(30, "CORE_VID_1"), + PINCTRL_PIN(31, "VRALERTB"), + PINCTRL_PIN(32, "CPU_GP_2"), + PINCTRL_PIN(33, "CPU_GP_3"), + PINCTRL_PIN(34, "SRCCLKREQB_0"), + PINCTRL_PIN(35, "SRCCLKREQB_1"), + PINCTRL_PIN(36, "SRCCLKREQB_2"), + PINCTRL_PIN(37, "SRCCLKREQB_3"), + PINCTRL_PIN(38, "SRCCLKREQB_4"), + PINCTRL_PIN(39, "SRCCLKREQB_5"), + PINCTRL_PIN(40, "PMCALERTB"), + PINCTRL_PIN(41, "SLP_S0B"), + PINCTRL_PIN(42, "PLTRSTB"), + PINCTRL_PIN(43, "SPKR"), + PINCTRL_PIN(44, "GSPI0_CS0B"), + PINCTRL_PIN(45, "GSPI0_CLK"), + PINCTRL_PIN(46, "GSPI0_MISO"), + PINCTRL_PIN(47, "GSPI0_MOSI"), + PINCTRL_PIN(48, "GSPI1_CS0B"), + PINCTRL_PIN(49, "GSPI1_CLK"), + PINCTRL_PIN(50, "GSPI1_MISO"), + PINCTRL_PIN(51, "GSPI1_MOSI"), + PINCTRL_PIN(52, "DDSP_HPD_A"), + PINCTRL_PIN(53, "GSPI0_CLK_LOOPBK"), + PINCTRL_PIN(54, "GSPI1_CLK_LOOPBK"), /* GPP_A */ - PINCTRL_PIN(46, "ESPI_IO_0"), - PINCTRL_PIN(47, "ESPI_IO_1"), - PINCTRL_PIN(48, "ESPI_IO_2"), - PINCTRL_PIN(49, "ESPI_IO_3"), - PINCTRL_PIN(50, "ESPI_CSB"), - PINCTRL_PIN(51, "ESPI_CLK"), - PINCTRL_PIN(52, "ESPI_RESETB"), - PINCTRL_PIN(53, "SMBCLK"), - PINCTRL_PIN(54, "SMBDATA"), - PINCTRL_PIN(55, "SMBALERTB"), - PINCTRL_PIN(56, "CPU_GP_0"), - PINCTRL_PIN(57, "CPU_GP_1"), - PINCTRL_PIN(58, "USB2_OCB_1"), - PINCTRL_PIN(59, "USB2_OCB_2"), - PINCTRL_PIN(60, "USB2_OCB_3"), - PINCTRL_PIN(61, "DDSP_HPD_A_TIME_SYNC_0"), - PINCTRL_PIN(62, "DDSP_HPD_B"), - PINCTRL_PIN(63, "DDSP_HPD_C"), - PINCTRL_PIN(64, "USB2_OCB_0"), - PINCTRL_PIN(65, "PCHHOTB"), - PINCTRL_PIN(66, "ESPI_CLK_LOOPBK"), + PINCTRL_PIN(55, "ESPI_IO_0"), + PINCTRL_PIN(56, "ESPI_IO_1"), + PINCTRL_PIN(57, "ESPI_IO_2"), + PINCTRL_PIN(58, "ESPI_IO_3"), + PINCTRL_PIN(59, "ESPI_CSB"), + PINCTRL_PIN(60, "ESPI_CLK"), + PINCTRL_PIN(61, "ESPI_RESETB"), + PINCTRL_PIN(62, "SMBCLK"), + PINCTRL_PIN(63, "SMBDATA"), + PINCTRL_PIN(64, "SMBALERTB"), + PINCTRL_PIN(65, "CPU_GP_0"), + PINCTRL_PIN(66, "CPU_GP_1"), + PINCTRL_PIN(67, "USB2_OCB_1"), + PINCTRL_PIN(68, "USB2_OCB_2"), + PINCTRL_PIN(69, "USB2_OCB_3"), + PINCTRL_PIN(70, "DDSP_HPD_A_TIME_SYNC_0"), + PINCTRL_PIN(71, "DDSP_HPD_B"), + PINCTRL_PIN(72, "DDSP_HPD_C"), + PINCTRL_PIN(73, "USB2_OCB_0"), + PINCTRL_PIN(74, "PCHHOTB"), + PINCTRL_PIN(75, "ESPI_CLK_LOOPBK"), /* GPP_S */ - PINCTRL_PIN(67, "SNDW1_CLK"), - PINCTRL_PIN(68, "SNDW1_DATA"), - PINCTRL_PIN(69, 
"SNDW2_CLK"), - PINCTRL_PIN(70, "SNDW2_DATA"), - PINCTRL_PIN(71, "SNDW1_CLK"), - PINCTRL_PIN(72, "SNDW1_DATA"), - PINCTRL_PIN(73, "SNDW4_CLK_DMIC_CLK_0"), - PINCTRL_PIN(74, "SNDW4_DATA_DMIC_DATA_0"), + PINCTRL_PIN(76, "SNDW1_CLK"), + PINCTRL_PIN(77, "SNDW1_DATA"), + PINCTRL_PIN(78, "SNDW2_CLK"), + PINCTRL_PIN(79, "SNDW2_DATA"), + PINCTRL_PIN(80, "SNDW1_CLK"), + PINCTRL_PIN(81, "SNDW1_DATA"), + PINCTRL_PIN(82, "SNDW4_CLK_DMIC_CLK_0"), + PINCTRL_PIN(83, "SNDW4_DATA_DMIC_DATA_0"), /* GPP_R */ - PINCTRL_PIN(75, "HDA_BCLK"), - PINCTRL_PIN(76, "HDA_SYNC"), - PINCTRL_PIN(77, "HDA_SDO"), - PINCTRL_PIN(78, "HDA_SDI_0"), - PINCTRL_PIN(79, "HDA_RSTB"), - PINCTRL_PIN(80, "HDA_SDI_1"), - PINCTRL_PIN(81, "I2S1_SFRM"), - PINCTRL_PIN(82, "I2S1_TXD"), + PINCTRL_PIN(84, "HDA_BCLK"), + PINCTRL_PIN(85, "HDA_SYNC"), + PINCTRL_PIN(86, "HDA_SDO"), + PINCTRL_PIN(87, "HDA_SDI_0"), + PINCTRL_PIN(88, "HDA_RSTB"), + PINCTRL_PIN(89, "HDA_SDI_1"), + PINCTRL_PIN(90, "I2S1_SFRM"), + PINCTRL_PIN(91, "I2S1_TXD"), /* GPP_H */ - PINCTRL_PIN(83, "GPPC_H_0"), - PINCTRL_PIN(84, "SD_PWR_EN_B"), - PINCTRL_PIN(85, "MODEM_CLKREQ"), - PINCTRL_PIN(86, "SX_EXIT_HOLDOFFB"), - PINCTRL_PIN(87, "I2C2_SDA"), - PINCTRL_PIN(88, "I2C2_SCL"), - PINCTRL_PIN(89, "I2C3_SDA"), - PINCTRL_PIN(90, "I2C3_SCL"), - PINCTRL_PIN(91, "I2C4_SDA"), - PINCTRL_PIN(92, "I2C4_SCL"), - PINCTRL_PIN(93, "CPU_VCCIO_PWR_GATEB"), - PINCTRL_PIN(94, "I2S2_SCLK"), - PINCTRL_PIN(95, "I2S2_SFRM"), - PINCTRL_PIN(96, "I2S2_TXD"), - PINCTRL_PIN(97, "I2S2_RXD"), - PINCTRL_PIN(98, "I2S1_SCLK"), - PINCTRL_PIN(99, "GPPC_H_16"), - PINCTRL_PIN(100, "GPPC_H_17"), - PINCTRL_PIN(101, "GPPC_H_18"), - PINCTRL_PIN(102, "GPPC_H_19"), - PINCTRL_PIN(103, "GPPC_H_20"), - PINCTRL_PIN(104, "GPPC_H_21"), - PINCTRL_PIN(105, "GPPC_H_22"), - PINCTRL_PIN(106, "GPPC_H_23"), + PINCTRL_PIN(92, "GPPC_H_0"), + PINCTRL_PIN(93, "SD_PWR_EN_B"), + PINCTRL_PIN(94, "MODEM_CLKREQ"), + PINCTRL_PIN(95, "SX_EXIT_HOLDOFFB"), + PINCTRL_PIN(96, "I2C2_SDA"), + PINCTRL_PIN(97, "I2C2_SCL"), + PINCTRL_PIN(98, "I2C3_SDA"), + PINCTRL_PIN(99, "I2C3_SCL"), + PINCTRL_PIN(100, "I2C4_SDA"), + PINCTRL_PIN(101, "I2C4_SCL"), + PINCTRL_PIN(102, "CPU_VCCIO_PWR_GATEB"), + PINCTRL_PIN(103, "I2S2_SCLK"), + PINCTRL_PIN(104, "I2S2_SFRM"), + PINCTRL_PIN(105, "I2S2_TXD"), + PINCTRL_PIN(106, "I2S2_RXD"), + PINCTRL_PIN(107, "I2S1_SCLK"), + PINCTRL_PIN(108, "GPPC_H_16"), + PINCTRL_PIN(109, "GPPC_H_17"), + PINCTRL_PIN(110, "GPPC_H_18"), + PINCTRL_PIN(111, "GPPC_H_19"), + PINCTRL_PIN(112, "GPPC_H_20"), + PINCTRL_PIN(113, "GPPC_H_21"), + PINCTRL_PIN(114, "GPPC_H_22"), + PINCTRL_PIN(115, "GPPC_H_23"), /* GPP_D */ - PINCTRL_PIN(107, "SPI1_CSB"), - PINCTRL_PIN(108, "SPI1_CLK"), - PINCTRL_PIN(109, "SPI1_MISO_IO_1"), - PINCTRL_PIN(110, "SPI1_MOSI_IO_0"), - PINCTRL_PIN(111, "ISH_I2C0_SDA"), - PINCTRL_PIN(112, "ISH_I2C0_SCL"), - PINCTRL_PIN(113, "ISH_I2C1_SDA"), - PINCTRL_PIN(114, "ISH_I2C1_SCL"), - PINCTRL_PIN(115, "ISH_SPI_CSB"), - PINCTRL_PIN(116, "ISH_SPI_CLK"), - PINCTRL_PIN(117, "ISH_SPI_MISO"), - PINCTRL_PIN(118, "ISH_SPI_MOSI"), - PINCTRL_PIN(119, "ISH_UART0_RXD"), - PINCTRL_PIN(120, "ISH_UART0_TXD"), - PINCTRL_PIN(121, "ISH_UART0_RTSB"), - PINCTRL_PIN(122, "ISH_UART0_CTSB"), - PINCTRL_PIN(123, "SPI1_IO_2"), - PINCTRL_PIN(124, "SPI1_IO_3"), - PINCTRL_PIN(125, "I2S_MCLK"), - PINCTRL_PIN(126, "CNV_MFUART2_RXD"), - PINCTRL_PIN(127, "CNV_MFUART2_TXD"), - PINCTRL_PIN(128, "CNV_PA_BLANKING"), - PINCTRL_PIN(129, "I2C5_SDA"), - PINCTRL_PIN(130, "I2C5_SCL"), - PINCTRL_PIN(131, "GSPI2_CLK_LOOPBK"), - PINCTRL_PIN(132, "SPI1_CLK_LOOPBK"), + 
PINCTRL_PIN(116, "SPI1_CSB"), + PINCTRL_PIN(117, "SPI1_CLK"), + PINCTRL_PIN(118, "SPI1_MISO_IO_1"), + PINCTRL_PIN(119, "SPI1_MOSI_IO_0"), + PINCTRL_PIN(120, "ISH_I2C0_SDA"), + PINCTRL_PIN(121, "ISH_I2C0_SCL"), + PINCTRL_PIN(122, "ISH_I2C1_SDA"), + PINCTRL_PIN(123, "ISH_I2C1_SCL"), + PINCTRL_PIN(124, "ISH_SPI_CSB"), + PINCTRL_PIN(125, "ISH_SPI_CLK"), + PINCTRL_PIN(126, "ISH_SPI_MISO"), + PINCTRL_PIN(127, "ISH_SPI_MOSI"), + PINCTRL_PIN(128, "ISH_UART0_RXD"), + PINCTRL_PIN(129, "ISH_UART0_TXD"), + PINCTRL_PIN(130, "ISH_UART0_RTSB"), + PINCTRL_PIN(131, "ISH_UART0_CTSB"), + PINCTRL_PIN(132, "SPI1_IO_2"), + PINCTRL_PIN(133, "SPI1_IO_3"), + PINCTRL_PIN(134, "I2S_MCLK"), + PINCTRL_PIN(135, "CNV_MFUART2_RXD"), + PINCTRL_PIN(136, "CNV_MFUART2_TXD"), + PINCTRL_PIN(137, "CNV_PA_BLANKING"), + PINCTRL_PIN(138, "I2C5_SDA"), + PINCTRL_PIN(139, "I2C5_SCL"), + PINCTRL_PIN(140, "GSPI2_CLK_LOOPBK"), + PINCTRL_PIN(141, "SPI1_CLK_LOOPBK"), /* vGPIO */ - PINCTRL_PIN(133, "CNV_BTEN"), - PINCTRL_PIN(134, "CNV_WCEN"), - PINCTRL_PIN(135, "CNV_BT_HOST_WAKEB"), - PINCTRL_PIN(136, "CNV_BT_IF_SELECT"), - PINCTRL_PIN(137, "vCNV_BT_UART_TXD"), - PINCTRL_PIN(138, "vCNV_BT_UART_RXD"), - PINCTRL_PIN(139, "vCNV_BT_UART_CTS_B"), - PINCTRL_PIN(140, "vCNV_BT_UART_RTS_B"), - PINCTRL_PIN(141, "vCNV_MFUART1_TXD"), - PINCTRL_PIN(142, "vCNV_MFUART1_RXD"), - PINCTRL_PIN(143, "vCNV_MFUART1_CTS_B"), - PINCTRL_PIN(144, "vCNV_MFUART1_RTS_B"), - PINCTRL_PIN(145, "vUART0_TXD"), - PINCTRL_PIN(146, "vUART0_RXD"), - PINCTRL_PIN(147, "vUART0_CTS_B"), - PINCTRL_PIN(148, "vUART0_RTS_B"), - PINCTRL_PIN(149, "vISH_UART0_TXD"), - PINCTRL_PIN(150, "vISH_UART0_RXD"), - PINCTRL_PIN(151, "vISH_UART0_CTS_B"), - PINCTRL_PIN(152, "vISH_UART0_RTS_B"), - PINCTRL_PIN(153, "vCNV_BT_I2S_BCLK"), - PINCTRL_PIN(154, "vCNV_BT_I2S_WS_SYNC"), - PINCTRL_PIN(155, "vCNV_BT_I2S_SDO"), - PINCTRL_PIN(156, "vCNV_BT_I2S_SDI"), - PINCTRL_PIN(157, "vI2S2_SCLK"), - PINCTRL_PIN(158, "vI2S2_SFRM"), - PINCTRL_PIN(159, "vI2S2_TXD"), - PINCTRL_PIN(160, "vI2S2_RXD"), - PINCTRL_PIN(161, "vSD3_CD_B"), + PINCTRL_PIN(142, "CNV_BTEN"), + PINCTRL_PIN(143, "CNV_WCEN"), + PINCTRL_PIN(144, "CNV_BT_HOST_WAKEB"), + PINCTRL_PIN(145, "CNV_BT_IF_SELECT"), + PINCTRL_PIN(146, "vCNV_BT_UART_TXD"), + PINCTRL_PIN(147, "vCNV_BT_UART_RXD"), + PINCTRL_PIN(148, "vCNV_BT_UART_CTS_B"), + PINCTRL_PIN(149, "vCNV_BT_UART_RTS_B"), + PINCTRL_PIN(150, "vCNV_MFUART1_TXD"), + PINCTRL_PIN(151, "vCNV_MFUART1_RXD"), + PINCTRL_PIN(152, "vCNV_MFUART1_CTS_B"), + PINCTRL_PIN(153, "vCNV_MFUART1_RTS_B"), + PINCTRL_PIN(154, "vUART0_TXD"), + PINCTRL_PIN(155, "vUART0_RXD"), + PINCTRL_PIN(156, "vUART0_CTS_B"), + PINCTRL_PIN(157, "vUART0_RTS_B"), + PINCTRL_PIN(158, "vISH_UART0_TXD"), + PINCTRL_PIN(159, "vISH_UART0_RXD"), + PINCTRL_PIN(160, "vISH_UART0_CTS_B"), + PINCTRL_PIN(161, "vISH_UART0_RTS_B"), + PINCTRL_PIN(162, "vCNV_BT_I2S_BCLK"), + PINCTRL_PIN(163, "vCNV_BT_I2S_WS_SYNC"), + PINCTRL_PIN(164, "vCNV_BT_I2S_SDO"), + PINCTRL_PIN(165, "vCNV_BT_I2S_SDI"), + PINCTRL_PIN(166, "vI2S2_SCLK"), + PINCTRL_PIN(167, "vI2S2_SFRM"), + PINCTRL_PIN(168, "vI2S2_TXD"), + PINCTRL_PIN(169, "vI2S2_RXD"), + PINCTRL_PIN(170, "vSD3_CD_B"), /* GPP_C */ - PINCTRL_PIN(162, "GPPC_C_0"), - PINCTRL_PIN(163, "GPPC_C_1"), - PINCTRL_PIN(164, "GPPC_C_2"), - PINCTRL_PIN(165, "GPPC_C_3"), - PINCTRL_PIN(166, "GPPC_C_4"), - PINCTRL_PIN(167, "GPPC_C_5"), - PINCTRL_PIN(168, "SUSWARNB_SUSPWRDNACK"), - PINCTRL_PIN(169, "SUSACKB"), - PINCTRL_PIN(170, "UART0_RXD"), - PINCTRL_PIN(171, "UART0_TXD"), - PINCTRL_PIN(172, "UART0_RTSB"), - PINCTRL_PIN(173, "UART0_CTSB"), - 
PINCTRL_PIN(174, "UART1_RXD"), - PINCTRL_PIN(175, "UART1_TXD"), - PINCTRL_PIN(176, "UART1_RTSB"), - PINCTRL_PIN(177, "UART1_CTSB"), - PINCTRL_PIN(178, "I2C0_SDA"), - PINCTRL_PIN(179, "I2C0_SCL"), - PINCTRL_PIN(180, "I2C1_SDA"), - PINCTRL_PIN(181, "I2C1_SCL"), - PINCTRL_PIN(182, "UART2_RXD"), - PINCTRL_PIN(183, "UART2_TXD"), - PINCTRL_PIN(184, "UART2_RTSB"), - PINCTRL_PIN(185, "UART2_CTSB"), + PINCTRL_PIN(171, "GPPC_C_0"), + PINCTRL_PIN(172, "GPPC_C_1"), + PINCTRL_PIN(173, "GPPC_C_2"), + PINCTRL_PIN(174, "GPPC_C_3"), + PINCTRL_PIN(175, "GPPC_C_4"), + PINCTRL_PIN(176, "GPPC_C_5"), + PINCTRL_PIN(177, "SUSWARNB_SUSPWRDNACK"), + PINCTRL_PIN(178, "SUSACKB"), + PINCTRL_PIN(179, "UART0_RXD"), + PINCTRL_PIN(180, "UART0_TXD"), + PINCTRL_PIN(181, "UART0_RTSB"), + PINCTRL_PIN(182, "UART0_CTSB"), + PINCTRL_PIN(183, "UART1_RXD"), + PINCTRL_PIN(184, "UART1_TXD"), + PINCTRL_PIN(185, "UART1_RTSB"), + PINCTRL_PIN(186, "UART1_CTSB"), + PINCTRL_PIN(187, "I2C0_SDA"), + PINCTRL_PIN(188, "I2C0_SCL"), + PINCTRL_PIN(189, "I2C1_SDA"), + PINCTRL_PIN(190, "I2C1_SCL"), + PINCTRL_PIN(191, "UART2_RXD"), + PINCTRL_PIN(192, "UART2_TXD"), + PINCTRL_PIN(193, "UART2_RTSB"), + PINCTRL_PIN(194, "UART2_CTSB"), /* HVCMOS */ - PINCTRL_PIN(186, "L_BKLTEN"), - PINCTRL_PIN(187, "L_BKLTCTL"), - PINCTRL_PIN(188, "L_VDDEN"), - PINCTRL_PIN(189, "SYS_PWROK"), - PINCTRL_PIN(190, "SYS_RESETB"), - PINCTRL_PIN(191, "MLK_RSTB"), + PINCTRL_PIN(195, "L_BKLTEN"), + PINCTRL_PIN(196, "L_BKLTCTL"), + PINCTRL_PIN(197, "L_VDDEN"), + PINCTRL_PIN(198, "SYS_PWROK"), + PINCTRL_PIN(199, "SYS_RESETB"), + PINCTRL_PIN(200, "MLK_RSTB"), /* GPP_E */ - PINCTRL_PIN(192, "ISH_GP_0"), - PINCTRL_PIN(193, "ISH_GP_1"), - PINCTRL_PIN(194, "IMGCLKOUT_1"), - PINCTRL_PIN(195, "ISH_GP_2"), - PINCTRL_PIN(196, "IMGCLKOUT_2"), - PINCTRL_PIN(197, "SATA_LEDB"), - PINCTRL_PIN(198, "IMGCLKOUT_3"), - PINCTRL_PIN(199, "ISH_GP_3"), - PINCTRL_PIN(200, "ISH_GP_4"), - PINCTRL_PIN(201, "ISH_GP_5"), - PINCTRL_PIN(202, "ISH_GP_6"), - PINCTRL_PIN(203, "ISH_GP_7"), - PINCTRL_PIN(204, "IMGCLKOUT_4"), - PINCTRL_PIN(205, "DDPA_CTRLCLK"), - PINCTRL_PIN(206, "DDPA_CTRLDATA"), - PINCTRL_PIN(207, "DDPB_CTRLCLK"), - PINCTRL_PIN(208, "DDPB_CTRLDATA"), - PINCTRL_PIN(209, "DDPC_CTRLCLK"), - PINCTRL_PIN(210, "DDPC_CTRLDATA"), - PINCTRL_PIN(211, "IMGCLKOUT_5"), - PINCTRL_PIN(212, "CNV_BRI_DT"), - PINCTRL_PIN(213, "CNV_BRI_RSP"), - PINCTRL_PIN(214, "CNV_RGI_DT"), - PINCTRL_PIN(215, "CNV_RGI_RSP"), + PINCTRL_PIN(201, "ISH_GP_0"), + PINCTRL_PIN(202, "ISH_GP_1"), + PINCTRL_PIN(203, "IMGCLKOUT_1"), + PINCTRL_PIN(204, "ISH_GP_2"), + PINCTRL_PIN(205, "IMGCLKOUT_2"), + PINCTRL_PIN(206, "SATA_LEDB"), + PINCTRL_PIN(207, "IMGCLKOUT_3"), + PINCTRL_PIN(208, "ISH_GP_3"), + PINCTRL_PIN(209, "ISH_GP_4"), + PINCTRL_PIN(210, "ISH_GP_5"), + PINCTRL_PIN(211, "ISH_GP_6"), + PINCTRL_PIN(212, "ISH_GP_7"), + PINCTRL_PIN(213, "IMGCLKOUT_4"), + PINCTRL_PIN(214, "DDPA_CTRLCLK"), + PINCTRL_PIN(215, "DDPA_CTRLDATA"), + PINCTRL_PIN(216, "DDPB_CTRLCLK"), + PINCTRL_PIN(217, "DDPB_CTRLDATA"), + PINCTRL_PIN(218, "DDPC_CTRLCLK"), + PINCTRL_PIN(219, "DDPC_CTRLDATA"), + PINCTRL_PIN(220, "IMGCLKOUT_5"), + PINCTRL_PIN(221, "CNV_BRI_DT"), + PINCTRL_PIN(222, "CNV_BRI_RSP"), + PINCTRL_PIN(223, "CNV_RGI_DT"), + PINCTRL_PIN(224, "CNV_RGI_RSP"), /* GPP_G */ - PINCTRL_PIN(216, "SD3_CMD"), - PINCTRL_PIN(217, "SD3_D0"), - PINCTRL_PIN(218, "SD3_D1"), - PINCTRL_PIN(219, "SD3_D2"), - PINCTRL_PIN(220, "SD3_D3"), - PINCTRL_PIN(221, "SD3_CDB"), - PINCTRL_PIN(222, "SD3_CLK"), - PINCTRL_PIN(223, "SD3_WP"), + PINCTRL_PIN(225, "SD3_CMD"), + PINCTRL_PIN(226, 
"SD3_D0"), + PINCTRL_PIN(227, "SD3_D1"), + PINCTRL_PIN(228, "SD3_D2"), + PINCTRL_PIN(229, "SD3_D3"), + PINCTRL_PIN(230, "SD3_CDB"), + PINCTRL_PIN(231, "SD3_CLK"), + PINCTRL_PIN(232, "SD3_WP"), }; static const struct intel_padgroup jsl_community0_gpps[] = { JSL_GPP(0, 0, 19, 320), /* GPP_F */ - JSL_GPP(1, 20, 45, 32), /* GPP_B */ - JSL_GPP(2, 46, 66, 64), /* GPP_A */ - JSL_GPP(3, 67, 74, 96), /* GPP_S */ - JSL_GPP(4, 75, 82, 128), /* GPP_R */ + JSL_GPP(1, 20, 28, INTEL_GPIO_BASE_NOMAP), /* SPI */ + JSL_GPP(2, 29, 54, 32), /* GPP_B */ + JSL_GPP(3, 55, 75, 64), /* GPP_A */ + JSL_GPP(4, 76, 83, 96), /* GPP_S */ + JSL_GPP(5, 84, 91, 128), /* GPP_R */ }; static const struct intel_padgroup jsl_community1_gpps[] = { - JSL_GPP(0, 83, 106, 160), /* GPP_H */ - JSL_GPP(1, 107, 132, 192), /* GPP_D */ - JSL_GPP(2, 133, 161, 224), /* vGPIO */ - JSL_GPP(3, 162, 185, 256), /* GPP_C */ + JSL_GPP(0, 92, 115, 160), /* GPP_H */ + JSL_GPP(1, 116, 141, 192), /* GPP_D */ + JSL_GPP(2, 142, 170, 224), /* vGPIO */ + JSL_GPP(3, 171, 194, 256), /* GPP_C */ }; static const struct intel_padgroup jsl_community4_gpps[] = { - JSL_GPP(0, 186, 191, INTEL_GPIO_BASE_NOMAP), /* HVCMOS */ - JSL_GPP(1, 192, 215, 288), /* GPP_E */ + JSL_GPP(0, 195, 200, INTEL_GPIO_BASE_NOMAP), /* HVCMOS */ + JSL_GPP(1, 201, 224, 288), /* GPP_E */ }; static const struct intel_padgroup jsl_community5_gpps[] = { - JSL_GPP(0, 216, 223, INTEL_GPIO_BASE_ZERO), /* GPP_G */ + JSL_GPP(0, 225, 232, INTEL_GPIO_BASE_ZERO), /* GPP_G */ }; static const struct intel_community jsl_communities[] = { - JSL_COMMUNITY(0, 0, 82, jsl_community0_gpps), - JSL_COMMUNITY(1, 83, 185, jsl_community1_gpps), - JSL_COMMUNITY(2, 186, 215, jsl_community4_gpps), - JSL_COMMUNITY(3, 216, 223, jsl_community5_gpps), + JSL_COMMUNITY(0, 0, 91, jsl_community0_gpps), + JSL_COMMUNITY(1, 92, 194, jsl_community1_gpps), + JSL_COMMUNITY(2, 195, 224, jsl_community4_gpps), + JSL_COMMUNITY(3, 225, 232, jsl_community5_gpps), }; static const struct intel_pinctrl_soc_data jsl_soc_data = { @@ -336,7 +347,6 @@ static struct platform_driver jsl_pinctrl_driver = { .pm = &jsl_pinctrl_pm_ops, }, }; - module_platform_driver(jsl_pinctrl_driver); MODULE_AUTHOR("Andy Shevchenko <[email protected]>"); diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c index e4ff8da1b894..3ae141e0b421 100644 --- a/drivers/pinctrl/intel/pinctrl-merrifield.c +++ b/drivers/pinctrl/intel/pinctrl-merrifield.c @@ -745,6 +745,10 @@ static int mrfld_config_set_pin(struct mrfld_pinctrl *mp, unsigned int pin, mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK; bits |= BUFCFG_PU_EN; + /* Set default strength value in case none is given */ + if (arg == 1) + arg = 20000; + switch (arg) { case 50000: bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT; @@ -765,6 +769,10 @@ static int mrfld_config_set_pin(struct mrfld_pinctrl *mp, unsigned int pin, mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK; bits |= BUFCFG_PD_EN; + /* Set default strength value in case none is given */ + if (arg == 1) + arg = 20000; + switch (arg) { case 50000: bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT; diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 4aea3e05e8c6..899c16c17b6d 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -429,7 +429,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) pin_reg &= ~BIT(LEVEL_TRIG_OFF); pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF; - 
pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
irq_set_handler_locked(d, handle_edge_irq);
break;
@@ -437,7 +436,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg &= ~BIT(LEVEL_TRIG_OFF);
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
irq_set_handler_locked(d, handle_edge_irq);
break;
@@ -445,7 +443,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg &= ~BIT(LEVEL_TRIG_OFF);
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= BOTH_EADGE << ACTIVE_LEVEL_OFF;
- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
irq_set_handler_locked(d, handle_edge_irq);
break;
@@ -453,8 +450,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF;
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
- pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
- pin_reg |= DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF;
irq_set_handler_locked(d, handle_level_irq);
break;
@@ -462,8 +457,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF;
pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
- pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
- pin_reg |= DB_TYPE_PRESERVE_HIGH_GLITCH << DB_CNTRL_OFF;
irq_set_handler_locked(d, handle_level_irq);
break;
diff --git a/drivers/ptp/idt8a340_reg.h b/drivers/ptp/idt8a340_reg.h
index b297c4aba2ba..a664dfe5fd2f 100644
--- a/drivers/ptp/idt8a340_reg.h
+++ b/drivers/ptp/idt8a340_reg.h
@@ -103,6 +103,7 @@
#define SM_RESET_CMD 0x5A
#define GENERAL_STATUS 0xc014
+#define BOOT_STATUS 0x0000
#define HW_REV_ID 0x000A
#define BOND_ID 0x000B
#define HW_CSR_ID 0x000C
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index 663255774c0b..051511f5e1f2 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -33,14 +33,41 @@ module_param(firmware, charp, 0);
#define SETTIME_CORRECTION (0)
-static long set_write_phase_ready(struct ptp_clock_info *ptp)
+static int contains_full_configuration(const struct firmware *fw)
{
- struct idtcm_channel *channel =
- container_of(ptp, struct idtcm_channel, caps);
+ s32 full_count = FULL_FW_CFG_BYTES - FULL_FW_CFG_SKIPPED_BYTES;
+ struct idtcm_fwrc *rec = (struct idtcm_fwrc *)fw->data;
+ s32 count = 0;
+ u16 regaddr;
+ u8 loaddr;
+ s32 len;
+
+ /* If the firmware contains 'full configuration' SM_RESET can be used
+ * to ensure proper configuration.
+ *
+ * Full configuration is defined as the number of programmable
+ * bytes within the configuration range minus page offset addr range.
+ */
+ for (len = fw->size; len > 0; len -= sizeof(*rec)) {
+ regaddr = rec->hiaddr << 8;
+ regaddr |= rec->loaddr;
- channel->write_phase_ready = 1;
+ loaddr = rec->loaddr;
- return 0;
+ rec++;
+
+ /* Top (status registers) and bottom are read-only */
+ if (regaddr < GPIO_USER_CONTROL || regaddr >= SCRATCH)
+ continue;
+
+ /* Page size 128, last 4 bytes of page skipped */
+ if ((loaddr > 0x7b && loaddr <= 0x7f) || loaddr > 0xfb)
+ continue;
+
+ count++;
+ }
+
+ return (count >= full_count);
}
static int char_array_to_timespec(u8 *buf,
@@ -261,6 +288,53 @@ static int idtcm_write(struct idtcm *idtcm,
return _idtcm_rdwr(idtcm, module + regaddr, buf, count, true);
}
+static int clear_boot_status(struct idtcm *idtcm)
+{
+ int err;
+ u8 buf[4] = {0};
+
+ err = idtcm_write(idtcm, GENERAL_STATUS, BOOT_STATUS, buf, sizeof(buf));
+
+ return err;
+}
+
+static int read_boot_status(struct idtcm *idtcm, u32 *status)
+{
+ int err;
+ u8 buf[4] = {0};
+
+ err = idtcm_read(idtcm, GENERAL_STATUS, BOOT_STATUS, buf, sizeof(buf));
+
+ *status = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+
+ return err;
+}
+
+static int wait_for_boot_status_ready(struct idtcm *idtcm)
+{
+ u32 status = 0;
+ u8 i = 30; /* 30 * 100ms = 3s */
+ int err;
+
+ do {
+ err = read_boot_status(idtcm, &status);
+
+ if (err)
+ return err;
+
+ if (status == 0xA0)
+ return 0;
+
+ msleep(100);
+ i--;
+
+ } while (i);
+
+ dev_warn(&idtcm->client->dev, "%s timed out\n", __func__);
+
+ return -EBUSY;
+}
+
static int _idtcm_gettime(struct idtcm_channel *channel,
struct timespec64 *ts)
{
@@ -599,8 +673,9 @@ static int _idtcm_set_dpll_hw_tod(struct idtcm_channel *channel,
if (idtcm->calculate_overhead_flag) {
/* Assumption: I2C @ 400KHz */
- total_overhead_ns = ktime_to_ns(ktime_get_raw()
- - idtcm->start_time)
+ ktime_t diff = ktime_sub(ktime_get_raw(),
+ idtcm->start_time);
+ total_overhead_ns = ktime_to_ns(diff)
+ idtcm->tod_write_overhead_ns
+ SETTIME_CORRECTION;
@@ -670,7 +745,7 @@ static int _idtcm_set_dpll_scsr_tod(struct idtcm_channel *channel,
if (err)
return err;
- if (cmd == 0)
+ if ((cmd & TOD_WRITE_SELECTION_MASK) == 0)
break;
if (++count > 20) {
@@ -683,49 +758,74 @@ static int _idtcm_set_dpll_scsr_tod(struct idtcm_channel *channel,
return 0;
}
-static int _idtcm_settime(struct idtcm_channel *channel,
- struct timespec64 const *ts,
- enum hw_tod_write_trig_sel wr_trig)
+static int get_output_base_addr(u8 outn)
{
- struct idtcm *idtcm = channel->idtcm;
- int err;
- int i;
- u8 trig_sel;
+ int base;
- err = _idtcm_set_dpll_hw_tod(channel, ts, wr_trig);
-
- if (err)
- return err;
-
- /* Wait for the operation to complete.
*/ - for (i = 0; i < 10000; i++) { - err = idtcm_read(idtcm, channel->hw_dpll_n, - HW_DPLL_TOD_CTRL_1, &trig_sel, - sizeof(trig_sel)); + switch (outn) { + case 0: + base = OUTPUT_0; + break; + case 1: + base = OUTPUT_1; + break; + case 2: + base = OUTPUT_2; + break; + case 3: + base = OUTPUT_3; + break; + case 4: + base = OUTPUT_4; + break; + case 5: + base = OUTPUT_5; + break; + case 6: + base = OUTPUT_6; + break; + case 7: + base = OUTPUT_7; + break; + case 8: + base = OUTPUT_8; + break; + case 9: + base = OUTPUT_9; + break; + case 10: + base = OUTPUT_10; + break; + case 11: + base = OUTPUT_11; + break; + default: + base = -EINVAL; + } - if (err) - return err; + return base; +} - if (trig_sel == 0x4a) - break; +static int _idtcm_settime_deprecated(struct idtcm_channel *channel, + struct timespec64 const *ts) +{ + struct idtcm *idtcm = channel->idtcm; + int err; - err = 1; - } + err = _idtcm_set_dpll_hw_tod(channel, ts, HW_TOD_WR_TRIG_SEL_MSB); if (err) { dev_err(&idtcm->client->dev, - "Failed at line %d in func %s!\n", - __LINE__, - __func__); + "%s: Set HW ToD failed\n", __func__); return err; } return idtcm_sync_pps_output(channel); } -static int _idtcm_settime_v487(struct idtcm_channel *channel, - struct timespec64 const *ts, - enum scsr_tod_write_type_sel wr_type) +static int _idtcm_settime(struct idtcm_channel *channel, + struct timespec64 const *ts, + enum scsr_tod_write_type_sel wr_type) { return _idtcm_set_dpll_scsr_tod(channel, ts, SCSR_TOD_WR_TRIG_SEL_IMMEDIATE, @@ -830,6 +930,7 @@ static int set_tod_write_overhead(struct idtcm_channel *channel) ktime_t start; ktime_t stop; + ktime_t diff; char buf[TOD_BYTE_COUNT] = {0}; @@ -849,7 +950,9 @@ static int set_tod_write_overhead(struct idtcm_channel *channel) stop = ktime_get_raw(); - current_ns = ktime_to_ns(stop - start); + diff = ktime_sub(stop, start); + + current_ns = ktime_to_ns(diff); if (i == 0) { lowest_ns = current_ns; @@ -864,14 +967,14 @@ static int set_tod_write_overhead(struct idtcm_channel *channel) return err; } -static int _idtcm_adjtime(struct idtcm_channel *channel, s64 delta) +static int _idtcm_adjtime_deprecated(struct idtcm_channel *channel, s64 delta) { int err; struct idtcm *idtcm = channel->idtcm; struct timespec64 ts; s64 now; - if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS) { + if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS_DEPRECATED) { err = idtcm_do_phase_pull_in(channel, delta, 0); } else { idtcm->calculate_overhead_flag = 1; @@ -891,7 +994,7 @@ static int _idtcm_adjtime(struct idtcm_channel *channel, s64 delta) ts = ns_to_timespec64(now); - err = _idtcm_settime(channel, &ts, HW_TOD_WR_TRIG_SEL_MSB); + err = _idtcm_settime_deprecated(channel, &ts); } return err; @@ -899,13 +1002,31 @@ static int _idtcm_adjtime(struct idtcm_channel *channel, s64 delta) static int idtcm_state_machine_reset(struct idtcm *idtcm) { - int err; u8 byte = SM_RESET_CMD; + u32 status = 0; + int err; + u8 i; + + clear_boot_status(idtcm); err = idtcm_write(idtcm, RESET_CTRL, SM_RESET, &byte, sizeof(byte)); - if (!err) - msleep_interruptible(POST_SM_RESET_DELAY_MS); + if (!err) { + for (i = 0; i < 30; i++) { + msleep_interruptible(100); + read_boot_status(idtcm, &status); + + if (status == 0xA0) { + dev_dbg(&idtcm->client->dev, + "SM_RESET completed in %d ms\n", + i * 100); + break; + } + } + + if (!status) + dev_err(&idtcm->client->dev, "Timed out waiting for CM_RESET to complete\n"); + } return err; } @@ -1099,7 +1220,7 @@ static int idtcm_load_firmware(struct idtcm *idtcm, rec = (struct idtcm_fwrc *) fw->data; - if (fw->size > 0) + 
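The contains_full_configuration() check applied at this point in idtcm_load_firmware() admits a byte record only if it targets the programmable window and is not one of the per-page offset registers. A minimal standalone sketch of that per-record filter; the 0x1000/0x2000 window bounds are hypothetical stand-ins for GPIO_USER_CONTROL and SCRATCH, whose real values live in idt8a340_reg.h:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical window bounds; stand-ins for GPIO_USER_CONTROL/SCRATCH. */
#define CFG_WINDOW_START 0x1000
#define CFG_WINDOW_END   0x2000

/*
 * A firmware byte record counts toward "full configuration" only when it
 * targets the programmable window and is not one of the last four bytes
 * of a 128-byte page (those hold the page-offset registers).
 */
static bool counts_toward_full_config(uint16_t regaddr)
{
        uint8_t loaddr = regaddr & 0xff;

        if (regaddr < CFG_WINDOW_START || regaddr >= CFG_WINDOW_END)
                return false;

        if ((loaddr > 0x7b && loaddr <= 0x7f) || loaddr > 0xfb)
                return false;

        return true;
}

With that filter, an image programming at least FULL_FW_CFG_BYTES minus FULL_FW_CFG_SKIPPED_BYTES distinct bytes is treated as a complete configuration, so the state-machine reset cannot leave stale settings behind.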
if (contains_full_configuration(fw)) idtcm_state_machine_reset(idtcm); for (len = fw->size; len > 0; len -= sizeof(*rec)) { @@ -1151,11 +1272,19 @@ static int idtcm_output_enable(struct idtcm_channel *channel, bool enable, unsigned int outn) { struct idtcm *idtcm = channel->idtcm; + int base; int err; u8 val; - err = idtcm_read(idtcm, OUTPUT_MODULE_FROM_INDEX(outn), - OUT_CTRL_1, &val, sizeof(val)); + base = get_output_base_addr(outn); + + if (!(base > 0)) { + dev_err(&idtcm->client->dev, + "%s - Unsupported out%d", __func__, outn); + return base; + } + + err = idtcm_read(idtcm, (u16)base, OUT_CTRL_1, &val, sizeof(val)); if (err) return err; @@ -1165,8 +1294,7 @@ static int idtcm_output_enable(struct idtcm_channel *channel, else val &= ~SQUELCH_DISABLE; - return idtcm_write(idtcm, OUTPUT_MODULE_FROM_INDEX(outn), - OUT_CTRL_1, &val, sizeof(val)); + return idtcm_write(idtcm, (u16)base, OUT_CTRL_1, &val, sizeof(val)); } static int idtcm_output_mask_enable(struct idtcm_channel *channel, @@ -1209,6 +1337,23 @@ static int idtcm_perout_enable(struct idtcm_channel *channel, return idtcm_output_enable(channel, enable, perout->index); } +static int idtcm_get_pll_mode(struct idtcm_channel *channel, + enum pll_mode *pll_mode) +{ + struct idtcm *idtcm = channel->idtcm; + int err; + u8 dpll_mode; + + err = idtcm_read(idtcm, channel->dpll_n, DPLL_MODE, + &dpll_mode, sizeof(dpll_mode)); + if (err) + return err; + + *pll_mode = (dpll_mode >> PLL_MODE_SHIFT) & PLL_MODE_MASK; + + return 0; +} + static int idtcm_set_pll_mode(struct idtcm_channel *channel, enum pll_mode pll_mode) { @@ -1260,16 +1405,8 @@ static int _idtcm_adjphase(struct idtcm_channel *channel, s32 delta_ns) if (err) return err; - - channel->write_phase_ready = 0; - - ptp_schedule_worker(channel->ptp_clock, - msecs_to_jiffies(WR_PHASE_SETUP_MS)); } - if (!channel->write_phase_ready) - delta_ns = 0; - offset_ps = (s64)delta_ns * 1000; /* @@ -1282,7 +1419,7 @@ static int _idtcm_adjphase(struct idtcm_channel *channel, s32 delta_ns) else if (offset_ps < -MAX_ABS_WRITE_PHASE_PICOSECONDS) offset_ps = -MAX_ABS_WRITE_PHASE_PICOSECONDS; - phase_50ps = DIV_ROUND_CLOSEST(div64_s64(offset_ps, 50), 1); + phase_50ps = div_s64(offset_ps, 50); for (i = 0; i < 4; i++) { buf[i] = phase_50ps & 0xff; @@ -1299,7 +1436,6 @@ static int _idtcm_adjfine(struct idtcm_channel *channel, long scaled_ppm) { struct idtcm *idtcm = channel->idtcm; u8 i; - bool neg_adj = 0; int err; u8 buf[6] = {0}; s64 fcw; @@ -1323,18 +1459,11 @@ static int _idtcm_adjfine(struct idtcm_channel *channel, long scaled_ppm) * FCW = ------------- * 111 * 2^4 */ - if (scaled_ppm < 0) { - neg_adj = 1; - scaled_ppm = -scaled_ppm; - } /* 2 ^ -53 = 1.1102230246251565404236316680908e-16 */ fcw = scaled_ppm * 244140625ULL; - fcw = div_u64(fcw, 1776); - - if (neg_adj) - fcw = -fcw; + fcw = div_s64(fcw, 1776); for (i = 0; i < 6; i++) { buf[i] = fcw & 0xff; @@ -1369,8 +1498,8 @@ static int idtcm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) return err; } -static int idtcm_settime(struct ptp_clock_info *ptp, - const struct timespec64 *ts) +static int idtcm_settime_deprecated(struct ptp_clock_info *ptp, + const struct timespec64 *ts) { struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps); @@ -1379,7 +1508,7 @@ static int idtcm_settime(struct ptp_clock_info *ptp, mutex_lock(&idtcm->reg_lock); - err = _idtcm_settime(channel, ts, HW_TOD_WR_TRIG_SEL_MSB); + err = _idtcm_settime_deprecated(channel, ts); if (err) dev_err(&idtcm->client->dev, @@ -1392,7 +1521,7 @@ static int 
idtcm_settime(struct ptp_clock_info *ptp, return err; } -static int idtcm_settime_v487(struct ptp_clock_info *ptp, +static int idtcm_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts) { struct idtcm_channel *channel = @@ -1402,7 +1531,7 @@ static int idtcm_settime_v487(struct ptp_clock_info *ptp, mutex_lock(&idtcm->reg_lock); - err = _idtcm_settime_v487(channel, ts, SCSR_TOD_WR_TYPE_SEL_ABSOLUTE); + err = _idtcm_settime(channel, ts, SCSR_TOD_WR_TYPE_SEL_ABSOLUTE); if (err) dev_err(&idtcm->client->dev, @@ -1415,7 +1544,7 @@ static int idtcm_settime_v487(struct ptp_clock_info *ptp, return err; } -static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta) +static int idtcm_adjtime_deprecated(struct ptp_clock_info *ptp, s64 delta) { struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps); @@ -1424,7 +1553,7 @@ static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta) mutex_lock(&idtcm->reg_lock); - err = _idtcm_adjtime(channel, delta); + err = _idtcm_adjtime_deprecated(channel, delta); if (err) dev_err(&idtcm->client->dev, @@ -1437,7 +1566,7 @@ static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta) return err; } -static int idtcm_adjtime_v487(struct ptp_clock_info *ptp, s64 delta) +static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps); @@ -1446,7 +1575,7 @@ static int idtcm_adjtime_v487(struct ptp_clock_info *ptp, s64 delta) enum scsr_tod_write_type_sel type; int err; - if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS_V487) { + if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS) { err = idtcm_do_phase_pull_in(channel, delta, 0); if (err) dev_err(&idtcm->client->dev, @@ -1466,7 +1595,7 @@ static int idtcm_adjtime_v487(struct ptp_clock_info *ptp, s64 delta) mutex_lock(&idtcm->reg_lock); - err = _idtcm_settime_v487(channel, &ts, type); + err = _idtcm_settime(channel, &ts, type); if (err) dev_err(&idtcm->client->dev, @@ -1810,10 +1939,14 @@ static int idtcm_enable_tod(struct idtcm_channel *channel) if (err) return err; - return _idtcm_settime(channel, &ts, HW_TOD_WR_TRIG_SEL_MSB); + if (idtcm->deprecated) + return _idtcm_settime_deprecated(channel, &ts); + else + return _idtcm_settime(channel, &ts, + SCSR_TOD_WR_TYPE_SEL_ABSOLUTE); } -static void idtcm_display_version_info(struct idtcm *idtcm) +static void idtcm_set_version_info(struct idtcm *idtcm) { u8 major; u8 minor; @@ -1835,34 +1968,37 @@ static void idtcm_display_version_info(struct idtcm *idtcm) snprintf(idtcm->version, sizeof(idtcm->version), "%u.%u.%u", major, minor, hotfix); + if (idtcm_strverscmp(idtcm->version, "4.8.7") >= 0) + idtcm->deprecated = 0; + else + idtcm->deprecated = 1; + dev_info(&idtcm->client->dev, fmt, major, minor, hotfix, product_id, hw_rev_id, config_select); } -static const struct ptp_clock_info idtcm_caps_v487 = { +static const struct ptp_clock_info idtcm_caps = { .owner = THIS_MODULE, .max_adj = 244000, .n_per_out = 12, .adjphase = &idtcm_adjphase, .adjfine = &idtcm_adjfine, - .adjtime = &idtcm_adjtime_v487, + .adjtime = &idtcm_adjtime, .gettime64 = &idtcm_gettime, - .settime64 = &idtcm_settime_v487, + .settime64 = &idtcm_settime, .enable = &idtcm_enable, - .do_aux_work = &set_write_phase_ready, }; -static const struct ptp_clock_info idtcm_caps = { +static const struct ptp_clock_info idtcm_caps_deprecated = { .owner = THIS_MODULE, .max_adj = 244000, .n_per_out = 12, .adjphase = &idtcm_adjphase, .adjfine = &idtcm_adjfine, - .adjtime = &idtcm_adjtime, + .adjtime = 
&idtcm_adjtime_deprecated, .gettime64 = &idtcm_gettime, - .settime64 = &idtcm_settime, + .settime64 = &idtcm_settime_deprecated, .enable = &idtcm_enable, - .do_aux_work = &set_write_phase_ready, }; static int configure_channel_pll(struct idtcm_channel *channel) @@ -1984,15 +2120,15 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index) channel->idtcm = idtcm; - if (idtcm_strverscmp(idtcm->version, "4.8.7") >= 0) - channel->caps = idtcm_caps_v487; + if (idtcm->deprecated) + channel->caps = idtcm_caps_deprecated; else channel->caps = idtcm_caps; snprintf(channel->caps.name, sizeof(channel->caps.name), "IDT CM TOD%u", index); - if (idtcm_strverscmp(idtcm->version, "4.8.7") >= 0) { + if (!idtcm->deprecated) { err = idtcm_enable_tod_sync(channel); if (err) { dev_err(&idtcm->client->dev, @@ -2003,12 +2139,11 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index) } } - err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY); + /* Sync pll mode with hardware */ + err = idtcm_get_pll_mode(channel, &channel->pll_mode); if (err) { dev_err(&idtcm->client->dev, - "Failed at line %d in func %s!\n", - __LINE__, - __func__); + "Error: %s - Unable to read pll mode\n", __func__); return err; } @@ -2032,8 +2167,6 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index) if (!channel->ptp_clock) return -ENOTSUPP; - channel->write_phase_ready = 0; - dev_info(&idtcm->client->dev, "PLL%d registered as ptp%d\n", index, channel->ptp_clock->index); @@ -2094,7 +2227,7 @@ static int idtcm_probe(struct i2c_client *client, mutex_init(&idtcm->reg_lock); mutex_lock(&idtcm->reg_lock); - idtcm_display_version_info(idtcm); + idtcm_set_version_info(idtcm); err = idtcm_load_firmware(idtcm, &client->dev); @@ -2102,6 +2235,9 @@ static int idtcm_probe(struct i2c_client *client, dev_warn(&idtcm->client->dev, "loading firmware failed with %d\n", err); + if (wait_for_boot_status_ready(idtcm)) + dev_warn(&idtcm->client->dev, "BOOT_STATUS != 0xA0\n"); + if (idtcm->tod_mask) { for (i = 0; i < MAX_TOD; i++) { if (idtcm->tod_mask & (1 << i)) { diff --git a/drivers/ptp/ptp_clockmatrix.h b/drivers/ptp/ptp_clockmatrix.h index 82840d72364a..645de2c66b64 100644 --- a/drivers/ptp/ptp_clockmatrix.h +++ b/drivers/ptp/ptp_clockmatrix.h @@ -15,6 +15,7 @@ #define FW_FILENAME "idtcm.bin" #define MAX_TOD (4) #define MAX_PLL (8) +#define MAX_OUTPUT (12) #define MAX_ABS_WRITE_PHASE_PICOSECONDS (107374182350LL) @@ -44,18 +45,20 @@ #define DEFAULT_TOD2_PTP_PLL (2) #define DEFAULT_TOD3_PTP_PLL (3) -#define POST_SM_RESET_DELAY_MS (3000) -#define PHASE_PULL_IN_THRESHOLD_NS (150000) -#define PHASE_PULL_IN_THRESHOLD_NS_V487 (15000) -#define TOD_WRITE_OVERHEAD_COUNT_MAX (2) -#define TOD_BYTE_COUNT (11) -#define WR_PHASE_SETUP_MS (5000) +#define POST_SM_RESET_DELAY_MS (3000) +#define PHASE_PULL_IN_THRESHOLD_NS_DEPRECATED (150000) +#define PHASE_PULL_IN_THRESHOLD_NS (15000) +#define TOD_WRITE_OVERHEAD_COUNT_MAX (2) +#define TOD_BYTE_COUNT (11) -#define OUTPUT_MODULE_FROM_INDEX(index) (OUTPUT_0 + (index) * 0x10) +#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef) -#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef) +#define IDTCM_MAX_WRITE_COUNT (512) -#define IDTCM_MAX_WRITE_COUNT (512) +#define FULL_FW_CFG_BYTES (SCRATCH - GPIO_USER_CONTROL) +#define FULL_FW_CFG_SKIPPED_BYTES (((SCRATCH >> 7) \ + - (GPIO_USER_CONTROL >> 7)) \ + * 4) /* 4 bytes skipped every 0x80 */ /* Values of DPLL_N.DPLL_MODE.PLL_MODE */ enum pll_mode { @@ -120,7 +123,7 @@ struct idtcm_channel { enum pll_mode pll_mode; u8 pll; u16 output_mask; - int 
write_phase_ready; + u8 output_phase_adj[MAX_OUTPUT][4]; }; struct idtcm { @@ -129,6 +132,7 @@ struct idtcm { u8 page_offset; u8 tod_mask; char version[16]; + u8 deprecated; /* Overhead calculation for adjtime */ u8 calculate_overhead_flag; diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 483a9ecfcbb1..444385da5792 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -210,18 +210,12 @@ out: static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store); static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store); -static struct attribute *ccwgroup_attrs[] = { +static struct attribute *ccwgroup_dev_attrs[] = { &dev_attr_online.attr, &dev_attr_ungroup.attr, NULL, }; -static struct attribute_group ccwgroup_attr_group = { - .attrs = ccwgroup_attrs, -}; -static const struct attribute_group *ccwgroup_attr_groups[] = { - &ccwgroup_attr_group, - NULL, -}; +ATTRIBUTE_GROUPS(ccwgroup_dev); static void ccwgroup_ungroup_workfn(struct work_struct *work) { @@ -384,7 +378,6 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv, } dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev)); - gdev->dev.groups = ccwgroup_attr_groups; if (gdrv) { gdev->dev.driver = &gdrv->driver; @@ -487,6 +480,7 @@ static void ccwgroup_shutdown(struct device *dev) static struct bus_type ccwgroup_bus_type = { .name = "ccwgroup", + .dev_groups = ccwgroup_dev_groups, .remove = ccwgroup_remove, .shutdown = ccwgroup_shutdown, }; diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 0e9af2fbaa76..6f5ddc3eab8c 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -424,8 +424,6 @@ enum qeth_qdio_out_buffer_state { /* Received QAOB notification on CQ: */ QETH_QDIO_BUF_QAOB_OK, QETH_QDIO_BUF_QAOB_ERROR, - /* Handled via transfer pending / completion queue. 
*/ - QETH_QDIO_BUF_HANDLED_DELAYED, }; struct qeth_qdio_out_buffer { @@ -624,7 +622,7 @@ struct qeth_reply { }; struct qeth_cmd_buffer { - struct list_head list; + struct list_head list_entry; struct completion done; spinlock_t lock; unsigned int length; @@ -1063,10 +1061,8 @@ extern const struct qeth_discipline qeth_l2_discipline; extern const struct qeth_discipline qeth_l3_discipline; extern const struct ethtool_ops qeth_ethtool_ops; extern const struct ethtool_ops qeth_osn_ethtool_ops; -extern const struct attribute_group *qeth_generic_attr_groups[]; -extern const struct attribute_group *qeth_osn_attr_groups[]; -extern const struct attribute_group qeth_device_attr_group; -extern const struct attribute_group qeth_device_blkt_group; +extern const struct attribute_group *qeth_dev_groups[]; +extern const struct attribute_group *qeth_osn_dev_groups[]; extern const struct device_type qeth_generic_devtype; const char *qeth_get_cardname_short(struct qeth_card *); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 319190824cd2..f4b60294a969 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -75,7 +75,6 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, enum iucv_tx_notify notification); static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error, int budget); -static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); static void qeth_close_dev_handler(struct work_struct *work) { @@ -478,8 +477,7 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx, while (c) { if (forced_cleanup || - atomic_read(&c->state) == - QETH_QDIO_BUF_HANDLED_DELAYED) { + atomic_read(&c->state) == QETH_QDIO_BUF_EMPTY) { struct qeth_qdio_out_buffer *f = c; QETH_CARD_TEXT(f->q->card, 5, "fp"); @@ -517,18 +515,6 @@ static void qeth_qdio_handle_aob(struct qeth_card *card, buffer = (struct qeth_qdio_out_buffer *) aob->user1; QETH_CARD_TEXT_(card, 5, "%lx", aob->user1); - /* Free dangling allocations. The attached skbs are handled by - * qeth_cleanup_handled_pending(). - */ - for (i = 0; - i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card); - i++) { - void *data = phys_to_virt(aob->sba[i]); - - if (data && buffer->is_header[i]) - kmem_cache_free(qeth_core_header_cache, data); - } - if (aob->aorc) { QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc); new_state = QETH_QDIO_BUF_QAOB_ERROR; @@ -536,10 +522,9 @@ static void qeth_qdio_handle_aob(struct qeth_card *card, switch (atomic_xchg(&buffer->state, new_state)) { case QETH_QDIO_BUF_PRIMED: - /* Faster than TX completion code. */ - notification = qeth_compute_cq_notification(aob->aorc, 0); - qeth_notify_skbs(buffer->q, buffer, notification); - atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED); + /* Faster than TX completion code, let it handle the async + * completion for us. + */ break; case QETH_QDIO_BUF_PENDING: /* TX completion code is active and will handle the async @@ -550,7 +535,21 @@ static void qeth_qdio_handle_aob(struct qeth_card *card, /* TX completion code is already finished. */ notification = qeth_compute_cq_notification(aob->aorc, 1); qeth_notify_skbs(buffer->q, buffer, notification); - atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED); + + /* Free dangling allocations. The attached skbs are handled by + * qeth_cleanup_handled_pending(). 
+ */ + for (i = 0; + i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card); + i++) { + void *data = phys_to_virt(aob->sba[i]); + + if (data && buffer->is_header[i]) + kmem_cache_free(qeth_core_header_cache, data); + buffer->is_header[i] = 0; + } + + atomic_set(&buffer->state, QETH_QDIO_BUF_EMPTY); break; default: WARN_ON_ONCE(1); @@ -615,7 +614,7 @@ static void qeth_enqueue_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob) { spin_lock_irq(&card->lock); - list_add_tail(&iob->list, &card->cmd_waiter_list); + list_add_tail(&iob->list_entry, &card->cmd_waiter_list); spin_unlock_irq(&card->lock); } @@ -623,7 +622,7 @@ static void qeth_dequeue_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob) { spin_lock_irq(&card->lock); - list_del(&iob->list); + list_del(&iob->list_entry); spin_unlock_irq(&card->lock); } @@ -977,7 +976,7 @@ static void qeth_clear_ipacmd_list(struct qeth_card *card) QETH_CARD_TEXT(card, 4, "clipalst"); spin_lock_irqsave(&card->lock, flags); - list_for_each_entry(iob, &card->cmd_waiter_list, list) + list_for_each_entry(iob, &card->cmd_waiter_list, list_entry) qeth_notify_cmd(iob, -ECANCELED); spin_unlock_irqrestore(&card->lock, flags); } @@ -1047,7 +1046,6 @@ struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel, init_completion(&iob->done); spin_lock_init(&iob->lock); - INIT_LIST_HEAD(&iob->list); refcount_set(&iob->ref_count, 1); iob->channel = channel; iob->timeout = timeout; @@ -1094,7 +1092,7 @@ static void qeth_issue_next_read_cb(struct qeth_card *card, /* match against pending cmd requests */ spin_lock_irqsave(&card->lock, flags); - list_for_each_entry(tmp, &card->cmd_waiter_list, list) { + list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) { if (tmp->match && tmp->match(tmp, iob)) { request = tmp; /* take the object outside the lock */ @@ -6079,9 +6077,13 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue, QDIO_OUTBUF_STATE_FLAG_PENDING)) { WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED); - if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED, - QETH_QDIO_BUF_PENDING) == - QETH_QDIO_BUF_PRIMED) { + QETH_CARD_TEXT_(card, 5, "pel%u", bidx); + + switch (atomic_cmpxchg(&buffer->state, + QETH_QDIO_BUF_PRIMED, + QETH_QDIO_BUF_PENDING)) { + case QETH_QDIO_BUF_PRIMED: + /* We have initial ownership, no QAOB (yet): */ qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING); /* Handle race with qeth_qdio_handle_aob(): */ @@ -6089,39 +6091,49 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue, QETH_QDIO_BUF_NEED_QAOB)) { case QETH_QDIO_BUF_PENDING: /* No concurrent QAOB notification. 
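The surrounding hunk resolves the race between the TX completion path and the asynchronous QAOB notification with one atomic state transition per buffer: whichever side loses the cmpxchg defers to the winner. A reduced user-space model of that handshake using C11 atomics; the names and single-buffer setup are illustrative, not the driver's API:

#include <stdatomic.h>
#include <stdio.h>

enum buf_state { BUF_PRIMED, BUF_PENDING, BUF_QAOB_OK, BUF_EMPTY };

static _Atomic enum buf_state state = BUF_PRIMED;

/* TX completion path: claim the buffer if no QAOB outcome arrived yet. */
static void tx_complete(void)
{
        enum buf_state expected = BUF_PRIMED;

        if (atomic_compare_exchange_strong(&state, &expected, BUF_PENDING))
                printf("TX won: QAOB will finish this buffer later\n");
        else if (expected == BUF_QAOB_OK)
                printf("QAOB won: complete the buffer on the TX path now\n");
}

/* Async QAOB path: publish an outcome, never recycle the buffer itself. */
static void qaob_arrived(void)
{
        enum buf_state expected = BUF_PRIMED;

        atomic_compare_exchange_strong(&state, &expected, BUF_QAOB_OK);
}

int main(void)
{
        qaob_arrived();
        tx_complete();
        return 0;
}

The key property mirrored here is that the QAOB path only publishes a result; recycling the queue slot is always left to the TX completion side, which matches the removal of QETH_QDIO_BUF_HANDLED_DELAYED.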
*/ - break; + + /* Prepare the queue slot for immediate re-use: */ + qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements); + if (qeth_init_qdio_out_buf(queue, bidx)) { + QETH_CARD_TEXT(card, 2, "outofbuf"); + qeth_schedule_recovery(card); + } + + /* Skip clearing the buffer: */ + return; case QETH_QDIO_BUF_QAOB_OK: qeth_notify_skbs(queue, buffer, TX_NOTIFY_DELAYED_OK); - atomic_set(&buffer->state, - QETH_QDIO_BUF_HANDLED_DELAYED); + error = false; break; case QETH_QDIO_BUF_QAOB_ERROR: qeth_notify_skbs(queue, buffer, TX_NOTIFY_DELAYED_GENERALERROR); - atomic_set(&buffer->state, - QETH_QDIO_BUF_HANDLED_DELAYED); + error = true; break; default: WARN_ON_ONCE(1); } - } - - QETH_CARD_TEXT_(card, 5, "pel%u", bidx); - /* prepare the queue slot for re-use: */ - qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements); - if (qeth_init_qdio_out_buf(queue, bidx)) { - QETH_CARD_TEXT(card, 2, "outofbuf"); - qeth_schedule_recovery(card); + break; + case QETH_QDIO_BUF_QAOB_OK: + /* qeth_qdio_handle_aob() already received a QAOB: */ + qeth_notify_skbs(queue, buffer, TX_NOTIFY_OK); + error = false; + break; + case QETH_QDIO_BUF_QAOB_ERROR: + /* qeth_qdio_handle_aob() already received a QAOB: */ + qeth_notify_skbs(queue, buffer, TX_NOTIFY_GENERALERROR); + error = true; + break; + default: + WARN_ON_ONCE(1); } - - return; - } - - if (card->options.cq == QETH_CQ_ENABLED) + } else if (card->options.cq == QETH_CQ_ENABLED) { qeth_notify_skbs(queue, buffer, qeth_compute_cq_notification(sflags, 0)); + } + qeth_clear_output_buffer(queue, buffer, error, budget); } @@ -6376,13 +6388,11 @@ void qeth_core_free_discipline(struct qeth_card *card) const struct device_type qeth_generic_devtype = { .name = "qeth_generic", - .groups = qeth_generic_attr_groups, }; EXPORT_SYMBOL_GPL(qeth_generic_devtype); static const struct device_type qeth_osn_devtype = { .name = "qeth_osn", - .groups = qeth_osn_attr_groups, }; #define DBF_NAME_LEN 20 @@ -6562,6 +6572,11 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) if (rc) goto err_chp_desc; + if (IS_OSN(card)) + gdev->dev.groups = qeth_osn_dev_groups; + else + gdev->dev.groups = qeth_dev_groups; + enforced_disc = qeth_enforce_discipline(card); switch (enforced_disc) { case QETH_DISCIPLINE_UNDETERMINED: diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index 4441b3393eaf..a0f777f76f66 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -640,23 +640,17 @@ static struct attribute *qeth_blkt_device_attrs[] = { &dev_attr_inter_jumbo.attr, NULL, }; -const struct attribute_group qeth_device_blkt_group = { + +static const struct attribute_group qeth_dev_blkt_group = { .name = "blkt", .attrs = qeth_blkt_device_attrs, }; -EXPORT_SYMBOL_GPL(qeth_device_blkt_group); -static struct attribute *qeth_device_attrs[] = { - &dev_attr_state.attr, - &dev_attr_chpid.attr, - &dev_attr_if_name.attr, - &dev_attr_card_type.attr, +static struct attribute *qeth_dev_extended_attrs[] = { &dev_attr_inbuf_size.attr, &dev_attr_portno.attr, &dev_attr_portname.attr, &dev_attr_priority_queueing.attr, - &dev_attr_buffer_count.attr, - &dev_attr_recover.attr, &dev_attr_performance_stats.attr, &dev_attr_layer2.attr, &dev_attr_isolation.attr, @@ -664,18 +658,12 @@ static struct attribute *qeth_device_attrs[] = { &dev_attr_switch_attrs.attr, NULL, }; -const struct attribute_group qeth_device_attr_group = { - .attrs = qeth_device_attrs, -}; -EXPORT_SYMBOL_GPL(qeth_device_attr_group); -const struct attribute_group 
*qeth_generic_attr_groups[] = { - &qeth_device_attr_group, - &qeth_device_blkt_group, - NULL, +static const struct attribute_group qeth_dev_extended_group = { + .attrs = qeth_dev_extended_attrs, }; -static struct attribute *qeth_osn_device_attrs[] = { +static struct attribute *qeth_dev_attrs[] = { &dev_attr_state.attr, &dev_attr_chpid.attr, &dev_attr_if_name.attr, @@ -684,10 +672,19 @@ static struct attribute *qeth_osn_device_attrs[] = { &dev_attr_recover.attr, NULL, }; -static struct attribute_group qeth_osn_device_attr_group = { - .attrs = qeth_osn_device_attrs, + +static const struct attribute_group qeth_dev_group = { + .attrs = qeth_dev_attrs, }; -const struct attribute_group *qeth_osn_attr_groups[] = { - &qeth_osn_device_attr_group, + +const struct attribute_group *qeth_osn_dev_groups[] = { + &qeth_dev_group, + NULL, +}; + +const struct attribute_group *qeth_dev_groups[] = { + &qeth_dev_group, + &qeth_dev_extended_group, + &qeth_dev_blkt_group, NULL, }; diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h index 296d73d84326..7c646e2fed7e 100644 --- a/drivers/s390/net/qeth_l2.h +++ b/drivers/s390/net/qeth_l2.h @@ -11,8 +11,6 @@ extern const struct attribute_group *qeth_l2_attr_groups[]; -int qeth_l2_create_device_attributes(struct device *); -void qeth_l2_remove_device_attributes(struct device *); int qeth_bridgeport_query_ports(struct qeth_card *card, enum qeth_sbp_roles *role, enum qeth_sbp_states *state); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 393aef681e44..4ed0fb0705a5 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -2189,7 +2189,7 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev) mutex_init(&card->sbp_lock); if (gdev->dev.type == &qeth_generic_devtype) { - rc = qeth_l2_create_device_attributes(&gdev->dev); + rc = device_add_groups(&gdev->dev, qeth_l2_attr_groups); if (rc) return rc; } @@ -2203,7 +2203,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev) struct qeth_card *card = dev_get_drvdata(&gdev->dev); if (gdev->dev.type == &qeth_generic_devtype) - qeth_l2_remove_device_attributes(&gdev->dev); + device_remove_groups(&gdev->dev, qeth_l2_attr_groups); qeth_set_allowed_threads(card, 0, 1); wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c index 4ba3bc57263f..a617351fff57 100644 --- a/drivers/s390/net/qeth_l2_sys.c +++ b/drivers/s390/net/qeth_l2_sys.c @@ -376,26 +376,7 @@ static struct attribute_group qeth_l2_vnicc_attr_group = { .name = "vnicc", }; -static const struct attribute_group *qeth_l2_only_attr_groups[] = { - &qeth_l2_bridgeport_attr_group, - &qeth_l2_vnicc_attr_group, - NULL, -}; - -int qeth_l2_create_device_attributes(struct device *dev) -{ - return sysfs_create_groups(&dev->kobj, qeth_l2_only_attr_groups); -} - -void qeth_l2_remove_device_attributes(struct device *dev) -{ - sysfs_remove_groups(&dev->kobj, qeth_l2_only_attr_groups); -} - const struct attribute_group *qeth_l2_attr_groups[] = { - &qeth_device_attr_group, - &qeth_device_blkt_group, - /* l2 specific, see qeth_l2_only_attr_groups: */ &qeth_l2_bridgeport_attr_group, &qeth_l2_vnicc_attr_group, NULL, diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h index acd130cfbab3..30c2b31d99f6 100644 --- a/drivers/s390/net/qeth_l3.h +++ b/drivers/s390/net/qeth_l3.h @@ -103,8 +103,6 @@ extern const struct attribute_group *qeth_l3_attr_groups[]; int qeth_l3_ipaddr_to_string(enum 
qeth_prot_versions proto, const u8 *addr, char *buf); -int qeth_l3_create_device_attributes(struct device *); -void qeth_l3_remove_device_attributes(struct device *); int qeth_l3_setrouting_v4(struct qeth_card *); int qeth_l3_setrouting_v6(struct qeth_card *); int qeth_l3_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 264b6c782382..d138ac432d01 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1949,7 +1949,7 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev) return -ENOMEM; if (gdev->dev.type == &qeth_generic_devtype) { - rc = qeth_l3_create_device_attributes(&gdev->dev); + rc = device_add_groups(&gdev->dev, qeth_l3_attr_groups); if (rc) { destroy_workqueue(card->cmd_wq); return rc; @@ -1965,7 +1965,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) struct qeth_card *card = dev_get_drvdata(&cgdev->dev); if (cgdev->dev.type == &qeth_generic_devtype) - qeth_l3_remove_device_attributes(&cgdev->dev); + device_remove_groups(&cgdev->dev, qeth_l3_attr_groups); qeth_set_allowed_threads(card, 0, 1); wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 997fbb7006a7..1082380b21f8 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -805,28 +805,7 @@ static const struct attribute_group qeth_device_rxip_group = { .attrs = qeth_rxip_device_attrs, }; -static const struct attribute_group *qeth_l3_only_attr_groups[] = { - &qeth_l3_device_attr_group, - &qeth_device_ipato_group, - &qeth_device_vipa_group, - &qeth_device_rxip_group, - NULL, -}; - -int qeth_l3_create_device_attributes(struct device *dev) -{ - return sysfs_create_groups(&dev->kobj, qeth_l3_only_attr_groups); -} - -void qeth_l3_remove_device_attributes(struct device *dev) -{ - sysfs_remove_groups(&dev->kobj, qeth_l3_only_attr_groups); -} - const struct attribute_group *qeth_l3_attr_groups[] = { - &qeth_device_attr_group, - &qeth_device_blkt_group, - /* l3 specific, see qeth_l3_only_attr_groups: */ &qeth_l3_device_attr_group, &qeth_device_ipato_group, &qeth_device_vipa_group, diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 41cd66fc7d81..e158d3d62056 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -37,7 +37,6 @@ #include <linux/poll.h> #include <linux/vmalloc.h> #include <linux/irq_poll.h> -#include <linux/blk-mq-pci.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -114,10 +113,6 @@ unsigned int enable_sdev_max_qd; module_param(enable_sdev_max_qd, int, 0444); MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. 
Default: 0"); -int host_tagset_enable = 1; -module_param(host_tagset_enable, int, 0444); -MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)"); - MODULE_LICENSE("GPL"); MODULE_VERSION(MEGASAS_VERSION); MODULE_AUTHOR("[email protected]"); @@ -3124,19 +3119,6 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev, return 0; } -static int megasas_map_queues(struct Scsi_Host *shost) -{ - struct megasas_instance *instance; - - instance = (struct megasas_instance *)shost->hostdata; - - if (shost->nr_hw_queues == 1) - return 0; - - return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], - instance->pdev, instance->low_latency_index_start); -} - static void megasas_aen_polling(struct work_struct *work); /** @@ -3445,7 +3427,6 @@ static struct scsi_host_template megasas_template = { .eh_timed_out = megasas_reset_timer, .shost_attrs = megaraid_host_attrs, .bios_param = megasas_bios_param, - .map_queues = megasas_map_queues, .change_queue_depth = scsi_change_queue_depth, .max_segment_size = 0xffffffff, }; @@ -6827,26 +6808,6 @@ static int megasas_io_attach(struct megasas_instance *instance) host->max_lun = MEGASAS_MAX_LUN; host->max_cmd_len = 16; - /* Use shared host tagset only for fusion adaptors - * if there are managed interrupts (smp affinity enabled case). - * Single msix_vectors in kdump, so shared host tag is also disabled. - */ - - host->host_tagset = 0; - host->nr_hw_queues = 1; - - if ((instance->adapter_type != MFI_SERIES) && - (instance->msix_vectors > instance->low_latency_index_start) && - host_tagset_enable && - instance->smp_affinity_enable) { - host->host_tagset = 1; - host->nr_hw_queues = instance->msix_vectors - - instance->low_latency_index_start; - } - - dev_info(&instance->pdev->dev, - "Max firmware commands: %d shared with nr_hw_queues = %d\n", - instance->max_fw_cmds, host->nr_hw_queues); /* * Notify the mid-layer about the new controller */ diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index fd607287608e..b0c01cf0428f 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -359,29 +359,24 @@ megasas_get_msix_index(struct megasas_instance *instance, { int sdev_busy; - /* TBD - if sml remove device_busy in future, driver - * should track counter in internal structure. 
- */ - sdev_busy = atomic_read(&scmd->device->device_busy); + /* nr_hw_queue = 1 for MegaRAID */ + struct blk_mq_hw_ctx *hctx = + scmd->device->request_queue->queue_hw_ctx[0]; + + sdev_busy = atomic_read(&hctx->nr_active); if (instance->perf_mode == MR_BALANCED_PERF_MODE && - sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) { + sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) cmd->request_desc->SCSIIO.MSIxIndex = mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) / MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start); - } else if (instance->msix_load_balance) { + else if (instance->msix_load_balance) cmd->request_desc->SCSIIO.MSIxIndex = (mega_mod64(atomic64_add_return(1, &instance->total_io_count), instance->msix_vectors)); - } else if (instance->host->nr_hw_queues > 1) { - u32 tag = blk_mq_unique_tag(scmd->request); - - cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) + - instance->low_latency_index_start; - } else { + else cmd->request_desc->SCSIIO.MSIxIndex = instance->reply_map[raw_smp_processor_id()]; - } } /** @@ -961,6 +956,9 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance) if (megasas_alloc_cmdlist_fusion(instance)) goto fail_exit; + dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n", + instance->max_fw_cmds); + /* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */ io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; @@ -1104,9 +1102,8 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing) instance->perf_mode = MR_BALANCED_PERF_MODE; - dev_info(&instance->pdev->dev, "Performance mode :%s (latency index = %d)\n", - MEGASAS_PERF_MODE_2STR(instance->perf_mode), - instance->low_latency_index_start); + dev_info(&instance->pdev->dev, "Performance mode :%s\n", + MEGASAS_PERF_MODE_2STR(instance->perf_mode)); instance->fw_sync_cache_support = (scratch_pad_1 & MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 
1 : 0; diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index e4cc92bc4d94..bb940cbcbb5d 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -6459,7 +6459,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) r = _base_handshake_req_reply_wait(ioc, sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request, - sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10); + sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30); if (r != 0) { ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r); diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 0f2b681449e6..edd26a2570fa 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -664,7 +664,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL; struct _pcie_device *pcie_device = NULL; u16 smid; - u8 timeout; + unsigned long timeout; u8 issue_reset; u32 sz, sz_arg; void *psge; diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 0c65fbd41035..99c8ff81de74 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1246,6 +1246,11 @@ static void storvsc_on_channel_callback(void *context) request = (struct storvsc_cmd_request *) ((unsigned long)desc->trans_id); + if (hv_pkt_datalen(desc) < sizeof(struct vstor_packet) - vmscsi_size_delta) { + dev_err(&device->device, "Invalid packet len\n"); + continue; + } + if (request == &stor_device->init_request || request == &stor_device->reset_request) { memcpy(&request->vstor_packet, packet, @@ -1994,8 +1999,10 @@ static int storvsc_probe(struct hv_device *device, alloc_ordered_workqueue("storvsc_error_wq_%d", WQ_MEM_RECLAIM, host->host_no); - if (!host_dev->handle_error_wq) + if (!host_dev->handle_error_wq) { + ret = -ENOMEM; goto err_out2; + } INIT_WORK(&host_dev->host_scan_work, storvsc_host_scan); /* Register the HBA and start the scsi bus scan */ ret = scsi_add_host(host, &device->device); diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index 977ba91f4d0e..82c46b200c34 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -1976,7 +1976,9 @@ static int complete_rpm(struct device *dev, void *data) static void remove_unplugged_switch(struct tb_switch *sw) { - pm_runtime_get_sync(sw->dev.parent); + struct device *parent = get_device(sw->dev.parent); + + pm_runtime_get_sync(parent); /* * Signal this and switches below for rpm_complete because @@ -1987,8 +1989,10 @@ static void remove_unplugged_switch(struct tb_switch *sw) bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm); tb_switch_remove(sw); - pm_runtime_mark_last_busy(sw->dev.parent); - pm_runtime_put_autosuspend(sw->dev.parent); + pm_runtime_mark_last_busy(parent); + pm_runtime_put_autosuspend(parent); + + put_device(parent); } static void icm_free_unplugged_children(struct tb_switch *sw) diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 9f8b9a567b35..56ade99ef99f 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -2897,10 +2897,14 @@ void __do_SAK(struct tty_struct *tty) struct task_struct *g, *p; struct pid *session; int i; + unsigned long flags; if (!tty) return; - session = tty->session; + + spin_lock_irqsave(&tty->ctrl_lock, flags); + session = get_pid(tty->session); + spin_unlock_irqrestore(&tty->ctrl_lock, flags); tty_ldisc_flush(tty); @@ -2932,6 +2936,7 @@ void __do_SAK(struct tty_struct *tty) 
task_unlock(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); + put_pid(session); #endif } diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c index 28a23a0fef21..aa6d0537b379 100644 --- a/drivers/tty/tty_jobctrl.c +++ b/drivers/tty/tty_jobctrl.c @@ -103,8 +103,8 @@ static void __proc_set_tty(struct tty_struct *tty) put_pid(tty->session); put_pid(tty->pgrp); tty->pgrp = get_pid(task_pgrp(current)); - spin_unlock_irqrestore(&tty->ctrl_lock, flags); tty->session = get_pid(task_session(current)); + spin_unlock_irqrestore(&tty->ctrl_lock, flags); if (current->signal->tty) { tty_debug(tty, "current tty %s not NULL!!\n", current->signal->tty->name); @@ -293,20 +293,23 @@ void disassociate_ctty(int on_exit) spin_lock_irq(&current->sighand->siglock); put_pid(current->signal->tty_old_pgrp); current->signal->tty_old_pgrp = NULL; - tty = tty_kref_get(current->signal->tty); + spin_unlock_irq(&current->sighand->siglock); + if (tty) { unsigned long flags; + + tty_lock(tty); spin_lock_irqsave(&tty->ctrl_lock, flags); put_pid(tty->session); put_pid(tty->pgrp); tty->session = NULL; tty->pgrp = NULL; spin_unlock_irqrestore(&tty->ctrl_lock, flags); + tty_unlock(tty); tty_kref_put(tty); } - spin_unlock_irq(&current->sighand->siglock); /* Now clear signal->tty under the lock */ read_lock(&tasklist_lock); session_clear_tty(task_session(current)); @@ -477,14 +480,19 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t return -ENOTTY; if (retval) return retval; - if (!current->signal->tty || - (current->signal->tty != real_tty) || - (real_tty->session != task_session(current))) - return -ENOTTY; + if (get_user(pgrp_nr, p)) return -EFAULT; if (pgrp_nr < 0) return -EINVAL; + + spin_lock_irq(&real_tty->ctrl_lock); + if (!current->signal->tty || + (current->signal->tty != real_tty) || + (real_tty->session != task_session(current))) { + retval = -ENOTTY; + goto out_unlock_ctrl; + } rcu_read_lock(); pgrp = find_vpid(pgrp_nr); retval = -ESRCH; @@ -494,12 +502,12 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t if (session_of_pgrp(pgrp) != task_session(current)) goto out_unlock; retval = 0; - spin_lock_irq(&tty->ctrl_lock); put_pid(real_tty->pgrp); real_tty->pgrp = get_pid(pgrp); - spin_unlock_irq(&tty->ctrl_lock); out_unlock: rcu_read_unlock(); +out_unlock_ctrl: + spin_unlock_irq(&real_tty->ctrl_lock); return retval; } @@ -511,20 +519,30 @@ out_unlock: * * Obtain the session id of the tty. If there is no session * return an error. - * - * Locking: none. Reference to current->signal->tty is safe. */ static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) { + unsigned long flags; + pid_t sid; + /* * (tty == real_tty) is a cheap way of * testing if the tty is NOT a master pty.
*/ if (tty == real_tty && current->signal->tty != real_tty) return -ENOTTY; + + spin_lock_irqsave(&real_tty->ctrl_lock, flags); if (!real_tty->session) - return -ENOTTY; - return put_user(pid_vnr(real_tty->session), p); + goto err; + sid = pid_vnr(real_tty->session); + spin_unlock_irqrestore(&real_tty->ctrl_lock, flags); + + return put_user(sid, p); + +err: + spin_unlock_irqrestore(&real_tty->ctrl_lock, flags); + return -ENOTTY; } /* diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c index a0f73d4711ae..039ab5d2435e 100644 --- a/drivers/usb/cdns3/core.c +++ b/drivers/usb/cdns3/core.c @@ -427,7 +427,6 @@ static irqreturn_t cdns3_wakeup_irq(int irq, void *data) */ static int cdns3_probe(struct platform_device *pdev) { - struct usb_role_switch_desc sw_desc = { }; struct device *dev = &pdev->dev; struct resource *res; struct cdns3 *cdns; @@ -529,18 +528,21 @@ static int cdns3_probe(struct platform_device *pdev) if (ret) goto err2; - sw_desc.set = cdns3_role_set; - sw_desc.get = cdns3_role_get; - sw_desc.allow_userspace_control = true; - sw_desc.driver_data = cdns; - if (device_property_read_bool(dev, "usb-role-switch")) + if (device_property_read_bool(dev, "usb-role-switch")) { + struct usb_role_switch_desc sw_desc = { }; + + sw_desc.set = cdns3_role_set; + sw_desc.get = cdns3_role_get; + sw_desc.allow_userspace_control = true; + sw_desc.driver_data = cdns; sw_desc.fwnode = dev->fwnode; - cdns->role_sw = usb_role_switch_register(dev, &sw_desc); - if (IS_ERR(cdns->role_sw)) { - ret = PTR_ERR(cdns->role_sw); - dev_warn(dev, "Unable to register Role Switch\n"); - goto err3; + cdns->role_sw = usb_role_switch_register(dev, &sw_desc); + if (IS_ERR(cdns->role_sw)) { + ret = PTR_ERR(cdns->role_sw); + dev_warn(dev, "Unable to register Role Switch\n"); + goto err3; + } } if (cdns->wakeup_irq) { @@ -551,7 +553,7 @@ static int cdns3_probe(struct platform_device *pdev) if (ret) { dev_err(cdns->dev, "couldn't register wakeup irq handler\n"); - goto err3; + goto err4; } } @@ -582,7 +584,8 @@ static int cdns3_probe(struct platform_device *pdev) return 0; err4: cdns3_drd_exit(cdns); - usb_role_switch_unregister(cdns->role_sw); + if (cdns->role_sw) + usb_role_switch_unregister(cdns->role_sw); err3: set_phy_power_off(cdns); err2: diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c index 365f30fb1159..0aa85cc07ff1 100644 --- a/drivers/usb/cdns3/gadget.c +++ b/drivers/usb/cdns3/gadget.c @@ -1260,6 +1260,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, priv_req->end_trb = priv_ep->enqueue; cdns3_ep_inc_enq(priv_ep); trb = priv_ep->trb_pool + priv_ep->enqueue; + trb->length = 0; } while (sg_iter < num_trb); trb = priv_req->trb; diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 046f770a76da..c727cb5de871 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1324,7 +1324,7 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code, case FUNCTIONFS_ENDPOINT_DESC: { int desc_idx; - struct usb_endpoint_descriptor *desc; + struct usb_endpoint_descriptor desc1, *desc; switch (epfile->ffs->gadget->speed) { case USB_SPEED_SUPER: @@ -1336,10 +1336,12 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code, default: desc_idx = 0; } + desc = epfile->ep->descs[desc_idx]; + memcpy(&desc1, desc, desc->bLength); spin_unlock_irq(&epfile->ffs->eps_lock); - ret = copy_to_user((void __user *)value, desc, desc->bLength); + ret = copy_to_user((void __user *)value, &desc1, desc1.bLength); if 
(ret) ret = -EFAULT; return ret; diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c index 9ccdf2c216b5..6374501ba139 100644 --- a/drivers/usb/host/ohci-omap.c +++ b/drivers/usb/host/ohci-omap.c @@ -91,14 +91,14 @@ static int omap_ohci_transceiver_power(struct ohci_omap_priv *priv, int on) | ((1 << 5/*usb1*/) | (1 << 3/*usb2*/)), INNOVATOR_FPGA_CAM_USB_CONTROL); else if (priv->power) - gpiod_set_value(priv->power, 0); + gpiod_set_value_cansleep(priv->power, 0); } else { if (machine_is_omap_innovator() && cpu_is_omap1510()) __raw_writeb(__raw_readb(INNOVATOR_FPGA_CAM_USB_CONTROL) & ~((1 << 5/*usb1*/) | (1 << 3/*usb2*/)), INNOVATOR_FPGA_CAM_USB_CONTROL); else if (priv->power) - gpiod_set_value(priv->power, 1); + gpiod_set_value_cansleep(priv->power, 1); } return 0; diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index a2e2f56c88cd..28deaaec581f 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -81,10 +81,11 @@ #define CH341_QUIRK_SIMULATE_BREAK BIT(1) static const struct usb_device_id id_table[] = { - { USB_DEVICE(0x4348, 0x5523) }, + { USB_DEVICE(0x1a86, 0x5512) }, + { USB_DEVICE(0x1a86, 0x5523) }, { USB_DEVICE(0x1a86, 0x7522) }, { USB_DEVICE(0x1a86, 0x7523) }, - { USB_DEVICE(0x1a86, 0x5523) }, + { USB_DEVICE(0x4348, 0x5523) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c index 5ee48b0650c4..5f6b82ebccc5 100644 --- a/drivers/usb/serial/kl5kusb105.c +++ b/drivers/usb/serial/kl5kusb105.c @@ -276,12 +276,12 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) priv->cfg.unknown2 = cfg->unknown2; spin_unlock_irqrestore(&priv->lock, flags); + kfree(cfg); + /* READ_ON and urb submission */ rc = usb_serial_generic_open(tty, port); - if (rc) { - retval = rc; - goto err_free_cfg; - } + if (rc) + return rc; rc = usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), @@ -324,8 +324,6 @@ err_disable_read: KLSI_TIMEOUT); err_generic_close: usb_serial_generic_close(port); -err_free_cfg: - kfree(cfg); return retval; } diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 54ca85cc920d..56d6f6d83bd7 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -419,6 +419,7 @@ static void option_instat_callback(struct urb *urb); #define CINTERION_PRODUCT_PH8 0x0053 #define CINTERION_PRODUCT_AHXX 0x0055 #define CINTERION_PRODUCT_PLXX 0x0060 +#define CINTERION_PRODUCT_EXS82 0x006c #define CINTERION_PRODUCT_PH8_2RMNET 0x0082 #define CINTERION_PRODUCT_PH8_AUDIO 0x0083 #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084 @@ -1105,9 +1106,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff), .driver_info = NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) }, - { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0xff, 0xff), - .driver_info = NUMEP2 }, - { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0, 0) }, + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), + .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, @@ -1902,6 +1902,7 @@ static const struct usb_device_id option_ids[] 
= { { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff), .driver_info = RSVD(0) | RSVD(4) }, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EXS82, 0xff) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, @@ -2046,12 +2047,13 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, - { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */ + { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) }, { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */ .driver_info = RSVD(4) | RSVD(5) }, { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */ .driver_info = RSVD(6) }, + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */ diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index 560efd1479ba..e5a971b83e3f 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -92,7 +92,7 @@ static int slave_alloc (struct scsi_device *sdev) static int slave_configure(struct scsi_device *sdev) { struct us_data *us = host_to_us(sdev->host); - struct device *dev = sdev->host->dma_dev; + struct device *dev = us->pusb_dev->bus->sysdev; /* * Many devices have trouble transferring more than 32KB at a time, diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index c8a577309e8f..652d6d6f1f36 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -837,24 +837,17 @@ static int uas_slave_alloc(struct scsi_device *sdev) */ blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1)); + if (devinfo->flags & US_FL_MAX_SECTORS_64) + blk_queue_max_hw_sectors(sdev->request_queue, 64); + else if (devinfo->flags & US_FL_MAX_SECTORS_240) + blk_queue_max_hw_sectors(sdev->request_queue, 240); + return 0; } static int uas_slave_configure(struct scsi_device *sdev) { struct uas_dev_info *devinfo = sdev->hostdata; - struct device *dev = sdev->host->dma_dev; - - if (devinfo->flags & US_FL_MAX_SECTORS_64) - blk_queue_max_hw_sectors(sdev->request_queue, 64); - else if (devinfo->flags & US_FL_MAX_SECTORS_240) - blk_queue_max_hw_sectors(sdev->request_queue, 240); - else if (devinfo->udev->speed >= USB_SPEED_SUPER) - blk_queue_max_hw_sectors(sdev->request_queue, 2048); - - blk_queue_max_hw_sectors(sdev->request_queue, - min_t(size_t, queue_max_hw_sectors(sdev->request_queue), - dma_max_mapping_size(dev) >> SECTOR_SHIFT)); if (devinfo->flags & US_FL_NO_REPORT_OPCODES) sdev->no_report_opcodes = 1; @@ -1040,7 +1033,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id) shost->can_queue = devinfo->qdepth - 2; usb_set_intfdata(intf, shost); - result = scsi_add_host_with_dma(shost, &intf->dev, udev->bus->sysdev); + result = scsi_add_host(shost, &intf->dev); if (result) goto free_streams; diff --git a/drivers/usb/storage/usb.c 
b/drivers/usb/storage/usb.c index c2ef367cf257..94a64729dc27 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -1049,9 +1049,8 @@ int usb_stor_probe2(struct us_data *us) goto BadDevice; usb_autopm_get_interface_no_resume(us->pusb_intf); snprintf(us->scsi_name, sizeof(us->scsi_name), "usb-storage %s", - dev_name(dev)); - result = scsi_add_host_with_dma(us_to_host(us), dev, - us->pusb_dev->bus->sysdev); + dev_name(&us->pusb_intf->dev)); + result = scsi_add_host(us_to_host(us), dev); if (result) { dev_warn(dev, "Unable to add the scsi host\n"); diff --git a/drivers/vdpa/mlx5/Makefile b/drivers/vdpa/mlx5/Makefile index 89a5bededc9f..f717978c83bf 100644 --- a/drivers/vdpa/mlx5/Makefile +++ b/drivers/vdpa/mlx5/Makefile @@ -1,4 +1,4 @@ subdir-ccflags-y += -I$(srctree)/drivers/vdpa/mlx5/core obj-$(CONFIG_MLX5_VDPA_NET) += mlx5_vdpa.o -mlx5_vdpa-$(CONFIG_MLX5_VDPA_NET) += net/main.o net/mlx5_vnet.o core/resources.o core/mr.o +mlx5_vdpa-$(CONFIG_MLX5_VDPA_NET) += net/mlx5_vnet.o core/resources.o core/mr.o diff --git a/drivers/vdpa/mlx5/net/main.c b/drivers/vdpa/mlx5/net/main.c deleted file mode 100644 index 838cd98386ff..000000000000 --- a/drivers/vdpa/mlx5/net/main.c +++ /dev/null @@ -1,76 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB -/* Copyright (c) 2020 Mellanox Technologies Ltd. */ - -#include <linux/module.h> -#include <linux/mlx5/driver.h> -#include <linux/mlx5/device.h> -#include "mlx5_vdpa_ifc.h" -#include "mlx5_vnet.h" - -MODULE_AUTHOR("Eli Cohen <[email protected]>"); -MODULE_DESCRIPTION("Mellanox VDPA driver"); -MODULE_LICENSE("Dual BSD/GPL"); - -static bool required_caps_supported(struct mlx5_core_dev *mdev) -{ - u8 event_mode; - u64 got; - - got = MLX5_CAP_GEN_64(mdev, general_obj_types); - - if (!(got & MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q)) - return false; - - event_mode = MLX5_CAP_DEV_VDPA_EMULATION(mdev, event_mode); - if (!(event_mode & MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE)) - return false; - - if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, eth_frame_offload_type)) - return false; - - return true; -} - -static void *mlx5_vdpa_add(struct mlx5_core_dev *mdev) -{ - struct mlx5_vdpa_dev *vdev; - - if (mlx5_core_is_pf(mdev)) - return NULL; - - if (!required_caps_supported(mdev)) { - dev_info(mdev->device, "virtio net emulation not supported\n"); - return NULL; - } - vdev = mlx5_vdpa_add_dev(mdev); - if (IS_ERR(vdev)) - return NULL; - - return vdev; -} - -static void mlx5_vdpa_remove(struct mlx5_core_dev *mdev, void *context) -{ - struct mlx5_vdpa_dev *vdev = context; - - mlx5_vdpa_remove_dev(vdev); -} - -static struct mlx5_interface mlx5_vdpa_interface = { - .add = mlx5_vdpa_add, - .remove = mlx5_vdpa_remove, - .protocol = MLX5_INTERFACE_PROTOCOL_VDPA, -}; - -static int __init mlx5_vdpa_init(void) -{ - return mlx5_register_interface(&mlx5_vdpa_interface); -} - -static void __exit mlx5_vdpa_exit(void) -{ - mlx5_unregister_interface(&mlx5_vdpa_interface); -} - -module_init(mlx5_vdpa_init); -module_exit(mlx5_vdpa_exit); diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 1fa6fcac8299..f1d54814db97 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -1,18 +1,28 @@ // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2020 Mellanox Technologies Ltd. 
*/ +#include <linux/module.h> #include <linux/vdpa.h> +#include <linux/vringh.h> +#include <uapi/linux/virtio_net.h> #include <uapi/linux/virtio_ids.h> #include <linux/virtio_config.h> +#include <linux/auxiliary_bus.h> +#include <linux/mlx5/cq.h> #include <linux/mlx5/qp.h> #include <linux/mlx5/device.h> +#include <linux/mlx5/driver.h> #include <linux/mlx5/vport.h> #include <linux/mlx5/fs.h> -#include <linux/mlx5/device.h> -#include "mlx5_vnet.h" -#include "mlx5_vdpa_ifc.h" +#include <linux/mlx5/mlx5_ifc_vdpa.h> #include "mlx5_vdpa.h" +MODULE_AUTHOR("Eli Cohen <[email protected]>"); +MODULE_DESCRIPTION("Mellanox VDPA driver"); +MODULE_LICENSE("Dual BSD/GPL"); + +#define to_mlx5_vdpa_ndev(__mvdev) \ + container_of(__mvdev, struct mlx5_vdpa_net, mvdev) #define to_mvdev(__vdev) container_of((__vdev), struct mlx5_vdpa_dev, vdev) #define VALID_FEATURES_MASK \ @@ -159,6 +169,11 @@ static bool mlx5_vdpa_debug; mlx5_vdpa_info(mvdev, "%s\n", #_status); \ } while (0) +static inline u32 mlx5_vdpa_max_qps(int max_vqs) +{ + return max_vqs / 2; +} + static void print_status(struct mlx5_vdpa_dev *mvdev, u8 status, bool set) { if (status & ~VALID_STATUS_MASK) @@ -1928,8 +1943,11 @@ static void init_mvqs(struct mlx5_vdpa_net *ndev) } } -void *mlx5_vdpa_add_dev(struct mlx5_core_dev *mdev) +static int mlx5v_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) { + struct mlx5_adev *madev = container_of(adev, struct mlx5_adev, adev); + struct mlx5_core_dev *mdev = madev->mdev; struct virtio_net_config *config; struct mlx5_vdpa_dev *mvdev; struct mlx5_vdpa_net *ndev; @@ -1943,7 +1961,7 @@ void *mlx5_vdpa_add_dev(struct mlx5_core_dev *mdev) ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops, 2 * mlx5_vdpa_max_qps(max_vqs)); if (IS_ERR(ndev)) - return ndev; + return PTR_ERR(ndev); ndev->mvdev.max_vqs = max_vqs; mvdev = &ndev->mvdev; @@ -1972,7 +1990,8 @@ void *mlx5_vdpa_add_dev(struct mlx5_core_dev *mdev) if (err) goto err_reg; - return ndev; + dev_set_drvdata(&adev->dev, ndev); + return 0; err_reg: free_resources(ndev); @@ -1981,10 +2000,28 @@ err_res: err_mtu: mutex_destroy(&ndev->reslock); put_device(&mvdev->vdev.dev); - return ERR_PTR(err); + return err; } -void mlx5_vdpa_remove_dev(struct mlx5_vdpa_dev *mvdev) +static void mlx5v_remove(struct auxiliary_device *adev) { + struct mlx5_vdpa_dev *mvdev = dev_get_drvdata(&adev->dev); + vdpa_unregister_device(&mvdev->vdev); } + +static const struct auxiliary_device_id mlx5v_id_table[] = { + { .name = MLX5_ADEV_NAME ".vnet", }, + {}, +}; + +MODULE_DEVICE_TABLE(auxiliary, mlx5v_id_table); + +static struct auxiliary_driver mlx5v_driver = { + .name = "vnet", + .probe = mlx5v_probe, + .remove = mlx5v_remove, + .id_table = mlx5v_id_table, +}; + +module_auxiliary_driver(mlx5v_driver); diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.h b/drivers/vdpa/mlx5/net/mlx5_vnet.h deleted file mode 100644 index f2d6d68b020e..000000000000 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.h +++ /dev/null @@ -1,24 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ -/* Copyright (c) 2020 Mellanox Technologies Ltd. 
*/ - -#ifndef __MLX5_VNET_H_ -#define __MLX5_VNET_H_ - -#include <linux/vdpa.h> -#include <linux/virtio_net.h> -#include <linux/vringh.h> -#include <linux/mlx5/driver.h> -#include <linux/mlx5/cq.h> -#include <linux/mlx5/qp.h> -#include "mlx5_vdpa.h" - -static inline u32 mlx5_vdpa_max_qps(int max_vqs) -{ - return max_vqs / 2; -} - -#define to_mlx5_vdpa_ndev(__mvdev) container_of(__mvdev, struct mlx5_vdpa_net, mvdev) -void *mlx5_vdpa_add_dev(struct mlx5_core_dev *mdev); -void mlx5_vdpa_remove_dev(struct mlx5_vdpa_dev *mvdev); - -#endif /* __MLX5_VNET_H_ */ diff --git a/fs/afs/super.c b/fs/afs/super.c index 6c5900df6aa5..e38bb1e7a4d2 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -230,6 +230,9 @@ static int afs_parse_source(struct fs_context *fc, struct fs_parameter *param) _enter(",%s", name); + if (fc->source) + return invalf(fc, "kAFS: Multiple sources not supported"); + if (!name) { printk(KERN_ERR "kAFS: no volume name specified\n"); return -EINVAL; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 28c1459fb0fc..44f9cce57099 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -4546,7 +4546,8 @@ static void set_root_ses(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses, if (ses) { spin_lock(&cifs_tcp_ses_lock); ses->ses_count++; - ses->tcon_ipc->remap = cifs_remap(cifs_sb); + if (ses->tcon_ipc) + ses->tcon_ipc->remap = cifs_remap(cifs_sb); spin_unlock(&cifs_tcp_ses_lock); } *root_ses = ses; diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index dab94f67c988..3d914d7d0d11 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -3114,8 +3114,8 @@ smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon, rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE; rc = SMB2_ioctl_init(tcon, server, - &rqst[1], fid.persistent_fid, - fid.volatile_fid, FSCTL_GET_REPARSE_POINT, + &rqst[1], COMPOUND_FID, + COMPOUND_FID, FSCTL_GET_REPARSE_POINT, true /* is_fctl */, NULL, 0, CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 445e80862865..acb72705062d 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -2272,17 +2272,15 @@ static struct crt_sd_ctxt * create_sd_buf(umode_t mode, bool set_owner, unsigned int *len) { struct crt_sd_ctxt *buf; - struct cifs_ace *pace; - unsigned int sdlen, acelen; + __u8 *ptr, *aclptr; + unsigned int acelen, acl_size, ace_count; unsigned int owner_offset = 0; unsigned int group_offset = 0; + struct smb3_acl acl; - *len = roundup(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 2), 8); + *len = roundup(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8); if (set_owner) { - /* offset fields are from beginning of security descriptor not of create context */ - owner_offset = sizeof(struct smb3_acl) + (sizeof(struct cifs_ace) * 2); - /* sizeof(struct owner_group_sids) is already multiple of 8 so no need to round */ *len += sizeof(struct owner_group_sids); } @@ -2291,26 +2289,22 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len) if (buf == NULL) return buf; + ptr = (__u8 *)&buf[1]; if (set_owner) { + /* offset fields are from beginning of security descriptor not of create context */ + owner_offset = ptr - (__u8 *)&buf->sd; buf->sd.OffsetOwner = cpu_to_le32(owner_offset); - group_offset = owner_offset + sizeof(struct owner_sid); + group_offset = owner_offset + offsetof(struct owner_group_sids, group); buf->sd.OffsetGroup = cpu_to_le32(group_offset); + + setup_owner_group_sids(ptr); + ptr += sizeof(struct owner_group_sids); } else { buf->sd.OffsetOwner = 0; 
buf->sd.OffsetGroup = 0; } - sdlen = sizeof(struct smb3_sd) + sizeof(struct smb3_acl) + - 2 * sizeof(struct cifs_ace); - if (set_owner) { - sdlen += sizeof(struct owner_group_sids); - setup_owner_group_sids(owner_offset + sizeof(struct create_context) + 8 /* name */ - + (char *)buf); - } - - buf->ccontext.DataOffset = cpu_to_le16(offsetof - (struct crt_sd_ctxt, sd)); - buf->ccontext.DataLength = cpu_to_le32(sdlen); + buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, sd)); buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, Name)); buf->ccontext.NameLength = cpu_to_le16(4); /* SMB2_CREATE_SD_BUFFER_TOKEN is "SecD" */ @@ -2319,6 +2313,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len) buf->Name[2] = 'c'; buf->Name[3] = 'D'; buf->sd.Revision = 1; /* Must be one see MS-DTYP 2.4.6 */ + /* * ACL is "self relative" ie ACL is stored in contiguous block of memory * and "DP" ie the DACL is present @@ -2326,28 +2321,38 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len) buf->sd.Control = cpu_to_le16(ACL_CONTROL_SR | ACL_CONTROL_DP); /* offset owner, group and Sbz1 and SACL are all zero */ - buf->sd.OffsetDacl = cpu_to_le32(sizeof(struct smb3_sd)); - buf->acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */ + buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd); + /* Ship the ACL for now. we will copy it into buf later. */ + aclptr = ptr; + ptr += sizeof(struct cifs_acl); /* create one ACE to hold the mode embedded in reserved special SID */ - pace = (struct cifs_ace *)(sizeof(struct crt_sd_ctxt) + (char *)buf); - acelen = setup_special_mode_ACE(pace, (__u64)mode); + acelen = setup_special_mode_ACE((struct cifs_ace *)ptr, (__u64)mode); + ptr += acelen; + acl_size = acelen + sizeof(struct smb3_acl); + ace_count = 1; if (set_owner) { /* we do not need to reallocate buffer to add the two more ACEs. 
plenty of space */ - pace = (struct cifs_ace *)(acelen + (sizeof(struct crt_sd_ctxt) + (char *)buf)); - acelen += setup_special_user_owner_ACE(pace); - /* it does not appear necessary to add an ACE for the NFS group SID */ - buf->acl.AceCount = cpu_to_le16(3); - } else - buf->acl.AceCount = cpu_to_le16(2); + acelen = setup_special_user_owner_ACE((struct cifs_ace *)ptr); + ptr += acelen; + acl_size += acelen; + ace_count += 1; + } /* and one more ACE to allow access for authenticated users */ - pace = (struct cifs_ace *)(acelen + (sizeof(struct crt_sd_ctxt) + - (char *)buf)); - acelen += setup_authusers_ACE(pace); - - buf->acl.AclSize = cpu_to_le16(sizeof(struct cifs_acl) + acelen); + acelen = setup_authusers_ACE((struct cifs_ace *)ptr); + ptr += acelen; + acl_size += acelen; + ace_count += 1; + + acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */ + acl.AclSize = cpu_to_le16(acl_size); + acl.AceCount = cpu_to_le16(ace_count); + memcpy(aclptr, &acl, sizeof(struct cifs_acl)); + + buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd); + *len = ptr - (__u8 *)buf; return buf; } diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index f05f9b12f689..fa57b03ca98c 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -963,8 +963,6 @@ struct crt_sd_ctxt { struct create_context ccontext; __u8 Name[8]; struct smb3_sd sd; - struct smb3_acl acl; - /* Followed by at least 4 ACEs */ } __packed; diff --git a/fs/coredump.c b/fs/coredump.c index 0cd9056d79cc..c6acfc694f65 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -229,7 +229,8 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm, */ if (ispipe) { if (isspace(*pat_ptr)) { - was_space = true; + if (cn->used != 0) + was_space = true; pat_ptr++; continue; } else if (was_space) { diff --git a/fs/io_uring.c b/fs/io_uring.c index 1023f7b44cea..a2a7c65a77aa 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -4499,7 +4499,8 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req, return -EFAULT; if (clen < 0) return -EINVAL; - sr->len = iomsg->iov[0].iov_len; + sr->len = clen; + iomsg->iov[0].iov_len = clen; iomsg->iov = NULL; } else { ret = __import_iovec(READ, (struct iovec __user *)uiov, len, diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index 88e1763e02f3..e2a488d403a6 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -205,3 +205,12 @@ config NFS_DISABLE_UDP_SUPPORT Choose Y here to disable the use of NFS over UDP. NFS over UDP on modern networks (1Gb+) can lead to data corruption caused by fragmentation during high loads. + +config NFS_V4_2_READ_PLUS + bool "NFS: Enable support for the NFSv4.2 READ_PLUS operation" + depends on NFS_V4_2 + default n + help + This is intended for developers only. The READ_PLUS operation has + been shown to have issues under specific conditions and should not + be used in production. 
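The new READ_PLUS switch above is consumed at build time rather than at run time: the fs/nfs/nfs4proc.c hunk later in this diff wraps the READ_PLUS setup in "#if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS". As a minimal sketch of that gating pattern only -- the helper name below is an illustrative placeholder and does not appear in the kernel tree:

    /*
     * Sketch only: a bool Kconfig symbol such as NFS_V4_2_READ_PLUS
     * becomes a CONFIG_* macro that gates code at compile time. The
     * real guard in this series lives in fs/nfs/nfs4proc.c; the
     * helper here is a made-up stand-in.
     */
    #include <linux/types.h>

    #if defined(CONFIG_NFS_V4_2) && defined(CONFIG_NFS_V4_2_READ_PLUS)
    static inline bool nfs_read_plus_enabled(void)
    {
    	return true;	/* built with CONFIG_NFS_V4_2_READ_PLUS=y */
    }
    #else
    static inline bool nfs_read_plus_enabled(void)
    {
    	return false;	/* option off (the default n) */
    }
    #endif

With the option left at its default of n, code gated this way compiles out entirely, so the plain READ operation is used instead.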
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index a163533446fa..24bf5797f88a 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -838,7 +838,7 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_pgio_mirror *pgm; struct nfs4_ff_layout_mirror *mirror; struct nfs4_pnfs_ds *ds; - u32 ds_idx, i; + u32 ds_idx; retry: ff_layout_pg_check_layout(pgio, req); @@ -864,11 +864,9 @@ retry: goto retry; } - for (i = 0; i < pgio->pg_mirror_count; i++) { - mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i); - pgm = &pgio->pg_mirrors[i]; - pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize; - } + mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx); + pgm = &pgio->pg_mirrors[0]; + pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize; pgio->pg_mirror_idx = ds_idx; @@ -985,6 +983,21 @@ out: return 1; } +static u32 +ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx) +{ + u32 old = desc->pg_mirror_idx; + + desc->pg_mirror_idx = idx; + return old; +} + +static struct nfs_pgio_mirror * +ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx) +{ + return &desc->pg_mirrors[idx]; +} + static const struct nfs_pageio_ops ff_layout_pg_read_ops = { .pg_init = ff_layout_pg_init_read, .pg_test = pnfs_generic_pg_test, @@ -998,6 +1011,8 @@ static const struct nfs_pageio_ops ff_layout_pg_write_ops = { .pg_doio = pnfs_generic_pg_writepages, .pg_get_mirror_count = ff_layout_pg_get_mirror_count_write, .pg_cleanup = pnfs_generic_pg_cleanup, + .pg_get_mirror = ff_layout_pg_get_mirror_write, + .pg_set_mirror = ff_layout_pg_set_mirror_write, }; static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs) diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 2b2211d1234e..4fc61e3d098d 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -1241,12 +1241,13 @@ static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf, .rpc_resp = &res, }; u32 xdrlen; - int ret, np; + int ret, np, i; + ret = -ENOMEM; res.scratch = alloc_page(GFP_KERNEL); if (!res.scratch) - return -ENOMEM; + goto out; xdrlen = nfs42_listxattr_xdrsize(buflen); if (xdrlen > server->lxasize) @@ -1254,9 +1255,12 @@ static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf, np = xdrlen / PAGE_SIZE + 1; pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL); - if (pages == NULL) { - __free_page(res.scratch); - return -ENOMEM; + if (!pages) + goto out_free_scratch; + for (i = 0; i < np; i++) { + pages[i] = alloc_page(GFP_KERNEL); + if (!pages[i]) + goto out_free_pages; } arg.xattr_pages = pages; @@ -1271,14 +1275,15 @@ static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf, *eofp = res.eof; } +out_free_pages: while (--np >= 0) { if (pages[np]) __free_page(pages[np]); } - - __free_page(res.scratch); kfree(pages); - +out_free_scratch: + __free_page(res.scratch); +out: return ret; } diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c index 6e060a88f98c..8432bd6b95f0 100644 --- a/fs/nfs/nfs42xdr.c +++ b/fs/nfs/nfs42xdr.c @@ -1528,7 +1528,6 @@ static void nfs4_xdr_enc_listxattrs(struct rpc_rqst *req, rpc_prepare_reply_pages(req, args->xattr_pages, 0, args->count, hdr.replen); - req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES; encode_nops(&hdr); } diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 9d354de613da..57b3821d975a 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -377,10 +377,10 @@ static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt, 
goto out_stateowner; set_bit(NFS_SRV_SSC_COPY_STATE, &ctx->state->flags); - set_bit(NFS_OPEN_STATE, &ctx->state->flags); memcpy(&ctx->state->open_stateid.other, &stateid->other, NFS4_STATEID_OTHER_SIZE); update_open_stateid(ctx->state, stateid, NULL, filep->f_mode); + set_bit(NFS_OPEN_STATE, &ctx->state->flags); nfs_file_set_open_context(filep, ctx); put_nfs_open_context(ctx); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 9e0ca9b2b210..e89468678ae1 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5309,7 +5309,7 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) nfs4_read_done_cb(task, hdr); } -#ifdef CONFIG_NFS_V4_2 +#if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS static void nfs42_read_plus_support(struct nfs_server *server, struct rpc_message *msg) { if (server->caps & NFS_CAP_READ_PLUS) diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 6985cacf4700..78c9c4bdef2b 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -31,13 +31,29 @@ static struct kmem_cache *nfs_page_cachep; static const struct rpc_call_ops nfs_pgio_common_ops; +static struct nfs_pgio_mirror * +nfs_pgio_get_mirror(struct nfs_pageio_descriptor *desc, u32 idx) +{ + if (desc->pg_ops->pg_get_mirror) + return desc->pg_ops->pg_get_mirror(desc, idx); + return &desc->pg_mirrors[0]; +} + struct nfs_pgio_mirror * nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc) { - return &desc->pg_mirrors[desc->pg_mirror_idx]; + return nfs_pgio_get_mirror(desc, desc->pg_mirror_idx); } EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror); +static u32 +nfs_pgio_set_current_mirror(struct nfs_pageio_descriptor *desc, u32 idx) +{ + if (desc->pg_ops->pg_set_mirror) + return desc->pg_ops->pg_set_mirror(desc, idx); + return desc->pg_mirror_idx; +} + void nfs_pgheader_init(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr, void (*release)(struct nfs_pgio_header *hdr)) @@ -1259,7 +1275,7 @@ static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc) return; for (midx = 0; midx < desc->pg_mirror_count; midx++) { - mirror = &desc->pg_mirrors[midx]; + mirror = nfs_pgio_get_mirror(desc, midx); desc->pg_completion_ops->error_cleanup(&mirror->pg_list, desc->pg_error); } @@ -1293,12 +1309,12 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, goto out_failed; } - desc->pg_mirror_idx = midx; + nfs_pgio_set_current_mirror(desc, midx); if (!nfs_pageio_add_request_mirror(desc, dupreq)) goto out_cleanup_subreq; } - desc->pg_mirror_idx = 0; + nfs_pgio_set_current_mirror(desc, 0); if (!nfs_pageio_add_request_mirror(desc, req)) goto out_failed; @@ -1320,10 +1336,12 @@ out_failed: static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc, u32 mirror_idx) { - struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx]; - u32 restore_idx = desc->pg_mirror_idx; + struct nfs_pgio_mirror *mirror; + u32 restore_idx; + + restore_idx = nfs_pgio_set_current_mirror(desc, mirror_idx); + mirror = nfs_pgio_current_mirror(desc); - desc->pg_mirror_idx = mirror_idx; for (;;) { nfs_pageio_doio(desc); if (desc->pg_error < 0 || !mirror->pg_recoalesce) @@ -1331,7 +1349,7 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc, if (!nfs_do_recoalesce(desc)) break; } - desc->pg_mirror_idx = restore_idx; + nfs_pgio_set_current_mirror(desc, restore_idx); } /* @@ -1405,7 +1423,7 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index) u32 midx; for (midx = 0; midx < desc->pg_mirror_count; midx++) { - mirror = 
&desc->pg_mirrors[midx]; + mirror = nfs_pgio_get_mirror(desc, midx); if (!list_empty(&mirror->pg_list)) { prev = nfs_list_entry(mirror->pg_list.prev); if (index != prev->wb_index + 1) { diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 217aa2705d5d..ee5a235b3056 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1599,11 +1599,15 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, src = *ppos; svpfn = src / PM_ENTRY_BYTES; - start_vaddr = svpfn << PAGE_SHIFT; end_vaddr = mm->task_size; /* watch out for wraparound */ - if (svpfn > mm->task_size >> PAGE_SHIFT) + start_vaddr = end_vaddr; + if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) + start_vaddr = untagged_addr(svpfn << PAGE_SHIFT); + + /* Ensure the address is inside the task */ + if (start_vaddr > mm->task_size) start_vaddr = end_vaddr; /* diff --git a/fs/seq_file.c b/fs/seq_file.c index 3b20e21604e7..03a369ccd28c 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -168,12 +168,14 @@ EXPORT_SYMBOL(seq_read); ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter) { struct seq_file *m = iocb->ki_filp->private_data; - size_t size = iov_iter_count(iter); size_t copied = 0; size_t n; void *p; int err = 0; + if (!iov_iter_count(iter)) + return 0; + mutex_lock(&m->lock); /* @@ -206,36 +208,34 @@ ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter) if (!m->buf) goto Enomem; } - /* if not empty - flush it first */ + // something left in the buffer - copy it out first if (m->count) { - n = min(m->count, size); - if (copy_to_iter(m->buf + m->from, n, iter) != n) - goto Efault; + n = copy_to_iter(m->buf + m->from, m->count, iter); m->count -= n; m->from += n; - size -= n; copied += n; - if (!size) + if (m->count) // hadn't managed to copy everything goto Done; } - /* we need at least one record in buffer */ + // get a non-empty record in the buffer m->from = 0; p = m->op->start(m, &m->index); while (1) { err = PTR_ERR(p); - if (!p || IS_ERR(p)) + if (!p || IS_ERR(p)) // EOF or an error break; err = m->op->show(m, p); - if (err < 0) + if (err < 0) // hard error break; - if (unlikely(err)) + if (unlikely(err)) // ->show() says "skip it" m->count = 0; - if (unlikely(!m->count)) { + if (unlikely(!m->count)) { // empty record p = m->op->next(m, p, &m->index); continue; } - if (m->count < m->size) + if (!seq_has_overflowed(m)) // got it goto Fill; + // need a bigger buffer m->op->stop(m, p); kvfree(m->buf); m->count = 0; @@ -244,11 +244,14 @@ ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter) goto Enomem; p = m->op->start(m, &m->index); } + // EOF or an error m->op->stop(m, p); m->count = 0; goto Done; Fill: - /* they want more? let's try to get some more */ + // one non-empty record is in the buffer; if they want more, + // try to fit more in, but in any case we need to advance + // the iterator once for every record shown. 
while (1) { size_t offs = m->count; loff_t pos = m->index; @@ -259,30 +262,27 @@ Fill: m->op->next); m->index++; } - if (!p || IS_ERR(p)) { - err = PTR_ERR(p); + if (!p || IS_ERR(p)) // no next record for us break; - } - if (m->count >= size) + if (m->count >= iov_iter_count(iter)) break; err = m->op->show(m, p); - if (seq_has_overflowed(m) || err) { + if (err > 0) { // ->show() says "skip it" m->count = offs; - if (likely(err <= 0)) - break; + } else if (err || seq_has_overflowed(m)) { + m->count = offs; + break; } } m->op->stop(m, p); - n = min(m->count, size); - if (copy_to_iter(m->buf, n, iter) != n) - goto Efault; + n = copy_to_iter(m->buf, m->count, iter); copied += n; m->count -= n; m->from = n; Done: - if (!copied) - copied = err; - else { + if (unlikely(!copied)) { + copied = m->count ? -EFAULT : err; + } else { iocb->ki_pos += copied; m->read_pos += copied; } @@ -291,9 +291,6 @@ Done: Enomem: err = -ENOMEM; goto Done; -Efault: - err = -EFAULT; - goto Done; } EXPORT_SYMBOL(seq_read_iter); diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index ff5930be096c..bec47f2d074b 100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -691,21 +691,23 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from) bio->bi_opf |= REQ_FUA; ret = bio_iov_iter_get_pages(bio, from); - if (unlikely(ret)) { - bio_io_error(bio); - return ret; - } + if (unlikely(ret)) + goto out_release; + size = bio->bi_iter.bi_size; - task_io_account_write(ret); + task_io_account_write(size); if (iocb->ki_flags & IOCB_HIPRI) bio_set_polled(bio, iocb); ret = submit_bio_wait(bio); + zonefs_file_write_dio_end_io(iocb, size, ret, 0); + +out_release: + bio_release_pages(bio, false); bio_put(bio); - zonefs_file_write_dio_end_io(iocb, size, ret, 0); if (ret >= 0) { iocb->ki_pos += size; return size; diff --git a/include/linux/auxiliary_bus.h b/include/linux/auxiliary_bus.h new file mode 100644 index 000000000000..fc51d45f106b --- /dev/null +++ b/include/linux/auxiliary_bus.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019-2020 Intel Corporation + * + * Please see Documentation/driver-api/auxiliary_bus.rst for more information. 
+ */ + +#ifndef _AUXILIARY_BUS_H_ +#define _AUXILIARY_BUS_H_ + +#include <linux/device.h> +#include <linux/mod_devicetable.h> + +struct auxiliary_device { + struct device dev; + const char *name; + u32 id; +}; + +struct auxiliary_driver { + int (*probe)(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id); + void (*remove)(struct auxiliary_device *auxdev); + void (*shutdown)(struct auxiliary_device *auxdev); + int (*suspend)(struct auxiliary_device *auxdev, pm_message_t state); + int (*resume)(struct auxiliary_device *auxdev); + const char *name; + struct device_driver driver; + const struct auxiliary_device_id *id_table; +}; + +static inline struct auxiliary_device *to_auxiliary_dev(struct device *dev) +{ + return container_of(dev, struct auxiliary_device, dev); +} + +static inline struct auxiliary_driver *to_auxiliary_drv(struct device_driver *drv) +{ + return container_of(drv, struct auxiliary_driver, driver); +} + +int auxiliary_device_init(struct auxiliary_device *auxdev); +int __auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname); +#define auxiliary_device_add(auxdev) __auxiliary_device_add(auxdev, KBUILD_MODNAME) + +static inline void auxiliary_device_uninit(struct auxiliary_device *auxdev) +{ + put_device(&auxdev->dev); +} + +static inline void auxiliary_device_delete(struct auxiliary_device *auxdev) +{ + device_del(&auxdev->dev); +} + +int __auxiliary_driver_register(struct auxiliary_driver *auxdrv, struct module *owner, + const char *modname); +#define auxiliary_driver_register(auxdrv) \ + __auxiliary_driver_register(auxdrv, THIS_MODULE, KBUILD_MODNAME) + +void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv); + +/** + * module_auxiliary_driver() - Helper macro for registering an auxiliary driver + * @__auxiliary_driver: auxiliary driver struct + * + * Helper macro for auxiliary drivers which do not do anything special in + * module init/exit. This eliminates a lot of boilerplate. Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_auxiliary_driver(__auxiliary_driver) \ + module_driver(__auxiliary_driver, auxiliary_driver_register, auxiliary_driver_unregister) + +struct auxiliary_device *auxiliary_find_device(struct device *start, + const void *data, + int (*match)(struct device *dev, const void *data)); + +#endif /* _AUXILIARY_BUS_H_ */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 639cae2c158b..033eb5f73b65 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1073,12 +1073,15 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, * file system requests. 
*/ static inline unsigned int blk_max_size_offset(struct request_queue *q, - sector_t offset) -{ - unsigned int chunk_sectors = q->limits.chunk_sectors; - - if (!chunk_sectors) - return q->limits.max_sectors; + sector_t offset, + unsigned int chunk_sectors) +{ + if (!chunk_sectors) { + if (q->limits.chunk_sectors) + chunk_sectors = q->limits.chunk_sectors; + else + return q->limits.max_sectors; + } if (likely(is_power_of_2(chunk_sectors))) chunk_sectors -= offset & (chunk_sectors - 1); @@ -1101,7 +1104,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq, req_op(rq) == REQ_OP_SECURE_ERASE) return blk_queue_get_max_sectors(q, req_op(rq)); - return min(blk_max_size_offset(q, offset), + return min(blk_max_size_offset(q, offset, 0), blk_queue_get_max_sectors(q, req_op(rq))); } diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h index e3a0be2c90ad..7bb66e15b481 100644 --- a/include/linux/build_bug.h +++ b/include/linux/build_bug.h @@ -77,4 +77,9 @@ #define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr) #define __static_assert(expr, msg, ...) _Static_assert(expr, msg) +#ifdef __GENKSYMS__ +/* genksyms gets confused by _Static_assert */ +#define _Static_assert(expr, ...) +#endif + #endif /* _LINUX_BUILD_BUG_H */ diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h index 46c3d691f677..de51c1bef27d 100644 --- a/include/linux/elfcore.h +++ b/include/linux/elfcore.h @@ -104,6 +104,7 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg #endif } +#if defined(CONFIG_UM) || defined(CONFIG_IA64) /* * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out * extra segments containing the gate DSO contents. Dumping its @@ -118,5 +119,26 @@ elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset); extern int elf_core_write_extra_data(struct coredump_params *cprm); extern size_t elf_core_extra_data_size(void); +#else +static inline Elf_Half elf_core_extra_phdrs(void) +{ + return 0; +} + +static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) +{ + return 1; +} + +static inline int elf_core_write_extra_data(struct coredump_params *cprm) +{ + return 1; +} + +static inline size_t elf_core_extra_data_size(void) +{ + return 0; +} +#endif #endif /* _LINUX_ELFCORE_H */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 5e8cc9c3d45a..72ff75fb1971 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1261,6 +1261,7 @@ struct ieee80211_mgmt { #define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127 #define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126 #define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122 +#define BSS_MEMBERSHIP_SELECTOR_SAE_H2E 123 /* mgmt header + 1 byte category code */ #define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) @@ -3835,15 +3836,15 @@ static inline bool for_each_element_completed(const struct element *element, #define WLAN_RSNX_CAPA_SAE_H2E BIT(5) /* - * reduced neighbor report, based on Draft P802.11ax_D5.0, - * section 9.4.2.170 + * reduced neighbor report, based on Draft P802.11ax_D6.1, + * section 9.4.2.170 and accepted contributions. 
*/ #define IEEE80211_AP_INFO_TBTT_HDR_TYPE 0x03 #define IEEE80211_AP_INFO_TBTT_HDR_FILTERED 0x04 #define IEEE80211_AP_INFO_TBTT_HDR_COLOC 0x08 #define IEEE80211_AP_INFO_TBTT_HDR_COUNT 0xF0 -#define IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM 8 -#define IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM 12 +#define IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM 9 +#define IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM 13 #define IEEE80211_RNR_TBTT_PARAMS_OCT_RECOMMENDED 0x01 #define IEEE80211_RNR_TBTT_PARAMS_SAME_SSID 0x02 diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 71535e87109f..ea5a337e0f8b 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -384,11 +384,19 @@ extern void irq_domain_associate_many(struct irq_domain *domain, extern void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq); -extern unsigned int irq_create_mapping(struct irq_domain *host, - irq_hw_number_t hwirq); +extern unsigned int irq_create_mapping_affinity(struct irq_domain *host, + irq_hw_number_t hwirq, + const struct irq_affinity_desc *affinity); extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec); extern void irq_dispose_mapping(unsigned int virq); +static inline unsigned int irq_create_mapping(struct irq_domain *host, + irq_hw_number_t hwirq) +{ + return irq_create_mapping_affinity(host, hwirq, NULL); +} + + /** * irq_linear_revmap() - Find a linux irq from a hw irq number. * @domain: domain owning this hardware interrupt diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 06e066e04a4b..236a7d04f891 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -46,7 +46,6 @@ #define DEFAULT_UAR_PAGE_SHIFT 12 -#define MAX_MSIX_P_PORT 17 #define MAX_MSIX 128 #define MIN_MSIX_P_PORT 5 #define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index fff7173e2b82..f93bfe7473aa 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -48,6 +48,7 @@ #include <linux/idr.h> #include <linux/notifier.h> #include <linux/refcount.h> +#include <linux/auxiliary_bus.h> #include <linux/mlx5/device.h> #include <linux/mlx5/doorbell.h> @@ -56,6 +57,8 @@ #include <linux/ptp_clock_kernel.h> #include <net/devlink.h> +#define MLX5_ADEV_NAME "mlx5_core" + enum { MLX5_BOARD_ID_LEN = 64, }; @@ -534,6 +537,17 @@ struct mlx5_core_roce { struct mlx5_flow_handle *allow_rule; }; +enum { + MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0, + MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1, +}; + +struct mlx5_adev { + struct auxiliary_device adev; + struct mlx5_core_dev *mdev; + int idx; +}; + struct mlx5_priv { /* IRQ table valid only for real pci devices PF or VF */ struct mlx5_irq_table *irq_table; @@ -571,6 +585,8 @@ struct mlx5_priv { struct list_head dev_list; struct list_head ctx_list; spinlock_t ctx_lock; + struct mlx5_adev **adev; + int adev_idx; struct mlx5_events *events; struct mlx5_flow_steering *steering; @@ -578,6 +594,7 @@ struct mlx5_priv { struct mlx5_eswitch *eswitch; struct mlx5_core_sriov sriov; struct mlx5_lag *lag; + u32 flags; struct mlx5_devcom *devcom; struct mlx5_fw_reset *fw_reset; struct mlx5_core_roce roce; @@ -1055,23 +1072,6 @@ enum { MAX_MR_CACHE_ENTRIES }; -enum { - MLX5_INTERFACE_PROTOCOL_IB = 0, - MLX5_INTERFACE_PROTOCOL_ETH = 1, - MLX5_INTERFACE_PROTOCOL_VDPA = 2, -}; - -struct mlx5_interface { - void * (*add)(struct mlx5_core_dev *dev); - void (*remove)(struct mlx5_core_dev *dev, void *context); - 
int (*attach)(struct mlx5_core_dev *dev, void *context); - void (*detach)(struct mlx5_core_dev *dev, void *context); - int protocol; - struct list_head list; -}; - -int mlx5_register_interface(struct mlx5_interface *intf); -void mlx5_unregister_interface(struct mlx5_interface *intf); int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb); diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index b0ae8020f13e..29fd832950e0 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -96,10 +96,10 @@ static inline u32 mlx5_eswitch_get_vport_metadata_mask(void) u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, u16 vport_num); -u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw); +u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev); #else /* CONFIG_MLX5_ESWITCH */ -static inline u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw) +static inline u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev) { return MLX5_ESWITCH_NONE; } @@ -136,4 +136,8 @@ mlx5_eswitch_get_vport_metadata_mask(void) } #endif /* CONFIG_MLX5_ESWITCH */ +static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev) +{ + return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS; +} #endif diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa_ifc.h b/include/linux/mlx5/mlx5_ifc_vdpa.h index f6f57a29b38e..98b56b75c625 100644 --- a/drivers/vdpa/mlx5/core/mlx5_vdpa_ifc.h +++ b/include/linux/mlx5/mlx5_ifc_vdpa.h @@ -1,10 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2020 Mellanox Technologies Ltd. */ -#ifndef __MLX5_VDPA_IFC_H_ -#define __MLX5_VDPA_IFC_H_ - -#include <linux/mlx5/mlx5_ifc.h> +#ifndef __MLX5_IFC_VDPA_H_ +#define __MLX5_IFC_VDPA_H_ enum { MLX5_VIRTIO_Q_EVENT_MODE_NO_MSIX_MODE = 0x0, @@ -165,4 +163,4 @@ struct mlx5_ifc_modify_virtio_net_q_out_bits { struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; }; -#endif /* __MLX5_VDPA_IFC_H_ */ +#endif /* __MLX5_IFC_VDPA_H_ */ diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 5b08a473cdba..c425290b21e2 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -838,4 +838,12 @@ struct mhi_device_id { kernel_ulong_t driver_data; }; +#define AUXILIARY_NAME_SIZE 32 +#define AUXILIARY_MODULE_PREFIX "auxiliary:" + +struct auxiliary_device_id { + char name[AUXILIARY_NAME_SIZE]; + kernel_ulong_t driver_data; +}; + #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 5deb099d156d..8ebb64193757 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -227,7 +227,7 @@ struct xt_table { unsigned int valid_hooks; /* Man behind the curtain... 
*/ - struct xt_table_info *private; + struct xt_table_info __rcu *private; /* Set this to THIS_MODULE if you are a module, otherwise NULL */ struct module *me; @@ -448,6 +448,9 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu) struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *); +struct xt_table_info +*xt_table_get_private_protected(const struct xt_table *table); + #ifdef CONFIG_COMPAT #include <net/compat.h> diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index c32c15216da3..f0373a6cb5fb 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -55,6 +55,7 @@ struct nfs_page { unsigned short wb_nio; /* Number of I/O attempts */ }; +struct nfs_pgio_mirror; struct nfs_pageio_descriptor; struct nfs_pageio_ops { void (*pg_init)(struct nfs_pageio_descriptor *, struct nfs_page *); @@ -64,6 +65,9 @@ struct nfs_pageio_ops { unsigned int (*pg_get_mirror_count)(struct nfs_pageio_descriptor *, struct nfs_page *); void (*pg_cleanup)(struct nfs_pageio_descriptor *); + struct nfs_pgio_mirror * + (*pg_get_mirror)(struct nfs_pageio_descriptor *, u32); + u32 (*pg_set_mirror)(struct nfs_pageio_descriptor *, u32); }; struct nfs_rw_ops { diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index 8ad2487a86d5..231e06b74b50 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -138,6 +138,17 @@ void rfkill_unregister(struct rfkill *rfkill); void rfkill_destroy(struct rfkill *rfkill); /** + * rfkill_set_hw_state_reason - Set the internal rfkill hardware block state + * with a reason + * @rfkill: pointer to the rfkill class to modify. + * @blocked: the current hardware block state to set + * @reason: one of &enum rfkill_hard_block_reasons + * + * Prefer to use rfkill_set_hw_state if you don't need any special reason. + */ +bool rfkill_set_hw_state_reason(struct rfkill *rfkill, + bool blocked, unsigned long reason); +/** * rfkill_set_hw_state - Set the internal rfkill hardware block state * @rfkill: pointer to the rfkill class to modify. * @blocked: the current hardware block state to set @@ -156,7 +167,11 @@ void rfkill_destroy(struct rfkill *rfkill); * should be blocked) so that drivers need not keep track of the soft * block state -- which they might not be able to. 
*/ -bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked); +static inline bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) +{ + return rfkill_set_hw_state_reason(rfkill, blocked, + RFKILL_HARD_BLOCK_SIGNAL); +} /** * rfkill_set_sw_state - Set the internal rfkill software block state @@ -256,6 +271,13 @@ static inline void rfkill_destroy(struct rfkill *rfkill) { } +static inline bool rfkill_set_hw_state_reason(struct rfkill *rfkill, + bool blocked, + unsigned long reason) +{ + return blocked; +} + static inline bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) { return blocked; diff --git a/include/linux/security.h b/include/linux/security.h index 0df62735651b..0e9120865343 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -869,7 +869,7 @@ static inline int security_inode_killpriv(struct dentry *dentry) static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc) { - return -EOPNOTSUPP; + return cap_inode_getsecurity(inode, name, buffer, alloc); } static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 628e28903b8b..15ca6b4167cc 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -170,6 +170,7 @@ struct plat_stmmacenet_data { int unicast_filter_entries; int tx_fifo_size; int rx_fifo_size; + u32 addr64; u32 rx_queues_to_use; u32 tx_queues_to_use; u8 rx_sched_algorithm; diff --git a/include/linux/tty.h b/include/linux/tty.h index a99e9b8e4e31..eb33d948788c 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -306,6 +306,10 @@ struct tty_struct { struct termiox *termiox; /* May be NULL for unsupported */ char name[64]; struct pid *pgrp; /* Protected by ctrl lock */ + /* + * Writes protected by both ctrl lock and legacy mutex, readers must use + * at least one of them. + */ struct pid *session; unsigned long flags; int count; diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index 0fdbf653b173..4807ca4d52e0 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -20,7 +20,6 @@ * zsmalloc mapping modes * * NOTE: These only make a difference when a mapped object spans pages. - * They also have no effect when ZSMALLOC_PGTABLE_MAPPING is selected. 
*/ enum zs_mapmode { ZS_MM_RW, /* normal read-write mapping */ diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index c8e67042a3b1..c1504aa3d9cf 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1797,6 +1797,13 @@ struct hci_cp_le_set_adv_set_rand_addr { bdaddr_t bdaddr; } __packed; +#define HCI_OP_LE_READ_TRANSMIT_POWER 0x204b +struct hci_rp_le_read_transmit_power { + __u8 status; + __s8 min_le_tx_power; + __s8 max_le_tx_power; +} __packed; + #define HCI_OP_LE_READ_BUFFER_SIZE_V2 0x2060 struct hci_rp_le_read_buffer_size_v2 { __u8 status; diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 9873e1c8cd16..677a8c50b2ad 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -230,6 +230,8 @@ struct adv_info { __u16 scan_rsp_len; __u8 scan_rsp_data[HCI_MAX_AD_LENGTH]; __s8 tx_power; + __u32 min_interval; + __u32 max_interval; bdaddr_t random_addr; bool rpa_expired; struct delayed_work rpa_expired_cb; @@ -238,6 +240,8 @@ struct adv_info { #define HCI_MAX_ADV_INSTANCES 5 #define HCI_DEFAULT_ADV_DURATION 2 +#define HCI_ADV_TX_POWER_NO_PREFERENCE 0x7F + struct adv_pattern { struct list_head list; __u8 ad_type; @@ -361,6 +365,9 @@ struct hci_dev { __u8 ssp_debug_mode; __u8 hw_error_code; __u32 clock; + __u16 advmon_allowlist_duration; + __u16 advmon_no_filter_duration; + __u8 enable_advmon_interleave_scan; __u16 devid_source; __u16 devid_vendor; @@ -377,6 +384,8 @@ struct hci_dev { __u16 def_page_timeout; __u16 def_multi_adv_rotation_duration; __u16 def_le_autoconnect_timeout; + __s8 min_le_tx_power; + __s8 max_le_tx_power; __u16 pkt_type; __u16 esco_type; @@ -542,6 +551,14 @@ struct hci_dev { struct delayed_work rpa_expired; bdaddr_t rpa; + enum { + INTERLEAVE_SCAN_NONE, + INTERLEAVE_SCAN_NO_FILTER, + INTERLEAVE_SCAN_ALLOWLIST + } interleave_scan_state; + + struct delayed_work interleave_scan; + #if IS_ENABLED(CONFIG_BT_LEDS) struct led_trigger *power_led; #endif @@ -1290,7 +1307,11 @@ struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance); int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, u16 adv_data_len, u8 *adv_data, u16 scan_rsp_len, u8 *scan_rsp_data, - u16 timeout, u16 duration); + u16 timeout, u16 duration, s8 tx_power, + u32 min_interval, u32 max_interval); +int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance, + u16 adv_data_len, u8 *adv_data, + u16 scan_rsp_len, u8 *scan_rsp_data); int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 6b55155e05e9..f9a6638e20b3 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -574,6 +574,10 @@ struct mgmt_rp_add_advertising { #define MGMT_ADV_FLAG_SEC_CODED BIT(9) #define MGMT_ADV_FLAG_CAN_SET_TX_POWER BIT(10) #define MGMT_ADV_FLAG_HW_OFFLOAD BIT(11) +#define MGMT_ADV_PARAM_DURATION BIT(12) +#define MGMT_ADV_PARAM_TIMEOUT BIT(13) +#define MGMT_ADV_PARAM_INTERVALS BIT(14) +#define MGMT_ADV_PARAM_TX_POWER BIT(15) #define MGMT_ADV_FLAG_SEC_MASK (MGMT_ADV_FLAG_SEC_1M | MGMT_ADV_FLAG_SEC_2M | \ MGMT_ADV_FLAG_SEC_CODED) @@ -621,7 +625,7 @@ struct mgmt_cp_set_appearance { #define MGMT_SET_APPEARANCE_SIZE 2 #define MGMT_OP_GET_PHY_CONFIGURATION 0x0044 -struct mgmt_rp_get_phy_confguration { +struct mgmt_rp_get_phy_configuration { __le32 supported_phys; __le32 configurable_phys; __le32 selected_phys; 
@@ -658,7 +662,7 @@ struct mgmt_rp_get_phy_confguration { MGMT_PHY_LE_CODED_RX) #define MGMT_OP_SET_PHY_CONFIGURATION 0x0045 -struct mgmt_cp_set_phy_confguration { +struct mgmt_cp_set_phy_configuration { __le32 selected_phys; } __packed; #define MGMT_SET_PHY_CONFIGURATION_SIZE 4 @@ -682,11 +686,16 @@ struct mgmt_cp_set_blocked_keys { #define MGMT_OP_SET_WIDEBAND_SPEECH 0x0047 -#define MGMT_OP_READ_SECURITY_INFO 0x0048 -#define MGMT_READ_SECURITY_INFO_SIZE 0 -struct mgmt_rp_read_security_info { - __le16 sec_len; - __u8 sec[]; +#define MGMT_CAP_SEC_FLAGS 0x01 +#define MGMT_CAP_MAX_ENC_KEY_SIZE 0x02 +#define MGMT_CAP_SMP_MAX_ENC_KEY_SIZE 0x03 +#define MGMT_CAP_LE_TX_PWR 0x04 + +#define MGMT_OP_READ_CONTROLLER_CAP 0x0048 +#define MGMT_READ_CONTROLLER_CAP_SIZE 0 +struct mgmt_rp_read_controller_cap { + __le16 cap_len; + __u8 cap[0]; } __packed; #define MGMT_OP_READ_EXP_FEATURES_INFO 0x0049 @@ -782,6 +791,36 @@ struct mgmt_rp_remove_adv_monitor { __le16 monitor_handle; } __packed; +#define MGMT_OP_ADD_EXT_ADV_PARAMS 0x0054 +struct mgmt_cp_add_ext_adv_params { + __u8 instance; + __le32 flags; + __le16 duration; + __le16 timeout; + __le32 min_interval; + __le32 max_interval; + __s8 tx_power; +} __packed; +#define MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE 18 +struct mgmt_rp_add_ext_adv_params { + __u8 instance; + __s8 tx_power; + __u8 max_adv_data_len; + __u8 max_scan_rsp_len; +} __packed; + +#define MGMT_OP_ADD_EXT_ADV_DATA 0x0055 +struct mgmt_cp_add_ext_adv_data { + __u8 instance; + __u8 adv_data_len; + __u8 scan_rsp_len; + __u8 data[]; +} __packed; +#define MGMT_ADD_EXT_ADV_DATA_SIZE 3 +struct mgmt_rp_add_ext_adv_data { + __u8 instance; +} __packed; + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; diff --git a/include/net/bonding.h b/include/net/bonding.h index d9d0ff3b0ad3..adc3da776970 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -86,10 +86,8 @@ #define bond_for_each_slave_rcu(bond, pos, iter) \ netdev_for_each_lower_private_rcu((bond)->dev, pos, iter) -#ifdef CONFIG_XFRM_OFFLOAD #define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \ NETIF_F_GSO_ESP) -#endif /* CONFIG_XFRM_OFFLOAD */ #ifdef CONFIG_NET_POLL_CONTROLLER extern atomic_t netpoll_block_tx; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 78c763dfc99a..9a4bbccddc7f 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -303,19 +303,6 @@ struct cfg80211_he_bss_color { }; /** - * struct ieee80211_he_bss_color - AP settings for BSS coloring - * - * @color: the current color. - * @disabled: is the feature disabled. - * @partial: define the AID equation. 
- */ -struct ieee80211_he_bss_color { - u8 color; - bool disabled; - bool partial; -}; - -/** * struct ieee80211_sta_ht_cap - STA's HT capabilities * * This structure describes most essential parameters needed @@ -1187,6 +1174,7 @@ enum cfg80211_ap_settings_flags { * @vht_required: stations must support VHT * @twt_responder: Enable Target Wait Time * @he_required: stations must support HE + * @sae_h2e_required: stations must support direct H2E technique in SAE * @flags: flags, as defined in enum cfg80211_ap_settings_flags * @he_obss_pd: OBSS Packet Detection settings * @he_bss_color: BSS Color settings @@ -1218,7 +1206,7 @@ struct cfg80211_ap_settings { const struct ieee80211_vht_cap *vht_cap; const struct ieee80211_he_cap_elem *he_cap; const struct ieee80211_he_operation *he_oper; - bool ht_required, vht_required, he_required; + bool ht_required, vht_required, he_required, sae_h2e_required; bool twt_responder; u32 flags; struct ieee80211_he_obss_pd he_obss_pd; @@ -1744,6 +1732,54 @@ struct station_info { u8 connected_to_as; }; +/** + * struct cfg80211_sar_sub_specs - SAR power limit for one frequency range + * @power: power limitation in units of 0.25 dBm + * @freq_range_index: index of the frequency range the power limitation applies to + */ +struct cfg80211_sar_sub_specs { + s32 power; + u32 freq_range_index; +}; + +/** + * struct cfg80211_sar_specs - SAR limit specs + * @type: the SAR limit type, one of &enum nl80211_sar_type + * @num_sub_specs: number of SAR sub specs + * @sub_specs: array of SAR sub specs + */ +struct cfg80211_sar_specs { + enum nl80211_sar_type type; + u32 num_sub_specs; + struct cfg80211_sar_sub_specs sub_specs[]; +}; + + +/** + * struct cfg80211_sar_freq_ranges - SAR frequency ranges + * @start_freq: start range edge frequency + * @end_freq: end range edge frequency + */ +struct cfg80211_sar_freq_ranges { + u32 start_freq; + u32 end_freq; +}; + +/** + * struct cfg80211_sar_capa - SAR limit capability + * @type: the SAR limit type, one of &enum nl80211_sar_type + * @num_freq_ranges: number of frequency ranges + * @freq_ranges: array of supported frequency ranges. + * + * Note: the WLAN driver may append new ranges or split an existing + * range into smaller ones and then append them. + */ +struct cfg80211_sar_capa { + enum nl80211_sar_type type; + u32 num_freq_ranges; + const struct cfg80211_sar_freq_ranges *freq_ranges; +}; + #if IS_ENABLED(CONFIG_CFG80211) /** * cfg80211_get_station - retrieve information about a given station @@ -4261,6 +4297,8 @@ struct cfg80211_ops { struct cfg80211_tid_config *tid_conf); int (*reset_tid_config)(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, u8 tids); + int (*set_sar_specs)(struct wiphy *wiphy, + struct cfg80211_sar_specs *sar); }; /* @@ -5029,6 +5067,8 @@ struct wiphy { u8 max_data_retry_count; + const struct cfg80211_sar_capa *sar_capa; + char priv[] __aligned(NETDEV_ALIGN); }; @@ -6418,13 +6458,15 @@ void cfg80211_abandon_assoc(struct net_device *dev, struct cfg80211_bss *bss); * @dev: network device * @buf: 802.11 frame (header + body) * @len: length of the frame data + * @reconnect: immediate reconnect is desired (include the nl80211 attribute) * * This function is called whenever deauthentication has been processed in * station mode. This includes both received deauthentication frames and * locally generated ones. This function may sleep. The caller must hold the * corresponding wdev's mutex.
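For the driver side, a hedged sketch of wiring these structures up: advertise the supported ranges through wiphy->sar_capa and accept limits through the new set_sar_specs op. Every name and frequency value below is illustrative rather than taken from a real driver:

#include <net/cfg80211.h>

static const struct cfg80211_sar_freq_ranges my_sar_ranges[] = {
	{ .start_freq = 2402000, .end_freq = 2494000 },	/* 2.4 GHz, kHz units */
	{ .start_freq = 5170000, .end_freq = 5835000 },	/* 5 GHz, kHz units */
};

static const struct cfg80211_sar_capa my_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(my_sar_ranges),
	.freq_ranges = my_sar_ranges,
};

static int my_set_sar_specs(struct wiphy *wiphy,
			    struct cfg80211_sar_specs *sar)
{
	u32 i;

	if (sar->type != NL80211_SAR_TYPE_POWER)
		return -EINVAL;

	for (i = 0; i < sar->num_sub_specs; i++) {
		/* power is in 0.25 dBm units and applies to the advertised
		 * range selected by freq_range_index.
		 */
		if (sar->sub_specs[i].freq_range_index >=
		    ARRAY_SIZE(my_sar_ranges))
			return -EINVAL;

		/* ...program the limit into firmware here... */
	}

	return 0;
}

A driver would point wiphy->sar_capa at my_sar_capa before wiphy_register() and hook my_set_sar_specs into its cfg80211_ops.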
*/ -void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len); +void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len, + bool reconnect); /** * cfg80211_rx_unprot_mlme_mgmt - notification of unprotected mlme mgmt frame @@ -7531,6 +7573,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev, * @dev: the device on which the channel switch started * @chandef: the future channel definition * @count: the number of TBTTs until the channel switch happens + * @quiet: whether or not immediate quiet was requested by the AP * * Inform the userspace about the channel switch that has just * started, so that it can take appropriate actions (eg. starting @@ -7538,7 +7581,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev, */ void cfg80211_ch_switch_started_notify(struct net_device *dev, struct cfg80211_chan_def *chandef, - u8 count); + u8 count, bool quiet); /** * ieee80211_operating_class_to_band - convert operating class to band diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 05c7524bab26..d315740581f1 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -635,9 +635,7 @@ struct ieee80211_fils_discovery { struct ieee80211_bss_conf { const u8 *bssid; u8 htc_trig_based_pkt_ext; - bool multi_sta_back_32bit; bool uora_exists; - bool ack_enabled; u8 uora_ocw_range; u16 frame_time_rts_th; bool he_support; @@ -4197,6 +4195,8 @@ struct ieee80211_ops { struct ieee80211_vif *vif); void (*sta_set_4addr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool enabled); + int (*set_sar_specs)(struct ieee80211_hw *hw, + const struct cfg80211_sar_specs *sar); }; /** @@ -5324,6 +5324,26 @@ void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid, const u8 *replay_ctr, gfp_t gfp); /** + * ieee80211_key_mic_failure - increment MIC failure counter for the key + * + * Note: this is really only safe if no other RX function is called + * at the same time. + * + * @keyconf: the key in question + */ +void ieee80211_key_mic_failure(struct ieee80211_key_conf *keyconf); + +/** + * ieee80211_key_replay - increment replay counter for the key + * + * Note: this is really only safe if no other RX function is called + * at the same time. + * + * @keyconf: the key in question + */ +void ieee80211_key_replay(struct ieee80211_key_conf *keyconf); + +/** * ieee80211_wake_queue - wake specific queue * @hw: pointer as obtained from ieee80211_alloc_hw(). * @queue: queue number (counted from zero). @@ -5882,6 +5902,17 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif); void ieee80211_connection_loss(struct ieee80211_vif *vif); /** + * ieee80211_disconnect - request disconnection + * + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @reconnect: immediate reconnect is desired + * + * Request disconnection from the current network and, if enabled, send a + * hint to the higher layers that immediate reconnect is desired. + */ +void ieee80211_disconnect(struct ieee80211_vif *vif, bool reconnect); + +/** * ieee80211_resume_disconnect - disconnect from AP after resume * * @vif: &struct ieee80211_vif pointer from the add_interface callback. 
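A minimal sketch of a driver using the new ieee80211_disconnect() hint, for instance after a firmware restart where the link is known to be recoverable (the handler itself is hypothetical):

#include <net/mac80211.h>

static void my_fw_restart_done(struct ieee80211_vif *vif)
{
	/* Deauthenticates locally; the resulting deauth event carries
	 * NL80211_ATTR_RECONNECT_REQUESTED so userspace can reconnect
	 * immediately instead of backing off.
	 */
	ieee80211_disconnect(vif, true);
}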
diff --git a/include/net/mptcp.h b/include/net/mptcp.h index b6cf07143a8a..5694370be3d4 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -46,6 +46,7 @@ struct mptcp_out_options { #endif }; u8 addr_id; + u16 port; u64 ahmac; u8 rm_id; u8 join_id; diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 55b4cadf290a..c1c0a4ff92ae 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1524,4 +1524,8 @@ void __init nft_chain_route_init(void); void nft_chain_route_fini(void); void nf_tables_trans_destroy_flush_work(void); + +int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result); +__be64 nf_jiffies64_to_msecs(u64 input); + #endif /* _NET_NF_TABLES_H */ diff --git a/include/net/xdp.h b/include/net/xdp.h index 700ad5db7f5d..600acb307db6 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -256,8 +256,6 @@ struct xdp_attachment_info { }; struct netdev_bpf; -bool xdp_attachment_flags_ok(struct xdp_attachment_info *info, - struct netdev_bpf *bpf); void xdp_attachment_setup(struct xdp_attachment_info *info, struct netdev_bpf *bpf); diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index ea1de185f2e4..731116611390 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -621,6 +621,9 @@ struct ocelot { /* Keep track of the vlan port masks */ u32 vlan_mask[VLAN_N_VID]; + /* Switches like VSC9959 have flooding per traffic class */ + int num_flooding_pgids; + /* In tables like ANA:PORT and the ANA:PGID:PGID mask, * the CPU is located after the physical ports (at the * num_phys_ports index). diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 1233f14f659f..7e9931b6c735 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3977,8 +3977,8 @@ union bpf_attr { FN(seq_printf_btf), \ FN(skb_cgroup_classid), \ FN(redirect_neigh), \ - FN(bpf_per_cpu_ptr), \ - FN(bpf_this_cpu_ptr), \ + FN(per_cpu_ptr), \ + FN(this_cpu_ptr), \ FN(redirect_peer), \ FN(task_storage_get), \ FN(task_storage_delete), \ diff --git a/include/uapi/linux/can/isotp.h b/include/uapi/linux/can/isotp.h index 7793b26aa154..c55935b64ccc 100644 --- a/include/uapi/linux/can/isotp.h +++ b/include/uapi/linux/can/isotp.h @@ -135,7 +135,7 @@ struct can_isotp_ll_options { #define CAN_ISOTP_FORCE_RXSTMIN 0x100 /* ignore CFs depending on rx stmin */ #define CAN_ISOTP_RX_EXT_ADDR 0x200 /* different rx extended addressing */ #define CAN_ISOTP_WAIT_TX_DONE 0x400 /* wait for tx completion */ - +#define CAN_ISOTP_SF_BROADCAST 0x800 /* 1-to-N functional addressing */ /* default values */ diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 3e0d4a038ab6..40832d13c2f1 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1178,6 +1178,10 @@ * includes the contents of the frame. %NL80211_ATTR_ACK flag is included * if the recipient acknowledged the frame. * + * @NL80211_CMD_SET_SAR_SPECS: SAR power limitation configuration is + * passed using %NL80211_ATTR_SAR_SPEC. %NL80211_ATTR_WIPHY is used to + * specify the wiphy index to be applied to. + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -1408,6 +1412,8 @@ enum nl80211_commands { NL80211_CMD_CONTROL_PORT_FRAME_TX_STATUS, + NL80211_CMD_SET_SAR_SPECS, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -2079,7 +2085,8 @@ enum nl80211_commands { * until the channel switch event. 
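The new ISO-TP flag above enables 1-to-N functional addressing, where a tester sends unsegmented requests to many ECUs at once; only single frames fit in this mode since flow control with multiple receivers is impossible. A hedged userspace sketch (the CAN ID is the conventional OBD-II functional request ID, and error handling is trimmed):

#include <linux/can.h>
#include <linux/can/isotp.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int open_functional_tx(int ifindex)
{
	struct can_isotp_options opts;
	struct sockaddr_can addr;
	int s;

	s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP);
	if (s < 0)
		return -1;

	memset(&opts, 0, sizeof(opts));
	opts.flags = CAN_ISOTP_SF_BROADCAST;	/* tx-only, no flow control */

	/* ISO-TP options must be configured before bind(). */
	if (setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_OPTS, &opts,
		       sizeof(opts)) < 0)
		goto err;

	memset(&addr, 0, sizeof(addr));
	addr.can_family = AF_CAN;
	addr.can_ifindex = ifindex;
	addr.can_addr.tp.tx_id = 0x7DF;	/* functional request ID */

	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		goto err;

	return s;	/* each write() now emits one single frame */

err:
	close(s);
	return -1;
}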
* @NL80211_ATTR_CH_SWITCH_BLOCK_TX: flag attribute specifying that transmission * must be blocked on the current channel (before the channel switch - * operation). + * operation). Also included in the channel switch started event if quiet + * was requested by the AP. * @NL80211_ATTR_CSA_IES: Nested set of attributes containing the IE information * for the time while performing a channel switch. * @NL80211_ATTR_CNTDWN_OFFS_BEACON: An array of offsets (u16) to the channel @@ -2534,6 +2541,15 @@ enum nl80211_commands { * This is a u8 attribute that encapsulates one of the values from * &enum nl80211_sae_pwe_mechanism. * + * @NL80211_ATTR_SAR_SPEC: SAR power limitation specification when + * used with %NL80211_CMD_SET_SAR_SPECS. The message contains fields + * of %nl80211_sar_attrs which specify the SAR type and the related + * SAR specs. The SAR specs contain an array of %nl80211_sar_specs_attrs. + * + * @NL80211_ATTR_RECONNECT_REQUESTED: flag attribute, used with deauth and + * disassoc events to indicate that an immediate reconnect to the AP + * is desired. + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -3025,6 +3041,10 @@ enum nl80211_attrs { NL80211_ATTR_SAE_PWE, + NL80211_ATTR_RECONNECT_REQUESTED, + + NL80211_ATTR_SAR_SPEC, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -7156,4 +7176,96 @@ enum nl80211_sae_pwe_mechanism { NL80211_SAE_PWE_HASH_TO_ELEMENT, NL80211_SAE_PWE_BOTH, }; + +/** + * enum nl80211_sar_type - type of SAR specs + * + * @NL80211_SAR_TYPE_POWER: power limitation specified in 0.25 dBm units + * + */ +enum nl80211_sar_type { + NL80211_SAR_TYPE_POWER, + + /* add new type here */ + + /* Keep last */ + NUM_NL80211_SAR_TYPE, }; + +/** + * enum nl80211_sar_attrs - Attributes for SAR spec + * + * @NL80211_SAR_ATTR_TYPE: the SAR type as defined in &enum nl80211_sar_type. + * + * @NL80211_SAR_ATTR_SPECS: Nested array of SAR power + * limit specifications. Each specification contains a set + * of %nl80211_sar_specs_attrs. + * + * For a SET operation, it contains an array of %NL80211_SAR_ATTR_SPECS_POWER + * and %NL80211_SAR_ATTR_SPECS_RANGE_INDEX. + * + * For a sar_capa dump, it contains an array of + * %NL80211_SAR_ATTR_SPECS_START_FREQ + * and %NL80211_SAR_ATTR_SPECS_END_FREQ. + * + * @__NL80211_SAR_ATTR_LAST: Internal + * @NL80211_SAR_ATTR_MAX: highest sar attribute + * + * These attributes are used with %NL80211_CMD_SET_SAR_SPECS + */ +enum nl80211_sar_attrs { + __NL80211_SAR_ATTR_INVALID, + + NL80211_SAR_ATTR_TYPE, + NL80211_SAR_ATTR_SPECS, + + __NL80211_SAR_ATTR_LAST, + NL80211_SAR_ATTR_MAX = __NL80211_SAR_ATTR_LAST - 1, +}; + +/** + * enum nl80211_sar_specs_attrs - Attributes for SAR power limit specs + * + * @NL80211_SAR_ATTR_SPECS_POWER: Required (s32) value to specify the actual + * power limit value in units of 0.25 dBm if type is + * NL80211_SAR_TYPE_POWER. (i.e., a value of 44 represents 11 dBm). + * 0 means userspace imposes no SAR limitation on the associated range. + * + * @NL80211_SAR_ATTR_SPECS_RANGE_INDEX: Required (u32) value to specify the + * index into the exported freq range table; the associated power limitation + * is applied to this range. + * + * Userspace isn't required to set all the ranges advertised by the WLAN + * driver, and may skip certain ranges. Skipped ranges have no SAR + * limitation, which is equivalent to setting %NL80211_SAR_ATTR_SPECS_POWER + * to an unreasonably high value, since any value above the regulatory limit + * simply removes the SAR power limitation. However, at least one range must + * be set, and setting the same range more than once in one SET operation is + * not allowed. + * + * Every SET operation overwrites the previous one. + * + * @NL80211_SAR_ATTR_SPECS_START_FREQ: Required (u32) value to specify the start + * frequency of this range edge when registering the SAR capability with the wiphy. + * It's not a channel center frequency. The unit is kHz. + * + * @NL80211_SAR_ATTR_SPECS_END_FREQ: Required (u32) value to specify the end + * frequency of this range edge when registering the SAR capability with the wiphy. + * It's not a channel center frequency. The unit is kHz. + * + * @__NL80211_SAR_ATTR_SPECS_LAST: Internal + * @NL80211_SAR_ATTR_SPECS_MAX: highest sar specs attribute + */ +enum nl80211_sar_specs_attrs { + __NL80211_SAR_ATTR_SPECS_INVALID, + + NL80211_SAR_ATTR_SPECS_POWER, + NL80211_SAR_ATTR_SPECS_RANGE_INDEX, + NL80211_SAR_ATTR_SPECS_START_FREQ, + NL80211_SAR_ATTR_SPECS_END_FREQ, + + __NL80211_SAR_ATTR_SPECS_LAST, + NL80211_SAR_ATTR_SPECS_MAX = __NL80211_SAR_ATTR_SPECS_LAST - 1, +}; + #endif /* __LINUX_NL80211_H */ diff --git a/include/uapi/linux/ppp-ioctl.h b/include/uapi/linux/ppp-ioctl.h index 7bd2a5a75348..8dbecb3ad036 100644 --- a/include/uapi/linux/ppp-ioctl.h +++ b/include/uapi/linux/ppp-ioctl.h @@ -115,6 +115,8 @@ struct pppol2tp_ioc_stats { #define PPPIOCATTCHAN _IOW('t', 56, int) /* attach to ppp channel */ #define PPPIOCGCHAN _IOR('t', 55, int) /* get ppp channel number */ #define PPPIOCGL2TPSTATS _IOR('t', 54, struct pppol2tp_ioc_stats) +#define PPPIOCBRIDGECHAN _IOW('t', 53, int) /* bridge one channel to another */ +#define PPPIOCUNBRIDGECHAN _IO('t', 54) /* unbridge channel */ #define SIOCGPPPSTATS (SIOCDEVPRIVATE + 0) #define SIOCGPPPVER (SIOCDEVPRIVATE + 1) /* NEVER change this!! */ diff --git a/include/uapi/linux/rfkill.h b/include/uapi/linux/rfkill.h index 2e00dcebebd0..03e8af87b364 100644 --- a/include/uapi/linux/rfkill.h +++ b/include/uapi/linux/rfkill.h @@ -70,12 +70,24 @@ enum rfkill_operation { }; /** + * enum rfkill_hard_block_reasons - hard block reasons + * @RFKILL_HARD_BLOCK_SIGNAL: the hardware rfkill signal is active + * @RFKILL_HARD_BLOCK_NOT_OWNER: the NIC is not owned by the host + */ +enum rfkill_hard_block_reasons { + RFKILL_HARD_BLOCK_SIGNAL = 1 << 0, + RFKILL_HARD_BLOCK_NOT_OWNER = 1 << 1, +}; + +/** * struct rfkill_event - events for userspace on /dev/rfkill * @idx: index of dev rfkill * @type: type of the rfkill struct * @op: operation code * @hard: hard state (0/1) * @soft: soft state (0/1) + * @hard_block_reasons: valid if hard is set. One or several reasons from + * &enum rfkill_hard_block_reasons. * * Structure used for userspace communication on /dev/rfkill, * used for events from the kernel and control to the kernel.
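On the read side, /dev/rfkill events grow by one byte. A hedged userspace sketch that stays compatible with older kernels by also accepting the previous 8-byte event size:

#include <fcntl.h>
#include <unistd.h>
#include <linux/rfkill.h>

static void watch_rfkill(void)
{
	struct rfkill_event ev;
	ssize_t len;
	int fd;

	fd = open("/dev/rfkill", O_RDONLY);
	if (fd < 0)
		return;

	/* Older kernels emit events without hard_block_reasons. */
	while ((len = read(fd, &ev, sizeof(ev))) >= RFKILL_EVENT_SIZE_V1) {
		if (!ev.hard)
			continue;

		if (len >= (ssize_t)sizeof(ev) &&
		    (ev.hard_block_reasons & RFKILL_HARD_BLOCK_NOT_OWNER)) {
			/* Blocked because the host does not own the NIC;
			 * toggling the soft state will not help here.
			 */
		}
	}

	close(fd);
}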
@@ -84,7 +96,9 @@ struct rfkill_event { __u32 idx; __u8 type; __u8 op; - __u8 soft, hard; + __u8 soft; + __u8 hard; + __u8 hard_block_reasons; } __attribute__((packed)); /* diff --git a/init/Kconfig b/init/Kconfig index 02d13ae27abb..0872a5a2e759 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -47,6 +47,10 @@ config CLANG_VERSION int default $(shell,$(srctree)/scripts/clang-version.sh $(CC)) +config LLD_VERSION + int + default $(shell,$(srctree)/scripts/lld-version.sh $(LD)) + config CC_CAN_LINK bool default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m64-flag)) if 64BIT @@ -1348,6 +1352,12 @@ config LD_DEAD_CODE_DATA_ELIMINATION present. This option is not well tested yet, so use at your own risk. +config LD_ORPHAN_WARN + def_bool y + depends on ARCH_WANT_LD_ORPHAN_WARN + depends on !LD_IS_LLD || LLD_VERSION >= 110000 + depends on $(ld-option,--orphan-handling=warn) + config SYSCTL bool diff --git a/init/initramfs.c b/init/initramfs.c index 1f97c0328a7a..55b74d7e5260 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -535,7 +535,7 @@ extern unsigned long __initramfs_size; #include <linux/initrd.h> #include <linux/kexec.h> -void __weak free_initrd_mem(unsigned long start, unsigned long end) +void __weak __init free_initrd_mem(unsigned long start, unsigned long end) { #ifdef CONFIG_ARCH_KEEP_MEMBLOCK unsigned long aligned_start = ALIGN_DOWN(start, PAGE_SIZE); diff --git a/kernel/Makefile b/kernel/Makefile index af601b9bda0e..6c9f19911be0 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -97,7 +97,6 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o obj-$(CONFIG_TRACEPOINTS) += tracepoint.o obj-$(CONFIG_LATENCYTOP) += latencytop.o -obj-$(CONFIG_ELFCORE) += elfcore.o obj-$(CONFIG_FUNCTION_TRACER) += trace/ obj-$(CONFIG_TRACING) += trace/ obj-$(CONFIG_TRACE_CLOCK) += trace/ diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 2c395deae279..bd8a3183d030 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -730,9 +730,9 @@ bpf_base_func_proto(enum bpf_func_id func_id) return &bpf_snprintf_btf_proto; case BPF_FUNC_jiffies64: return &bpf_jiffies64_proto; - case BPF_FUNC_bpf_per_cpu_ptr: + case BPF_FUNC_per_cpu_ptr: return &bpf_per_cpu_ptr_proto; - case BPF_FUNC_bpf_this_cpu_ptr: + case BPF_FUNC_this_cpu_ptr: return &bpf_this_cpu_ptr_proto; default: break; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 93def76cf32b..d43b30c92c7d 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1299,9 +1299,7 @@ static void __reg_combine_32_into_64(struct bpf_reg_state *reg) static bool __reg64_bound_s32(s64 a) { - if (a > S32_MIN && a < S32_MAX) - return true; - return false; + return a > S32_MIN && a < S32_MAX; } static bool __reg64_bound_u32(u64 a) @@ -1315,10 +1313,10 @@ static void __reg_combine_64_into_32(struct bpf_reg_state *reg) { __mark_reg32_unbounded(reg); - if (__reg64_bound_s32(reg->smin_value)) + if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) { reg->s32_min_value = (s32)reg->smin_value; - if (__reg64_bound_s32(reg->smax_value)) reg->s32_max_value = (s32)reg->smax_value; + } if (__reg64_bound_u32(reg->umin_value)) reg->u32_min_value = (u32)reg->umin_value; if (__reg64_bound_u32(reg->umax_value)) @@ -4942,6 +4940,8 @@ static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type, ret_reg->smax_value = meta->msize_max_value; ret_reg->s32_max_value = meta->msize_max_value; + ret_reg->smin_value = -MAX_ERRNO; + 
ret_reg->s32_min_value = -MAX_ERRNO; __reg_deduce_bounds(ret_reg); __reg_bound_offset(ret_reg); __update_reg_bounds(ret_reg); diff --git a/kernel/cpu.c b/kernel/cpu.c index 6ff2578ecf17..2b8d7a5db383 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -815,6 +815,10 @@ void __init cpuhp_threads_init(void) } #ifdef CONFIG_HOTPLUG_CPU +#ifndef arch_clear_mm_cpumask_cpu +#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm)) +#endif + /** * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU * @cpu: a CPU id @@ -850,7 +854,7 @@ void clear_tasks_mm_cpumask(int cpu) t = find_lock_task_mm(p); if (!t) continue; - cpumask_clear_cpu(cpu, mm_cpumask(t->mm)); + arch_clear_mm_cpumask_cpu(cpu, t->mm); task_unlock(t); } rcu_read_unlock(); diff --git a/kernel/elfcore.c b/kernel/elfcore.c deleted file mode 100644 index 57fb4dcff434..000000000000 --- a/kernel/elfcore.c +++ /dev/null @@ -1,26 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <linux/elf.h> -#include <linux/fs.h> -#include <linux/mm.h> -#include <linux/binfmts.h> -#include <linux/elfcore.h> - -Elf_Half __weak elf_core_extra_phdrs(void) -{ - return 0; -} - -int __weak elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) -{ - return 1; -} - -int __weak elf_core_write_extra_data(struct coredump_params *cprm) -{ - return 1; -} - -size_t __weak elf_core_extra_data_size(void) -{ - return 0; -} diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index cf8b374b892d..e4ca69608f3b 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -624,17 +624,19 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain) EXPORT_SYMBOL_GPL(irq_create_direct_mapping); /** - * irq_create_mapping() - Map a hardware interrupt into linux irq space + * irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space * @domain: domain owning this hardware interrupt or NULL for default domain * @hwirq: hardware irq number in that domain space + * @affinity: irq affinity * * Only one mapping per hardware interrupt is permitted. Returns a linux * irq number. * If the sense/trigger is to be specified, set_irq_type() should be called * on the number returned from that call. */ -unsigned int irq_create_mapping(struct irq_domain *domain, - irq_hw_number_t hwirq) +unsigned int irq_create_mapping_affinity(struct irq_domain *domain, + irq_hw_number_t hwirq, + const struct irq_affinity_desc *affinity) { struct device_node *of_node; int virq; @@ -660,7 +662,8 @@ unsigned int irq_create_mapping(struct irq_domain *domain, } /* Allocate a virtual interrupt number */ - virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL); + virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), + affinity); if (virq <= 0) { pr_debug("-> virq allocation failed\n"); return 0; @@ -676,7 +679,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain, return virq; } -EXPORT_SYMBOL_GPL(irq_create_mapping); +EXPORT_SYMBOL_GPL(irq_create_mapping_affinity); /** * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index cb9d7478ef0c..fb2fbcbf6de6 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1362,9 +1362,9 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return prog->aux->sleepable ? 
&bpf_copy_from_user_proto : NULL; case BPF_FUNC_snprintf_btf: return &bpf_snprintf_btf_proto; - case BPF_FUNC_bpf_per_cpu_ptr: + case BPF_FUNC_per_cpu_ptr: return &bpf_per_cpu_ptr_proto; - case BPF_FUNC_bpf_this_cpu_ptr: + case BPF_FUNC_this_cpu_ptr: return &bpf_this_cpu_ptr_proto; default: return NULL; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 7d53c5bdea3e..06134189e9a7 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -163,7 +163,8 @@ static union trace_eval_map_item *trace_eval_maps; #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ int tracing_set_tracer(struct trace_array *tr, const char *buf); -static void ftrace_trace_userstack(struct trace_buffer *buffer, +static void ftrace_trace_userstack(struct trace_array *tr, + struct trace_buffer *buffer, unsigned long flags, int pc); #define MAX_TRACER_SIZE 100 @@ -2870,7 +2871,7 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr, * two. They are not that meaningful. */ ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs); - ftrace_trace_userstack(buffer, flags, pc); + ftrace_trace_userstack(tr, buffer, flags, pc); } /* @@ -3056,13 +3057,14 @@ EXPORT_SYMBOL_GPL(trace_dump_stack); static DEFINE_PER_CPU(int, user_stack_count); static void -ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc) +ftrace_trace_userstack(struct trace_array *tr, + struct trace_buffer *buffer, unsigned long flags, int pc) { struct trace_event_call *call = &event_user_stack; struct ring_buffer_event *event; struct userstack_entry *entry; - if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE)) + if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE)) return; /* @@ -3101,7 +3103,8 @@ ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc) preempt_enable(); } #else /* CONFIG_USER_STACKTRACE_SUPPORT */ -static void ftrace_trace_userstack(struct trace_buffer *buffer, +static void ftrace_trace_userstack(struct trace_array *tr, + struct trace_buffer *buffer, unsigned long flags, int pc) { } diff --git a/lib/Makefile b/lib/Makefile index ce45af50983a..d415fc7067c5 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -107,7 +107,7 @@ obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o # off the generation of FPU/SSE* instructions for kernel proper but FPU_FLAGS # get appended last to CFLAGS and thus override those previous compiler options. # -FPU_CFLAGS := -mhard-float -msse -msse2 +FPU_CFLAGS := -msse -msse2 ifdef CONFIG_CC_IS_GCC # Stack alignment mismatch, proceed with caution. # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 @@ -120,6 +120,7 @@ ifdef CONFIG_CC_IS_GCC # -mpreferred-stack-boundary=3 is not between 4 and 12 # # can be triggered. Otherwise gcc doesn't complain. 
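The BPF enum rename above matters because the convention pairs helper bpf_<name>() with enum BPF_FUNC_<name>, so the old entries carried a doubled bpf_ prefix. From a BPF program the helpers keep their documented names; a hedged sketch against a hypothetical percpu ksym:

#include <bpf/bpf_helpers.h>

struct my_pcpu_stats {
	unsigned long long cnt;
};

/* Hypothetical percpu kernel variable resolved through BTF. */
extern struct my_pcpu_stats my_stats __ksym;

SEC("tp_btf/sched_switch")
int count_switches(void *ctx)
{
	struct my_pcpu_stats *s;

	/* This CPU's copy; the verifier treats it as never NULL. */
	s = bpf_this_cpu_ptr(&my_stats);
	s->cnt++;

	/* A chosen CPU's copy; NULL for an invalid CPU number. */
	s = bpf_per_cpu_ptr(&my_stats, 0);
	if (s)
		s->cnt++;

	return 0;
}

char LICENSE[] SEC("license") = "GPL";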
+FPU_CFLAGS += -mhard-float FPU_CFLAGS += $(call cc-option,-msse -mpreferred-stack-boundary=3,-mpreferred-stack-boundary=4) endif diff --git a/lib/zlib_dfltcc/dfltcc_inflate.c b/lib/zlib_dfltcc/dfltcc_inflate.c index aa9ef23474df..db107016d29b 100644 --- a/lib/zlib_dfltcc/dfltcc_inflate.c +++ b/lib/zlib_dfltcc/dfltcc_inflate.c @@ -4,6 +4,7 @@ #include "dfltcc_util.h" #include "dfltcc.h" #include <asm/setup.h> +#include <linux/export.h> #include <linux/zutil.h> /* @@ -29,6 +30,7 @@ int dfltcc_can_inflate( return is_bit_set(dfltcc_state->af.fns, DFLTCC_XPND) && is_bit_set(dfltcc_state->af.fmts, DFLTCC_FMT0); } +EXPORT_SYMBOL(dfltcc_can_inflate); static int dfltcc_was_inflate_used( z_streamp strm @@ -147,3 +149,4 @@ dfltcc_inflate_action dfltcc_inflate( return (cc == DFLTCC_CC_OP1_TOO_SHORT || cc == DFLTCC_CC_OP2_TOO_SHORT) ? DFLTCC_INFLATE_BREAK : DFLTCC_INFLATE_CONTINUE; } +EXPORT_SYMBOL(dfltcc_inflate); diff --git a/mm/Kconfig b/mm/Kconfig index d42423f884a7..390165ffbb0f 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -707,19 +707,6 @@ config ZSMALLOC returned by an alloc(). This handle must be mapped in order to access the allocated space. -config ZSMALLOC_PGTABLE_MAPPING - bool "Use page table mapping to access object in zsmalloc" - depends on ZSMALLOC=y - help - By default, zsmalloc uses a copy-based object mapping method to - access allocations that span two pages. However, if a particular - architecture (ex, ARM) performs VM mapping faster than copying, - then you should select this. This causes zsmalloc to use page table - mapping rather than copying for object mapping. - - You can check speed with zsmalloc benchmark: - https://github.com/spartacus06/zsmapbench - config ZSMALLOC_STAT bool "Export zsmalloc statistics" depends on ZSMALLOC diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 37f15c3c24dc..d029d938d26d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1216,6 +1216,7 @@ static void destroy_compound_gigantic_page(struct page *page, } set_compound_order(page, 0); + page[1].compound_nr = 0; __ClearPageHead(page); } diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c index 1f87aec9ab5c..9182848dda3e 100644 --- a/mm/hugetlb_cgroup.c +++ b/mm/hugetlb_cgroup.c @@ -82,11 +82,8 @@ static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg) for (idx = 0; idx < hugetlb_max_hstate; idx++) { if (page_counter_read( - hugetlb_cgroup_counter_from_cgroup(h_cg, idx)) || - page_counter_read(hugetlb_cgroup_counter_from_cgroup_rsvd( - h_cg, idx))) { + hugetlb_cgroup_counter_from_cgroup(h_cg, idx))) return true; - } } return false; } @@ -202,9 +199,10 @@ static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css) struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css); struct hstate *h; struct page *page; - int idx = 0; + int idx; do { + idx = 0; for_each_hstate(h) { spin_lock(&hugetlb_lock); list_for_each_entry(page, &h->hugepage_activelist, lru) diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c index 4c5375810449..0e3f8494628f 100644 --- a/mm/kasan/quarantine.c +++ b/mm/kasan/quarantine.c @@ -29,6 +29,7 @@ #include <linux/srcu.h> #include <linux/string.h> #include <linux/types.h> +#include <linux/cpuhotplug.h> #include "../slab.h" #include "kasan.h" @@ -43,6 +44,7 @@ struct qlist_head { struct qlist_node *head; struct qlist_node *tail; size_t bytes; + bool offline; }; #define QLIST_INIT { NULL, NULL, 0 } @@ -188,6 +190,10 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache) local_irq_save(flags); q = this_cpu_ptr(&cpu_quarantine); + if 
(q->offline) { + local_irq_restore(flags); + return; + } qlist_put(q, &info->quarantine_link, cache->size); if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) { qlist_move_all(q, &temp); @@ -328,3 +334,36 @@ void quarantine_remove_cache(struct kmem_cache *cache) synchronize_srcu(&remove_cache_srcu); } + +static int kasan_cpu_online(unsigned int cpu) +{ + this_cpu_ptr(&cpu_quarantine)->offline = false; + return 0; +} + +static int kasan_cpu_offline(unsigned int cpu) +{ + struct qlist_head *q; + + q = this_cpu_ptr(&cpu_quarantine); + /* Ensure the ordering between the writing to q->offline and + * qlist_free_all. Otherwise, cpu_quarantine may be corrupted + * by interrupt. + */ + WRITE_ONCE(q->offline, true); + barrier(); + qlist_free_all(q, NULL); + return 0; +} + +static int __init kasan_cpu_quarantine_init(void) +{ + int ret = 0; + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/kasan:online", + kasan_cpu_online, kasan_cpu_offline); + if (ret < 0) + pr_err("kasan cpu quarantine register failed [%d]\n", ret); + return ret; +} +late_initcall(kasan_cpu_quarantine_init); diff --git a/mm/list_lru.c b/mm/list_lru.c index 5aa6e44bc2ae..fe230081690b 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -534,7 +534,6 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid, struct list_lru_node *nlru = &lru->node[nid]; int dst_idx = dst_memcg->kmemcg_id; struct list_lru_one *src, *dst; - bool set; /* * Since list_lru_{add,del} may be called under an IRQ-safe lock, @@ -546,11 +545,12 @@ static void memcg_drain_list_lru_node(struct list_lru *lru, int nid, dst = list_lru_from_memcg_idx(nlru, dst_idx); list_splice_init(&src->list, &dst->list); - set = (!dst->nr_items && src->nr_items); - dst->nr_items += src->nr_items; - if (set) + + if (src->nr_items) { + dst->nr_items += src->nr_items; memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru)); - src->nr_items = 0; + src->nr_items = 0; + } spin_unlock_irq(&nlru->lock); } diff --git a/mm/madvise.c b/mm/madvise.c index a8d8d48a57fe..13f5677b9322 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -1204,8 +1204,7 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec, goto put_pid; } - if (task->mm != current->mm && - !process_madvise_behavior_valid(behavior)) { + if (!process_madvise_behavior_valid(behavior)) { ret = -EINVAL; goto release_task; } diff --git a/mm/mmap.c b/mm/mmap.c index d91ecb00d38c..5c8b4485860d 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1808,6 +1808,17 @@ unsigned long mmap_region(struct file *file, unsigned long addr, if (error) goto unmap_and_free_vma; + /* Can addr have changed?? + * + * Answer: Yes, several device drivers can do it in their + * f_op->mmap method. -DaveM + * Bug: If addr is changed, prev, rb_link, rb_parent should + * be updated for vma_link() + */ + WARN_ON_ONCE(addr != vma->vm_start); + + addr = vma->vm_start; + /* If vm_flags changed after call_mmap(), we should try merge vma again * as we may succeed this time. */ @@ -1822,25 +1833,12 @@ unsigned long mmap_region(struct file *file, unsigned long addr, fput(vma->vm_file); vm_area_free(vma); vma = merge; - /* Update vm_flags and possible addr to pick up the change. We don't - * warn here if addr changed as the vma is not linked by vma_link(). - */ - addr = vma->vm_start; + /* Update vm_flags to pick up the change. */ vm_flags = vma->vm_flags; goto unmap_writable; } } - /* Can addr have changed?? - * - * Answer: Yes, several device drivers can do it in their - * f_op->mmap method. 
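The dropped task->mm != current->mm condition in the madvise hunk above means the behavior must be one of the process_madvise()-valid hints even when a process targets itself. A hedged userspace sketch using the raw syscall (the pidfd and range are illustrative, and sufficiently new headers are assumed for __NR_process_madvise and MADV_COLD):

#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

static long advise_cold(int pidfd, void *addr, size_t len)
{
	struct iovec iov = {
		.iov_base = addr,
		.iov_len = len,
	};

	/* flags must be 0; returns the number of bytes advised. */
	return syscall(__NR_process_madvise, pidfd, &iov, 1, MADV_COLD, 0);
}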
-DaveM - * Bug: If addr is changed, prev, rb_link, rb_parent should - * be updated for vma_link() - */ - WARN_ON_ONCE(addr != vma->vm_start); - - addr = vma->vm_start; vm_flags = vma->vm_flags; } else if (vm_flags & VM_SHARED) { error = shmem_zero_setup(vma); diff --git a/mm/slab.h b/mm/slab.h index 9a54a0cb5cca..ec3a4355dbf1 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -257,22 +257,32 @@ static inline size_t obj_full_size(struct kmem_cache *s) return s->size + sizeof(struct obj_cgroup *); } -static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s, - size_t objects, - gfp_t flags) +/* + * Returns false if the allocation should fail. + */ +static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s, + struct obj_cgroup **objcgp, + size_t objects, gfp_t flags) { struct obj_cgroup *objcg; + if (!memcg_kmem_enabled()) + return true; + + if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT)) + return true; + objcg = get_obj_cgroup_from_current(); if (!objcg) - return NULL; + return true; if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) { obj_cgroup_put(objcg); - return NULL; + return false; } - return objcg; + *objcgp = objcg; + return true; } static inline void mod_objcg_state(struct obj_cgroup *objcg, @@ -298,7 +308,7 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s, unsigned long off; size_t i; - if (!objcg) + if (!memcg_kmem_enabled() || !objcg) return; flags &= ~__GFP_ACCOUNT; @@ -380,11 +390,11 @@ static inline void memcg_free_page_obj_cgroups(struct page *page) { } -static inline struct obj_cgroup *memcg_slab_pre_alloc_hook(struct kmem_cache *s, - size_t objects, - gfp_t flags) +static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s, + struct obj_cgroup **objcgp, + size_t objects, gfp_t flags) { - return NULL; + return true; } static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s, @@ -488,9 +498,8 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, if (should_failslab(s, flags)) return NULL; - if (memcg_kmem_enabled() && - ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT))) - *objcgp = memcg_slab_pre_alloc_hook(s, size, flags); + if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags)) + return NULL; return s; } @@ -509,8 +518,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, s->flags, flags); } - if (memcg_kmem_enabled()) - memcg_slab_post_alloc_hook(s, objcg, flags, size, p); + memcg_slab_post_alloc_hook(s, objcg, flags, size, p); } #ifndef CONFIG_SLOB diff --git a/mm/swapfile.c b/mm/swapfile.c index c4a613688a17..d58361109066 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2867,6 +2867,7 @@ late_initcall(max_swapfiles_check); static struct swap_info_struct *alloc_swap_info(void) { struct swap_info_struct *p; + struct swap_info_struct *defer = NULL; unsigned int type; int i; @@ -2895,7 +2896,7 @@ static struct swap_info_struct *alloc_swap_info(void) smp_wmb(); WRITE_ONCE(nr_swapfiles, nr_swapfiles + 1); } else { - kvfree(p); + defer = p; p = swap_info[type]; /* * Do not memset this entry: a racing procfs swap_next() @@ -2908,6 +2909,7 @@ static struct swap_info_struct *alloc_swap_info(void) plist_node_init(&p->avail_lists[i], 0); p->flags = SWP_USED; spin_unlock(&swap_lock); + kvfree(defer); spin_lock_init(&p->lock); spin_lock_init(&p->cont_lock); diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 918c7b019b3d..cdfaaadea8ff 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -293,11 +293,7 @@ struct zspage { }; struct mapping_area { -#ifdef 
CONFIG_ZSMALLOC_PGTABLE_MAPPING - struct vm_struct *vm; /* vm area for mapping object that span pages */ -#else char *vm_buf; /* copy buffer for objects that span pages */ -#endif char *vm_addr; /* address of kmap_atomic()'ed pages */ enum zs_mapmode vm_mm; /* mapping mode */ }; @@ -1113,54 +1109,6 @@ static struct zspage *find_get_zspage(struct size_class *class) return zspage; } -#ifdef CONFIG_ZSMALLOC_PGTABLE_MAPPING -static inline int __zs_cpu_up(struct mapping_area *area) -{ - /* - * Make sure we don't leak memory if a cpu UP notification - * and zs_init() race and both call zs_cpu_up() on the same cpu - */ - if (area->vm) - return 0; - area->vm = get_vm_area(PAGE_SIZE * 2, 0); - if (!area->vm) - return -ENOMEM; - - /* - * Populate ptes in advance to avoid pte allocation with GFP_KERNEL - * in non-preemtible context of zs_map_object. - */ - return apply_to_page_range(&init_mm, (unsigned long)area->vm->addr, - PAGE_SIZE * 2, NULL, NULL); -} - -static inline void __zs_cpu_down(struct mapping_area *area) -{ - if (area->vm) - free_vm_area(area->vm); - area->vm = NULL; -} - -static inline void *__zs_map_object(struct mapping_area *area, - struct page *pages[2], int off, int size) -{ - unsigned long addr = (unsigned long)area->vm->addr; - - BUG_ON(map_kernel_range(addr, PAGE_SIZE * 2, PAGE_KERNEL, pages) < 0); - area->vm_addr = area->vm->addr; - return area->vm_addr + off; -} - -static inline void __zs_unmap_object(struct mapping_area *area, - struct page *pages[2], int off, int size) -{ - unsigned long addr = (unsigned long)area->vm_addr; - - unmap_kernel_range(addr, PAGE_SIZE * 2); -} - -#else /* CONFIG_ZSMALLOC_PGTABLE_MAPPING */ - static inline int __zs_cpu_up(struct mapping_area *area) { /* @@ -1241,8 +1189,6 @@ out: pagefault_enable(); } -#endif /* CONFIG_ZSMALLOC_PGTABLE_MAPPING */ - static int zs_cpu_prepare(unsigned int cpu) { struct mapping_area *area; diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index d0c1024bf600..4f1cd8063e72 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -758,6 +758,9 @@ static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode) conn = hci_lookup_le_connect(hdev); + if (hdev->adv_instance_cnt) + hci_req_resume_adv_instances(hdev); + if (!status) { hci_connect_le_scan_cleanup(conn); goto done; @@ -1067,10 +1070,11 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, * connections most controllers will refuse to connect if * advertising is enabled, and for slave role connections we * anyway have to disable it in order to start directed - * advertising. + * advertising. Any registered advertisements will be + * re-enabled after the connection attempt is finished. 
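With the page-table mapping variant gone, the copy-based per-cpu buffer is the only path for objects that span pages, but the zsmalloc API is unchanged for callers such as zram. A minimal usage sketch (pool creation and handle allocation elided):

#include <linux/string.h>
#include <linux/zsmalloc.h>

static void my_write_object(struct zs_pool *pool, unsigned long handle,
			    const void *src, size_t len)
{
	void *dst;

	/* May copy through a per-cpu buffer if the object spans pages. */
	dst = zs_map_object(pool, handle, ZS_MM_WO);
	memcpy(dst, src, len);
	zs_unmap_object(pool, handle);
}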
*/ if (hci_dev_test_flag(hdev, HCI_LE_ADV)) - __hci_req_disable_advertising(&req); + __hci_req_pause_adv_instances(&req); /* If requested to connect as slave use directed advertising */ if (conn->role == HCI_ROLE_SLAVE) { @@ -1118,6 +1122,10 @@ create_conn: err = hci_req_run(&req, create_le_conn_complete); if (err) { hci_conn_del(conn); + + if (hdev->adv_instance_cnt) + hci_req_resume_adv_instances(hdev); + return ERR_PTR(err); } diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 502552d6e9af..9d2c9a1c552f 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -741,6 +741,12 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt) hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL); } + if (hdev->commands[38] & 0x80) { + /* Read LE Min/Max Tx Power*/ + hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER, + 0, NULL); + } + if (hdev->commands[26] & 0x40) { /* Read LE White List Size */ hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, @@ -763,7 +769,7 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt) hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL); } - if (hdev->commands[35] & 0x40) { + if (hdev->commands[35] & 0x04) { __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout); /* Set RPA timeout */ @@ -2951,7 +2957,8 @@ static void adv_instance_rpa_expired(struct work_struct *work) int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, u16 adv_data_len, u8 *adv_data, u16 scan_rsp_len, u8 *scan_rsp_data, - u16 timeout, u16 duration) + u16 timeout, u16 duration, s8 tx_power, + u32 min_interval, u32 max_interval) { struct adv_info *adv_instance; @@ -2979,6 +2986,9 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, adv_instance->flags = flags; adv_instance->adv_data_len = adv_data_len; adv_instance->scan_rsp_len = scan_rsp_len; + adv_instance->min_interval = min_interval; + adv_instance->max_interval = max_interval; + adv_instance->tx_power = tx_power; if (adv_data_len) memcpy(adv_instance->adv_data, adv_data, adv_data_len); @@ -2995,8 +3005,6 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, else adv_instance->duration = duration; - adv_instance->tx_power = HCI_TX_POWER_INVALID; - INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb, adv_instance_rpa_expired); @@ -3006,6 +3014,37 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, } /* This function requires the caller holds hdev->lock */ +int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance, + u16 adv_data_len, u8 *adv_data, + u16 scan_rsp_len, u8 *scan_rsp_data) +{ + struct adv_info *adv_instance; + + adv_instance = hci_find_adv_instance(hdev, instance); + + /* If advertisement doesn't exist, we can't modify its data */ + if (!adv_instance) + return -ENOENT; + + if (adv_data_len) { + memset(adv_instance->adv_data, 0, + sizeof(adv_instance->adv_data)); + memcpy(adv_instance->adv_data, adv_data, adv_data_len); + adv_instance->adv_data_len = adv_data_len; + } + + if (scan_rsp_len) { + memset(adv_instance->scan_rsp_data, 0, + sizeof(adv_instance->scan_rsp_data)); + memcpy(adv_instance->scan_rsp_data, + scan_rsp_data, scan_rsp_len); + adv_instance->scan_rsp_len = scan_rsp_len; + } + + return 0; +} + +/* This function requires the caller holds hdev->lock */ void hci_adv_monitors_clear(struct hci_dev *hdev) { struct adv_monitor *monitor; @@ -3592,6 +3631,10 @@ struct hci_dev *hci_alloc_dev(void) hdev->cur_adv_instance = 0x00; hdev->adv_instance_timeout = 0; + 
hdev->advmon_allowlist_duration = 300; + hdev->advmon_no_filter_duration = 500; + hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */ + hdev->sniff_max_interval = 800; hdev->sniff_min_interval = 80; @@ -3623,6 +3666,8 @@ struct hci_dev *hci_alloc_dev(void) hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES; hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION; hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT; + hdev->min_le_tx_power = HCI_TX_POWER_INVALID; + hdev->max_le_tx_power = HCI_TX_POWER_INVALID; hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c index 5e8af2658e44..4626e0289a97 100644 --- a/net/bluetooth/hci_debugfs.c +++ b/net/bluetooth/hci_debugfs.c @@ -494,6 +494,45 @@ static int auto_accept_delay_get(void *data, u64 *val) DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, auto_accept_delay_set, "%llu\n"); +static ssize_t force_bredr_smp_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + char buf[3]; + + buf[0] = hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP) ? 'Y' : 'N'; + buf[1] = '\n'; + buf[2] = '\0'; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t force_bredr_smp_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + bool enable; + int err; + + err = kstrtobool_from_user(user_buf, count, &enable); + if (err) + return err; + + err = smp_force_bredr(hdev, enable); + if (err) + return err; + + return count; +} + +static const struct file_operations force_bredr_smp_fops = { + .open = simple_open, + .read = force_bredr_smp_read, + .write = force_bredr_smp_write, + .llseek = default_llseek, +}; + static int idle_timeout_set(void *data, u64 val) { struct hci_dev *hdev = data; @@ -589,6 +628,17 @@ void hci_debugfs_create_bredr(struct hci_dev *hdev) debugfs_create_file("voice_setting", 0444, hdev->debugfs, hdev, &voice_setting_fops); + /* If the controller does not support BR/EDR Secure Connections + * feature, then the BR/EDR SMP channel shall not be present. + * + * To test this with Bluetooth 4.0 controllers, create a debugfs + * switch that allows forcing BR/EDR SMP support and accepting + * cross-transport pairing on non-AES encrypted connections. 
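A hedged sketch of calling the widened hci_add_adv_instance() shown above; all values are illustrative, the caller must hold hdev->lock per the convention noted in the surrounding code, and the advertising interval unit is the usual 0.625 ms per step:

/* caller must hold hdev->lock */
static int my_register_instance(struct hci_dev *hdev)
{
	return hci_add_adv_instance(hdev, 0x01, 0 /* flags */,
				    0, NULL,	/* adv data added later */
				    0, NULL,	/* scan rsp added later */
				    0 /* timeout */, 0 /* duration */,
				    HCI_ADV_TX_POWER_NO_PREFERENCE,
				    0x00a0,	/* min interval: 100 ms */
				    0x00f0);	/* max interval: 150 ms */
}

The payload can then be attached or refreshed separately with hci_set_adv_instance_data(), which is what lets parameters and data arrive as two distinct MGMT commands (MGMT_OP_ADD_EXT_ADV_PARAMS and MGMT_OP_ADD_EXT_ADV_DATA).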
+ */ + if (!lmp_sc_capable(hdev)) + debugfs_create_file("force_bredr_smp", 0644, hdev->debugfs, + hdev, &force_bredr_smp_fops); + if (lmp_ssp_capable(hdev)) { debugfs_create_file("ssp_debug_mode", 0444, hdev->debugfs, hdev, &ssp_debug_mode_fops); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index f04963914366..67668be3461e 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1202,6 +1202,20 @@ static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, hci_dev_unlock(hdev); } +static void hci_cc_le_read_transmit_power(struct hci_dev *hdev, + struct sk_buff *skb) +{ + struct hci_rp_le_read_transmit_power *rp = (void *)skb->data; + + BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); + + if (rp->status) + return; + + hdev->min_le_tx_power = rp->min_le_tx_power; + hdev->max_le_tx_power = rp->max_le_tx_power; +} + static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb) { __u8 *sent, status = *((__u8 *) skb->data); @@ -1752,6 +1766,7 @@ static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb) } /* Update adv data as tx power is known now */ hci_req_update_adv_data(hdev, hdev->cur_adv_instance); + hci_dev_unlock(hdev); } @@ -3581,6 +3596,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, hci_cc_le_set_adv_set_random_addr(hdev, skb); break; + case HCI_OP_LE_READ_TRANSMIT_POWER: + hci_cc_le_read_transmit_power(hdev, skb); + break; + default: BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode); break; @@ -4936,15 +4955,15 @@ static void hci_phy_link_complete_evt(struct hci_dev *hdev, hci_dev_lock(hdev); hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); - if (!hcon) { - hci_dev_unlock(hdev); - return; - } + if (!hcon) + goto unlock; + + if (!hcon->amp_mgr) + goto unlock; if (ev->status) { hci_conn_del(hcon); - hci_dev_unlock(hdev); - return; + goto unlock; } bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon; @@ -4961,6 +4980,7 @@ static void hci_phy_link_complete_evt(struct hci_dev *hdev, amp_physical_cfm(bredr_hcon, hcon); +unlock: hci_dev_unlock(hdev); } @@ -5868,21 +5888,19 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) { u8 num_reports = skb->data[0]; - void *ptr = &skb->data[1]; + struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1]; - hci_dev_lock(hdev); + if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1) + return; - while (num_reports--) { - struct hci_ev_le_direct_adv_info *ev = ptr; + hci_dev_lock(hdev); + for (; num_reports; num_reports--, ev++) process_adv_report(hdev, ev->evt_type, &ev->bdaddr, ev->bdaddr_type, &ev->direct_addr, ev->direct_addr_type, ev->rssi, NULL, 0, false); - ptr += sizeof(*ev); - } - hci_dev_unlock(hdev); } diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 6f12bab4d2fa..71bffd745472 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -58,7 +58,7 @@ static int req_run(struct hci_request *req, hci_req_complete_t complete, struct sk_buff *skb; unsigned long flags; - BT_DBG("length %u", skb_queue_len(&req->cmd_q)); + bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q)); /* If an error occurred during request building, remove all HCI * commands queued on the HCI request queue. 
@@ -102,7 +102,7 @@ int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete) static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, struct sk_buff *skb) { - BT_DBG("%s result 0x%2.2x", hdev->name, result); + bt_dev_dbg(hdev, "result 0x%2.2x", result); if (hdev->req_status == HCI_REQ_PEND) { hdev->req_result = result; @@ -115,7 +115,7 @@ static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, void hci_req_sync_cancel(struct hci_dev *hdev, int err) { - BT_DBG("%s err 0x%2.2x", hdev->name, err); + bt_dev_dbg(hdev, "err 0x%2.2x", err); if (hdev->req_status == HCI_REQ_PEND) { hdev->req_result = err; @@ -131,7 +131,7 @@ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, struct sk_buff *skb; int err = 0; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, ""); hci_req_init(&req, hdev); @@ -167,7 +167,7 @@ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, skb = hdev->req_skb; hdev->req_skb = NULL; - BT_DBG("%s end: err %d", hdev->name, err); + bt_dev_dbg(hdev, "end: err %d", err); if (err < 0) { kfree_skb(skb); @@ -196,7 +196,7 @@ int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, struct hci_request req; int err = 0; - BT_DBG("%s start", hdev->name); + bt_dev_dbg(hdev, "start"); hci_req_init(&req, hdev); @@ -260,7 +260,7 @@ int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, hdev->req_skb = NULL; hdev->req_status = hdev->req_result = 0; - BT_DBG("%s end: err %d", hdev->name, err); + bt_dev_dbg(hdev, "end: err %d", err); return err; } @@ -300,7 +300,7 @@ struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, if (plen) skb_put_data(skb, param, plen); - BT_DBG("skb len %d", skb->len); + bt_dev_dbg(hdev, "skb len %d", skb->len); hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; hci_skb_opcode(skb) = opcode; @@ -315,7 +315,7 @@ void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, struct hci_dev *hdev = req->hdev; struct sk_buff *skb; - BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); + bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); /* If an error occurred during request building, there is no point in * queueing the HCI command. We can simply return. @@ -378,6 +378,53 @@ void __hci_req_write_fast_connectable(struct hci_request *req, bool enable) hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type); } +static void start_interleave_scan(struct hci_dev *hdev) +{ + hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; + queue_delayed_work(hdev->req_workqueue, + &hdev->interleave_scan, 0); +} + +static bool is_interleave_scanning(struct hci_dev *hdev) +{ + return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; +} + +static void cancel_interleave_scan(struct hci_dev *hdev) +{ + bt_dev_dbg(hdev, "cancelling interleave scan"); + + cancel_delayed_work_sync(&hdev->interleave_scan); + + hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE; +} + +/* Return true if an interleave scan was started by this call; otherwise + * return false. + */ +static bool __hci_update_interleaved_scan(struct hci_dev *hdev) +{ + /* If there is at least one ADV monitor and one pending LE connection + * or one device to be scanned for, we should alternate between + * allowlist scan and one without any filters to save power.
+ */ + bool use_interleaving = hci_is_adv_monitoring(hdev) && + !(list_empty(&hdev->pend_le_conns) && + list_empty(&hdev->pend_le_reports)); + bool is_interleaving = is_interleave_scanning(hdev); + + if (use_interleaving && !is_interleaving) { + start_interleave_scan(hdev); + bt_dev_dbg(hdev, "starting interleave scan"); + return true; + } + + if (!use_interleaving && is_interleaving) + cancel_interleave_scan(hdev); + + return false; +} + /* This function controls the background scanning based on hdev->pend_le_conns * list. If there are pending LE connection we start the background scanning, * otherwise we stop it. @@ -413,8 +460,8 @@ static void __hci_update_background_scan(struct hci_request *req) */ hci_discovery_filter_clear(hdev); - BT_DBG("%s ADV monitoring is %s", hdev->name, - hci_is_adv_monitoring(hdev) ? "on" : "off"); + bt_dev_dbg(hdev, "ADV monitoring is %s", + hci_is_adv_monitoring(hdev) ? "on" : "off"); if (list_empty(&hdev->pend_le_conns) && list_empty(&hdev->pend_le_reports) && @@ -430,7 +477,7 @@ static void __hci_update_background_scan(struct hci_request *req) hci_req_add_le_scan_disable(req, false); - BT_DBG("%s stopping background scanning", hdev->name); + bt_dev_dbg(hdev, "stopping background scanning"); } else { /* If there is at least one pending LE connection, we should * keep the background scan running. @@ -450,8 +497,7 @@ static void __hci_update_background_scan(struct hci_request *req) hci_req_add_le_scan_disable(req, false); hci_req_add_le_passive_scan(req); - - BT_DBG("%s starting background scanning", hdev->name); + bt_dev_dbg(hdev, "starting background scanning"); } } @@ -661,6 +707,9 @@ void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn) return; } + if (hdev->suspended) + set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); + if (use_ext_scan(hdev)) { struct hci_cp_le_set_ext_scan_enable cp; @@ -698,7 +747,8 @@ static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr, cp.bdaddr_type); hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp); - if (use_ll_privacy(req->hdev)) { + if (use_ll_privacy(req->hdev) && + hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) { struct smp_irk *irk; irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type); @@ -732,7 +782,8 @@ static int add_to_white_list(struct hci_request *req, return -1; /* White list can not be used with RPAs */ - if (!allow_rpa && !use_ll_privacy(hdev) && + if (!allow_rpa && + !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type)) { return -1; } @@ -750,7 +801,8 @@ static int add_to_white_list(struct hci_request *req, cp.bdaddr_type); hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp); - if (use_ll_privacy(hdev)) { + if (use_ll_privacy(hdev) && + hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) { struct smp_irk *irk; irk = hci_find_irk_by_addr(hdev, ¶ms->addr, @@ -812,7 +864,8 @@ static u8 update_white_list(struct hci_request *req) } /* White list can not be used with RPAs */ - if (!allow_rpa && !use_ll_privacy(hdev) && + if (!allow_rpa && + !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) { return 0x00; } @@ -844,12 +897,17 @@ static u8 update_white_list(struct hci_request *req) return 0x00; } - /* Once the controller offloading of advertisement monitor is in place, - * the if condition should include the support of MSFT extension - * support. 
If suspend is ongoing, whitelist should be the default to - * prevent waking by random advertisements. + /* Use the allowlist unless the following conditions are all true: + * - We are not currently suspending + * - There are 1 or more ADV monitors registered + * - Interleaved scanning is not currently using the allowlist + * + * Once the controller offloading of advertisement monitor is in place, + * the above condition should include the support of MSFT extension + * support. */ - if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended) + if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended && + hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST) return 0x00; /* Select filter policy to use white list */ @@ -1002,6 +1060,11 @@ void hci_req_add_le_passive_scan(struct hci_request *req) &own_addr_type)) return; + if (hdev->enable_advmon_interleave_scan && + __hci_update_interleaved_scan(hdev)) + return; + + bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state); /* Adding or removing entries from the white list must * happen before enabling scanning. The controller does * not allow white list modification while scanning. @@ -1040,22 +1103,23 @@ void hci_req_add_le_passive_scan(struct hci_request *req) own_addr_type, filter_policy, addr_resolv); } -static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance) +static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance) { struct adv_info *adv_instance; /* Instance 0x00 always set local name */ if (instance == 0x00) - return 1; + return true; adv_instance = hci_find_adv_instance(hdev, instance); if (!adv_instance) - return 0; + return false; - /* TODO: Take into account the "appearance" and "local-name" flags here. - * These are currently being ignored as they are not supported. - */ - return adv_instance->scan_rsp_len; + if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE || + adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME) + return true; + + return adv_instance->scan_rsp_len ? true : false; } static void hci_req_clear_event_filter(struct hci_request *req) @@ -1098,6 +1162,11 @@ static void hci_req_set_event_filter(struct hci_request *req) scan = SCAN_PAGE; } + if (scan) + set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks); + else + set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); + hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); } @@ -1123,9 +1192,9 @@ static void cancel_adv_timeout(struct hci_dev *hdev) } /* This function requires the caller holds hdev->lock */ -static void hci_suspend_adv_instances(struct hci_request *req) +void __hci_req_pause_adv_instances(struct hci_request *req) { - bt_dev_dbg(req->hdev, "Suspending advertising instances"); + bt_dev_dbg(req->hdev, "Pausing advertising instances"); /* Call to disable any advertisements active on the controller. * This will succeed even if no advertisements are configured. 
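/*
 * Editor's note: update_white_list above now keys the scan filter policy off
 * three conditions (suspend state, registered ADV monitors, and the current
 * interleave-scan leg). A compact restatement of that decision; the helper
 * and field names below are invented for illustration, the semantics follow
 * the hunk.
 */
#include <stdbool.h>

enum ilv_state { ILV_NONE, ILV_ALLOWLIST, ILV_NO_FILTER };

struct scan_ctx {
	bool suspended;          /* hdev->suspended */
	bool have_adv_monitors;  /* !idr_is_empty(&hdev->adv_monitors_idr) */
	enum ilv_state ilv;      /* hdev->interleave_scan_state */
};

/* true: scan with the allowlist (policy 0x01); false: unfiltered (0x00) */
static bool use_allowlist(const struct scan_ctx *c)
{
	/* Scan unfiltered only when monitors must see all advertisers, we
	 * are not suspending (random ADVs must not wake the host), and the
	 * interleaver is not already on its allowlist leg.
	 */
	if (c->have_adv_monitors && !c->suspended && c->ilv != ILV_ALLOWLIST)
		return false;
	return true;
}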
@@ -1138,7 +1207,7 @@ static void hci_suspend_adv_instances(struct hci_request *req) } /* This function requires the caller holds hdev->lock */ -static void hci_resume_adv_instances(struct hci_request *req) +static void __hci_req_resume_adv_instances(struct hci_request *req) { struct adv_info *adv; @@ -1161,6 +1230,17 @@ static void hci_resume_adv_instances(struct hci_request *req) } } +/* This function requires the caller holds hdev->lock */ +int hci_req_resume_adv_instances(struct hci_dev *hdev) +{ + struct hci_request req; + + hci_req_init(&req, hdev); + __hci_req_resume_adv_instances(&req); + + return hci_req_run(&req, NULL); +} + static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode) { bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode, @@ -1214,7 +1294,7 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next) /* Pause other advertisements */ if (hdev->adv_instance_cnt) - hci_suspend_adv_instances(&req); + __hci_req_pause_adv_instances(&req); hdev->advertising_paused = true; hdev->advertising_old_state = old_state; @@ -1223,8 +1303,10 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next) hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan); /* Disable LE passive scan if enabled */ - if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) + if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { + cancel_interleave_scan(hdev); hci_req_add_le_scan_disable(&req, false); + } /* Mark task needing completion */ set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); @@ -1279,7 +1361,7 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next) /* Resume other advertisements */ if (hdev->adv_instance_cnt) - hci_resume_adv_instances(&req); + __hci_req_resume_adv_instances(&req); /* Unpause discovery */ hdev->discovery_paused = false; @@ -1300,23 +1382,9 @@ done: wake_up(&hdev->suspend_wait_q); } -static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev) +static bool adv_cur_instance_is_scannable(struct hci_dev *hdev) { - u8 instance = hdev->cur_adv_instance; - struct adv_info *adv_instance; - - /* Instance 0x00 always set local name */ - if (instance == 0x00) - return 1; - - adv_instance = hci_find_adv_instance(hdev, instance); - if (!adv_instance) - return 0; - - /* TODO: Take into account the "appearance" and "local-name" flags here. - * These are currently being ignored as they are not supported. - */ - return adv_instance->scan_rsp_len; + return adv_instance_is_scannable(hdev, hdev->cur_adv_instance); } void __hci_req_disable_advertising(struct hci_request *req) @@ -1428,6 +1496,7 @@ static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable) void __hci_req_enable_advertising(struct hci_request *req) { struct hci_dev *hdev = req->hdev; + struct adv_info *adv_instance; struct hci_cp_le_set_adv_param cp; u8 own_addr_type, enable = 0x01; bool connectable; @@ -1435,6 +1504,7 @@ void __hci_req_enable_advertising(struct hci_request *req) u32 flags; flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance); + adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance); /* If the "connectable" instance flag was not set, then choose between * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. 
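/*
 * Editor's note: the __hci_req_enable_advertising hunk that follows selects
 * the legacy advertising PDU type from two booleans. The mapping, restated
 * as a self-contained function; the enum constants are stand-ins for the
 * LE_ADV_* values in the Bluetooth headers.
 */
#include <stdbool.h>

enum adv_type { ADV_IND, ADV_SCAN_IND, ADV_NONCONN_IND };

static enum adv_type pick_adv_type(bool connectable, bool scannable)
{
	if (connectable)
		return ADV_IND;         /* connectable (and scannable) */
	if (scannable)
		return ADV_SCAN_IND;    /* accepts scan requests only */
	return ADV_NONCONN_IND;         /* pure broadcast */
}

/* e.g. a scannable but non-connectable instance advertises as ADV_SCAN_IND */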
@@ -1466,13 +1536,18 @@ void __hci_req_enable_advertising(struct hci_request *req) memset(&cp, 0, sizeof(cp)); - if (connectable) { - cp.type = LE_ADV_IND; - + if (adv_instance) { + adv_min_interval = adv_instance->min_interval; + adv_max_interval = adv_instance->max_interval; + } else { adv_min_interval = hdev->le_adv_min_interval; adv_max_interval = hdev->le_adv_max_interval; + } + + if (connectable) { + cp.type = LE_ADV_IND; } else { - if (get_cur_adv_instance_scan_rsp_len(hdev)) + if (adv_cur_instance_is_scannable(hdev)) cp.type = LE_ADV_SCAN_IND; else cp.type = LE_ADV_NONCONN_IND; @@ -1481,9 +1556,6 @@ void __hci_req_enable_advertising(struct hci_request *req) hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN; adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX; - } else { - adv_min_interval = hdev->le_adv_min_interval; - adv_max_interval = hdev->le_adv_max_interval; } } @@ -1591,14 +1663,11 @@ void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance) memset(&cp, 0, sizeof(cp)); - /* Extended scan response data doesn't allow a response to be - * set if the instance isn't scannable. - */ - if (get_adv_instance_scan_rsp_len(hdev, instance)) + if (instance) len = create_instance_scan_rsp_data(hdev, instance, cp.data); else - len = 0; + len = create_default_scan_rsp_data(hdev, cp.data); if (hdev->scan_rsp_data_len == len && !memcmp(cp.data, hdev->scan_rsp_data, len)) @@ -1811,7 +1880,7 @@ void hci_req_disable_address_resolution(struct hci_dev *hdev) static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) { - BT_DBG("%s status %u", hdev->name, status); + bt_dev_dbg(hdev, "status %u", status); } void hci_req_reenable_advertising(struct hci_dev *hdev) @@ -1848,7 +1917,7 @@ static void adv_timeout_expire(struct work_struct *work) struct hci_request req; u8 instance; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); @@ -1871,6 +1940,62 @@ unlock: hci_dev_unlock(hdev); } +static int hci_req_add_le_interleaved_scan(struct hci_request *req, + unsigned long opt) +{ + struct hci_dev *hdev = req->hdev; + int ret = 0; + + hci_dev_lock(hdev); + + if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) + hci_req_add_le_scan_disable(req, false); + hci_req_add_le_passive_scan(req); + + switch (hdev->interleave_scan_state) { + case INTERLEAVE_SCAN_ALLOWLIST: + bt_dev_dbg(hdev, "next state: allowlist"); + hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; + break; + case INTERLEAVE_SCAN_NO_FILTER: + bt_dev_dbg(hdev, "next state: no filter"); + hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST; + break; + case INTERLEAVE_SCAN_NONE: + BT_ERR("unexpected error"); + ret = -1; + } + + hci_dev_unlock(hdev); + + return ret; +} + +static void interleave_scan_work(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + interleave_scan.work); + u8 status; + unsigned long timeout; + + if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) { + timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration); + } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) { + timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration); + } else { + bt_dev_err(hdev, "unexpected error"); + return; + } + + hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0, + HCI_CMD_TIMEOUT, &status); + + /* Don't continue interleaving if it was canceled */ + if (is_interleave_scanning(hdev)) + queue_delayed_work(hdev->req_workqueue, + &hdev->interleave_scan, timeout); +} + int 
hci_get_random_address(struct hci_dev *hdev, bool require_privacy, bool use_rpa, struct adv_info *adv_instance, u8 *own_addr_type, bdaddr_t *rand_addr) @@ -2006,9 +2131,15 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) memset(&cp, 0, sizeof(cp)); - /* In ext adv set param interval is 3 octets */ - hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); - hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); + if (adv_instance) { + hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval); + hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval); + cp.tx_power = adv_instance->tx_power; + } else { + hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); + hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); + cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE; + } secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK); @@ -2017,7 +2148,7 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND); else cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND); - } else if (get_adv_instance_scan_rsp_len(hdev, instance)) { + } else if (adv_instance_is_scannable(hdev, instance)) { if (secondary_adv) cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND); else @@ -2031,7 +2162,6 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) cp.own_addr_type = own_addr_type; cp.channel_map = hdev->le_adv_channel_map; - cp.tx_power = 127; cp.handle = instance; if (flags & MGMT_ADV_FLAG_SEC_2M) { @@ -2332,7 +2462,7 @@ static void set_random_addr(struct hci_request *req, bdaddr_t *rpa) */ if (hci_dev_test_flag(hdev, HCI_LE_ADV) || hci_lookup_le_connect(hdev)) { - BT_DBG("Deferring random address update"); + bt_dev_dbg(hdev, "Deferring random address update"); hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); return; } @@ -2557,7 +2687,7 @@ void __hci_req_update_class(struct hci_request *req) struct hci_dev *hdev = req->hdev; u8 cod[3]; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, ""); if (!hdev_is_powered(hdev)) return; @@ -2726,7 +2856,7 @@ void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn, static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode) { if (status) - BT_DBG("Failed to abort connection: status 0x%2.2x", status); + bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status); } int hci_abort_conn(struct hci_conn *conn, u8 reason) @@ -2789,7 +2919,7 @@ static int bredr_inquiry(struct hci_request *req, unsigned long opt) const u8 liac[3] = { 0x00, 0x8b, 0x9e }; struct hci_cp_inquiry cp; - BT_DBG("%s", req->hdev->name); + bt_dev_dbg(req->hdev, ""); hci_dev_lock(req->hdev); hci_inquiry_cache_flush(req->hdev); @@ -2815,7 +2945,7 @@ static void le_scan_disable_work(struct work_struct *work) le_scan_disable.work); u8 status; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, ""); if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) return; @@ -2911,7 +3041,7 @@ static void le_scan_restart_work(struct work_struct *work) unsigned long timeout, duration, scan_start, now; u8 status; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, ""); hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status); if (status) { @@ -2965,14 +3095,16 @@ static int active_scan(struct hci_request *req, unsigned long opt) bool addr_resolv = false; int err; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, ""); /* If controller is scanning, it means the background scanning is * running. 
Thus, we should temporarily stop it in order to set the * discovery scanning parameters. */ - if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) + if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { hci_req_add_le_scan_disable(req, false); + cancel_interleave_scan(hdev); + } /* All active scans will be done with either a resolvable private * address (when privacy feature has been enabled) or non-resolvable @@ -2993,7 +3125,7 @@ static int interleaved_discov(struct hci_request *req, unsigned long opt) { int err; - BT_DBG("%s", req->hdev->name); + bt_dev_dbg(req->hdev, ""); err = active_scan(req, opt); if (err) @@ -3006,7 +3138,7 @@ static void start_discovery(struct hci_dev *hdev, u8 *status) { unsigned long timeout; - BT_DBG("%s type %u", hdev->name, hdev->discovery.type); + bt_dev_dbg(hdev, "type %u", hdev->discovery.type); switch (hdev->discovery.type) { case DISCOV_TYPE_BREDR: @@ -3054,7 +3186,7 @@ static void start_discovery(struct hci_dev *hdev, u8 *status) if (*status) return; - BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout)); + bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout)); /* When service discovery is used and the controller has a * strict duplicate filter, it is important to remember the @@ -3079,7 +3211,7 @@ bool hci_req_stop_discovery(struct hci_request *req) struct inquiry_entry *e; bool ret = false; - BT_DBG("%s state %u", hdev->name, hdev->discovery.state); + bt_dev_dbg(hdev, "state %u", hdev->discovery.state); if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) { if (test_bit(HCI_INQUIRY, &hdev->flags)) @@ -3159,7 +3291,7 @@ static void discov_off(struct work_struct *work) struct hci_dev *hdev = container_of(work, struct hci_dev, discov_off.work); - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); @@ -3298,6 +3430,7 @@ void hci_request_setup(struct hci_dev *hdev) INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work); INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work); INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire); + INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work); } void hci_request_cancel_all(struct hci_dev *hdev) @@ -3317,4 +3450,6 @@ void hci_request_cancel_all(struct hci_dev *hdev) cancel_delayed_work_sync(&hdev->adv_instance_expire); hdev->adv_instance_timeout = 0; } + + cancel_interleave_scan(hdev); } diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h index 6a12e84c66c4..39ee8a18087a 100644 --- a/net/bluetooth/hci_request.h +++ b/net/bluetooth/hci_request.h @@ -71,6 +71,8 @@ void hci_req_add_le_passive_scan(struct hci_request *req); void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next); void hci_req_disable_address_resolution(struct hci_dev *hdev); +void __hci_req_pause_adv_instances(struct hci_request *req); +int hci_req_resume_adv_instances(struct hci_dev *hdev); void hci_req_reenable_advertising(struct hci_dev *hdev); void __hci_req_enable_advertising(struct hci_request *req); void __hci_req_disable_advertising(struct hci_request *req); diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 3b4fa27a44e6..0db48c812662 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -1290,7 +1290,7 @@ static int hidp_session_thread(void *arg) /* cleanup runtime environment */ remove_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait); - remove_wait_queue(sk_sleep(session->intr_sock->sk), &ctrl_wait); + remove_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait); 
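/*
 * Editor's note: hci_request_setup/hci_request_cancel_all above complete the
 * lifecycle of the interleave_scan delayed work added earlier: initialized at
 * setup, re-queued from interleave_scan_work, cancelled synchronously on
 * teardown. A self-contained model of the rotation that work drives; the
 * durations are placeholders, not the kernel defaults.
 */
#include <stdio.h>

enum ilv_leg { LEG_NONE, LEG_ALLOWLIST, LEG_NO_FILTER };

struct interleaver {
	enum ilv_leg leg;
	unsigned int allowlist_ms;   /* advmon_allowlist_duration */
	unsigned int no_filter_ms;   /* advmon_no_filter_duration */
};

/* One work invocation: return how long to scan this leg, then flip legs.
 * Returning 0 models the cancelled state: the work is not re-queued.
 */
static unsigned int interleave_step(struct interleaver *s)
{
	switch (s->leg) {
	case LEG_ALLOWLIST:
		s->leg = LEG_NO_FILTER;
		return s->allowlist_ms;
	case LEG_NO_FILTER:
		s->leg = LEG_ALLOWLIST;
		return s->no_filter_ms;
	default:
		return 0;
	}
}

int main(void)
{
	struct interleaver s = { LEG_NO_FILTER, 300, 500 };

	for (int i = 0; i < 4; i++)
		printf("scan leg for %u ms\n", interleave_step(&s));
	return 0;
}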
wake_up_interruptible(&session->report_queue); hidp_del_timer(session); diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 1ab27b90ddcb..17b87b57a175 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -1515,8 +1515,14 @@ static bool l2cap_check_enc_key_size(struct hci_conn *hcon) * that have no key size requirements. Ensure that the link is * actually encrypted before enforcing a key size. */ + int min_key_size = hcon->hdev->min_enc_key_size; + + /* On FIPS security level, key size must be 16 bytes */ + if (hcon->sec_level == BT_SECURITY_FIPS) + min_key_size = 16; + return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) || - hcon->enc_key_size >= hcon->hdev->min_enc_key_size); + hcon->enc_key_size >= min_key_size); } static void l2cap_do_start(struct l2cap_chan *chan) @@ -3627,7 +3633,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data if (hint) break; result = L2CAP_CONF_UNKNOWN; - *((u8 *) ptr++) = type; + l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr); break; } } diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 12d7b368b428..fa0f7a4a1d2f 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -40,7 +40,7 @@ #include "msft.h" #define MGMT_VERSION 1 -#define MGMT_REVISION 18 +#define MGMT_REVISION 19 static const u16 mgmt_commands[] = { MGMT_OP_READ_INDEX_LIST, @@ -110,7 +110,7 @@ static const u16 mgmt_commands[] = { MGMT_OP_SET_APPEARANCE, MGMT_OP_SET_BLOCKED_KEYS, MGMT_OP_SET_WIDEBAND_SPEECH, - MGMT_OP_READ_SECURITY_INFO, + MGMT_OP_READ_CONTROLLER_CAP, MGMT_OP_READ_EXP_FEATURES_INFO, MGMT_OP_SET_EXP_FEATURE, MGMT_OP_READ_DEF_SYSTEM_CONFIG, @@ -122,6 +122,8 @@ static const u16 mgmt_commands[] = { MGMT_OP_READ_ADV_MONITOR_FEATURES, MGMT_OP_ADD_ADV_PATTERNS_MONITOR, MGMT_OP_REMOVE_ADV_MONITOR, + MGMT_OP_ADD_EXT_ADV_PARAMS, + MGMT_OP_ADD_EXT_ADV_DATA, }; static const u16 mgmt_events[] = { @@ -174,7 +176,7 @@ static const u16 mgmt_untrusted_commands[] = { MGMT_OP_READ_CONFIG_INFO, MGMT_OP_READ_EXT_INDEX_LIST, MGMT_OP_READ_EXT_INFO, - MGMT_OP_READ_SECURITY_INFO, + MGMT_OP_READ_CONTROLLER_CAP, MGMT_OP_READ_EXP_FEATURES_INFO, MGMT_OP_READ_DEF_SYSTEM_CONFIG, MGMT_OP_READ_DEF_RUNTIME_CONFIG, @@ -3387,7 +3389,7 @@ static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data, static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { - struct mgmt_rp_get_phy_confguration rp; + struct mgmt_rp_get_phy_configuration rp; bt_dev_dbg(hdev, "sock %p", sk); @@ -3451,7 +3453,7 @@ unlock: static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { - struct mgmt_cp_set_phy_confguration *cp = data; + struct mgmt_cp_set_phy_configuration *cp = data; struct hci_cp_le_set_default_phy cp_phy; struct mgmt_pending_cmd *cmd; struct hci_request req; @@ -3708,13 +3710,14 @@ unlock: return err; } -static int read_security_info(struct sock *sk, struct hci_dev *hdev, - void *data, u16 data_len) +static int read_controller_cap(struct sock *sk, struct hci_dev *hdev, + void *data, u16 data_len) { - char buf[16]; - struct mgmt_rp_read_security_info *rp = (void *)buf; - u16 sec_len = 0; + char buf[20]; + struct mgmt_rp_read_controller_cap *rp = (void *)buf; + u16 cap_len = 0; u8 flags = 0; + u8 tx_power_range[2]; bt_dev_dbg(hdev, "sock %p", sk); @@ -3738,23 +3741,37 @@ static int read_security_info(struct sock *sk, struct hci_dev *hdev, flags |= 0x08; /* Encryption key size enforcement (LE) */ - sec_len = 
eir_append_data(rp->sec, sec_len, 0x01, &flags, 1); + cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS, + &flags, 1); /* When the Read Simple Pairing Options command is supported, then * also max encryption key size information is provided. */ if (hdev->commands[41] & 0x08) - sec_len = eir_append_le16(rp->sec, sec_len, 0x02, + cap_len = eir_append_le16(rp->cap, cap_len, + MGMT_CAP_MAX_ENC_KEY_SIZE, hdev->max_enc_key_size); - sec_len = eir_append_le16(rp->sec, sec_len, 0x03, SMP_MAX_ENC_KEY_SIZE); + cap_len = eir_append_le16(rp->cap, cap_len, + MGMT_CAP_SMP_MAX_ENC_KEY_SIZE, + SMP_MAX_ENC_KEY_SIZE); + + /* Append the min/max LE tx power parameters if we were able to fetch + * it from the controller + */ + if (hdev->commands[38] & 0x80) { + memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1); + memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1); + cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR, + tx_power_range, 2); + } - rp->sec_len = cpu_to_le16(sec_len); + rp->cap_len = cpu_to_le16(cap_len); hci_dev_unlock(hdev); - return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_SECURITY_INFO, 0, - rp, sizeof(*rp) + sec_len); + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0, + rp, sizeof(*rp) + cap_len); } #ifdef CONFIG_BT_FEATURE_DEBUG @@ -7203,6 +7220,10 @@ static u32 get_supported_adv_flags(struct hci_dev *hdev) flags |= MGMT_ADV_FLAG_MANAGED_FLAGS; flags |= MGMT_ADV_FLAG_APPEARANCE; flags |= MGMT_ADV_FLAG_LOCAL_NAME; + flags |= MGMT_ADV_PARAM_DURATION; + flags |= MGMT_ADV_PARAM_TIMEOUT; + flags |= MGMT_ADV_PARAM_INTERVALS; + flags |= MGMT_ADV_PARAM_TX_POWER; /* In extended adv TX_POWER returned from Set Adv Param * will be always valid. @@ -7377,6 +7398,31 @@ static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data, return true; } +static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags) +{ + u32 supported_flags, phy_flags; + + /* The current implementation only supports a subset of the specified + * flags. Also need to check mutual exclusiveness of sec flags. 
+ */ + supported_flags = get_supported_adv_flags(hdev); + phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK; + if (adv_flags & ~supported_flags || + ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags))))) + return false; + + return true; +} + +static bool adv_busy(struct hci_dev *hdev) +{ + return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) || + pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) || + pending_find(MGMT_OP_SET_LE, hdev) || + pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) || + pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev)); +} + static void add_advertising_complete(struct hci_dev *hdev, u8 status, u16 opcode) { @@ -7391,6 +7437,8 @@ static void add_advertising_complete(struct hci_dev *hdev, u8 status, hci_dev_lock(hdev); cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev); + if (!cmd) + cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev); list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { if (!adv_instance->pending) @@ -7435,7 +7483,6 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_add_advertising *cp = data; struct mgmt_rp_add_advertising rp; u32 flags; - u32 supported_flags, phy_flags; u8 status; u16 timeout, duration; unsigned int prev_instance_cnt = hdev->adv_instance_cnt; @@ -7471,13 +7518,7 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev, timeout = __le16_to_cpu(cp->timeout); duration = __le16_to_cpu(cp->duration); - /* The current implementation only supports a subset of the specified - * flags. Also need to check mutual exclusiveness of sec flags. - */ - supported_flags = get_supported_adv_flags(hdev); - phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK; - if (flags & ~supported_flags || - ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags))))) + if (!requested_adv_flags_are_valid(hdev, flags)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); @@ -7489,9 +7530,7 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev, goto unlock; } - if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) || - pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) || - pending_find(MGMT_OP_SET_LE, hdev)) { + if (adv_busy(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_BUSY); goto unlock; @@ -7509,7 +7548,10 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev, cp->adv_data_len, cp->data, cp->scan_rsp_len, cp->data + cp->adv_data_len, - timeout, duration); + timeout, duration, + HCI_ADV_TX_POWER_NO_PREFERENCE, + hdev->le_adv_min_interval, + hdev->le_adv_max_interval); if (err < 0) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_FAILED); @@ -7582,6 +7624,338 @@ unlock: return err; } +static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status, + u16 opcode) +{ + struct mgmt_pending_cmd *cmd; + struct mgmt_cp_add_ext_adv_params *cp; + struct mgmt_rp_add_ext_adv_params rp; + struct adv_info *adv_instance; + u32 flags; + + BT_DBG("%s", hdev->name); + + hci_dev_lock(hdev); + + cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev); + if (!cmd) + goto unlock; + + cp = cmd->param; + adv_instance = hci_find_adv_instance(hdev, cp->instance); + if (!adv_instance) + goto unlock; + + rp.instance = cp->instance; + rp.tx_power = adv_instance->tx_power; + + /* While we're at it, inform userspace of the available space for this + * advertisement, given the flags that will be used. 
+ */ + flags = __le32_to_cpu(cp->flags); + rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true); + rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false); + + if (status) { + /* If this advertisement was previously advertising and we + * failed to update it, we signal that it has been removed and + * delete its structure + */ + if (!adv_instance->pending) + mgmt_advertising_removed(cmd->sk, hdev, cp->instance); + + hci_remove_adv_instance(hdev, cp->instance); + + mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(status)); + + } else { + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(status), &rp, sizeof(rp)); + } + +unlock: + if (cmd) + mgmt_pending_remove(cmd); + + hci_dev_unlock(hdev); +} + +static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev, + void *data, u16 data_len) +{ + struct mgmt_cp_add_ext_adv_params *cp = data; + struct mgmt_rp_add_ext_adv_params rp; + struct mgmt_pending_cmd *cmd = NULL; + struct adv_info *adv_instance; + struct hci_request req; + u32 flags, min_interval, max_interval; + u16 timeout, duration; + u8 status; + s8 tx_power; + int err; + + BT_DBG("%s", hdev->name); + + status = mgmt_le_support(hdev); + if (status) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, + status); + + if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, + MGMT_STATUS_INVALID_PARAMS); + + /* The purpose of breaking add_advertising into two separate MGMT calls + * for params and data is to allow more parameters to be added to this + * structure in the future. For this reason, we verify that we have the + * bare minimum structure we know of when the interface was defined. Any + * extra parameters we don't know about will be ignored in this request. + */ + if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, + MGMT_STATUS_INVALID_PARAMS); + + flags = __le32_to_cpu(cp->flags); + + if (!requested_adv_flags_are_valid(hdev, flags)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + + /* In new interface, we require that we are powered to register */ + if (!hdev_is_powered(hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, + MGMT_STATUS_REJECTED); + goto unlock; + } + + if (adv_busy(hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, + MGMT_STATUS_BUSY); + goto unlock; + } + + /* Parse defined parameters from request, use defaults otherwise */ + timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ? + __le16_to_cpu(cp->timeout) : 0; + + duration = (flags & MGMT_ADV_PARAM_DURATION) ? + __le16_to_cpu(cp->duration) : + hdev->def_multi_adv_rotation_duration; + + min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ? + __le32_to_cpu(cp->min_interval) : + hdev->le_adv_min_interval; + + max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ? + __le32_to_cpu(cp->max_interval) : + hdev->le_adv_max_interval; + + tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ? 
+ cp->tx_power : + HCI_ADV_TX_POWER_NO_PREFERENCE; + + /* Create advertising instance with no advertising or response data */ + err = hci_add_adv_instance(hdev, cp->instance, flags, + 0, NULL, 0, NULL, timeout, duration, + tx_power, min_interval, max_interval); + + if (err < 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, + MGMT_STATUS_FAILED); + goto unlock; + } + + hdev->cur_adv_instance = cp->instance; + /* Submit request for advertising params if ext adv available */ + if (ext_adv_capable(hdev)) { + hci_req_init(&req, hdev); + adv_instance = hci_find_adv_instance(hdev, cp->instance); + + /* Updating parameters of an active instance will return a + * Command Disallowed error, so we must first disable the + * instance if it is active. + */ + if (!adv_instance->pending) + __hci_req_disable_ext_adv_instance(&req, cp->instance); + + __hci_req_setup_ext_adv_instance(&req, cp->instance); + + err = hci_req_run(&req, add_ext_adv_params_complete); + + if (!err) + cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, + hdev, data, data_len); + if (!cmd) { + err = -ENOMEM; + hci_remove_adv_instance(hdev, cp->instance); + goto unlock; + } + + } else { + rp.instance = cp->instance; + rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE; + rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true); + rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false); + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_ADD_EXT_ADV_PARAMS, + MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); + } + +unlock: + hci_dev_unlock(hdev); + + return err; +} + +static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len) +{ + struct mgmt_cp_add_ext_adv_data *cp = data; + struct mgmt_rp_add_ext_adv_data rp; + u8 schedule_instance = 0; + struct adv_info *next_instance; + struct adv_info *adv_instance; + int err = 0; + struct mgmt_pending_cmd *cmd; + struct hci_request req; + + BT_DBG("%s", hdev->name); + + hci_dev_lock(hdev); + + adv_instance = hci_find_adv_instance(hdev, cp->instance); + + if (!adv_instance) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, + MGMT_STATUS_INVALID_PARAMS); + goto unlock; + } + + /* In new interface, we require that we are powered to register */ + if (!hdev_is_powered(hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, + MGMT_STATUS_REJECTED); + goto clear_new_instance; + } + + if (adv_busy(hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, + MGMT_STATUS_BUSY); + goto clear_new_instance; + } + + /* Validate new data */ + if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data, + cp->adv_data_len, true) || + !tlv_data_is_valid(hdev, adv_instance->flags, cp->data + + cp->adv_data_len, cp->scan_rsp_len, false)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, + MGMT_STATUS_INVALID_PARAMS); + goto clear_new_instance; + } + + /* Set the data in the advertising instance */ + hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len, + cp->data, cp->scan_rsp_len, + cp->data + cp->adv_data_len); + + /* We're good to go, update advertising data, parameters, and start + * advertising. 
+ */ + + hci_req_init(&req, hdev); + + hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL); + + if (ext_adv_capable(hdev)) { + __hci_req_update_adv_data(&req, cp->instance); + __hci_req_update_scan_rsp_data(&req, cp->instance); + __hci_req_enable_ext_advertising(&req, cp->instance); + + } else { + /* If using software rotation, determine next instance to use */ + + if (hdev->cur_adv_instance == cp->instance) { + /* If the currently advertised instance is being changed + * then cancel the current advertising and schedule the + * next instance. If there is only one instance then the + * overridden advertising data will be visible right + * away + */ + cancel_adv_timeout(hdev); + + next_instance = hci_get_next_instance(hdev, + cp->instance); + if (next_instance) + schedule_instance = next_instance->instance; + } else if (!hdev->adv_instance_timeout) { + /* Immediately advertise the new instance if no other + * instance is currently being advertised. + */ + schedule_instance = cp->instance; + } + + /* If the HCI_ADVERTISING flag is set or there is no instance to + * be advertised then we have no HCI communication to make. + * Simply return. + */ + if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || + !schedule_instance) { + if (adv_instance->pending) { + mgmt_advertising_added(sk, hdev, cp->instance); + adv_instance->pending = false; + } + rp.instance = cp->instance; + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_ADD_EXT_ADV_DATA, + MGMT_STATUS_SUCCESS, &rp, + sizeof(rp)); + goto unlock; + } + + err = __hci_req_schedule_adv_instance(&req, schedule_instance, + true); + } + + cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data, + data_len); + if (!cmd) { + err = -ENOMEM; + goto clear_new_instance; + } + + if (!err) + err = hci_req_run(&req, add_advertising_complete); + + if (err < 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, + MGMT_STATUS_FAILED); + mgmt_pending_remove(cmd); + goto clear_new_instance; + } + + /* We were successful in updating data, so trigger advertising_added + * event if this is an instance that wasn't previously advertising. 
If + * a failure occurs in the requests we initiated, we will remove the + * instance again in add_advertising_complete + */ + if (adv_instance->pending) + mgmt_advertising_added(sk, hdev, cp->instance); + + goto unlock; + +clear_new_instance: + hci_remove_adv_instance(hdev, cp->instance); + +unlock: + hci_dev_unlock(hdev); + + return err; +} + static void remove_advertising_complete(struct hci_dev *hdev, u8 status, u16 opcode) { @@ -7834,7 +8208,7 @@ static const struct hci_mgmt_handler mgmt_handlers[] = { { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE, HCI_MGMT_VAR_LEN }, { set_wideband_speech, MGMT_SETTING_SIZE }, - { read_security_info, MGMT_READ_SECURITY_INFO_SIZE, + { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE, HCI_MGMT_UNTRUSTED }, { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE, HCI_MGMT_UNTRUSTED | @@ -7856,6 +8230,10 @@ static const struct hci_mgmt_handler mgmt_handlers[] = { { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE, HCI_MGMT_VAR_LEN }, { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE }, + { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE, + HCI_MGMT_VAR_LEN }, + { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE, + HCI_MGMT_VAR_LEN }, }; void mgmt_index_added(struct hci_dev *hdev) diff --git a/net/bluetooth/mgmt_config.c b/net/bluetooth/mgmt_config.c index b30b571f8caf..1deb0ca7a929 100644 --- a/net/bluetooth/mgmt_config.c +++ b/net/bluetooth/mgmt_config.c @@ -11,74 +11,119 @@ #include "mgmt_util.h" #include "mgmt_config.h" -#define HDEV_PARAM_U16(_param_code_, _param_name_) \ -{ \ - { cpu_to_le16(_param_code_), sizeof(__u16) }, \ - { cpu_to_le16(hdev->_param_name_) } \ -} +#define HDEV_PARAM_U16(_param_name_) \ + struct {\ + struct mgmt_tlv entry; \ + __le16 value; \ + } __packed _param_name_ -#define HDEV_PARAM_U16_JIFFIES_TO_MSECS(_param_code_, _param_name_) \ -{ \ - { cpu_to_le16(_param_code_), sizeof(__u16) }, \ - { cpu_to_le16(jiffies_to_msecs(hdev->_param_name_)) } \ -} +#define HDEV_PARAM_U8(_param_name_) \ + struct {\ + struct mgmt_tlv entry; \ + __u8 value; \ + } __packed _param_name_ + +#define TLV_SET_U16(_param_code_, _param_name_) \ + { \ + { cpu_to_le16(_param_code_), sizeof(__u16) }, \ + cpu_to_le16(hdev->_param_name_) \ + } + +#define TLV_SET_U8(_param_code_, _param_name_) \ + { \ + { cpu_to_le16(_param_code_), sizeof(__u8) }, \ + hdev->_param_name_ \ + } + +#define TLV_SET_U16_JIFFIES_TO_MSECS(_param_code_, _param_name_) \ + { \ + { cpu_to_le16(_param_code_), sizeof(__u16) }, \ + cpu_to_le16(jiffies_to_msecs(hdev->_param_name_)) \ + } int read_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { - struct { - struct mgmt_tlv entry; - union { - /* This is a simplification for now since all values - * are 16 bits. In the future, this code may need - * refactoring to account for variable length values - * and properly calculate the required buffer size. 
- */ - __le16 value; - }; - } __packed params[] = { + int ret; + struct mgmt_rp_read_def_system_config { /* Please see mgmt-api.txt for documentation of these values */ - HDEV_PARAM_U16(0x0000, def_page_scan_type), - HDEV_PARAM_U16(0x0001, def_page_scan_int), - HDEV_PARAM_U16(0x0002, def_page_scan_window), - HDEV_PARAM_U16(0x0003, def_inq_scan_type), - HDEV_PARAM_U16(0x0004, def_inq_scan_int), - HDEV_PARAM_U16(0x0005, def_inq_scan_window), - HDEV_PARAM_U16(0x0006, def_br_lsto), - HDEV_PARAM_U16(0x0007, def_page_timeout), - HDEV_PARAM_U16(0x0008, sniff_min_interval), - HDEV_PARAM_U16(0x0009, sniff_max_interval), - HDEV_PARAM_U16(0x000a, le_adv_min_interval), - HDEV_PARAM_U16(0x000b, le_adv_max_interval), - HDEV_PARAM_U16(0x000c, def_multi_adv_rotation_duration), - HDEV_PARAM_U16(0x000d, le_scan_interval), - HDEV_PARAM_U16(0x000e, le_scan_window), - HDEV_PARAM_U16(0x000f, le_scan_int_suspend), - HDEV_PARAM_U16(0x0010, le_scan_window_suspend), - HDEV_PARAM_U16(0x0011, le_scan_int_discovery), - HDEV_PARAM_U16(0x0012, le_scan_window_discovery), - HDEV_PARAM_U16(0x0013, le_scan_int_adv_monitor), - HDEV_PARAM_U16(0x0014, le_scan_window_adv_monitor), - HDEV_PARAM_U16(0x0015, le_scan_int_connect), - HDEV_PARAM_U16(0x0016, le_scan_window_connect), - HDEV_PARAM_U16(0x0017, le_conn_min_interval), - HDEV_PARAM_U16(0x0018, le_conn_max_interval), - HDEV_PARAM_U16(0x0019, le_conn_latency), - HDEV_PARAM_U16(0x001a, le_supv_timeout), - HDEV_PARAM_U16_JIFFIES_TO_MSECS(0x001b, - def_le_autoconnect_timeout), + HDEV_PARAM_U16(def_page_scan_type); + HDEV_PARAM_U16(def_page_scan_int); + HDEV_PARAM_U16(def_page_scan_window); + HDEV_PARAM_U16(def_inq_scan_type); + HDEV_PARAM_U16(def_inq_scan_int); + HDEV_PARAM_U16(def_inq_scan_window); + HDEV_PARAM_U16(def_br_lsto); + HDEV_PARAM_U16(def_page_timeout); + HDEV_PARAM_U16(sniff_min_interval); + HDEV_PARAM_U16(sniff_max_interval); + HDEV_PARAM_U16(le_adv_min_interval); + HDEV_PARAM_U16(le_adv_max_interval); + HDEV_PARAM_U16(def_multi_adv_rotation_duration); + HDEV_PARAM_U16(le_scan_interval); + HDEV_PARAM_U16(le_scan_window); + HDEV_PARAM_U16(le_scan_int_suspend); + HDEV_PARAM_U16(le_scan_window_suspend); + HDEV_PARAM_U16(le_scan_int_discovery); + HDEV_PARAM_U16(le_scan_window_discovery); + HDEV_PARAM_U16(le_scan_int_adv_monitor); + HDEV_PARAM_U16(le_scan_window_adv_monitor); + HDEV_PARAM_U16(le_scan_int_connect); + HDEV_PARAM_U16(le_scan_window_connect); + HDEV_PARAM_U16(le_conn_min_interval); + HDEV_PARAM_U16(le_conn_max_interval); + HDEV_PARAM_U16(le_conn_latency); + HDEV_PARAM_U16(le_supv_timeout); + HDEV_PARAM_U16(def_le_autoconnect_timeout); + HDEV_PARAM_U16(advmon_allowlist_duration); + HDEV_PARAM_U16(advmon_no_filter_duration); + HDEV_PARAM_U8(enable_advmon_interleave_scan); + } __packed rp = { + TLV_SET_U16(0x0000, def_page_scan_type), + TLV_SET_U16(0x0001, def_page_scan_int), + TLV_SET_U16(0x0002, def_page_scan_window), + TLV_SET_U16(0x0003, def_inq_scan_type), + TLV_SET_U16(0x0004, def_inq_scan_int), + TLV_SET_U16(0x0005, def_inq_scan_window), + TLV_SET_U16(0x0006, def_br_lsto), + TLV_SET_U16(0x0007, def_page_timeout), + TLV_SET_U16(0x0008, sniff_min_interval), + TLV_SET_U16(0x0009, sniff_max_interval), + TLV_SET_U16(0x000a, le_adv_min_interval), + TLV_SET_U16(0x000b, le_adv_max_interval), + TLV_SET_U16(0x000c, def_multi_adv_rotation_duration), + TLV_SET_U16(0x000d, le_scan_interval), + TLV_SET_U16(0x000e, le_scan_window), + TLV_SET_U16(0x000f, le_scan_int_suspend), + TLV_SET_U16(0x0010, le_scan_window_suspend), + TLV_SET_U16(0x0011, le_scan_int_discovery), + 
TLV_SET_U16(0x0012, le_scan_window_discovery), + TLV_SET_U16(0x0013, le_scan_int_adv_monitor), + TLV_SET_U16(0x0014, le_scan_window_adv_monitor), + TLV_SET_U16(0x0015, le_scan_int_connect), + TLV_SET_U16(0x0016, le_scan_window_connect), + TLV_SET_U16(0x0017, le_conn_min_interval), + TLV_SET_U16(0x0018, le_conn_max_interval), + TLV_SET_U16(0x0019, le_conn_latency), + TLV_SET_U16(0x001a, le_supv_timeout), + TLV_SET_U16_JIFFIES_TO_MSECS(0x001b, + def_le_autoconnect_timeout), + TLV_SET_U16(0x001d, advmon_allowlist_duration), + TLV_SET_U16(0x001e, advmon_no_filter_duration), + TLV_SET_U8(0x001f, enable_advmon_interleave_scan), }; - struct mgmt_rp_read_def_system_config *rp = (void *)params; bt_dev_dbg(hdev, "sock %p", sk); - return mgmt_cmd_complete(sk, hdev->id, - MGMT_OP_READ_DEF_SYSTEM_CONFIG, - 0, rp, sizeof(params)); + ret = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_READ_DEF_SYSTEM_CONFIG, + 0, &rp, sizeof(rp)); + return ret; } #define TO_TLV(x) ((struct mgmt_tlv *)(x)) #define TLV_GET_LE16(tlv) le16_to_cpu(*((__le16 *)(TO_TLV(tlv)->value))) +#define TLV_GET_U8(tlv) (*((__u8 *)(TO_TLV(tlv)->value))) int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) @@ -95,6 +140,7 @@ int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, /* First pass to validate the tlv */ while (buffer_left >= sizeof(struct mgmt_tlv)) { const u8 len = TO_TLV(buffer)->length; + size_t exp_type_len; const u16 exp_len = sizeof(struct mgmt_tlv) + len; const u16 type = le16_to_cpu(TO_TLV(buffer)->type); @@ -138,20 +184,28 @@ int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, case 0x0019: case 0x001a: case 0x001b: - if (len != sizeof(u16)) { - bt_dev_warn(hdev, "invalid length %d, exp %zu for type %d", - len, sizeof(u16), type); - - return mgmt_cmd_status(sk, hdev->id, - MGMT_OP_SET_DEF_SYSTEM_CONFIG, - MGMT_STATUS_INVALID_PARAMS); - } + case 0x001d: + case 0x001e: + exp_type_len = sizeof(u16); + break; + case 0x001f: + exp_type_len = sizeof(u8); break; default: + exp_type_len = 0; bt_dev_warn(hdev, "unsupported parameter %u", type); break; } + if (exp_type_len && len != exp_type_len) { + bt_dev_warn(hdev, "invalid length %d, exp %zu for type %d", + len, exp_type_len, type); + + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_DEF_SYSTEM_CONFIG, + MGMT_STATUS_INVALID_PARAMS); + } + buffer_left -= exp_len; buffer += exp_len; } @@ -251,6 +305,15 @@ int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, hdev->def_le_autoconnect_timeout = msecs_to_jiffies(TLV_GET_LE16(buffer)); break; + case 0x0001d: + hdev->advmon_allowlist_duration = TLV_GET_LE16(buffer); + break; + case 0x0001e: + hdev->advmon_no_filter_duration = TLV_GET_LE16(buffer); + break; + case 0x0001f: + hdev->enable_advmon_interleave_scan = TLV_GET_U8(buffer); + break; default: bt_dev_warn(hdev, "unsupported parameter %u", type); break; diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 79ffcdef0b7a..22a110f37abc 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -1003,6 +1003,11 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, case BT_SNDMTU: case BT_RCVMTU: + if (sk->sk_state != BT_CONNECTED) { + err = -ENOTCONN; + break; + } + if (put_user(sco_pi(sk)->conn->mtu, (u32 __user *)optval)) err = -EFAULT; break; diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index bf4bef13d935..c659c464f7ca 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -3353,31 +3353,8 @@ static void smp_del_chan(struct 
l2cap_chan *chan) l2cap_chan_put(chan); } -static ssize_t force_bredr_smp_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) +int smp_force_bredr(struct hci_dev *hdev, bool enable) { - struct hci_dev *hdev = file->private_data; - char buf[3]; - - buf[0] = hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP) ? 'Y': 'N'; - buf[1] = '\n'; - buf[2] = '\0'; - return simple_read_from_buffer(user_buf, count, ppos, buf, 2); -} - -static ssize_t force_bredr_smp_write(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct hci_dev *hdev = file->private_data; - bool enable; - int err; - - err = kstrtobool_from_user(user_buf, count, &enable); - if (err) - return err; - if (enable == hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP)) return -EALREADY; @@ -3399,16 +3376,9 @@ static ssize_t force_bredr_smp_write(struct file *file, hci_dev_change_flag(hdev, HCI_FORCE_BREDR_SMP); - return count; + return 0; } -static const struct file_operations force_bredr_smp_fops = { - .open = simple_open, - .read = force_bredr_smp_read, - .write = force_bredr_smp_write, - .llseek = default_llseek, -}; - int smp_register(struct hci_dev *hdev) { struct l2cap_chan *chan; @@ -3433,17 +3403,7 @@ int smp_register(struct hci_dev *hdev) hdev->smp_data = chan; - /* If the controller does not support BR/EDR Secure Connections - * feature, then the BR/EDR SMP channel shall not be present. - * - * To test this with Bluetooth 4.0 controllers, create a debugfs - * switch that allows forcing BR/EDR SMP support and accepting - * cross-transport pairing on non-AES encrypted connections. - */ if (!lmp_sc_capable(hdev)) { - debugfs_create_file("force_bredr_smp", 0644, hdev->debugfs, - hdev, &force_bredr_smp_fops); - /* Flag can be already set here (due to power toggle) */ if (!hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP)) return 0; diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h index 121edadd5f8d..fc35a8bf358e 100644 --- a/net/bluetooth/smp.h +++ b/net/bluetooth/smp.h @@ -193,6 +193,8 @@ bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16], int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa); int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16]); +int smp_force_bredr(struct hci_dev *hdev, bool enable); + int smp_register(struct hci_dev *hdev); void smp_unregister(struct hci_dev *hdev); diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index adb674a860d3..3f2f06b4dd27 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -173,6 +173,9 @@ static int br_dev_open(struct net_device *dev) br_stp_enable_bridge(br); br_multicast_open(br); + if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) + br_multicast_join_snoopers(br); + return 0; } @@ -193,6 +196,9 @@ static int br_dev_stop(struct net_device *dev) br_stp_disable_bridge(br); br_multicast_stop(br); + if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) + br_multicast_leave_snoopers(br); + netif_stop_queue(dev); return 0; diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 484820c223a3..257ac4e25f6d 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -3291,7 +3291,7 @@ static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br) } #endif -static void br_multicast_join_snoopers(struct net_bridge *br) +void br_multicast_join_snoopers(struct net_bridge *br) { br_ip4_multicast_join_snoopers(br); br_ip6_multicast_join_snoopers(br); @@ -3322,7 +3322,7 @@ static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br) } 
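/*
 * Editor's note: the br_multicast_toggle hunk below defers the snooper
 * join/leave calls until after multicast_lock is dropped, because joining can
 * feed a report back through br_multicast_rcv into br_multicast_add_group,
 * which takes the same lock. The shape of that fix, sketched with a pthread
 * mutex standing in for the spinlock and empty stubs for the snooper calls:
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t mcast_lock = PTHREAD_MUTEX_INITIALIZER;
static bool mcast_enabled;

static void join_snoopers(void) { /* may re-enter the locked RX path */ }
static void leave_snoopers(void) { /* safe either way */ }

static void toggle_multicast(bool val)
{
	bool change_snoopers = false;

	pthread_mutex_lock(&mcast_lock);
	if (mcast_enabled != val) {
		mcast_enabled = val;
		change_snoopers = true;   /* decide under the lock ... */
	}
	pthread_mutex_unlock(&mcast_lock);

	if (!change_snoopers)
		return;
	if (val)                          /* ... act after dropping it */
		join_snoopers();
	else
		leave_snoopers();
}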
#endif -static void br_multicast_leave_snoopers(struct net_bridge *br) +void br_multicast_leave_snoopers(struct net_bridge *br) { br_ip4_multicast_leave_snoopers(br); br_ip6_multicast_leave_snoopers(br); @@ -3341,9 +3341,6 @@ static void __br_multicast_open(struct net_bridge *br, void br_multicast_open(struct net_bridge *br) { - if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) - br_multicast_join_snoopers(br); - __br_multicast_open(br, &br->ip4_own_query); #if IS_ENABLED(CONFIG_IPV6) __br_multicast_open(br, &br->ip6_own_query); @@ -3359,9 +3356,6 @@ void br_multicast_stop(struct net_bridge *br) del_timer_sync(&br->ip6_other_query.timer); del_timer_sync(&br->ip6_own_query.timer); #endif - - if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) - br_multicast_leave_snoopers(br); } void br_multicast_dev_del(struct net_bridge *br) @@ -3492,6 +3486,7 @@ static void br_multicast_start_querier(struct net_bridge *br, int br_multicast_toggle(struct net_bridge *br, unsigned long val) { struct net_bridge_port *port; + bool change_snoopers = false; spin_lock_bh(&br->multicast_lock); if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val) @@ -3500,7 +3495,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val) br_mc_disabled_update(br->dev, val); br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val); if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) { - br_multicast_leave_snoopers(br); + change_snoopers = true; goto unlock; } @@ -3511,9 +3506,30 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val) list_for_each_entry(port, &br->port_list, list) __br_multicast_enable_port(port); + change_snoopers = true; + unlock: spin_unlock_bh(&br->multicast_lock); + /* br_multicast_join_snoopers has the potential to cause + * an MLD Report/Leave to be delivered to br_multicast_rcv, + * which would in turn call br_multicast_add_group, which would + * attempt to acquire multicast_lock. This function should be + * called after the lock has been released to avoid deadlocks on + * multicast_lock. + * + * br_multicast_leave_snoopers does not have the problem since + * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and + * returns without calling br_multicast_ipv4/6_rcv if it's not + * enabled. Moved both functions out just for symmetry. 
+ */ + if (change_snoopers) { + if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) + br_multicast_join_snoopers(br); + else + br_multicast_leave_snoopers(br); + } + return 0; } diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index d538ccec0acd..d62c6e1af64a 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -797,6 +797,8 @@ void br_multicast_del_port(struct net_bridge_port *port); void br_multicast_enable_port(struct net_bridge_port *port); void br_multicast_disable_port(struct net_bridge_port *port); void br_multicast_init(struct net_bridge *br); +void br_multicast_join_snoopers(struct net_bridge *br); +void br_multicast_leave_snoopers(struct net_bridge *br); void br_multicast_open(struct net_bridge *br); void br_multicast_stop(struct net_bridge *br); void br_multicast_dev_del(struct net_bridge *br); @@ -980,6 +982,14 @@ static inline void br_multicast_init(struct net_bridge *br) { } +static inline void br_multicast_join_snoopers(struct net_bridge *br) +{ +} + +static inline void br_multicast_leave_snoopers(struct net_bridge *br) +{ +} + static inline void br_multicast_open(struct net_bridge *br) { } diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index d07008678d32..701cad646b20 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -266,8 +266,10 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags, } masterv = br_vlan_get_master(br, v->vid, extack); - if (!masterv) + if (!masterv) { + err = -ENOMEM; goto out_filt; + } v->brvlan = masterv; if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) { v->stats = diff --git a/net/can/isotp.c b/net/can/isotp.c index d78ab13bd8be..7839c3b9e5be 100644 --- a/net/can/isotp.c +++ b/net/can/isotp.c @@ -865,6 +865,14 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) if (!size || size > MAX_MSG_LENGTH) return -EINVAL; + /* take care of a potential SF_DL ESC offset for TX_DL > 8 */ + off = (so->tx.ll_dl > CAN_MAX_DLEN) ? 1 : 0; + + /* does the given data fit into a single frame for SF_BROADCAST? */ + if ((so->opt.flags & CAN_ISOTP_SF_BROADCAST) && + (size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off)) + return -EINVAL; + err = memcpy_from_msg(so->tx.buf, msg, size); if (err < 0) return err; @@ -891,9 +899,6 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) cf = (struct canfd_frame *)skb->data; skb_put(skb, so->ll.mtu); - /* take care of a potential SF_DL ESC offset for TX_DL > 8 */ - off = (so->tx.ll_dl > CAN_MAX_DLEN) ? 1 : 0; - /* check for single frame transmission depending on TX_DL */ if (size <= so->tx.ll_dl - SF_PCI_SZ4 - ae - off) { /* The message size generally fits into a SingleFrame - good. 
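/*
 * Editor's note: the isotp_sendmsg hunk above moves the single-frame fit
 * check ahead of the copy for CAN_ISOTP_SF_BROADCAST sends. Restating the
 * arithmetic; this assumes SF_PCI_SZ4 is 1 byte and CAN_MAX_DLEN is 8 as in
 * the CAN headers, and 'ae' is 1 when extended addressing consumes a byte.
 */
#include <stdbool.h>
#include <stddef.h>

#define CAN_MAX_DLEN 8   /* classic CAN payload limit */
#define SF_PCI_SZ4   1   /* SingleFrame PCI with 4-bit length field */

static bool fits_single_frame(size_t size, unsigned int tx_dl, unsigned int ae)
{
	/* TX_DL > 8 needs an SF_DL escape byte after the PCI nibble */
	unsigned int off = (tx_dl > CAN_MAX_DLEN) ? 1 : 0;

	return size <= tx_dl - SF_PCI_SZ4 - ae - off;
}

/* Classic CAN, normal addressing: fits_single_frame(7, 8, 0) is true and
 * fits_single_frame(8, 8, 0) is false, so an 8-byte broadcast send now fails
 * early with -EINVAL instead of after the user data has been copied.
 */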
@@ -1016,7 +1021,7 @@ static int isotp_release(struct socket *sock) hrtimer_cancel(&so->rxtimer); /* remove current filters & unregister */ - if (so->bound) { + if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST))) { if (so->ifindex) { struct net_device *dev; @@ -1052,15 +1057,25 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) struct net_device *dev; int err = 0; int notify_enetdown = 0; + int do_rx_reg = 1; if (len < CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.tp)) return -EINVAL; - if (addr->can_addr.tp.rx_id == addr->can_addr.tp.tx_id) - return -EADDRNOTAVAIL; + /* do not register frame reception for functional addressing */ + if (so->opt.flags & CAN_ISOTP_SF_BROADCAST) + do_rx_reg = 0; + + /* do not validate rx address for functional addressing */ + if (do_rx_reg) { + if (addr->can_addr.tp.rx_id == addr->can_addr.tp.tx_id) + return -EADDRNOTAVAIL; - if ((addr->can_addr.tp.rx_id | addr->can_addr.tp.tx_id) & - (CAN_ERR_FLAG | CAN_RTR_FLAG)) + if (addr->can_addr.tp.rx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG)) + return -EADDRNOTAVAIL; + } + + if (addr->can_addr.tp.tx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG)) return -EADDRNOTAVAIL; if (!addr->can_ifindex) @@ -1093,13 +1108,14 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) ifindex = dev->ifindex; - can_rx_register(net, dev, addr->can_addr.tp.rx_id, - SINGLE_MASK(addr->can_addr.tp.rx_id), isotp_rcv, sk, - "isotp", sk); + if (do_rx_reg) + can_rx_register(net, dev, addr->can_addr.tp.rx_id, + SINGLE_MASK(addr->can_addr.tp.rx_id), + isotp_rcv, sk, "isotp", sk); dev_put(dev); - if (so->bound) { + if (so->bound && do_rx_reg) { /* unregister old filter */ if (so->ifindex) { dev = dev_get_by_index(net, so->ifindex); @@ -1157,6 +1173,9 @@ static int isotp_setsockopt(struct socket *sock, int level, int optname, if (level != SOL_CAN_ISOTP) return -EINVAL; + if (so->bound) + return -EISCONN; + switch (optname) { case CAN_ISOTP_OPTS: if (optlen != sizeof(struct can_isotp_options)) @@ -1299,7 +1318,7 @@ static int isotp_notifier(struct notifier_block *nb, unsigned long msg, case NETDEV_UNREGISTER: lock_sock(sk); /* remove current filters & unregister */ - if (so->bound) + if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST))) can_rx_unregister(dev_net(dev), dev, so->rxid, SINGLE_MASK(so->rxid), isotp_rcv, sk); diff --git a/net/core/dev.c b/net/core/dev.c index ce8fea2e2788..bde98cfd166f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8958,6 +8958,17 @@ static struct bpf_prog *dev_xdp_prog(struct net_device *dev, return dev->xdp_state[mode].prog; } +static u8 dev_xdp_prog_count(struct net_device *dev) +{ + u8 count = 0; + int i; + + for (i = 0; i < __MAX_XDP_MODE; i++) + if (dev->xdp_state[i].prog || dev->xdp_state[i].link) + count++; + return count; +} + u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode) { struct bpf_prog *prog = dev_xdp_prog(dev, mode); @@ -9048,6 +9059,7 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack struct bpf_xdp_link *link, struct bpf_prog *new_prog, struct bpf_prog *old_prog, u32 flags) { + unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES); struct bpf_prog *cur_prog; enum bpf_xdp_mode mode; bpf_op_t bpf_op; @@ -9063,11 +9075,17 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment"); return -EINVAL; } - /* just one XDP mode bit should be set, zero defaults to SKB mode */ - if (hweight32(flags & 
XDP_FLAGS_MODES) > 1) { + /* just one XDP mode bit should be set, zero defaults to drv/skb mode */ + if (num_modes > 1) { NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set"); return -EINVAL; } + /* avoid ambiguity if offload + drv/skb mode progs are both loaded */ + if (!num_modes && dev_xdp_prog_count(dev) > 1) { + NL_SET_ERR_MSG(extack, + "More than one program loaded, unset mode is ambiguous"); + return -EINVAL; + } /* old_prog != NULL implies XDP_FLAGS_REPLACE is set */ if (old_prog && !(flags & XDP_FLAGS_REPLACE)) { NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified"); diff --git a/net/core/devlink.c b/net/core/devlink.c index 88c0ac8ed444..ee828e4b1007 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -6993,7 +6993,6 @@ static int devlink_nl_cmd_trap_set_doit(struct sk_buff *skb, struct netlink_ext_ack *extack = info->extack; struct devlink *devlink = info->user_ptr[0]; struct devlink_trap_item *trap_item; - int err; if (list_empty(&devlink->trap_list)) return -EOPNOTSUPP; @@ -7004,11 +7003,7 @@ static int devlink_nl_cmd_trap_set_doit(struct sk_buff *skb, return -ENOENT; } - err = devlink_trap_action_set(devlink, trap_item, info); - if (err) - return err; - - return 0; + return devlink_trap_action_set(devlink, trap_item, info); } static struct devlink_trap_group_item * diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c index d4474c812b64..715b67f6c62f 100644 --- a/net/core/flow_offload.c +++ b/net/core/flow_offload.c @@ -381,10 +381,8 @@ static void __flow_block_indr_cleanup(void (*release)(void *cb_priv), list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) { if (this->release == release && - this->indr.cb_priv == cb_priv) { + this->indr.cb_priv == cb_priv) list_move(&this->indr.list, cleanup_list); - return; - } } } diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c index 7d3438215f32..2f7940bcf715 100644 --- a/net/core/lwt_bpf.c +++ b/net/core/lwt_bpf.c @@ -39,12 +39,11 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt, { int ret; - /* Preempt disable is needed to protect per-cpu redirect_info between - * BPF prog and skb_do_redirect(). The call_rcu in bpf_prog_put() and - * access to maps strictly require a rcu_read_lock() for protection, - * mixing with BH RCU lock doesn't work. + /* Migration disable and BH disable are needed to protect per-cpu + * redirect_info between BPF prog and skb_do_redirect(). 
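
Looking back at the dev_xdp_attach() hunk above: hweight32() is a population count, so both new checks are pure bit accounting over the mode flags. A minimal userspace sketch, reusing the UAPI bit values from include/uapi/linux/if_link.h:

#include <stdio.h>

#define XDP_FLAGS_SKB_MODE      (1U << 1)
#define XDP_FLAGS_DRV_MODE      (1U << 2)
#define XDP_FLAGS_HW_MODE       (1U << 3)
#define XDP_FLAGS_MODES         (XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE | \
                                 XDP_FLAGS_HW_MODE)

/* hweight32() is a popcount; __builtin_popcount stands in here. */
static int check_xdp_flags(unsigned int flags, int progs_loaded)
{
        unsigned int num_modes = __builtin_popcount(flags & XDP_FLAGS_MODES);

        if (num_modes > 1)
                return -1;      /* only one mode flag may be set */
        if (!num_modes && progs_loaded > 1)
                return -2;      /* unset mode: which program is meant? */
        return 0;
}

int main(void)
{
        printf("%d\n", check_xdp_flags(XDP_FLAGS_SKB_MODE | XDP_FLAGS_HW_MODE, 0)); /* -1 */
        printf("%d\n", check_xdp_flags(0, 2));                                      /* -2 */
        printf("%d\n", check_xdp_flags(XDP_FLAGS_DRV_MODE, 2));                     /*  0 */
        return 0;
}

The second rejection is the new one: with an offload program and a drv/skb program both attached, a request that names no mode cannot be resolved to either, so it now fails instead of guessing.
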
*/ - preempt_disable(); + migrate_disable(); + local_bh_disable(); bpf_compute_data_pointers(skb); ret = bpf_prog_run_save_cb(lwt->prog, skb); @@ -78,7 +77,8 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt, break; } - preempt_enable(); + local_bh_enable(); + migrate_enable(); return ret; } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 60917ff4a00b..bb0596c41b3e 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -139,7 +139,7 @@ bool lockdep_rtnl_is_held(void) EXPORT_SYMBOL(lockdep_rtnl_is_held); #endif /* #ifdef CONFIG_PROVE_LOCKING */ -static struct rtnl_link *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1]; +static struct rtnl_link __rcu *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1]; static inline int rtm_msgindex(int msgtype) { @@ -157,7 +157,7 @@ static inline int rtm_msgindex(int msgtype) static struct rtnl_link *rtnl_get_link(int protocol, int msgtype) { - struct rtnl_link **tab; + struct rtnl_link __rcu **tab; if (protocol >= ARRAY_SIZE(rtnl_msg_handlers)) protocol = PF_UNSPEC; @@ -166,7 +166,7 @@ static struct rtnl_link *rtnl_get_link(int protocol, int msgtype) if (!tab) tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]); - return tab[msgtype]; + return rcu_dereference_rtnl(tab[msgtype]); } static int rtnl_register_internal(struct module *owner, @@ -183,7 +183,7 @@ static int rtnl_register_internal(struct module *owner, msgindex = rtm_msgindex(msgtype); rtnl_lock(); - tab = rtnl_msg_handlers[protocol]; + tab = rtnl_dereference(rtnl_msg_handlers[protocol]); if (tab == NULL) { tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL); if (!tab) @@ -286,7 +286,8 @@ void rtnl_register(int protocol, int msgtype, */ int rtnl_unregister(int protocol, int msgtype) { - struct rtnl_link **tab, *link; + struct rtnl_link __rcu **tab; + struct rtnl_link *link; int msgindex; BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); @@ -299,7 +300,7 @@ int rtnl_unregister(int protocol, int msgtype) return -ENOENT; } - link = tab[msgindex]; + link = rtnl_dereference(tab[msgindex]); rcu_assign_pointer(tab[msgindex], NULL); rtnl_unlock(); @@ -318,20 +319,21 @@ EXPORT_SYMBOL_GPL(rtnl_unregister); */ void rtnl_unregister_all(int protocol) { - struct rtnl_link **tab, *link; + struct rtnl_link __rcu **tab; + struct rtnl_link *link; int msgindex; BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); rtnl_lock(); - tab = rtnl_msg_handlers[protocol]; + tab = rtnl_dereference(rtnl_msg_handlers[protocol]); if (!tab) { rtnl_unlock(); return; } RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL); for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) { - link = tab[msgindex]; + link = rtnl_dereference(tab[msgindex]); if (!link) continue; @@ -3754,7 +3756,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) s_idx = 1; for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) { - struct rtnl_link **tab; + struct rtnl_link __rcu **tab; struct rtnl_link *link; rtnl_dumpit_func dumpit; @@ -3768,7 +3770,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) if (!tab) continue; - link = tab[type]; + link = rcu_dereference_rtnl(tab[type]); if (!link) continue; diff --git a/net/core/xdp.c b/net/core/xdp.c index 17ffd33c6b18..3a8c9ab4ecbe 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -336,11 +336,10 @@ EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model); * scenarios (e.g. queue full), it is possible to return the xdp_frame * while still leveraging this protection. The @napi_direct boolean * is used for those calls sites. 
Thus, allowing for faster recycling - * of xdp_frames/pages in those cases. This path is never used by the - * MEM_TYPE_XSK_BUFF_POOL memory type, so it's explicitly not part of - * the switch-statement. + * of xdp_frames/pages in those cases. */ -static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct) +static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct, + struct xdp_buff *xdp) { struct xdp_mem_allocator *xa; struct page *page; @@ -362,6 +361,10 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct) page = virt_to_page(data); /* Assumes order0 page*/ put_page(page); break; + case MEM_TYPE_XSK_BUFF_POOL: + /* NB! Only valid from an xdp_buff! */ + xsk_buff_free(xdp); + break; default: /* Not possible, checked in xdp_rxq_info_reg_mem_model() */ WARN(1, "Incorrect XDP memory type (%d) usage", mem->type); @@ -371,13 +374,13 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct) void xdp_return_frame(struct xdp_frame *xdpf) { - __xdp_return(xdpf->data, &xdpf->mem, false); + __xdp_return(xdpf->data, &xdpf->mem, false, NULL); } EXPORT_SYMBOL_GPL(xdp_return_frame); void xdp_return_frame_rx_napi(struct xdp_frame *xdpf) { - __xdp_return(xdpf->data, &xdpf->mem, true); + __xdp_return(xdpf->data, &xdpf->mem, true, NULL); } EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi); @@ -412,7 +415,7 @@ void xdp_return_frame_bulk(struct xdp_frame *xdpf, struct xdp_mem_allocator *xa; if (mem->type != MEM_TYPE_PAGE_POOL) { - __xdp_return(xdpf->data, &xdpf->mem, false); + __xdp_return(xdpf->data, &xdpf->mem, false, NULL); return; } @@ -437,7 +440,7 @@ EXPORT_SYMBOL_GPL(xdp_return_frame_bulk); void xdp_return_buff(struct xdp_buff *xdp) { - __xdp_return(xdp->data, &xdp->rxq->mem, true); + __xdp_return(xdp->data, &xdp->rxq->mem, true, xdp); } /* Only called for MEM_TYPE_PAGE_POOL see xdp.h */ @@ -455,18 +458,6 @@ void __xdp_release_frame(void *data, struct xdp_mem_info *mem) } EXPORT_SYMBOL_GPL(__xdp_release_frame); -bool xdp_attachment_flags_ok(struct xdp_attachment_info *info, - struct netdev_bpf *bpf) -{ - if (info->prog && (bpf->flags ^ info->flags) & XDP_FLAGS_MODES) { - NL_SET_ERR_MSG(bpf->extack, - "program loaded with different flags"); - return false; - } - return true; -} -EXPORT_SYMBOL_GPL(xdp_attachment_flags_ok); - void xdp_attachment_setup(struct xdp_attachment_info *info, struct netdev_bpf *bpf) { diff --git a/net/dsa/master.c b/net/dsa/master.c index c91de041a91d..5a0f6fec4271 100644 --- a/net/dsa/master.c +++ b/net/dsa/master.c @@ -308,14 +308,15 @@ static struct lock_class_key dsa_master_addr_list_lock_key; int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) { + int mtu = ETH_DATA_LEN + cpu_dp->tag_ops->overhead; int ret; rtnl_lock(); - ret = dev_set_mtu(dev, ETH_DATA_LEN + cpu_dp->tag_ops->overhead); + ret = dev_set_mtu(dev, mtu); rtnl_unlock(); if (ret) - netdev_warn(dev, "error %d setting MTU to include DSA overhead\n", - ret); + netdev_warn(dev, "error %d setting MTU to %d to include DSA overhead\n", + ret, mtu); /* If we use a tagging format that doesn't have an ethertype * field, make sure that all packets from this point on get diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 7efc753e4d9d..4a0498bf6c65 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1850,8 +1850,8 @@ int dsa_slave_create(struct dsa_port *port) ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN); rtnl_unlock(); if (ret && ret != -EOPNOTSUPP) - dev_warn(ds->dev, "nonfatal error %d setting MTU on 
port %d\n", - ret, port->index); + dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n", + ret, ETH_DATA_LEN, port->index); netif_carrier_off(slave_dev); diff --git a/net/ethtool/bitset.c b/net/ethtool/bitset.c index 1fb3603d92ad..0515d6604b3b 100644 --- a/net/ethtool/bitset.c +++ b/net/ethtool/bitset.c @@ -628,6 +628,8 @@ int ethnl_parse_bitset(unsigned long *val, unsigned long *mask, return ret; change_bits = nla_get_u32(tb[ETHTOOL_A_BITSET_SIZE]); + if (change_bits > nbits) + change_bits = nbits; bitmap_from_arr32(val, nla_data(tb[ETHTOOL_A_BITSET_VALUE]), change_bits); if (change_bits < nbits) diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index b87140a1fa28..cdf6ec5aa45d 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -825,7 +825,7 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, if (has_gw && has_via) { NL_SET_ERR_MSG(extack, "Nexthop configuration can not contain both GATEWAY and VIA"); - goto errout; + return -EINVAL; } return 0; diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index d1e04d2b5170..563b62b76a5f 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -203,7 +203,7 @@ unsigned int arpt_do_table(struct sk_buff *skb, local_bh_disable(); addend = xt_write_recseq_begin(); - private = READ_ONCE(table->private); /* Address dependency. */ + private = rcu_access_pointer(table->private); cpu = smp_processor_id(); table_base = private->entries; jumpstack = (struct arpt_entry **)private->jumpstack[cpu]; @@ -649,7 +649,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table) { unsigned int countersize; struct xt_counters *counters; - const struct xt_table_info *private = table->private; + const struct xt_table_info *private = xt_table_get_private_protected(table); /* We need atomic snapshot of counters: rest doesn't change * (other than comefrom, which userspace doesn't care @@ -673,7 +673,7 @@ static int copy_entries_to_user(unsigned int total_size, unsigned int off, num; const struct arpt_entry *e; struct xt_counters *counters; - struct xt_table_info *private = table->private; + struct xt_table_info *private = xt_table_get_private_protected(table); int ret = 0; void *loc_cpu_entry; @@ -807,7 +807,7 @@ static int get_info(struct net *net, void __user *user, const int *len) t = xt_request_find_table_lock(net, NFPROTO_ARP, name); if (!IS_ERR(t)) { struct arpt_getinfo info; - const struct xt_table_info *private = t->private; + const struct xt_table_info *private = xt_table_get_private_protected(t); #ifdef CONFIG_COMPAT struct xt_table_info tmp; @@ -860,7 +860,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr, t = xt_find_table_lock(net, NFPROTO_ARP, get.name); if (!IS_ERR(t)) { - const struct xt_table_info *private = t->private; + const struct xt_table_info *private = xt_table_get_private_protected(t); if (get.size == private->size) ret = copy_entries_to_user(private->size, @@ -1017,7 +1017,7 @@ static int do_add_counters(struct net *net, sockptr_t arg, unsigned int len) } local_bh_disable(); - private = t->private; + private = xt_table_get_private_protected(t); if (private->number != tmp.num_counters) { ret = -EINVAL; goto unlock_up_free; @@ -1330,7 +1330,7 @@ static int compat_copy_entries_to_user(unsigned int total_size, void __user *userptr) { struct xt_counters *counters; - const struct xt_table_info *private = table->private; + const struct xt_table_info *private = 
xt_table_get_private_protected(table); void __user *pos; unsigned int size; int ret = 0; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index f15bc21d7301..6e2851f8d3a3 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -258,7 +258,7 @@ ipt_do_table(struct sk_buff *skb, WARN_ON(!(table->valid_hooks & (1 << hook))); local_bh_disable(); addend = xt_write_recseq_begin(); - private = READ_ONCE(table->private); /* Address dependency. */ + private = rcu_access_pointer(table->private); cpu = smp_processor_id(); table_base = private->entries; jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; @@ -791,7 +791,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table) { unsigned int countersize; struct xt_counters *counters; - const struct xt_table_info *private = table->private; + const struct xt_table_info *private = xt_table_get_private_protected(table); /* We need atomic snapshot of counters: rest doesn't change (other than comefrom, which userspace doesn't care @@ -815,7 +815,7 @@ copy_entries_to_user(unsigned int total_size, unsigned int off, num; const struct ipt_entry *e; struct xt_counters *counters; - const struct xt_table_info *private = table->private; + const struct xt_table_info *private = xt_table_get_private_protected(table); int ret = 0; const void *loc_cpu_entry; @@ -964,7 +964,7 @@ static int get_info(struct net *net, void __user *user, const int *len) t = xt_request_find_table_lock(net, AF_INET, name); if (!IS_ERR(t)) { struct ipt_getinfo info; - const struct xt_table_info *private = t->private; + const struct xt_table_info *private = xt_table_get_private_protected(t); #ifdef CONFIG_COMPAT struct xt_table_info tmp; @@ -1018,7 +1018,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr, t = xt_find_table_lock(net, AF_INET, get.name); if (!IS_ERR(t)) { - const struct xt_table_info *private = t->private; + const struct xt_table_info *private = xt_table_get_private_protected(t); if (get.size == private->size) ret = copy_entries_to_user(private->size, t, uptr->entrytable); @@ -1173,7 +1173,7 @@ do_add_counters(struct net *net, sockptr_t arg, unsigned int len) } local_bh_disable(); - private = t->private; + private = xt_table_get_private_protected(t); if (private->number != tmp.num_counters) { ret = -EINVAL; goto unlock_up_free; @@ -1543,7 +1543,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, void __user *userptr) { struct xt_counters *counters; - const struct xt_table_info *private = table->private; + const struct xt_table_info *private = xt_table_get_private_protected(table); void __user *pos; unsigned int size; int ret = 0; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 3c99d48b65d8..ed42d2193c5c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -4082,7 +4082,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level, } #ifdef CONFIG_MMU case TCP_ZEROCOPY_RECEIVE: { - struct tcp_zerocopy_receive zc; + struct tcp_zerocopy_receive zc = {}; int err; if (get_user(len, optlen)) @@ -4099,7 +4099,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level, lock_sock(sk); err = tcp_zerocopy_receive(sk, &zc); release_sock(sk); - if (len == sizeof(zc)) + if (len >= offsetofend(struct tcp_zerocopy_receive, err)) goto zerocopy_rcv_sk_err; switch (len) { case offsetofend(struct tcp_zerocopy_receive, err): diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 9e8a6c1aa019..d6ad3b5c38e7 100644 --- a/net/ipv4/tcp_input.c +++ 
b/net/ipv4/tcp_input.c @@ -510,7 +510,6 @@ static void tcp_init_buffer_space(struct sock *sk) if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) tcp_sndbuf_expand(sk); - tp->rcvq_space.space = min_t(u32, tp->rcv_wnd, TCP_INIT_CWND * tp->advmss); tcp_mstamp_refresh(tp); tp->rcvq_space.time = tp->tcp_mstamp; tp->rcvq_space.seq = tp->copied_seq; @@ -534,6 +533,8 @@ static void tcp_init_buffer_space(struct sock *sk) tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp); tp->snd_cwnd_stamp = tcp_jiffies32; + tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd, + (u32)TCP_INIT_CWND * tp->advmss); } /* 4. Recalculate window clamp after socket hit its memory bounds. */ diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index af2338294598..58207c7769d0 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -984,7 +984,8 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst, __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr); tos = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ? - tcp_rsk(req)->syn_tos & ~INET_ECN_MASK : + (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) | + (inet_sk(sk)->tos & INET_ECN_MASK) : inet_sk(sk)->tos; if (!INET_ECN_is_capable(tos) && @@ -1546,7 +1547,9 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen; newinet->inet_id = prandom_u32(); - /* Set ToS of the new socket based upon the value of incoming SYN. */ + /* Set ToS of the new socket based upon the value of incoming SYN. + * ECT bits are set later in tcp_init_transfer(). + */ if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 41880d3521ed..f322e798a351 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1882,7 +1882,8 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) * window, and remember whether we were cwnd-limited then. */ if (!before(tp->snd_una, tp->max_packets_seq) || - tp->packets_out > tp->max_packets_out) { + tp->packets_out > tp->max_packets_out || + is_cwnd_limited) { tp->max_packets_out = tp->packets_out; tp->max_packets_seq = tp->snd_nxt; tp->is_cwnd_limited = is_cwnd_limited; @@ -2706,6 +2707,10 @@ repair: else tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED); + is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); + if (likely(sent_pkts || is_cwnd_limited)) + tcp_cwnd_validate(sk, is_cwnd_limited); + if (likely(sent_pkts)) { if (tcp_in_cwnd_reduction(sk)) tp->prr_out += sent_pkts; @@ -2713,8 +2718,6 @@ repair: /* Send one loss probe per tail loss episode. 
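
The tcp_reflect_tos hunks above are easiest to read as bit arithmetic: the DSCP bits are still mirrored from the incoming SYN, but the ECN bits now come from the local socket, since ECT is negotiated per connection rather than reflected. A small sketch, assuming INET_ECN_MASK == 3 as in include/net/inet_ecn.h:

#include <stdio.h>

#define INET_ECN_MASK 3 /* low two bits of the TOS byte */

/* DSCP from the SYN, ECN bits from the listener socket */
static unsigned char reflected_tos(unsigned char syn_tos,
                                   unsigned char listener_tos)
{
        return (syn_tos & ~INET_ECN_MASK) | (listener_tos & INET_ECN_MASK);
}

int main(void)
{
        /* SYN arrives as CS5 + ECT(0); listener uses default, not-ECT TOS */
        printf("0x%02x\n", reflected_tos(0xa2, 0x00)); /* 0xa0: CS5, not-ECT */
        return 0;
}

The matching comment change in tcp_v4_syn_recv_sock() makes the same point for the child socket: ECT bits are set later in tcp_init_transfer(), not copied from the peer.
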
*/ if (push_one != 2) tcp_schedule_loss_probe(sk, false); - is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); - tcp_cwnd_validate(sk, is_cwnd_limited); return false; } return !tp->packets_out && !tcp_write_queue_empty(sk); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index a3f105227ccc..dece195f212c 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2175,7 +2175,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) __skb_pull(skb, skb_transport_offset(skb)); ret = udp_queue_rcv_one_skb(sk, skb); if (ret > 0) - ip_protocol_deliver_rcu(dev_net(skb->dev), skb, -ret); + ip_protocol_deliver_rcu(dev_net(skb->dev), skb, ret); } return 0; } diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 2e2119bfcf13..c4f532f4d311 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -280,7 +280,7 @@ ip6t_do_table(struct sk_buff *skb, local_bh_disable(); addend = xt_write_recseq_begin(); - private = READ_ONCE(table->private); /* Address dependency. */ + private = rcu_access_pointer(table->private); cpu = smp_processor_id(); table_base = private->entries; jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; @@ -807,7 +807,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table) { unsigned int countersize; struct xt_counters *counters; - const struct xt_table_info *private = table->private; + const struct xt_table_info *private = xt_table_get_private_protected(table); /* We need atomic snapshot of counters: rest doesn't change (other than comefrom, which userspace doesn't care @@ -831,7 +831,7 @@ copy_entries_to_user(unsigned int total_size, unsigned int off, num; const struct ip6t_entry *e; struct xt_counters *counters; - const struct xt_table_info *private = table->private; + const struct xt_table_info *private = xt_table_get_private_protected(table); int ret = 0; const void *loc_cpu_entry; @@ -980,7 +980,7 @@ static int get_info(struct net *net, void __user *user, const int *len) t = xt_request_find_table_lock(net, AF_INET6, name); if (!IS_ERR(t)) { struct ip6t_getinfo info; - const struct xt_table_info *private = t->private; + const struct xt_table_info *private = xt_table_get_private_protected(t); #ifdef CONFIG_COMPAT struct xt_table_info tmp; @@ -1035,7 +1035,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr, t = xt_find_table_lock(net, AF_INET6, get.name); if (!IS_ERR(t)) { - struct xt_table_info *private = t->private; + struct xt_table_info *private = xt_table_get_private_protected(t); if (get.size == private->size) ret = copy_entries_to_user(private->size, t, uptr->entrytable); @@ -1189,7 +1189,7 @@ do_add_counters(struct net *net, sockptr_t arg, unsigned int len) } local_bh_disable(); - private = t->private; + private = xt_table_get_private_protected(t); if (private->number != tmp.num_counters) { ret = -EINVAL; goto unlock_up_free; @@ -1552,7 +1552,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, void __user *userptr) { struct xt_counters *counters; - const struct xt_table_info *private = table->private; + const struct xt_table_info *private = xt_table_get_private_protected(table); void __user *pos; unsigned int size; int ret = 0; diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c index 233da43dd8d7..ff691d9f4a04 100644 --- a/net/ipv6/rpl_iptunnel.c +++ b/net/ipv6/rpl_iptunnel.c @@ -190,18 +190,13 @@ static int rpl_do_srh(struct sk_buff *skb, const struct rpl_lwt *rlwt) { struct dst_entry *dst = skb_dst(skb); 
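
All of the table->private substitutions in the arp/ip/ip6_tables hunks above and below follow one pattern: ->private is now an __rcu pointer, so the packet path loads it with rcu_access_pointer() inside the local_bh_disable()/xt_write_recseq section, while slow paths that already hold the per-AF xt mutex go through the new accessor. The helper added by this series in net/netfilter/x_tables.c is essentially a guarded dereference, roughly:

/* Sketch of the accessor's shape, not a verbatim quote: the lockdep
 * expression asserts that the caller holds the per-AF xt mutex, which
 * is what makes the plain (non-RCU) use of the snapshot safe.
 */
struct xt_table_info *xt_table_get_private_protected(const struct xt_table *table)
{
        return rcu_dereference_protected(table->private,
                                         mutex_is_locked(&xt[table->af].mutex));
}
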
struct rpl_iptunnel_encap *tinfo; - int err = 0; if (skb->protocol != htons(ETH_P_IPV6)) return -EINVAL; tinfo = rpl_encap_lwtunnel(dst->lwtstate); - err = rpl_do_srh_inline(skb, rlwt, tinfo->srh); - if (err) - return err; - - return 0; + return rpl_do_srh_inline(skb, rlwt, tinfo->srh); } static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb) diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 1a1510513739..e254569a3005 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -528,7 +528,8 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts)); tclass = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ? - tcp_rsk(req)->syn_tos & ~INET_ECN_MASK : + (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) | + (np->tclass & INET_ECN_MASK) : np->tclass; if (!INET_ECN_is_capable(tclass) && @@ -1325,7 +1326,9 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * if (np->repflow) newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb)); - /* Set ToS of the new socket based upon the value of incoming SYN. */ + /* Set ToS of the new socket based upon the value of incoming SYN. + * ECT bits are set later in tcp_init_transfer(). + */ if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK; diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index db7d888914fa..882f028992c3 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -587,7 +587,7 @@ static void __iucv_auto_name(struct iucv_sock *iucv) static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { - struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; + DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr); char uid[sizeof(sa->siucv_user_id)]; struct sock *sk = sock->sk; struct iucv_sock *iucv; @@ -691,7 +691,7 @@ static int iucv_sock_autobind(struct sock *sk) static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr) { - struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; + DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr); struct sock *sk = sock->sk; struct iucv_sock *iucv = iucv_sk(sk); unsigned char user_data[16]; @@ -738,7 +738,7 @@ done: static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { - struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; + DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr); struct sock *sk = sock->sk; struct iucv_sock *iucv = iucv_sk(sk); int err; @@ -874,7 +874,7 @@ done: static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr, int peer) { - struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr; + DECLARE_SOCKADDR(struct sockaddr_iucv *, siucv, addr); struct sock *sk = sock->sk; struct iucv_sock *iucv = iucv_sk(sk); diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index cd4cf84a7f99..cce28e3b2232 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -250,10 +250,10 @@ static void ieee80211_send_addba_resp(struct sta_info *sta, u8 *da, u16 tid, mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP; mgmt->u.action.u.addba_resp.dialog_token = dialog_token; - capab = (u16)(amsdu << 0); /* bit 0 A-MSDU support */ - capab |= (u16)(policy << 1); /* bit 1 aggregation policy */ - capab |= (u16)(tid << 2); /* bit 5:2 TID number */ - capab |= (u16)(buf_size << 6); /* bit 15:6 max size of aggregation */ + capab = u16_encode_bits(amsdu, IEEE80211_ADDBA_PARAM_AMSDU_MASK); + capab |= u16_encode_bits(policy, 
IEEE80211_ADDBA_PARAM_POLICY_MASK); + capab |= u16_encode_bits(tid, IEEE80211_ADDBA_PARAM_TID_MASK); + capab |= u16_encode_bits(buf_size, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK); mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab); mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout); diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index b37c8a983d88..430a58587538 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -95,10 +95,10 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ; mgmt->u.action.u.addba_req.dialog_token = dialog_token; - capab = (u16)(1 << 0); /* bit 0 A-MSDU support */ - capab |= (u16)(1 << 1); /* bit 1 aggregation policy */ - capab |= (u16)(tid << 2); /* bit 5:2 TID number */ - capab |= (u16)(agg_size << 6); /* bit 15:6 max size of aggergation */ + capab = IEEE80211_ADDBA_PARAM_AMSDU_MASK; + capab |= IEEE80211_ADDBA_PARAM_POLICY_MASK; + capab |= u16_encode_bits(tid, IEEE80211_ADDBA_PARAM_TID_MASK); + capab |= u16_encode_bits(agg_size, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK); mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab); @@ -950,8 +950,8 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK; - tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; - buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; + tid = u16_get_bits(capab, IEEE80211_ADDBA_PARAM_TID_MASK); + buf_size = u16_get_bits(capab, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK); buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes); txq = sta->sta.txq[tid]; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 454432ced0c9..c4c70e30ad7f 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -405,6 +405,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, case WLAN_CIPHER_SUITE_WEP104: if (WARN_ON_ONCE(fips_enabled)) return -EINVAL; + break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_AES_CMAC: @@ -1121,10 +1122,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, sdata->vif.bss_conf.enable_beacon = true; sdata->vif.bss_conf.allow_p2p_go_ps = sdata->vif.p2p; sdata->vif.bss_conf.twt_responder = params->twt_responder; - memcpy(&sdata->vif.bss_conf.he_obss_pd, ¶ms->he_obss_pd, - sizeof(struct ieee80211_he_obss_pd)); - memcpy(&sdata->vif.bss_conf.he_bss_color, ¶ms->he_bss_color, - sizeof(struct ieee80211_he_bss_color)); + sdata->vif.bss_conf.he_obss_pd = params->he_obss_pd; + sdata->vif.bss_conf.he_bss_color = params->he_bss_color; sdata->vif.bss_conf.s1g = params->chandef.chan->band == NL80211_BAND_S1GHZ; @@ -3297,6 +3296,7 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata, if (cfg80211_get_chandef_type(¶ms->chandef) != cfg80211_get_chandef_type(&sdata->u.ibss.chandef)) return -EINVAL; + break; case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: case NL80211_CHAN_WIDTH_20_NOHT: @@ -3448,7 +3448,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, IEEE80211_QUEUE_STOP_REASON_CSA); cfg80211_ch_switch_started_notify(sdata->dev, &sdata->csa_chandef, - params->count); + params->count, params->block_tx); if (changed) { ieee80211_bss_info_change_notify(sdata, changed); @@ -4073,6 +4073,17 @@ static int ieee80211_reset_tid_config(struct wiphy *wiphy, return ret; } +static int ieee80211_set_sar_specs(struct wiphy 
*wiphy, + struct cfg80211_sar_specs *sar) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + + if (!local->ops->set_sar_specs) + return -EOPNOTSUPP; + + return local->ops->set_sar_specs(&local->hw, sar); +} + const struct cfg80211_ops mac80211_config_ops = { .add_virtual_intf = ieee80211_add_iface, .del_virtual_intf = ieee80211_del_iface, @@ -4175,4 +4186,5 @@ const struct cfg80211_ops mac80211_config_ops = { .probe_mesh_link = ieee80211_probe_mesh_link, .set_tid_config = ieee80211_set_tid_config, .reset_tid_config = ieee80211_reset_tid_config, + .set_sar_specs = ieee80211_set_sar_specs, }; diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index b6c80a45b9f5..907bb1f748a1 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -9,6 +9,7 @@ #include <net/cfg80211.h> #include "ieee80211_i.h" #include "driver-ops.h" +#include "rate.h" static int ieee80211_chanctx_num_assigned(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) @@ -191,11 +192,13 @@ ieee80211_find_reservation_chanctx(struct ieee80211_local *local, return NULL; } -enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta) +static enum nl80211_chan_width ieee80211_get_sta_bw(struct sta_info *sta) { - switch (sta->bandwidth) { + enum ieee80211_sta_rx_bandwidth width = ieee80211_sta_cap_rx_bw(sta); + + switch (width) { case IEEE80211_STA_RX_BW_20: - if (sta->ht_cap.ht_supported) + if (sta->sta.ht_cap.ht_supported) return NL80211_CHAN_WIDTH_20; else return NL80211_CHAN_WIDTH_20_NOHT; @@ -232,7 +235,7 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata) !(sta->sdata->bss && sta->sdata->bss == sdata->bss)) continue; - max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta)); + max_bw = max(max_bw, ieee80211_get_sta_bw(sta)); } rcu_read_unlock(); @@ -343,10 +346,42 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, drv_change_chanctx(local, ctx, IEEE80211_CHANCTX_CHANGE_MIN_WIDTH); } +static void ieee80211_chan_bw_change(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx) +{ + struct sta_info *sta; + struct ieee80211_supported_band *sband = + local->hw.wiphy->bands[ctx->conf.def.chan->band]; + + rcu_read_lock(); + list_for_each_entry_rcu(sta, &local->sta_list, + list) { + enum ieee80211_sta_rx_bandwidth new_sta_bw; + + if (!ieee80211_sdata_running(sta->sdata)) + continue; + + if (rcu_access_pointer(sta->sdata->vif.chanctx_conf) != + &ctx->conf) + continue; + + new_sta_bw = ieee80211_sta_cur_vht_bw(sta); + if (new_sta_bw == sta->sta.bandwidth) + continue; + + sta->sta.bandwidth = new_sta_bw; + rate_control_rate_update(local, sband, sta, + IEEE80211_RC_BW_CHANGED); + } + rcu_read_unlock(); +} + static void ieee80211_change_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, const struct cfg80211_chan_def *chandef) { + enum nl80211_chan_width width; + if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) { ieee80211_recalc_chanctx_min_def(local, ctx); return; @@ -354,7 +389,25 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local, WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef)); + width = ctx->conf.def.width; ctx->conf.def = *chandef; + + /* expected to handle only 20/40/80/160 channel widths */ + switch (chandef->width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + case NL80211_CHAN_WIDTH_40: + case NL80211_CHAN_WIDTH_80: + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + break; + default: + WARN_ON(1); + } + + if (chandef->width < width) + 
ieee80211_chan_bw_change(local, ctx); + drv_change_chanctx(local, ctx, IEEE80211_CHANCTX_CHANGE_WIDTH); ieee80211_recalc_chanctx_min_def(local, ctx); @@ -362,6 +415,9 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local, local->_oper_chandef = *chandef; ieee80211_hw_config(local, 0); } + + if (chandef->width > width) + ieee80211_chan_bw_change(local, ctx); } static struct ieee80211_chanctx * @@ -1051,8 +1107,14 @@ ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata) if (WARN_ON(!chandef)) return -EINVAL; + if (old_ctx->conf.def.width > new_ctx->conf.def.width) + ieee80211_chan_bw_change(local, new_ctx); + ieee80211_change_chanctx(local, new_ctx, chandef); + if (old_ctx->conf.def.width < new_ctx->conf.def.width) + ieee80211_chan_bw_change(local, new_ctx); + vif_chsw[0].vif = &sdata->vif; vif_chsw[0].old_ctx = &old_ctx->conf; vif_chsw[0].new_ctx = &new_ctx->conf; @@ -1443,6 +1505,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) ieee80211_recalc_smps_chanctx(local, ctx); ieee80211_recalc_radar_chanctx(local, ctx); ieee80211_recalc_chanctx_min_def(local, ctx); + ieee80211_chan_bw_change(local, ctx); list_for_each_entry_safe(sdata, sdata_tmp, &ctx->reserved_vifs, reserved_chanctx_list) { diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 90470392fdaa..48f144f107d5 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -53,7 +53,7 @@ static const struct file_operations name## _ops = { \ DEBUGFS_READONLY_FILE_OPS(name) #define DEBUGFS_ADD(name) \ - debugfs_create_file(#name, 0400, phyd, local, &name## _ops); + debugfs_create_file(#name, 0400, phyd, local, &name## _ops) #define DEBUGFS_ADD_MODE(name, mode) \ debugfs_create_file(#name, mode, phyd, local, &name## _ops); diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c index 98a713475e0f..f53dec8a3d5c 100644 --- a/net/mac80211/debugfs_key.c +++ b/net/mac80211/debugfs_key.c @@ -319,7 +319,7 @@ KEY_OPS(key); #define DEBUGFS_ADD(name) \ debugfs_create_file(#name, 0400, key->debugfs.dir, \ - key, &key_##name##_ops); + key, &key_##name##_ops) #define DEBUGFS_ADD_W(name) \ debugfs_create_file(#name, 0600, key->debugfs.dir, \ key, &key_##name##_ops); diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 9fc8ce214322..0ad3860852ff 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -642,7 +642,7 @@ IEEE80211_IF_FILE(dot11MeshConnectedToAuthServer, #define DEBUGFS_ADD_MODE(name, mode) \ debugfs_create_file(#name, mode, sdata->vif.debugfs_dir, \ - sdata, &name##_ops); + sdata, &name##_ops) #define DEBUGFS_ADD(name) DEBUGFS_ADD_MODE(name, 0400) @@ -711,7 +711,7 @@ static void add_mesh_stats(struct ieee80211_sub_if_data *sdata) struct dentry *dir = debugfs_create_dir("mesh_stats", sdata->vif.debugfs_dir); #define MESHSTATS_ADD(name)\ - debugfs_create_file(#name, 0400, dir, sdata, &name##_ops); + debugfs_create_file(#name, 0400, dir, sdata, &name##_ops) MESHSTATS_ADD(fwded_mcast); MESHSTATS_ADD(fwded_unicast); @@ -728,7 +728,7 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata) sdata->vif.debugfs_dir); #define MESHPARAMS_ADD(name) \ - debugfs_create_file(#name, 0600, dir, sdata, &name##_ops); + debugfs_create_file(#name, 0600, dir, sdata, &name##_ops) MESHPARAMS_ADD(dot11MeshMaxRetries); MESHPARAMS_ADD(dot11MeshRetryTimeout); diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 6a51b8b58f9e..eb4bb79d936a 100644 --- a/net/mac80211/debugfs_sta.c +++ 
b/net/mac80211/debugfs_sta.c @@ -985,7 +985,7 @@ STA_OPS(he_capa); #define DEBUGFS_ADD(name) \ debugfs_create_file(#name, 0400, \ - sta->debugfs_dir, sta, &sta_ ##name## _ops); + sta->debugfs_dir, sta, &sta_ ##name## _ops) #define DEBUGFS_ADD_COUNTER(name, field) \ debugfs_create_ulong(#name, 0400, sta->debugfs_dir, &sta->field); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index cde2e3f4fbcd..8bf9c0e974d6 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -452,7 +452,9 @@ struct ieee80211_if_managed { unsigned long probe_timeout; int probe_send_count; bool nullfunc_failed; - bool connection_loss; + u8 connection_loss:1, + driver_disconnect:1, + reconnect:1; struct cfg80211_bss *associated; struct ieee80211_mgd_auth_data *auth_data; @@ -1587,13 +1589,8 @@ ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status) { WARN_ON_ONCE(status->flag & RX_FLAG_MACTIME_START && status->flag & RX_FLAG_MACTIME_END); - if (status->flag & (RX_FLAG_MACTIME_START | RX_FLAG_MACTIME_END)) - return true; - /* can't handle non-legacy preamble yet */ - if (status->flag & RX_FLAG_MACTIME_PLCP_START && - status->encoding == RX_ENC_LEGACY) - return true; - return false; + return !!(status->flag & (RX_FLAG_MACTIME_START | RX_FLAG_MACTIME_END | + RX_FLAG_MACTIME_PLCP_START)); } void ieee80211_vif_inc_num_mcast(struct ieee80211_sub_if_data *sdata); @@ -2280,7 +2277,6 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, enum ieee80211_chanctx_mode chanmode, u8 radar_detect); int ieee80211_max_num_channels(struct ieee80211_local *local); -enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta); void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, struct ieee80211_chanctx *ctx); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index f5d4ceb72882..3b9ec4ef81c3 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -940,6 +940,8 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local) return ret; } + set_bit(SDATA_STATE_RUNNING, &sdata->state); + ret = ieee80211_check_queues(sdata, NL80211_IFTYPE_MONITOR); if (ret) { kfree(sdata); diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 8c5f829ff6d7..a4817aa4b171 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -1300,3 +1300,52 @@ ieee80211_gtk_rekey_add(struct ieee80211_vif *vif, return &key->conf; } EXPORT_SYMBOL_GPL(ieee80211_gtk_rekey_add); + +void ieee80211_key_mic_failure(struct ieee80211_key_conf *keyconf) +{ + struct ieee80211_key *key; + + key = container_of(keyconf, struct ieee80211_key, conf); + + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + key->u.aes_cmac.icverrors++; + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + key->u.aes_gmac.icverrors++; + break; + default: + /* ignore the others for now, we don't keep counters now */ + break; + } +} +EXPORT_SYMBOL_GPL(ieee80211_key_mic_failure); + +void ieee80211_key_replay(struct ieee80211_key_conf *keyconf) +{ + struct ieee80211_key *key; + + key = container_of(keyconf, struct ieee80211_key, conf); + + switch (key->conf.cipher) { + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + key->u.ccmp.replays++; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + key->u.aes_cmac.replays++; + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + key->u.aes_gmac.replays++; + break; + case 
WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + key->u.gcmp.replays++; + break; + } +} +EXPORT_SYMBOL_GPL(ieee80211_key_replay); diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 48f31ac9233c..620ecf922408 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -60,6 +60,7 @@ static struct mesh_table *mesh_table_alloc(void) atomic_set(&newtbl->entries, 0); spin_lock_init(&newtbl->gates_lock); spin_lock_init(&newtbl->walk_lock); + rhashtable_init(&newtbl->rhead, &mesh_rht_params); return newtbl; } @@ -773,9 +774,6 @@ int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata) goto free_path; } - rhashtable_init(&tbl_path->rhead, &mesh_rht_params); - rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params); - sdata->u.mesh.mesh_paths = tbl_path; sdata->u.mesh.mpp_paths = tbl_mpp; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 6adfcb9c06dc..0e4d950cf907 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1417,6 +1417,17 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, return; } + if (sdata->vif.bss_conf.chandef.chan->band != + csa_ie.chandef.chan->band) { + sdata_info(sdata, + "AP %pM switches to different band (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n", + ifmgd->associated->bssid, + csa_ie.chandef.chan->center_freq, + csa_ie.chandef.width, csa_ie.chandef.center_freq1, + csa_ie.chandef.center_freq2); + goto lock_and_drop_connection; + } + if (!cfg80211_chandef_usable(local->hw.wiphy, &csa_ie.chandef, IEEE80211_CHAN_DISABLED)) { sdata_info(sdata, @@ -1429,9 +1440,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, csa_ie.chandef.width, csa_ie.chandef.center_freq1, csa_ie.chandef.freq1_offset, csa_ie.chandef.center_freq2); - ieee80211_queue_work(&local->hw, - &ifmgd->csa_connection_drop_work); - return; + goto lock_and_drop_connection; } if (cfg80211_chandef_identical(&csa_ie.chandef, @@ -1493,6 +1502,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, sdata->csa_chandef = csa_ie.chandef; sdata->csa_block_tx = csa_ie.mode; ifmgd->csa_ignored_same_chan = false; + ifmgd->beacon_crc_valid = false; if (sdata->csa_block_tx) ieee80211_stop_vif_queues(local, sdata, @@ -1500,7 +1510,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, mutex_unlock(&local->mtx); cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chandef, - csa_ie.count); + csa_ie.count, csa_ie.mode); if (local->ops->channel_switch) { /* use driver's channel switch callback */ @@ -1516,6 +1526,9 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, TU_TO_EXP_TIME((csa_ie.count - 1) * cbss->beacon_interval)); return; + lock_and_drop_connection: + mutex_lock(&local->mtx); + mutex_lock(&local->chanctx_mtx); drop_connection: /* * This is just so that the disconnect flow will know that @@ -1560,9 +1573,17 @@ ieee80211_find_80211h_pwr_constr(struct ieee80211_sub_if_data *sdata, chan_increment = 1; break; case NL80211_BAND_5GHZ: - case NL80211_BAND_6GHZ: chan_increment = 4; break; + case NL80211_BAND_6GHZ: + /* + * In the 6 GHz band, the "maximum transmit power level" + * field in the triplets is reserved, and thus will be + * zero and we shouldn't use it to control TX power. + * The actual TX power will be given in the transmit + * power envelope element instead. 
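
The 6 GHz branch above is the substantive change: the maximum-transmit-power field of the country-IE triplets is reserved on that band, so no constraint may be derived from it and the lookup bails out. Schematically, the per-band dispatch now behaves like this (illustrative only, simplified types):

#include <stdbool.h>
#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ, BAND_6GHZ };

/* Returns true and sets *chan_increment when the 802.11h country-IE
 * triplets apply; returns false on 6 GHz, where TX power comes from
 * the transmit power envelope element instead.
 */
static bool pwr_constr_triplets_apply(enum band band, int *chan_increment)
{
        switch (band) {
        case BAND_2GHZ:
                *chan_increment = 1;
                return true;
        case BAND_5GHZ:
                *chan_increment = 4;
                return true;
        case BAND_6GHZ:
                return false;
        }
        return false;
}

int main(void)
{
        int inc;

        printf("6 GHz uses triplets: %d\n",
               pwr_constr_triplets_apply(BAND_6GHZ, &inc)); /* 0 */
        return 0;
}
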
+ */ + return false; } /* find channel */ @@ -2382,6 +2403,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, /* clear bssid only after building the needed mgmt frames */ eth_zero_addr(ifmgd->bssid); + sdata->vif.bss_conf.ssid_len = 0; + /* remove AP and TDLS peers */ sta_info_flush(sdata); @@ -2720,7 +2743,7 @@ EXPORT_SYMBOL(ieee80211_ap_probereq_get); static void ieee80211_report_disconnect(struct ieee80211_sub_if_data *sdata, const u8 *buf, size_t len, bool tx, - u16 reason) + u16 reason, bool reconnect) { struct ieee80211_event event = { .type = MLME_EVENT, @@ -2729,7 +2752,7 @@ static void ieee80211_report_disconnect(struct ieee80211_sub_if_data *sdata, }; if (tx) - cfg80211_tx_mlme_mgmt(sdata->dev, buf, len); + cfg80211_tx_mlme_mgmt(sdata->dev, buf, len, reconnect); else cfg80211_rx_mlme_mgmt(sdata->dev, buf, len); @@ -2751,13 +2774,18 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) tx = !sdata->csa_block_tx; - /* AP is probably out of range (or not reachable for another reason) so - * remove the bss struct for that AP. - */ - cfg80211_unlink_bss(local->hw.wiphy, ifmgd->associated); + if (!ifmgd->driver_disconnect) { + /* + * AP is probably out of range (or not reachable for another + * reason) so remove the bss struct for that AP. + */ + cfg80211_unlink_bss(local->hw.wiphy, ifmgd->associated); + } ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, - WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, + ifmgd->driver_disconnect ? + WLAN_REASON_DEAUTH_LEAVING : + WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, tx, frame_buf); mutex_lock(&local->mtx); sdata->vif.csa_active = false; @@ -2770,7 +2798,9 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) mutex_unlock(&local->mtx); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx, - WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY); + WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, + ifmgd->reconnect); + ifmgd->reconnect = false; sdata_unlock(sdata); } @@ -2789,6 +2819,13 @@ static void ieee80211_beacon_connection_loss_work(struct work_struct *work) sdata_info(sdata, "Connection to AP %pM lost\n", ifmgd->bssid); __ieee80211_disconnect(sdata); + ifmgd->connection_loss = false; + } else if (ifmgd->driver_disconnect) { + sdata_info(sdata, + "Driver requested disconnection from AP %pM\n", + ifmgd->bssid); + __ieee80211_disconnect(sdata); + ifmgd->driver_disconnect = false; } else { ieee80211_mgd_probe_ap(sdata, true); } @@ -2827,6 +2864,21 @@ void ieee80211_connection_loss(struct ieee80211_vif *vif) } EXPORT_SYMBOL(ieee80211_connection_loss); +void ieee80211_disconnect(struct ieee80211_vif *vif, bool reconnect) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_hw *hw = &sdata->local->hw; + + trace_api_disconnect(sdata, reconnect); + + if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) + return; + + sdata->u.mgd.driver_disconnect = true; + sdata->u.mgd.reconnect = reconnect; + ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work); +} +EXPORT_SYMBOL(ieee80211_disconnect); static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata, bool assoc) @@ -3130,7 +3182,7 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, ieee80211_set_disassoc(sdata, 0, 0, false, NULL); ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false, - reason_code); + reason_code, false); return; } @@ -3179,7 +3231,8 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, ieee80211_set_disassoc(sdata, 0, 
0, false, NULL); - ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false, reason_code); + ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false, reason_code, + false); } static void ieee80211_get_rates(struct ieee80211_supported_band *sband, @@ -3199,8 +3252,8 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband, *have_higher_than_11mbit = true; /* - * Skip HT, VHT and HE BSS membership selectors since they're - * not rates. + * Skip HT, VHT, HE and SAE H2E only BSS membership selectors + * since they're not rates. * * Note: Even though the membership selector and the basic * rate flag share the same bit, they are not exactly @@ -3208,7 +3261,8 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband, */ if (supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HT_PHY) || supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_VHT_PHY) || - supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HE_PHY)) + supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HE_PHY) || + supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_SAE_H2E)) continue; for (j = 0; j < sband->n_bitrates; j++) { @@ -3494,14 +3548,6 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, le32_get_bits(elems->he_operation->he_oper_params, IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK); - bss_conf->multi_sta_back_32bit = - sta->sta.he_cap.he_cap_elem.mac_cap_info[2] & - IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP; - - bss_conf->ack_enabled = - sta->sta.he_cap.he_cap_elem.mac_cap_info[2] & - IEEE80211_HE_MAC_CAP2_ACK_EN; - bss_conf->uora_exists = !!elems->uora_element; if (elems->uora_element) bss_conf->uora_ocw_range = elems->uora_element[0]; @@ -4199,7 +4245,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, true, deauth_buf); ieee80211_report_disconnect(sdata, deauth_buf, sizeof(deauth_buf), true, - WLAN_REASON_DEAUTH_LEAVING); + WLAN_REASON_DEAUTH_LEAVING, + false); return; } @@ -4344,7 +4391,7 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, tx, frame_buf); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, - reason); + reason, false); } static int ieee80211_auth(struct ieee80211_sub_if_data *sdata) @@ -4716,7 +4763,8 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata) if (ifmgd->auth_data) ieee80211_destroy_auth_data(sdata, false); cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, - IEEE80211_DEAUTH_FRAME_LEN); + IEEE80211_DEAUTH_FRAME_LEN, + false); } /* This is a bit of a hack - we should find a better and more generic @@ -5430,7 +5478,8 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, - WLAN_REASON_UNSPECIFIED); + WLAN_REASON_UNSPECIFIED, + false); } sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid); @@ -5471,6 +5520,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgd_assoc_data *assoc_data; const struct cfg80211_bss_ies *beacon_ies; struct ieee80211_supported_band *sband; + struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; const u8 *ssidie, *ht_ie, *vht_ie; int i, err; bool override = false; @@ -5488,6 +5538,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, } memcpy(assoc_data->ssid, ssidie + 2, ssidie[1]); assoc_data->ssid_len = ssidie[1]; + memcpy(bss_conf->ssid, assoc_data->ssid, assoc_data->ssid_len); + bss_conf->ssid_len = assoc_data->ssid_len; rcu_read_unlock(); if (ifmgd->associated) { @@ -5502,7 +5554,8 @@ int 
ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, - WLAN_REASON_UNSPECIFIED); + WLAN_REASON_UNSPECIFIED, + false); } if (ifmgd->auth_data && !ifmgd->auth_data->done) { @@ -5801,7 +5854,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, ieee80211_destroy_auth_data(sdata, false); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, - req->reason_code); + req->reason_code, false); return 0; } @@ -5821,7 +5874,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, ieee80211_destroy_assoc_data(sdata, false, true); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, - req->reason_code); + req->reason_code, false); return 0; } @@ -5836,7 +5889,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, req->reason_code, tx, frame_buf); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, - req->reason_code); + req->reason_code, false); return 0; } @@ -5869,7 +5922,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, frame_buf); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, - req->reason_code); + req->reason_code, false); return 0; } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 062c2b45584e..13b9bcc4865d 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -32,16 +32,6 @@ #include "wme.h" #include "rate.h" -static inline void ieee80211_rx_stats(struct net_device *dev, u32 len) -{ - struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); - - u64_stats_update_begin(&tstats->syncp); - tstats->rx_packets++; - tstats->rx_bytes += len; - u64_stats_update_end(&tstats->syncp); -} - /* * monitor mode reception * @@ -842,7 +832,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, if (skb) { skb->dev = sdata->dev; - ieee80211_rx_stats(skb->dev, skb->len); + dev_sw_netstats_rx_add(skb->dev, skb->len); netif_receive_skb(skb); } } @@ -1757,7 +1747,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) { sta->rx_stats.last_rx = jiffies; } else if (!ieee80211_is_s1g_beacon(hdr->frame_control) && - is_multicast_ether_addr(hdr->addr1)) { + !is_multicast_ether_addr(hdr->addr1)) { /* * Mesh beacons will update last_rx when if they are found to * match the current local configuration when processed. @@ -2559,7 +2549,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) skb = rx->skb; xmit_skb = NULL; - ieee80211_rx_stats(dev, skb->len); + dev_sw_netstats_rx_add(dev, skb->len); if (rx->sta) { /* The seqno index has the same property as needed @@ -3698,7 +3688,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, } prev_dev = sdata->dev; - ieee80211_rx_stats(sdata->dev, skb->len); + dev_sw_netstats_rx_add(sdata->dev, skb->len); } if (prev_dev) { @@ -4411,7 +4401,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, skb->dev = fast_rx->dev; - ieee80211_rx_stats(fast_rx->dev, skb->len); + dev_sw_netstats_rx_add(fast_rx->dev, skb->len); /* The seqno index has the same property as needed * for the rx_msdu field, i.e. 
it is IEEE80211_NUM_TIDS diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index 89723907a094..601322e16957 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -2,7 +2,7 @@ /* * Portions of this file * Copyright(c) 2016-2017 Intel Deutschland GmbH -* Copyright (C) 2018 - 2019 Intel Corporation +* Copyright (C) 2018 - 2020 Intel Corporation */ #if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ) @@ -2086,6 +2086,27 @@ TRACE_EVENT(api_connection_loss, ) ); +TRACE_EVENT(api_disconnect, + TP_PROTO(struct ieee80211_sub_if_data *sdata, bool reconnect), + + TP_ARGS(sdata, reconnect), + + TP_STRUCT__entry( + VIF_ENTRY + __field(int, reconnect) + ), + + TP_fast_assign( + VIF_ASSIGN; + __entry->reconnect = reconnect; + ), + + TP_printk( + VIF_PR_FMT " reconnect:%d", + VIF_PR_ARG, __entry->reconnect + ) +); + TRACE_EVENT(api_cqm_rssi_notify, TP_PROTO(struct ieee80211_sub_if_data *sdata, enum nl80211_cqm_rssi_threshold_event rssi_event, diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 01eb08527817..6422da6690f7 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -38,16 +38,6 @@ /* misc utils */ -static inline void ieee80211_tx_stats(struct net_device *dev, u32 len) -{ - struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); - - u64_stats_update_begin(&tstats->syncp); - tstats->tx_packets++; - tstats->tx_bytes += len; - u64_stats_update_end(&tstats->syncp); -} - static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, struct sk_buff *skb, int group_addr, int next_frag_len) @@ -3386,7 +3376,7 @@ static void ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata, if (key) info->control.hw_key = &key->conf; - ieee80211_tx_stats(skb->dev, skb->len); + dev_sw_netstats_tx_add(skb->dev, 1, skb->len); if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; @@ -4004,7 +3994,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, goto out; } - ieee80211_tx_stats(dev, skb->len); + dev_sw_netstats_tx_add(dev, 1, skb->len); ieee80211_xmit(sdata, sta, skb); } @@ -4231,7 +4221,7 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata, info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)]; - ieee80211_tx_stats(dev, skb->len); + dev_sw_netstats_tx_add(dev, 1, skb->len); sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len; sta->tx_stats.packets[skb_get_queue_mapping(skb)]++; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 8c3c01a1b923..8d3ae6b2f95f 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -3456,7 +3456,7 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata, *chandef = he_chandef; - return false; + return true; } bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper, @@ -3666,6 +3666,7 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, u64 ts = status->mactime; struct rate_info ri; u16 rate; + u8 n_ltf; if (WARN_ON(!ieee80211_have_rx_timestamp(status))) return 0; @@ -3676,11 +3677,58 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, /* Fill cfg80211 rate info */ switch (status->encoding) { + case RX_ENC_HE: + ri.flags |= RATE_INFO_FLAGS_HE_MCS; + ri.mcs = status->rate_idx; + ri.nss = status->nss; + ri.he_ru_alloc = status->he_ru; + if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) + ri.flags |= RATE_INFO_FLAGS_SHORT_GI; + + /* + * See P802.11ax_D6.0, section 27.3.4 for + * VHT PPDU format. 
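
The HT branch above extends the RX timestamp by the Data HT-LTF portion of the preamble, whose length follows from the spatial-stream count encoded in the MCS index (Nss = mcs/8 + 1, with the 3-stream case using 4 LTFs per 802.11n). A standalone check of that mapping:

#include <stdio.h>

/* Data HT-LTF counts are 1, 2, 4, 4 for Nss 1..4; mirrors the util.c
 * logic: n_ltf = ((mcs >> 3) & 3) + 1, rounding 3 up to 4. Each LTF
 * lasts 4 us.
 */
static unsigned int ht_data_ltfs(unsigned int mcs)
{
        unsigned int n_ltf = ((mcs >> 3) & 3) + 1;      /* Nss from MCS */

        return n_ltf == 3 ? 4 : n_ltf;
}

int main(void)
{
        unsigned int mcs;

        for (mcs = 0; mcs < 32; mcs += 8)
                printf("MCS %2u: %u HT-LTFs (%u us)\n",
                       mcs, ht_data_ltfs(mcs), 4 * ht_data_ltfs(mcs));
        return 0;
}

The VHT case that follows applies the analogous rule: an odd Nss greater than one rounds up to the next even LTF count before the 4 us per LTF is added.
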
+ */ + if (status->flag & RX_FLAG_MACTIME_PLCP_START) { + mpdu_offset += 2; + ts += 36; + + /* + * TODO: + * For HE MU PPDU, add the HE-SIG-B. + * For HE ER PPDU, add 8us for the HE-SIG-A. + * For HE TB PPDU, add 4us for the HE-STF. + * Add the HE-LTF durations - variable. + */ + } + + break; case RX_ENC_HT: ri.mcs = status->rate_idx; ri.flags |= RATE_INFO_FLAGS_MCS; if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) ri.flags |= RATE_INFO_FLAGS_SHORT_GI; + + /* + * See P802.11REVmd_D3.0, section 19.3.2 for + * HT PPDU format. + */ + if (status->flag & RX_FLAG_MACTIME_PLCP_START) { + mpdu_offset += 2; + if (status->enc_flags & RX_ENC_FLAG_HT_GF) + ts += 24; + else + ts += 32; + + /* + * Add Data HT-LTFs per streams + * TODO: add Extension HT-LTFs, 4us per LTF + */ + n_ltf = ((ri.mcs >> 3) & 3) + 1; + n_ltf = n_ltf == 3 ? 4 : n_ltf; + ts += n_ltf * 4; + } + break; case RX_ENC_VHT: ri.flags |= RATE_INFO_FLAGS_VHT_MCS; @@ -3688,6 +3736,23 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, ri.nss = status->nss; if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) ri.flags |= RATE_INFO_FLAGS_SHORT_GI; + + /* + * See P802.11REVmd_D3.0, section 21.3.2 for + * VHT PPDU format. + */ + if (status->flag & RX_FLAG_MACTIME_PLCP_START) { + mpdu_offset += 2; + ts += 36; + + /* + * Add VHT-LTFs per streams + */ + n_ltf = (ri.nss != 1) && (ri.nss % 2) ? + ri.nss + 1 : ri.nss; + ts += 4 * n_ltf; + } + break; default: WARN_ON(1); @@ -3711,7 +3776,6 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, ri.legacy = DIV_ROUND_UP(bitrate, (1 << shift)); if (status->flag & RX_FLAG_MACTIME_PLCP_START) { - /* TODO: handle HT/VHT preambles */ if (status->band == NL80211_BAND_5GHZ) { ts += 20 << shift; mpdu_offset += 2; diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index fb0e3a657d2d..c3ca97373774 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c @@ -465,12 +465,18 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta) * IEEE80211-2016 specification makes higher bandwidth operation * possible on the TDLS link if the peers have wider bandwidth * capability. + * + * However, in this case, and only if the TDLS peer is authorized, + * limit to the tdls_chandef so that the configuration here isn't + * wider than what's actually requested on the channel context. */ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && - test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) - return bw; - - bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width)); + test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) && + test_sta_flag(sta, WLAN_STA_AUTHORIZED) && + sta->tdls_chandef.chan) + bw = min(bw, ieee80211_chan_width_to_rx_bw(sta->tdls_chandef.width)); + else + bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width)); return bw; } diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c index 84d119436b22..b921cbdd9aaa 100644 --- a/net/mptcp/mib.c +++ b/net/mptcp/mib.c @@ -67,6 +67,7 @@ void mptcp_seq_show(struct seq_file *seq) for (i = 0; mptcp_snmp_list[i].name; i++) seq_puts(seq, " 0"); + seq_putc(seq, '\n'); return; } diff --git a/net/mptcp/options.c b/net/mptcp/options.c index 6b7b4b67f18c..1ca60d9da3ef 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -242,9 +242,6 @@ static void mptcp_parse_option(const struct sk_buff *skb, mp_opt->add_addr = 1; mp_opt->addr_id = *ptr++; - pr_debug("ADD_ADDR%s: id=%d, echo=%d", - (mp_opt->family == MPTCP_ADDR_IPVERSION_6) ? 
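A worked example for the ieee80211_calculate_rx_timestamp() hunks above, which add the preamble and training-field durations when the reported mactime marks the PLCP start (4 us per LTF, per the cited PPDU format sections):

/*
 * HT: the stream count is encoded in the MCS index (8 MCS values per
 * stream), and a 3-LTF data preamble is not defined, so 3 streams send
 * 4 HT-LTFs:
 *
 *   MCS  7: ((7 >> 3) & 3) + 1  = 1 stream  -> 1 LTF  ->  4 us
 *   MCS 12: ((12 >> 3) & 3) + 1 = 2 streams -> 2 LTFs ->  8 us
 *   MCS 19: ((19 >> 3) & 3) + 1 = 3 streams -> 4 LTFs -> 16 us
 *
 * VHT: odd NSS greater than 1 rounds up to the next even LTF count
 * (1 -> 1, 2 -> 2, 3 -> 4, 5 -> 6), which is exactly what the
 * (ri.nss != 1) && (ri.nss % 2) expression implements.
 */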
"6" : "", - mp_opt->addr_id, mp_opt->echo); if (mp_opt->family == MPTCP_ADDR_IPVERSION_4) { memcpy((u8 *)&mp_opt->addr.s_addr, (u8 *)ptr, 4); ptr += 4; @@ -269,6 +266,9 @@ static void mptcp_parse_option(const struct sk_buff *skb, mp_opt->ahmac = get_unaligned_be64(ptr); ptr += 8; } + pr_debug("ADD_ADDR%s: id=%d, ahmac=%llu, echo=%d, port=%d", + (mp_opt->family == MPTCP_ADDR_IPVERSION_6) ? "6" : "", + mp_opt->addr_id, mp_opt->ahmac, mp_opt->echo, mp_opt->port); break; case MPTCPOPT_RM_ADDR: @@ -587,9 +587,11 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff * unsigned int opt_size = *size; struct mptcp_addr_info saddr; bool echo; + bool port; int len; - if (mptcp_pm_should_add_signal_ipv6(msk) && + if ((mptcp_pm_should_add_signal_ipv6(msk) || + mptcp_pm_should_add_signal_port(msk)) && skb && skb_is_tcp_pure_ack(skb)) { pr_debug("drop other suboptions"); opts->suboptions = 0; @@ -598,10 +600,10 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff * } if (!mptcp_pm_should_add_signal(msk) || - !(mptcp_pm_add_addr_signal(msk, remaining, &saddr, &echo))) + !(mptcp_pm_add_addr_signal(msk, remaining, &saddr, &echo, &port))) return false; - len = mptcp_add_addr_len(saddr.family, echo); + len = mptcp_add_addr_len(saddr.family, echo, port); if (remaining < len) return false; @@ -609,6 +611,8 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff * if (drop_other_suboptions) *size -= opt_size; opts->addr_id = saddr.id; + if (port) + opts->port = ntohs(saddr.port); if (saddr.family == AF_INET) { opts->suboptions |= OPTION_MPTCP_ADD_ADDR; opts->addr = saddr.addr; @@ -631,7 +635,8 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff * } } #endif - pr_debug("addr_id=%d, ahmac=%llu, echo=%d", opts->addr_id, opts->ahmac, echo); + pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d", + opts->addr_id, opts->ahmac, echo, opts->port); return true; } @@ -797,7 +802,12 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk, mptcp_subflow_fully_established(subflow, mp_opt); fully_established: - if (likely(subflow->pm_notified)) + /* if the subflow is not already linked into the conn_list, we can't + * notify the PM: this subflow is still on the listener queue + * and the PM possibly acquiring the subflow lock could race with + * the listener close + */ + if (likely(subflow->pm_notified) || list_empty(&subflow->node)) return true; subflow->pm_notified = 1; @@ -1070,44 +1080,66 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp, } mp_capable_done: - if (OPTION_MPTCP_ADD_ADDR & opts->suboptions) { - if (opts->ahmac) - *ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR, - TCPOLEN_MPTCP_ADD_ADDR, 0, - opts->addr_id); - else - *ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR, - TCPOLEN_MPTCP_ADD_ADDR_BASE, - MPTCP_ADDR_ECHO, - opts->addr_id); - memcpy((u8 *)ptr, (u8 *)&opts->addr.s_addr, 4); - ptr += 1; + if ((OPTION_MPTCP_ADD_ADDR +#if IS_ENABLED(CONFIG_MPTCP_IPV6) + | OPTION_MPTCP_ADD_ADDR6 +#endif + ) & opts->suboptions) { + u8 len = TCPOLEN_MPTCP_ADD_ADDR_BASE; + u8 echo = MPTCP_ADDR_ECHO; + +#if IS_ENABLED(CONFIG_MPTCP_IPV6) + if (OPTION_MPTCP_ADD_ADDR6 & opts->suboptions) + len = TCPOLEN_MPTCP_ADD_ADDR6_BASE; +#endif + + if (opts->port) + len += TCPOLEN_MPTCP_PORT_LEN; + if (opts->ahmac) { - put_unaligned_be64(opts->ahmac, ptr); - ptr += 2; + len += sizeof(opts->ahmac); + echo = 0; } - } + *ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR, + len, echo, opts->addr_id); + if 
(OPTION_MPTCP_ADD_ADDR & opts->suboptions) { + memcpy((u8 *)ptr, (u8 *)&opts->addr.s_addr, 4); + ptr += 1; + } #if IS_ENABLED(CONFIG_MPTCP_IPV6) - if (OPTION_MPTCP_ADD_ADDR6 & opts->suboptions) { - if (opts->ahmac) - *ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR, - TCPOLEN_MPTCP_ADD_ADDR6, 0, - opts->addr_id); - else - *ptr++ = mptcp_option(MPTCPOPT_ADD_ADDR, - TCPOLEN_MPTCP_ADD_ADDR6_BASE, - MPTCP_ADDR_ECHO, - opts->addr_id); - memcpy((u8 *)ptr, opts->addr6.s6_addr, 16); - ptr += 4; - if (opts->ahmac) { - put_unaligned_be64(opts->ahmac, ptr); - ptr += 2; + else if (OPTION_MPTCP_ADD_ADDR6 & opts->suboptions) { + memcpy((u8 *)ptr, opts->addr6.s6_addr, 16); + ptr += 4; } - } #endif + if (!opts->port) { + if (opts->ahmac) { + put_unaligned_be64(opts->ahmac, ptr); + ptr += 2; + } + } else { + if (opts->ahmac) { + u8 *bptr = (u8 *)ptr; + + put_unaligned_be16(opts->port, bptr); + bptr += 2; + put_unaligned_be64(opts->ahmac, bptr); + bptr += 8; + put_unaligned_be16(TCPOPT_NOP << 8 | + TCPOPT_NOP, bptr); + + ptr += 3; + } else { + put_unaligned_be32(opts->port << 16 | + TCPOPT_NOP << 8 | + TCPOPT_NOP, ptr); + ptr += 1; + } + } + } + if (OPTION_MPTCP_RM_ADDR & opts->suboptions) { *ptr++ = mptcp_option(MPTCPOPT_RM_ADDR, TCPOLEN_MPTCP_RM_ADDR_BASE, diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c index 75c5040e8d5d..da2ed576f289 100644 --- a/net/mptcp/pm.c +++ b/net/mptcp/pm.c @@ -14,28 +14,43 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk, const struct mptcp_addr_info *addr, - bool echo) + bool echo, bool port) { - u8 add_addr = READ_ONCE(msk->pm.add_addr_signal); + u8 add_addr = READ_ONCE(msk->pm.addr_signal); pr_debug("msk=%p, local_id=%d", msk, addr->id); + if (add_addr) { + pr_warn("addr_signal error, add_addr=%d", add_addr); + return -EINVAL; + } + msk->pm.local = *addr; add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL); if (echo) add_addr |= BIT(MPTCP_ADD_ADDR_ECHO); if (addr->family == AF_INET6) add_addr |= BIT(MPTCP_ADD_ADDR_IPV6); - WRITE_ONCE(msk->pm.add_addr_signal, add_addr); + if (port) + add_addr |= BIT(MPTCP_ADD_ADDR_PORT); + WRITE_ONCE(msk->pm.addr_signal, add_addr); return 0; } int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id) { + u8 rm_addr = READ_ONCE(msk->pm.addr_signal); + pr_debug("msk=%p, local_id=%d", msk, local_id); + if (rm_addr) { + pr_warn("addr_signal error, rm_addr=%d", rm_addr); + return -EINVAL; + } + msk->pm.rm_id = local_id; - WRITE_ONCE(msk->pm.rm_addr_signal, true); + rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL); + WRITE_ONCE(msk->pm.addr_signal, rm_addr); return 0; } @@ -111,8 +126,14 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk) spin_lock_bh(&pm->lock); - if (READ_ONCE(pm->work_pending)) + /* mptcp_pm_fully_established() can be invoked by multiple + * racing paths - accept() and check_fully_established() + * be sure to serve this event only once. 
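The rewritten ADD_ADDR branch above folds the IPv4/IPv6 and echo/HMAC cases together and, new in this series, appends the 2-byte port. A layout sketch of the IPv4 case (the IPv6 case only widens the address to 16 bytes):

/*
 *   [kind][len][subtype/echo][addr_id][addr:4][port:2][hmac:8][NOP][NOP]
 *
 * The two trailing TCPOPT_NOPs keep the TCP option space 32-bit aligned
 * (a bare 2-byte port would leave it misaligned), and this version
 * accounts for them in the advertised length - which is why
 * TCPOLEN_MPTCP_PORT_LEN is bumped from 2 to 4 in the protocol.h hunk
 * below. Without an HMAC (the echo case), the hmac field is absent.
 */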
+ */ + if (READ_ONCE(pm->work_pending) && + !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED))) mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED); + msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED); spin_unlock_bh(&pm->lock); } @@ -156,7 +177,7 @@ void mptcp_pm_add_addr_received(struct mptcp_sock *msk, spin_lock_bh(&pm->lock); if (!READ_ONCE(pm->accept_addr)) { - mptcp_pm_announce_addr(msk, addr, true); + mptcp_pm_announce_addr(msk, addr, true, addr->port); mptcp_pm_add_addr_send_ack(msk); } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) { pm->remote = *addr; @@ -167,7 +188,8 @@ void mptcp_pm_add_addr_received(struct mptcp_sock *msk, void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk) { - if (!mptcp_pm_should_add_signal_ipv6(msk)) + if (!mptcp_pm_should_add_signal_ipv6(msk) && + !mptcp_pm_should_add_signal_port(msk)) return; mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK); @@ -188,7 +210,7 @@ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id) /* path manager helpers */ bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining, - struct mptcp_addr_info *saddr, bool *echo) + struct mptcp_addr_info *saddr, bool *echo, bool *port) { int ret = false; @@ -199,12 +221,13 @@ bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining, goto out_unlock; *echo = mptcp_pm_should_add_signal_echo(msk); + *port = mptcp_pm_should_add_signal_port(msk); - if (remaining < mptcp_add_addr_len(msk->pm.local.family, *echo)) + if (remaining < mptcp_add_addr_len(msk->pm.local.family, *echo, *port)) goto out_unlock; *saddr = msk->pm.local; - WRITE_ONCE(msk->pm.add_addr_signal, 0); + WRITE_ONCE(msk->pm.addr_signal, 0); ret = true; out_unlock: @@ -227,7 +250,7 @@ bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining, goto out_unlock; *rm_id = msk->pm.rm_id; - WRITE_ONCE(msk->pm.rm_addr_signal, false); + WRITE_ONCE(msk->pm.addr_signal, 0); ret = true; out_unlock: @@ -248,8 +271,7 @@ void mptcp_pm_data_init(struct mptcp_sock *msk) msk->pm.subflows = 0; msk->pm.rm_id = 0; WRITE_ONCE(msk->pm.work_pending, false); - WRITE_ONCE(msk->pm.add_addr_signal, 0); - WRITE_ONCE(msk->pm.rm_addr_signal, false); + WRITE_ONCE(msk->pm.addr_signal, 0); WRITE_ONCE(msk->pm.accept_addr, false); WRITE_ONCE(msk->pm.accept_subflow, false); msk->pm.status = 0; diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 03f2c28f11f5..5151cfcd6962 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -227,7 +227,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer) if (!mptcp_pm_should_add_signal(msk)) { pr_debug("retransmit ADD_ADDR id=%d", entry->addr.id); - mptcp_pm_announce_addr(msk, &entry->addr, false); + mptcp_pm_announce_addr(msk, &entry->addr, false, entry->addr.port); mptcp_pm_add_addr_send_ack(msk); entry->retrans_times++; } @@ -313,7 +313,7 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) struct mptcp_pm_addr_entry *local; struct pm_nl_pernet *pernet; - pernet = net_generic(sock_net((struct sock *)msk), pm_nl_pernet_id); + pernet = net_generic(sock_net(sk), pm_nl_pernet_id); pr_debug("local %d:%d signal %d:%d subflows %d:%d\n", msk->pm.local_addr_used, msk->pm.local_addr_max, @@ -328,7 +328,7 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) if (local) { if (mptcp_pm_alloc_anno_list(msk, local)) { msk->pm.add_addr_signaled++; - mptcp_pm_announce_addr(msk, &local->addr, false); + mptcp_pm_announce_addr(msk, &local->addr, false, 
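The pm.c changes above collapse the separate add_addr_signal byte and rm_addr_signal bool into a single addr_signal bitmask. A sketch of the resulting flag layout and the invariant the new pr_warn() checks enforce:

/*
 * One u8 now carries every pending path-manager signal:
 *
 *   BIT(MPTCP_ADD_ADDR_SIGNAL)   an ADD_ADDR announce is pending
 *   BIT(MPTCP_ADD_ADDR_ECHO)     ...and it is an echo (no HMAC)
 *   BIT(MPTCP_ADD_ADDR_IPV6)     ...and it carries an IPv6 address
 *   BIT(MPTCP_ADD_ADDR_PORT)     ...and it carries a port
 *   BIT(MPTCP_RM_ADDR_SIGNAL)    an RM_ADDR is pending
 *
 * mptcp_pm_announce_addr() and mptcp_pm_remove_addr() now refuse to
 * queue a signal while any bit is still set, so a pending ADD_ADDR can
 * no longer be silently overwritten by a racing RM_ADDR or vice versa.
 */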
local->addr.port); mptcp_pm_nl_add_addr_send_ack(msk); } } else { @@ -376,6 +376,7 @@ void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk) struct sock *sk = (struct sock *)msk; struct mptcp_addr_info remote; struct mptcp_addr_info local; + bool use_port = false; pr_debug("accepted %d:%d remote family %d", msk->pm.add_addr_accepted, msk->pm.add_addr_accept_max, @@ -392,14 +393,16 @@ void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk) remote = msk->pm.remote; if (!remote.port) remote.port = sk->sk_dport; + else + use_port = true; memset(&local, 0, sizeof(local)); local.family = remote.family; spin_unlock_bh(&msk->pm.lock); - __mptcp_subflow_connect((struct sock *)msk, &local, &remote); + __mptcp_subflow_connect(sk, &local, &remote); spin_lock_bh(&msk->pm.lock); - mptcp_pm_announce_addr(msk, &remote, true); + mptcp_pm_announce_addr(msk, &remote, true, use_port); mptcp_pm_nl_add_addr_send_ack(msk); } @@ -407,7 +410,8 @@ void mptcp_pm_nl_add_addr_send_ack(struct mptcp_sock *msk) { struct mptcp_subflow_context *subflow; - if (!mptcp_pm_should_add_signal_ipv6(msk)) + if (!mptcp_pm_should_add_signal_ipv6(msk) && + !mptcp_pm_should_add_signal_port(msk)) return; __mptcp_flush_join_list(msk); @@ -417,15 +421,22 @@ void mptcp_pm_nl_add_addr_send_ack(struct mptcp_sock *msk) u8 add_addr; spin_unlock_bh(&msk->pm.lock); - pr_debug("send ack for add_addr6"); + if (mptcp_pm_should_add_signal_ipv6(msk)) + pr_debug("send ack for add_addr6"); + if (mptcp_pm_should_add_signal_port(msk)) + pr_debug("send ack for add_addr_port"); + lock_sock(ssk); tcp_send_ack(ssk); release_sock(ssk); spin_lock_bh(&msk->pm.lock); - add_addr = READ_ONCE(msk->pm.add_addr_signal); - add_addr &= ~BIT(MPTCP_ADD_ADDR_IPV6); - WRITE_ONCE(msk->pm.add_addr_signal, add_addr); + add_addr = READ_ONCE(msk->pm.addr_signal); + if (mptcp_pm_should_add_signal_ipv6(msk)) + add_addr &= ~BIT(MPTCP_ADD_ADDR_IPV6); + if (mptcp_pm_should_add_signal_port(msk)) + add_addr &= ~BIT(MPTCP_ADD_ADDR_PORT); + WRITE_ONCE(msk->pm.addr_signal, add_addr); } } diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 57213ff60f78..2540d82742ac 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -701,6 +701,13 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk) int sk_rbuf, ssk_rbuf; bool wake; + /* The peer can send data while we are shutting down this + * subflow at msk destruction time, but we must avoid enqueuing + * more data to the msk receive queue + */ + if (unlikely(subflow->disposable)) + return; + /* move_skbs_to_msk below can legitly clear the data_avail flag, * but we will need later to properly woke the reader, cache its * value @@ -2119,6 +2126,8 @@ void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, sock_orphan(ssk); } + subflow->disposable = 1; + /* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops * the ssk has been already destroyed, we just need to release the * reference owned by msk; @@ -2126,8 +2135,7 @@ void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, if (!inet_csk(ssk)->icsk_ulp_ops) { kfree_rcu(subflow, rcu); } else { - /* otherwise ask tcp do dispose of ssk and subflow ctx */ - subflow->disposable = 1; + /* otherwise tcp will dispose of the ssk and subflow ctx */ __tcp_close(ssk, 0); /* close acquired an extra ref */ @@ -3208,6 +3216,17 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, bool slowpath; slowpath = lock_sock_fast(newsk); + + /* PM/worker can now acquire the first subflow socket + * lock without racing with listener queue cleanup, + 
* we can notify it, if needed. + */ + subflow = mptcp_subflow_ctx(msk->first); + list_add(&subflow->node, &msk->conn_list); + sock_hold(msk->first); + if (mptcp_is_fully_established(newsk)) + mptcp_pm_fully_established(msk); + mptcp_copy_inaddrs(newsk, msk->first); mptcp_rcv_space_init(msk, msk->first); diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index fc56e730fb35..f6c3c686a34a 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -49,14 +49,14 @@ #define TCPOLEN_MPTCP_DSS_MAP64 14 #define TCPOLEN_MPTCP_DSS_CHECKSUM 2 #define TCPOLEN_MPTCP_ADD_ADDR 16 -#define TCPOLEN_MPTCP_ADD_ADDR_PORT 18 +#define TCPOLEN_MPTCP_ADD_ADDR_PORT 20 #define TCPOLEN_MPTCP_ADD_ADDR_BASE 8 -#define TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT 10 +#define TCPOLEN_MPTCP_ADD_ADDR_BASE_PORT 12 #define TCPOLEN_MPTCP_ADD_ADDR6 28 -#define TCPOLEN_MPTCP_ADD_ADDR6_PORT 30 +#define TCPOLEN_MPTCP_ADD_ADDR6_PORT 32 #define TCPOLEN_MPTCP_ADD_ADDR6_BASE 20 -#define TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT 22 -#define TCPOLEN_MPTCP_PORT_LEN 2 +#define TCPOLEN_MPTCP_ADD_ADDR6_BASE_PORT 24 +#define TCPOLEN_MPTCP_PORT_LEN 4 #define TCPOLEN_MPTCP_RM_ADDR_BASE 4 /* MPTCP MP_JOIN flags */ @@ -165,13 +165,16 @@ enum mptcp_pm_status { MPTCP_PM_ADD_ADDR_SEND_ACK, MPTCP_PM_RM_ADDR_RECEIVED, MPTCP_PM_ESTABLISHED, + MPTCP_PM_ALREADY_ESTABLISHED, /* persistent status, set after ESTABLISHED event */ MPTCP_PM_SUBFLOW_ESTABLISHED, }; -enum mptcp_add_addr_status { +enum mptcp_addr_signal_status { MPTCP_ADD_ADDR_SIGNAL, MPTCP_ADD_ADDR_ECHO, MPTCP_ADD_ADDR_IPV6, + MPTCP_ADD_ADDR_PORT, + MPTCP_RM_ADDR_SIGNAL, }; struct mptcp_pm_data { @@ -181,8 +184,7 @@ struct mptcp_pm_data { spinlock_t lock; /*protects the whole PM data */ - u8 add_addr_signal; - bool rm_addr_signal; + u8 addr_signal; bool server_side; bool work_pending; bool accept_addr; @@ -551,40 +553,51 @@ mptcp_pm_del_add_timer(struct mptcp_sock *msk, int mptcp_pm_announce_addr(struct mptcp_sock *msk, const struct mptcp_addr_info *addr, - bool echo); + bool echo, bool port); int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id); int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 local_id); static inline bool mptcp_pm_should_add_signal(struct mptcp_sock *msk) { - return READ_ONCE(msk->pm.add_addr_signal) & BIT(MPTCP_ADD_ADDR_SIGNAL); + return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_ADD_ADDR_SIGNAL); } static inline bool mptcp_pm_should_add_signal_echo(struct mptcp_sock *msk) { - return READ_ONCE(msk->pm.add_addr_signal) & BIT(MPTCP_ADD_ADDR_ECHO); + return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_ADD_ADDR_ECHO); } static inline bool mptcp_pm_should_add_signal_ipv6(struct mptcp_sock *msk) { - return READ_ONCE(msk->pm.add_addr_signal) & BIT(MPTCP_ADD_ADDR_IPV6); + return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_ADD_ADDR_IPV6); +} + +static inline bool mptcp_pm_should_add_signal_port(struct mptcp_sock *msk) +{ + return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_ADD_ADDR_PORT); } static inline bool mptcp_pm_should_rm_signal(struct mptcp_sock *msk) { - return READ_ONCE(msk->pm.rm_addr_signal); + return READ_ONCE(msk->pm.addr_signal) & BIT(MPTCP_RM_ADDR_SIGNAL); } -static inline unsigned int mptcp_add_addr_len(int family, bool echo) +static inline unsigned int mptcp_add_addr_len(int family, bool echo, bool port) { - if (family == AF_INET) - return echo ? TCPOLEN_MPTCP_ADD_ADDR_BASE - : TCPOLEN_MPTCP_ADD_ADDR; - return echo ? 
TCPOLEN_MPTCP_ADD_ADDR6_BASE : TCPOLEN_MPTCP_ADD_ADDR6; + u8 len = TCPOLEN_MPTCP_ADD_ADDR_BASE; + + if (family == AF_INET6) + len = TCPOLEN_MPTCP_ADD_ADDR6_BASE; + if (!echo) + len += MPTCPOPT_THMAC_LEN; + if (port) + len += TCPOLEN_MPTCP_PORT_LEN; + + return len; } bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining, - struct mptcp_addr_info *saddr, bool *echo); + struct mptcp_addr_info *saddr, bool *echo, bool *port); bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining, u8 *rm_id); int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc); diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 5f5815a1665f..fefcaf497938 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -614,8 +614,9 @@ create_child: */ inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED); - /* link the newly created socket to the msk */ - mptcp_add_pending_subflow(mptcp_sk(new_msk), ctx); + /* record the newly created socket as the first msk + * subflow, but don't link it yet into conn_list + */ WRITE_ONCE(mptcp_sk(new_msk)->first, child); /* new mpc subflow takes ownership of the newly @@ -1148,13 +1149,18 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc, subflow->request_bkup = !!(loc->flags & MPTCP_PM_ADDR_FLAG_BACKUP); mptcp_info2sockaddr(remote, &addr); + mptcp_add_pending_subflow(msk, subflow); err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK); if (err && err != -EINPROGRESS) - goto failed; + goto failed_unlink; - mptcp_add_pending_subflow(msk, subflow); return err; +failed_unlink: + spin_lock_bh(&msk->join_list_lock); + list_del(&subflow->node); + spin_unlock_bh(&msk->join_list_lock); + failed: subflow->disposable = 1; sock_release(sf); @@ -1333,9 +1339,10 @@ static void subflow_ulp_release(struct sock *ssk) sk = ctx->conn; if (sk) { /* if the msk has been orphaned, keep the ctx - * alive, will be freed by mptcp_done() + * alive, will be freed by __mptcp_close_ssk(), + * when the subflow is still unaccepted */ - release = ctx->disposable; + release = ctx->disposable || list_empty(&ctx->node); sock_put(sk); } diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index a11bc8dcaa82..9d6c317878ff 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1724,6 +1724,10 @@ static struct nft_hook *nft_netdev_hook_alloc(struct net *net, } nla_strscpy(ifname, attr, IFNAMSIZ); + /* nf_tables_netdev_event() is called under rtnl_mutex, this is + * indirectly serializing all the other holders of the commit_mutex with + * the rtnl_mutex. 
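A quick sanity check of the new mptcp_add_addr_len() above against the TCPOLEN_* values updated earlier in this protocol.h hunk (MPTCPOPT_THMAC_LEN is the 8-byte truncated HMAC):

/*
 *   AF_INET,  echo,  no port:  8              = TCPOLEN_MPTCP_ADD_ADDR_BASE
 *   AF_INET,  !echo, no port:  8 + 8     = 16 = TCPOLEN_MPTCP_ADD_ADDR
 *   AF_INET,  !echo, port:     8 + 8 + 4 = 20 = TCPOLEN_MPTCP_ADD_ADDR_PORT
 *   AF_INET6, !echo, port:    20 + 8 + 4 = 32 = TCPOLEN_MPTCP_ADD_ADDR6_PORT
 *
 * echo suppresses the HMAC, hence "!echo adds 8 bytes"; port adds 4
 * (2 port bytes plus the 2 alignment NOPs counted in the length).
 */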
+ */ dev = __dev_get_by_name(net, ifname); if (!dev) { err = -ENOENT; @@ -3720,7 +3724,7 @@ cont: return 0; } -static int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result) +int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result) { u64 ms = be64_to_cpu(nla_get_be64(nla)); u64 max = (u64)(~((u64)0)); @@ -3734,7 +3738,7 @@ static int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result) return 0; } -static __be64 nf_jiffies64_to_msecs(u64 input) +__be64 nf_jiffies64_to_msecs(u64 input) { return cpu_to_be64(jiffies64_to_msecs(input)); } diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index a8c4d442231c..8bcd49f14797 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -177,8 +177,6 @@ static void nft_ct_get_eval(const struct nft_expr *expr, } #endif case NFT_CT_ID: - if (!nf_ct_is_confirmed(ct)) - goto err; *dest = nf_ct_get_id(ct); return; default: diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 64ca13a1885b..9af4f93c7f0e 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -157,8 +157,10 @@ static int nft_dynset_init(const struct nft_ctx *ctx, if (tb[NFTA_DYNSET_TIMEOUT] != NULL) { if (!(set->flags & NFT_SET_TIMEOUT)) return -EINVAL; - timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64( - tb[NFTA_DYNSET_TIMEOUT]))); + + err = nf_msecs_to_jiffies64(tb[NFTA_DYNSET_TIMEOUT], &timeout); + if (err) + return err; } priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]); @@ -267,7 +269,7 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr) if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name)) goto nla_put_failure; if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, - cpu_to_be64(jiffies_to_msecs(priv->timeout)), + nf_jiffies64_to_msecs(priv->timeout), NFTA_DYNSET_PAD)) goto nla_put_failure; if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr)) diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index af22dbe85e2c..acce622582e3 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -1349,6 +1349,14 @@ struct xt_counters *xt_counters_alloc(unsigned int counters) } EXPORT_SYMBOL(xt_counters_alloc); +struct xt_table_info +*xt_table_get_private_protected(const struct xt_table *table) +{ + return rcu_dereference_protected(table->private, + mutex_is_locked(&xt[table->af].mutex)); +} +EXPORT_SYMBOL(xt_table_get_private_protected); + struct xt_table_info * xt_replace_table(struct xt_table *table, unsigned int num_counters, @@ -1356,7 +1364,6 @@ xt_replace_table(struct xt_table *table, int *error) { struct xt_table_info *private; - unsigned int cpu; int ret; ret = xt_jumpstack_alloc(newinfo); @@ -1366,47 +1373,20 @@ xt_replace_table(struct xt_table *table, } /* Do the substitution. */ - local_bh_disable(); - private = table->private; + private = xt_table_get_private_protected(table); /* Check inside lock: is the old number correct? */ if (num_counters != private->number) { pr_debug("num_counters != table->private->number (%u/%u)\n", num_counters, private->number); - local_bh_enable(); *error = -EAGAIN; return NULL; } newinfo->initial_entries = private->initial_entries; - /* - * Ensure contents of newinfo are visible before assigning to - * private. - */ - smp_wmb(); - table->private = newinfo; - - /* make sure all cpus see new ->private value */ - smp_wmb(); - /* - * Even though table entries have now been swapped, other CPU's - * may still be using the old entries... - */ - local_bh_enable(); - - /* ... 
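The nft_dynset change above switches the timeout conversion to the now-exported nf_msecs_to_jiffies64(). The point is the argument type, shown here as a minimal illustration:

/*
 * msecs_to_jiffies() takes an unsigned int, so the old code silently
 * truncated the 64-bit timeout supplied over netlink:
 *
 *   u64 ms = 0x100000001000ULL;     huge user-supplied timeout
 *   msecs_to_jiffies(ms);           same as msecs_to_jiffies(0x1000)
 *
 * nf_msecs_to_jiffies64() keeps the full 64-bit value and range-checks
 * it, failing cleanly instead of wrapping - note the new
 * 'if (err) return err;' in the hunk above.
 */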
so wait for even xt_recseq on all cpus */ - for_each_possible_cpu(cpu) { - seqcount_t *s = &per_cpu(xt_recseq, cpu); - u32 seq = raw_read_seqcount(s); - - if (seq & 1) { - do { - cond_resched(); - cpu_relax(); - } while (seq == raw_read_seqcount(s)); - } - } + rcu_assign_pointer(table->private, newinfo); + synchronize_rcu(); audit_log_nfcfg(table->name, table->af, private->number, !private->number ? AUDIT_XT_OP_REGISTER : @@ -1442,12 +1422,12 @@ struct xt_table *xt_register_table(struct net *net, } /* Simplifies replace_table code. */ - table->private = bootstrap; + rcu_assign_pointer(table->private, bootstrap); if (!xt_replace_table(table, 0, newinfo, &ret)) goto unlock; - private = table->private; + private = xt_table_get_private_protected(table); pr_debug("table->private->number = %u\n", private->number); /* save number of initial entries */ @@ -1470,7 +1450,8 @@ void *xt_unregister_table(struct xt_table *table) struct xt_table_info *private; mutex_lock(&xt[table->af].mutex); - private = table->private; + private = xt_table_get_private_protected(table); + RCU_INIT_POINTER(table->private, NULL); list_del(&table->list); mutex_unlock(&xt[table->af].mutex); audit_log_nfcfg(table->name, table->af, private->number, diff --git a/net/nfc/Kconfig b/net/nfc/Kconfig index 9b27599870e3..96b91674dd37 100644 --- a/net/nfc/Kconfig +++ b/net/nfc/Kconfig @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only # -# NFC sybsystem configuration +# NFC subsystem configuration # menuconfig NFC diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 6a88daab0190..5eddfe7bd391 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -2033,15 +2033,11 @@ static int ovs_ct_limit_get_default_limit(struct ovs_ct_limit_info *info, struct sk_buff *reply) { struct ovs_zone_limit zone_limit; - int err; zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE; zone_limit.limit = info->default_limit; - err = nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit); - if (err) - return err; - return 0; + return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit); } static int __ovs_ct_limit_get_zone_limit(struct net *net, diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index ec0689ddc635..4c5c2331e764 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -2531,7 +2531,7 @@ static int validate_and_copy_dec_ttl(struct net *net, action_start = add_nested_action_start(sfa, OVS_DEC_TTL_ATTR_ACTION, log); if (action_start < 0) - return start; + return action_start; err = __ovs_nla_copy_actions(net, actions, key, sfa, eth_type, vlan_tci, mpls_label_count, log); diff --git a/net/rfkill/core.c b/net/rfkill/core.c index 97101c55763d..68d6ef9e59fc 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -40,6 +40,7 @@ struct rfkill { enum rfkill_type type; unsigned long state; + unsigned long hard_block_reasons; u32 idx; @@ -265,6 +266,7 @@ static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill, ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW); ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW | RFKILL_BLOCK_SW_PREV)); + ev->hard_block_reasons = rfkill->hard_block_reasons; spin_unlock_irqrestore(&rfkill->lock, flags); } @@ -522,19 +524,29 @@ bool rfkill_get_global_sw_state(const enum rfkill_type type) } #endif -bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) +bool rfkill_set_hw_state_reason(struct rfkill *rfkill, + bool blocked, unsigned long reason) { unsigned long flags; bool ret, prev; BUG_ON(!rfkill); + if 
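The xt_replace_table() conversion above drops the hand-rolled seqcount wait in favor of plain RCU publication. A sketch of the resulting writer/reader pairing; the reader side is an assumption about how the ipt/ip6t packet path pairs with it, not code from this patch:

	struct xt_table_info *old, *private;

	/* writer, xt[af].mutex held (xt_replace_table, simplified): */
	old = xt_table_get_private_protected(table);
	rcu_assign_pointer(table->private, newinfo);
	synchronize_rcu();	/* no CPU still walks 'old' after this */
	/* 'old' may now be freed or its counters harvested */

	/* reader, packet path - a BH-disabled section is an RCU
	 * read-side section on current kernels:
	 */
	local_bh_disable();
	private = rcu_dereference_bh(table->private);
	/* ... evaluate rules in 'private' ... */
	local_bh_enable();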
(WARN(reason & + ~(RFKILL_HARD_BLOCK_SIGNAL | RFKILL_HARD_BLOCK_NOT_OWNER), + "hw_state reason not supported: 0x%lx", reason)) + return blocked; + spin_lock_irqsave(&rfkill->lock, flags); - prev = !!(rfkill->state & RFKILL_BLOCK_HW); - if (blocked) + prev = !!(rfkill->hard_block_reasons & reason); + if (blocked) { rfkill->state |= RFKILL_BLOCK_HW; - else - rfkill->state &= ~RFKILL_BLOCK_HW; + rfkill->hard_block_reasons |= reason; + } else { + rfkill->hard_block_reasons &= ~reason; + if (!rfkill->hard_block_reasons) + rfkill->state &= ~RFKILL_BLOCK_HW; + } ret = !!(rfkill->state & RFKILL_BLOCK_ANY); spin_unlock_irqrestore(&rfkill->lock, flags); @@ -546,7 +558,7 @@ bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) return ret; } -EXPORT_SYMBOL(rfkill_set_hw_state); +EXPORT_SYMBOL(rfkill_set_hw_state_reason); static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) { @@ -744,6 +756,16 @@ static ssize_t soft_store(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RW(soft); +static ssize_t hard_block_reasons_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct rfkill *rfkill = to_rfkill(dev); + + return sprintf(buf, "0x%lx\n", rfkill->hard_block_reasons); +} +static DEVICE_ATTR_RO(hard_block_reasons); + static u8 user_state_from_blocked(unsigned long state) { if (state & RFKILL_BLOCK_HW) @@ -796,6 +818,7 @@ static struct attribute *rfkill_dev_attrs[] = { &dev_attr_state.attr, &dev_attr_soft.attr, &dev_attr_hard.attr, + &dev_attr_hard_block_reasons.attr, NULL, }; ATTRIBUTE_GROUPS(rfkill_dev); @@ -811,6 +834,7 @@ static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env) { struct rfkill *rfkill = to_rfkill(dev); unsigned long flags; + unsigned long reasons; u32 state; int error; @@ -823,10 +847,13 @@ static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env) return error; spin_lock_irqsave(&rfkill->lock, flags); state = rfkill->state; + reasons = rfkill->hard_block_reasons; spin_unlock_irqrestore(&rfkill->lock, flags); error = add_uevent_var(env, "RFKILL_STATE=%d", user_state_from_blocked(state)); - return error; + if (error) + return error; + return add_uevent_var(env, "RFKILL_HW_BLOCK_REASON=0x%lx", reasons); } void rfkill_pause_polling(struct rfkill *rfkill) diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 2c842851d72e..fef3573fdc8b 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -69,7 +69,7 @@ bool __rxrpc_set_call_completion(struct rxrpc_call *call, if (call->state < RXRPC_CALL_COMPLETE) { call->abort_code = abort_code; call->error = error; - call->completion = compl, + call->completion = compl; call->state = RXRPC_CALL_COMPLETE; trace_rxrpc_call_complete(call); wake_up(&call->waitq); diff --git a/net/sched/Kconfig b/net/sched/Kconfig index a3b37d88800e..1e8ab4749c6c 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -281,7 +281,7 @@ config NET_SCH_CHOKE help Say Y here if you want to use the CHOKe packet scheduler (CHOose and Keep for responsive flows, CHOose and Kill for unresponsive - flows). This is a variation of RED which trys to penalize flows + flows). This is a variation of RED which tries to penalize flows that monopolize the queue. 
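rfkill_set_hw_state_reason() above turns the single hardware-block flag into a per-reason bitmask: the device stays hard-blocked until every reason has been cleared, and the reasons are exported via the new sysfs attribute and uevent variable. A hypothetical driver-side usage sketch (the old rfkill_set_hw_state() presumably survives as a wrapper passing RFKILL_HARD_BLOCK_SIGNAL):

	/* two independent hard-block sources assert: */
	rfkill_set_hw_state_reason(rfkill, true, RFKILL_HARD_BLOCK_SIGNAL);
	rfkill_set_hw_state_reason(rfkill, true, RFKILL_HARD_BLOCK_NOT_OWNER);

	/* clearing one reason is not enough, RFKILL_BLOCK_HW stays set: */
	rfkill_set_hw_state_reason(rfkill, false, RFKILL_HARD_BLOCK_NOT_OWNER);

	/* only when the last reason drops does the hard block lift: */
	rfkill_set_hw_state_reason(rfkill, false, RFKILL_HARD_BLOCK_SIGNAL);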
To compile this code as a module, choose M here: the @@ -813,7 +813,7 @@ config NET_ACT_SAMPLE config NET_ACT_IPT tristate "IPtables targets" - depends on NET_CLS_ACT && NETFILTER && IP_NF_IPTABLES + depends on NET_CLS_ACT && NETFILTER && NETFILTER_XTABLES help Say Y here to be able to invoke iptables targets after successful classification. @@ -912,7 +912,7 @@ config NET_ACT_BPF config NET_ACT_CONNMARK tristate "Netfilter Connection Mark Retriever" - depends on NET_CLS_ACT && NETFILTER && IP_NF_IPTABLES + depends on NET_CLS_ACT && NETFILTER depends on NF_CONNTRACK && NF_CONNTRACK_MARK help Say Y here to allow retrieving of conn mark @@ -924,7 +924,7 @@ config NET_ACT_CONNMARK config NET_ACT_CTINFO tristate "Netfilter Connection Mark Actions" - depends on NET_CLS_ACT && NETFILTER && IP_NF_IPTABLES + depends on NET_CLS_ACT && NETFILTER depends on NF_CONNTRACK && NF_CONNTRACK_MARK help Say Y here to allow transfer of a connmark stored information. diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index fed18fd2c50b..1319986693fc 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -2424,8 +2424,8 @@ static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb, return err; } if (lse_mask->mpls_label) { - err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL, - lse_key->mpls_label); + err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL, + lse_key->mpls_label); if (err) return err; } diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 54209a18d7fe..6e1abe805448 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -1171,7 +1171,6 @@ static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n, struct tc_u_hnode *ht = rtnl_dereference(n->ht_down); struct tcf_block *block = tp->chain->block; struct tc_cls_u32_offload cls_u32 = {}; - int err; tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack); cls_u32.command = add ? 
@@ -1194,13 +1193,9 @@ static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n, cls_u32.knode.link_handle = ht->handle; } - err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32, - &cls_u32, cb_priv, &n->flags, - &n->in_hw_count); - if (err) - return err; - - return 0; + return tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32, + &cls_u32, cb_priv, &n->flags, + &n->in_hw_count); } static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c index 4dda15588cf4..949163fe68af 100644 --- a/net/sched/sch_fq_pie.c +++ b/net/sched/sch_fq_pie.c @@ -401,6 +401,7 @@ static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt, INIT_LIST_HEAD(&q->new_flows); INIT_LIST_HEAD(&q->old_flows); + timer_setup(&q->adapt_timer, fq_pie_timer, 0); if (opt) { err = fq_pie_change(sch, opt, extack); @@ -426,7 +427,6 @@ static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt, pie_vars_init(&flow->vars); } - timer_setup(&q->adapt_timer, fq_pie_timer, 0); mod_timer(&q->adapt_timer, jiffies + HZ / 2); return 0; diff --git a/net/tipc/node.c b/net/tipc/node.c index 86b4d7ffb47a..83d9eb830592 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -2206,9 +2206,11 @@ void tipc_node_apply_property(struct net *net, struct tipc_bearer *b, &xmitq); else if (prop == TIPC_NLA_PROP_MTU) tipc_link_set_mtu(e->link, b->mtu); + + /* Update MTU for node link entry */ + e->mtu = tipc_link_mss(e->link); } - /* Update MTU for node link entry */ - e->mtu = tipc_link_mss(e->link); + tipc_node_write_unlock(n); tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL); } diff --git a/net/wireless/core.h b/net/wireless/core.h index e3e9686859d4..7df91f940212 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -433,6 +433,8 @@ void cfg80211_sme_abandon_assoc(struct wireless_dev *wdev); /* internal helpers */ bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher); +bool cfg80211_valid_key_idx(struct cfg80211_registered_device *rdev, + int key_idx, bool pairwise); int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, struct key_params *params, int key_idx, bool pairwise, const u8 *mac_addr); diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 0ac820780437..e1e90761dc00 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -4,7 +4,7 @@ * * Copyright (c) 2009, Jouni Malinen <[email protected]> * Copyright (c) 2015 Intel Deutschland GmbH - * Copyright (C) 2019 Intel Corporation + * Copyright (C) 2019-2020 Intel Corporation */ #include <linux/kernel.h> @@ -81,7 +81,8 @@ static void cfg80211_process_auth(struct wireless_dev *wdev, } static void cfg80211_process_deauth(struct wireless_dev *wdev, - const u8 *buf, size_t len) + const u8 *buf, size_t len, + bool reconnect) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; @@ -89,7 +90,7 @@ static void cfg80211_process_deauth(struct wireless_dev *wdev, u16 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); bool from_ap = !ether_addr_equal(mgmt->sa, wdev->netdev->dev_addr); - nl80211_send_deauth(rdev, wdev->netdev, buf, len, GFP_KERNEL); + nl80211_send_deauth(rdev, wdev->netdev, buf, len, reconnect, GFP_KERNEL); if (!wdev->current_bss || !ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) @@ -100,7 +101,8 @@ static void cfg80211_process_deauth(struct wireless_dev *wdev, } static void cfg80211_process_disassoc(struct wireless_dev *wdev, - 
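The fq_pie_init() reordering above is a bug fix, not a cleanup: ->destroy() runs even when ->init() fails, and fq_pie_destroy() calls del_timer_sync() on adapt_timer unconditionally, so the timer must be initialized before any early-return path. The general pattern, sketched with invented foo_* names:

static int foo_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct foo_sched_data *q = qdisc_priv(sch);
	int err;

	/* set up everything ->destroy() will tear down *before* the
	 * first path that can fail out of init
	 */
	timer_setup(&q->adapt_timer, foo_timer, 0);

	if (opt) {
		err = foo_change(sch, opt, extack);
		if (err)
			return err;	/* del_timer_sync() is now safe */
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
	return 0;
}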
const u8 *buf, size_t len) + const u8 *buf, size_t len, + bool reconnect) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; @@ -108,7 +110,8 @@ static void cfg80211_process_disassoc(struct wireless_dev *wdev, u16 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); bool from_ap = !ether_addr_equal(mgmt->sa, wdev->netdev->dev_addr); - nl80211_send_disassoc(rdev, wdev->netdev, buf, len, GFP_KERNEL); + nl80211_send_disassoc(rdev, wdev->netdev, buf, len, reconnect, + GFP_KERNEL); if (WARN_ON(!wdev->current_bss || !ether_addr_equal(wdev->current_bss->pub.bssid, bssid))) @@ -133,9 +136,9 @@ void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len) if (ieee80211_is_auth(mgmt->frame_control)) cfg80211_process_auth(wdev, buf, len); else if (ieee80211_is_deauth(mgmt->frame_control)) - cfg80211_process_deauth(wdev, buf, len); + cfg80211_process_deauth(wdev, buf, len, false); else if (ieee80211_is_disassoc(mgmt->frame_control)) - cfg80211_process_disassoc(wdev, buf, len); + cfg80211_process_disassoc(wdev, buf, len, false); } EXPORT_SYMBOL(cfg80211_rx_mlme_mgmt); @@ -180,22 +183,23 @@ void cfg80211_abandon_assoc(struct net_device *dev, struct cfg80211_bss *bss) } EXPORT_SYMBOL(cfg80211_abandon_assoc); -void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len) +void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len, + bool reconnect) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct ieee80211_mgmt *mgmt = (void *)buf; ASSERT_WDEV_LOCK(wdev); - trace_cfg80211_tx_mlme_mgmt(dev, buf, len); + trace_cfg80211_tx_mlme_mgmt(dev, buf, len, reconnect); if (WARN_ON(len < 2)) return; if (ieee80211_is_deauth(mgmt->frame_control)) - cfg80211_process_deauth(wdev, buf, len); + cfg80211_process_deauth(wdev, buf, len, reconnect); else - cfg80211_process_disassoc(wdev, buf, len); + cfg80211_process_disassoc(wdev, buf, len, reconnect); } EXPORT_SYMBOL(cfg80211_tx_mlme_mgmt); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 8811a4b69f21..775d0c4d86c3 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -399,6 +399,18 @@ nl80211_unsol_bcast_probe_resp_policy[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX + .len = IEEE80211_MAX_DATA_LEN } }; +static const struct nla_policy +sar_specs_policy[NL80211_SAR_ATTR_SPECS_MAX + 1] = { + [NL80211_SAR_ATTR_SPECS_POWER] = { .type = NLA_S32 }, + [NL80211_SAR_ATTR_SPECS_RANGE_INDEX] = {.type = NLA_U32 }, +}; + +static const struct nla_policy +sar_policy[NL80211_SAR_ATTR_MAX + 1] = { + [NL80211_SAR_ATTR_TYPE] = NLA_POLICY_MAX(NLA_U32, NUM_NL80211_SAR_TYPE), + [NL80211_SAR_ATTR_SPECS] = NLA_POLICY_NESTED_ARRAY(sar_specs_policy), +}; + static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [0] = { .strict_start_type = NL80211_ATTR_HE_OBSS_PD }, [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, @@ -718,6 +730,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_SAE_PWE] = NLA_POLICY_RANGE(NLA_U8, NL80211_SAE_PWE_HUNT_AND_PECK, NL80211_SAE_PWE_BOTH), + [NL80211_ATTR_RECONNECT_REQUESTED] = { .type = NLA_REJECT }, + [NL80211_ATTR_SAR_SPEC] = NLA_POLICY_NESTED(sar_policy), }; /* policy for the key attributes */ @@ -2094,6 +2108,56 @@ fail: return -ENOBUFS; } +static int +nl80211_put_sar_specs(struct cfg80211_registered_device *rdev, + struct sk_buff *msg) +{ + struct nlattr *sar_capa, *specs, *sub_freq_range; + u8 num_freq_ranges; + int i; + + if (!rdev->wiphy.sar_capa) + return 
0; + + num_freq_ranges = rdev->wiphy.sar_capa->num_freq_ranges; + + sar_capa = nla_nest_start(msg, NL80211_ATTR_SAR_SPEC); + if (!sar_capa) + return -ENOSPC; + + if (nla_put_u32(msg, NL80211_SAR_ATTR_TYPE, rdev->wiphy.sar_capa->type)) + goto fail; + + specs = nla_nest_start(msg, NL80211_SAR_ATTR_SPECS); + if (!specs) + goto fail; + + /* report supported freq_ranges */ + for (i = 0; i < num_freq_ranges; i++) { + sub_freq_range = nla_nest_start(msg, i + 1); + if (!sub_freq_range) + goto fail; + + if (nla_put_u32(msg, NL80211_SAR_ATTR_SPECS_START_FREQ, + rdev->wiphy.sar_capa->freq_ranges[i].start_freq)) + goto fail; + + if (nla_put_u32(msg, NL80211_SAR_ATTR_SPECS_END_FREQ, + rdev->wiphy.sar_capa->freq_ranges[i].end_freq)) + goto fail; + + nla_nest_end(msg, sub_freq_range); + } + + nla_nest_end(msg, specs); + nla_nest_end(msg, sar_capa); + + return 0; +fail: + nla_nest_cancel(msg, sar_capa); + return -ENOBUFS; +} + struct nl80211_dump_wiphy_state { s64 filter_wiphy; long start; @@ -2343,6 +2407,8 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, CMD(set_multicast_to_unicast, SET_MULTICAST_TO_UNICAST); CMD(update_connect_params, UPDATE_CONNECT_PARAMS); CMD(update_ft_ies, UPDATE_FT_IES); + if (rdev->wiphy.sar_capa) + CMD(set_sar_specs, SET_SAR_SPECS); } #undef CMD @@ -2668,6 +2734,11 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, if (nl80211_put_tid_config_support(rdev, msg)) goto nla_put_failure; + state->split_start++; + break; + case 16: + if (nl80211_put_sar_specs(rdev, msg)) + goto nla_put_failure; /* done */ state->split_start = 0; @@ -4239,9 +4310,6 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info) if (err) return err; - if (key.idx < 0) - return -EINVAL; - if (info->attrs[NL80211_ATTR_MAC]) mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); @@ -4257,6 +4325,10 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info) key.type != NL80211_KEYTYPE_GROUP) return -EINVAL; + if (!cfg80211_valid_key_idx(rdev, key.idx, + key.type == NL80211_KEYTYPE_PAIRWISE)) + return -EINVAL; + if (!rdev->ops->del_key) return -EOPNOTSUPP; @@ -5017,6 +5089,8 @@ static void nl80211_check_ap_rate_selectors(struct cfg80211_ap_settings *params, params->vht_required = true; if (rates[2 + i] == BSS_MEMBERSHIP_SELECTOR_HE_PHY) params->he_required = true; + if (rates[2 + i] == BSS_MEMBERSHIP_SELECTOR_SAE_H2E) + params->sae_h2e_required = true; } } @@ -8241,12 +8315,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) } if (info->attrs[NL80211_ATTR_MEASUREMENT_DURATION]) { - if (!wiphy_ext_feature_isset(wiphy, - NL80211_EXT_FEATURE_SET_SCAN_DWELL)) { - err = -EOPNOTSUPP; - goto out_free; - } - request->duration = nla_get_u16(info->attrs[NL80211_ATTR_MEASUREMENT_DURATION]); request->duration_mandatory = @@ -11175,6 +11243,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) case NL80211_IFTYPE_P2P_DEVICE: if (!info->attrs[NL80211_ATTR_WIPHY_FREQ]) return -EINVAL; + break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_P2P_CLIENT: @@ -12644,7 +12713,7 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info) struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct nlattr *tb[NUM_NL80211_REKEY_DATA]; - struct cfg80211_gtk_rekey_data rekey_data; + struct cfg80211_gtk_rekey_data rekey_data = {}; int err; if (!info->attrs[NL80211_ATTR_REKEY_DATA]) @@ -14669,6 +14738,111 @@ static void 
nl80211_post_doit(const struct genl_ops *ops, struct sk_buff *skb, } } +static int nl80211_set_sar_sub_specs(struct cfg80211_registered_device *rdev, + struct cfg80211_sar_specs *sar_specs, + struct nlattr *spec[], int index) +{ + u32 range_index, i; + + if (!sar_specs || !spec) + return -EINVAL; + + if (!spec[NL80211_SAR_ATTR_SPECS_POWER] || + !spec[NL80211_SAR_ATTR_SPECS_RANGE_INDEX]) + return -EINVAL; + + range_index = nla_get_u32(spec[NL80211_SAR_ATTR_SPECS_RANGE_INDEX]); + + /* check if range_index exceeds num_freq_ranges */ + if (range_index >= rdev->wiphy.sar_capa->num_freq_ranges) + return -EINVAL; + + /* check if range_index duplicates */ + for (i = 0; i < index; i++) { + if (sar_specs->sub_specs[i].freq_range_index == range_index) + return -EINVAL; + } + + sar_specs->sub_specs[index].power = + nla_get_s32(spec[NL80211_SAR_ATTR_SPECS_POWER]); + + sar_specs->sub_specs[index].freq_range_index = range_index; + + return 0; +} + +static int nl80211_set_sar_specs(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct nlattr *spec[NL80211_SAR_ATTR_SPECS_MAX + 1]; + struct nlattr *tb[NL80211_SAR_ATTR_MAX + 1]; + struct cfg80211_sar_specs *sar_spec; + enum nl80211_sar_type type; + struct nlattr *spec_list; + u32 specs; + int rem, err; + + if (!rdev->wiphy.sar_capa || !rdev->ops->set_sar_specs) + return -EOPNOTSUPP; + + if (!info->attrs[NL80211_ATTR_SAR_SPEC]) + return -EINVAL; + + nla_parse_nested(tb, NL80211_SAR_ATTR_MAX, + info->attrs[NL80211_ATTR_SAR_SPEC], + NULL, NULL); + + if (!tb[NL80211_SAR_ATTR_TYPE] || !tb[NL80211_SAR_ATTR_SPECS]) + return -EINVAL; + + type = nla_get_u32(tb[NL80211_SAR_ATTR_TYPE]); + if (type != rdev->wiphy.sar_capa->type) + return -EINVAL; + + specs = 0; + nla_for_each_nested(spec_list, tb[NL80211_SAR_ATTR_SPECS], rem) + specs++; + + if (specs > rdev->wiphy.sar_capa->num_freq_ranges) + return -EINVAL; + + sar_spec = kzalloc(sizeof(*sar_spec) + + specs * sizeof(struct cfg80211_sar_sub_specs), + GFP_KERNEL); + if (!sar_spec) + return -ENOMEM; + + sar_spec->type = type; + specs = 0; + nla_for_each_nested(spec_list, tb[NL80211_SAR_ATTR_SPECS], rem) { + nla_parse_nested(spec, NL80211_SAR_ATTR_SPECS_MAX, + spec_list, NULL, NULL); + + switch (type) { + case NL80211_SAR_TYPE_POWER: + if (nl80211_set_sar_sub_specs(rdev, sar_spec, + spec, specs)) { + err = -EINVAL; + goto error; + } + break; + default: + err = -EINVAL; + goto error; + } + specs++; + } + + sar_spec->num_sub_specs = specs; + + rdev->cur_cmd_info = info; + err = rdev_set_sar_specs(rdev, sar_spec); + rdev->cur_cmd_info = NULL; +error: + kfree(sar_spec); + return err; +} + static const struct genl_ops nl80211_ops[] = { { .cmd = NL80211_CMD_GET_WIPHY, @@ -15522,6 +15696,14 @@ static const struct genl_small_ops nl80211_small_ops[] = { .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, + { + .cmd = NL80211_CMD_SET_SAR_SPECS, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + .doit = nl80211_set_sar_specs, + .flags = GENL_UNS_ADMIN_PERM, + .internal_flags = NL80211_FLAG_NEED_WIPHY | + NL80211_FLAG_NEED_RTNL, + }, }; static struct genl_family nl80211_fam __ro_after_init = { @@ -15857,7 +16039,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev, const u8 *buf, size_t len, enum nl80211_commands cmd, gfp_t gfp, int uapsd_queues, const u8 *req_ies, - size_t req_ies_len) + size_t req_ies_len, bool reconnect) { struct sk_buff *msg; void *hdr; @@ -15879,6 +16061,9 @@ static void 
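nl80211_set_sar_specs() above validates the netlink input (matching SAR type, in-range and non-duplicate freq_range_index, spec count bounded by the advertised capabilities) before handing a cfg80211_sar_specs to the driver. A hypothetical driver callback consuming it; drv_apply_txpower_limit() is invented for illustration:

static int drv_set_sar_specs(struct wiphy *wiphy,
			     struct cfg80211_sar_specs *sar)
{
	int i;

	if (sar->type != NL80211_SAR_TYPE_POWER)
		return -EINVAL;

	for (i = 0; i < sar->num_sub_specs; i++) {
		const struct cfg80211_sar_sub_specs *sub = &sar->sub_specs[i];

		/* freq_range_index was already range-checked and
		 * de-duplicated by nl80211_set_sar_sub_specs() above;
		 * power is the raw NL80211_SAR_ATTR_SPECS_POWER s32.
		 */
		drv_apply_txpower_limit(wiphy, sub->freq_range_index,
					sub->power);
	}

	return 0;
}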
nl80211_send_mlme_event(struct cfg80211_registered_device *rdev, nla_put(msg, NL80211_ATTR_REQ_IE, req_ies_len, req_ies))) goto nla_put_failure; + if (reconnect && nla_put_flag(msg, NL80211_ATTR_RECONNECT_REQUESTED)) + goto nla_put_failure; + if (uapsd_queues >= 0) { struct nlattr *nla_wmm = nla_nest_start_noflag(msg, NL80211_ATTR_STA_WME); @@ -15907,7 +16092,8 @@ void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev, size_t len, gfp_t gfp) { nl80211_send_mlme_event(rdev, netdev, buf, len, - NL80211_CMD_AUTHENTICATE, gfp, -1, NULL, 0); + NL80211_CMD_AUTHENTICATE, gfp, -1, NULL, 0, + false); } void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev, @@ -15917,23 +16103,25 @@ void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev, { nl80211_send_mlme_event(rdev, netdev, buf, len, NL80211_CMD_ASSOCIATE, gfp, uapsd_queues, - req_ies, req_ies_len); + req_ies, req_ies_len, false); } void nl80211_send_deauth(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, - size_t len, gfp_t gfp) + size_t len, bool reconnect, gfp_t gfp) { nl80211_send_mlme_event(rdev, netdev, buf, len, - NL80211_CMD_DEAUTHENTICATE, gfp, -1, NULL, 0); + NL80211_CMD_DEAUTHENTICATE, gfp, -1, NULL, 0, + reconnect); } void nl80211_send_disassoc(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *buf, - size_t len, gfp_t gfp) + size_t len, bool reconnect, gfp_t gfp) { nl80211_send_mlme_event(rdev, netdev, buf, len, - NL80211_CMD_DISASSOCIATE, gfp, -1, NULL, 0); + NL80211_CMD_DISASSOCIATE, gfp, -1, NULL, 0, + reconnect); } void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf, @@ -15964,7 +16152,7 @@ void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf, trace_cfg80211_rx_unprot_mlme_mgmt(dev, buf, len); nl80211_send_mlme_event(rdev, dev, buf, len, cmd, GFP_ATOMIC, -1, - NULL, 0); + NULL, 0, false); } EXPORT_SYMBOL(cfg80211_rx_unprot_mlme_mgmt); @@ -17065,7 +17253,7 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, struct cfg80211_chan_def *chandef, gfp_t gfp, enum nl80211_commands notif, - u8 count) + u8 count, bool quiet) { struct sk_buff *msg; void *hdr; @@ -17086,9 +17274,13 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, if (nl80211_send_chandef(msg, chandef)) goto nla_put_failure; - if ((notif == NL80211_CMD_CH_SWITCH_STARTED_NOTIFY) && - (nla_put_u32(msg, NL80211_ATTR_CH_SWITCH_COUNT, count))) + if (notif == NL80211_CMD_CH_SWITCH_STARTED_NOTIFY) { + if (nla_put_u32(msg, NL80211_ATTR_CH_SWITCH_COUNT, count)) + goto nla_put_failure; + if (quiet && + nla_put_flag(msg, NL80211_ATTR_CH_SWITCH_BLOCK_TX)) goto nla_put_failure; + } genlmsg_end(msg, hdr); @@ -17121,13 +17313,13 @@ void cfg80211_ch_switch_notify(struct net_device *dev, cfg80211_sched_dfs_chan_update(rdev); nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL, - NL80211_CMD_CH_SWITCH_NOTIFY, 0); + NL80211_CMD_CH_SWITCH_NOTIFY, 0, false); } EXPORT_SYMBOL(cfg80211_ch_switch_notify); void cfg80211_ch_switch_started_notify(struct net_device *dev, struct cfg80211_chan_def *chandef, - u8 count) + u8 count, bool quiet) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; @@ -17136,7 +17328,8 @@ void cfg80211_ch_switch_started_notify(struct net_device *dev, trace_cfg80211_ch_switch_started_notify(dev, chandef); nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL, - NL80211_CMD_CH_SWITCH_STARTED_NOTIFY, count); + NL80211_CMD_CH_SWITCH_STARTED_NOTIFY, + 
count, quiet); } EXPORT_SYMBOL(cfg80211_ch_switch_started_notify); diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index d3e8e426c486..a3f387770f1b 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * Portions of this file - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018, 2020 Intel Corporation */ #ifndef __NET_WIRELESS_NL80211_H #define __NET_WIRELESS_NL80211_H @@ -69,10 +69,12 @@ void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev, const u8 *req_ies, size_t req_ies_len); void nl80211_send_deauth(struct cfg80211_registered_device *rdev, struct net_device *netdev, - const u8 *buf, size_t len, gfp_t gfp); + const u8 *buf, size_t len, + bool reconnect, gfp_t gfp); void nl80211_send_disassoc(struct cfg80211_registered_device *rdev, struct net_device *netdev, - const u8 *buf, size_t len, gfp_t gfp); + const u8 *buf, size_t len, + bool reconnect, gfp_t gfp); void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *addr, gfp_t gfp); diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index 5e2f349c92a8..8b1358d04ca2 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -1346,4 +1346,16 @@ static inline int rdev_reset_tid_config(struct cfg80211_registered_device *rdev, return ret; } +static inline int rdev_set_sar_specs(struct cfg80211_registered_device *rdev, + struct cfg80211_sar_specs *sar) +{ + int ret; + + trace_rdev_set_sar_specs(&rdev->wiphy, sar); + ret = rdev->ops->set_sar_specs(&rdev->wiphy, sar); + trace_rdev_return_int(&rdev->wiphy, ret); + + return ret; +} + #endif /* __CFG80211_RDEV_OPS */ diff --git a/net/wireless/reg.c b/net/wireless/reg.c index a04fdfb35f07..bb72447ad960 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -1616,7 +1616,7 @@ static const struct ieee80211_reg_rule * __freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 min_bw) { const struct ieee80211_regdomain *regd = reg_get_regdomain(wiphy); - const u32 bws[] = {0, 1, 2, 4, 5, 8, 10, 16, 20}; + static const u32 bws[] = {0, 1, 2, 4, 5, 8, 10, 16, 20}; const struct ieee80211_reg_rule *reg_rule; int i = ARRAY_SIZE(bws) - 1; u32 bw; @@ -2547,6 +2547,7 @@ static void handle_band_custom(struct wiphy *wiphy, void wiphy_apply_custom_regulatory(struct wiphy *wiphy, const struct ieee80211_regdomain *regd) { + const struct ieee80211_regdomain *new_regd, *tmp; enum nl80211_band band; unsigned int bands_set = 0; @@ -2566,6 +2567,13 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy, * on your device's supported bands. 
*/ WARN_ON(!bands_set); + new_regd = reg_copy_regd(regd); + if (IS_ERR(new_regd)) + return; + + tmp = get_wiphy_regdom(wiphy); + rcu_assign_pointer(wiphy->regd, new_regd); + rcu_free_regdom(tmp); } EXPORT_SYMBOL(wiphy_apply_custom_regulatory); diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 3409f37d838b..1b7fec3b53cd 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -726,7 +726,7 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev) int n_channels, count = 0, err; struct cfg80211_scan_request *request, *rdev_req = rdev->scan_req; LIST_HEAD(coloc_ap_list); - bool need_scan_psc; + bool need_scan_psc = true; const struct ieee80211_sband_iftype_data *iftd; rdev_req->scan_6ghz = true; @@ -770,20 +770,18 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev) (void *)&request->channels[n_channels]; /* - * PSC channels should not be scanned if all the reported co-located APs - * are indicating that all APs in the same ESS are co-located + * PSC channels should not be scanned in case of direct scan with 1 SSID + * and at least one of the reported co-located APs with same SSID + * indicating that all APs in the same ESS are co-located */ - if (count) { - need_scan_psc = false; - + if (count && request->n_ssids == 1 && request->ssids[0].ssid_len) { list_for_each_entry(ap, &coloc_ap_list, list) { - if (!ap->colocated_ess) { - need_scan_psc = true; + if (ap->colocated_ess && + cfg80211_find_ssid_match(ap, request)) { + need_scan_psc = false; break; } } - } else { - need_scan_psc = true; } /* @@ -1901,6 +1899,9 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, tmp.pub.beacon_interval = beacon_interval; tmp.pub.capability = capability; tmp.ts_boottime = data->boottime_ns; + tmp.parent_tsf = data->parent_tsf; + ether_addr_copy(tmp.parent_bssid, data->parent_bssid); + if (non_tx_data) { tmp.pub.transmitted_bss = non_tx_data->tx_bss; ts = bss_from_pub(non_tx_data->tx_bss)->ts; diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 817c6fef13be..76b777d5903f 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -2679,19 +2679,23 @@ DEFINE_EVENT(netdev_frame_event, cfg80211_rx_mlme_mgmt, ); TRACE_EVENT(cfg80211_tx_mlme_mgmt, - TP_PROTO(struct net_device *netdev, const u8 *buf, int len), - TP_ARGS(netdev, buf, len), + TP_PROTO(struct net_device *netdev, const u8 *buf, int len, + bool reconnect), + TP_ARGS(netdev, buf, len, reconnect), TP_STRUCT__entry( NETDEV_ENTRY __dynamic_array(u8, frame, len) + __field(int, reconnect) ), TP_fast_assign( NETDEV_ASSIGN; memcpy(__get_dynamic_array(frame), buf, len); + __entry->reconnect = reconnect; ), - TP_printk(NETDEV_PR_FMT ", ftype:0x%.2x", + TP_printk(NETDEV_PR_FMT ", ftype:0x%.2x reconnect:%d", NETDEV_PR_ARG, - le16_to_cpup((__le16 *)__get_dynamic_array(frame))) + le16_to_cpup((__le16 *)__get_dynamic_array(frame)), + __entry->reconnect) ); DECLARE_EVENT_CLASS(netdev_mac_evt, @@ -3542,6 +3546,25 @@ TRACE_EVENT(rdev_reset_tid_config, TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT ", tids: 0x%x", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->tids) ); + +TRACE_EVENT(rdev_set_sar_specs, + TP_PROTO(struct wiphy *wiphy, struct cfg80211_sar_specs *sar), + TP_ARGS(wiphy, sar), + TP_STRUCT__entry( + WIPHY_ENTRY + __field(u16, type) + __field(u16, num) + ), + TP_fast_assign( + WIPHY_ASSIGN; + __entry->type = sar->type; + __entry->num = sar->num_sub_specs; + + ), + TP_printk(WIPHY_PR_FMT ", Set type:%d, num_specs:%d", + WIPHY_PR_ARG, __entry->type, __entry->num) +); + 
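wiphy_apply_custom_regulatory() above now publishes a heap copy of the driver's regdomain into wiphy->regd. That matters because drivers typically pass a statically allocated template, while later regulatory updates free the old wiphy->regd through rcu_free_regdom(); without the copy, the core could end up freeing driver .rodata. A hypothetical driver-side usage, regdomain contents invented:

static const struct ieee80211_regdomain drv_custom_regd = {
	.n_reg_rules = 1,
	.alpha2 = "99",		/* conventional "custom world" code */
	.reg_rules = {
		/* 2.4 GHz channels 1-11, up to 40 MHz, 20 dBm EIRP */
		REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0),
	},
};

	/* in the driver's wiphy setup: */
	wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
	wiphy_apply_custom_regulatory(wiphy, &drv_custom_regd);
	/* the core now owns its own copy in wiphy->regd */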
#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */ #undef TRACE_INCLUDE_PATH diff --git a/net/wireless/util.c b/net/wireless/util.c index 5af88037f1fb..b4acc805114b 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -272,18 +272,53 @@ bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher) return false; } -int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, - struct key_params *params, int key_idx, - bool pairwise, const u8 *mac_addr) +static bool +cfg80211_igtk_cipher_supported(struct cfg80211_registered_device *rdev) { - int max_key_idx = 5; + struct wiphy *wiphy = &rdev->wiphy; + int i; + + for (i = 0; i < wiphy->n_cipher_suites; i++) { + switch (wiphy->cipher_suites[i]) { + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + return true; + } + } - if (wiphy_ext_feature_isset(&rdev->wiphy, - NL80211_EXT_FEATURE_BEACON_PROTECTION) || - wiphy_ext_feature_isset(&rdev->wiphy, - NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT)) + return false; +} + +bool cfg80211_valid_key_idx(struct cfg80211_registered_device *rdev, + int key_idx, bool pairwise) +{ + int max_key_idx; + + if (pairwise) + max_key_idx = 3; + else if (wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_BEACON_PROTECTION) || + wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT)) max_key_idx = 7; + else if (cfg80211_igtk_cipher_supported(rdev)) + max_key_idx = 5; + else + max_key_idx = 3; + if (key_idx < 0 || key_idx > max_key_idx) + return false; + + return true; +} + +int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, + struct key_params *params, int key_idx, + bool pairwise, const u8 *mac_addr) +{ + if (!cfg80211_valid_key_idx(rdev, key_idx, pairwise)) return -EINVAL; if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) @@ -335,6 +370,7 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, case WLAN_CIPHER_SUITE_WEP104: if (key_idx > 3) return -EINVAL; + break; default: break; } diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index b84a345b2653..fd9ad74972fb 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -1421,39 +1421,78 @@ static int cfg80211_wext_siwpmksa(struct net_device *dev, } } +#define DEFINE_WEXT_COMPAT_STUB(func, type) \ + static int __ ## func(struct net_device *dev, \ + struct iw_request_info *info, \ + union iwreq_data *wrqu, \ + char *extra) \ + { \ + return func(dev, info, (type *)wrqu, extra); \ + } + +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwname, char) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwfreq, struct iw_freq) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwfreq, struct iw_freq) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwmode, u32) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwmode, u32) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwrange, struct iw_point) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwap, struct sockaddr) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwap, struct sockaddr) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwmlme, struct iw_point) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwscan, struct iw_point) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwessid, struct iw_point) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwessid, struct iw_point) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwrate, struct iw_param) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwrate, struct iw_param) 
+DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwrts, struct iw_param) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwrts, struct iw_param) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwfrag, struct iw_param) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwfrag, struct iw_param) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwretry, struct iw_param) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwretry, struct iw_param) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwencode, struct iw_point) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwencode, struct iw_point) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwpower, struct iw_param) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwpower, struct iw_param) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwgenie, struct iw_point) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_giwauth, struct iw_param) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwauth, struct iw_param) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwencodeext, struct iw_point) +DEFINE_WEXT_COMPAT_STUB(cfg80211_wext_siwpmksa, struct iw_point) + static const iw_handler cfg80211_handlers[] = { - [IW_IOCTL_IDX(SIOCGIWNAME)] = (iw_handler) cfg80211_wext_giwname, - [IW_IOCTL_IDX(SIOCSIWFREQ)] = (iw_handler) cfg80211_wext_siwfreq, - [IW_IOCTL_IDX(SIOCGIWFREQ)] = (iw_handler) cfg80211_wext_giwfreq, - [IW_IOCTL_IDX(SIOCSIWMODE)] = (iw_handler) cfg80211_wext_siwmode, - [IW_IOCTL_IDX(SIOCGIWMODE)] = (iw_handler) cfg80211_wext_giwmode, - [IW_IOCTL_IDX(SIOCGIWRANGE)] = (iw_handler) cfg80211_wext_giwrange, - [IW_IOCTL_IDX(SIOCSIWAP)] = (iw_handler) cfg80211_wext_siwap, - [IW_IOCTL_IDX(SIOCGIWAP)] = (iw_handler) cfg80211_wext_giwap, - [IW_IOCTL_IDX(SIOCSIWMLME)] = (iw_handler) cfg80211_wext_siwmlme, - [IW_IOCTL_IDX(SIOCSIWSCAN)] = (iw_handler) cfg80211_wext_siwscan, - [IW_IOCTL_IDX(SIOCGIWSCAN)] = (iw_handler) cfg80211_wext_giwscan, - [IW_IOCTL_IDX(SIOCSIWESSID)] = (iw_handler) cfg80211_wext_siwessid, - [IW_IOCTL_IDX(SIOCGIWESSID)] = (iw_handler) cfg80211_wext_giwessid, - [IW_IOCTL_IDX(SIOCSIWRATE)] = (iw_handler) cfg80211_wext_siwrate, - [IW_IOCTL_IDX(SIOCGIWRATE)] = (iw_handler) cfg80211_wext_giwrate, - [IW_IOCTL_IDX(SIOCSIWRTS)] = (iw_handler) cfg80211_wext_siwrts, - [IW_IOCTL_IDX(SIOCGIWRTS)] = (iw_handler) cfg80211_wext_giwrts, - [IW_IOCTL_IDX(SIOCSIWFRAG)] = (iw_handler) cfg80211_wext_siwfrag, - [IW_IOCTL_IDX(SIOCGIWFRAG)] = (iw_handler) cfg80211_wext_giwfrag, - [IW_IOCTL_IDX(SIOCSIWTXPOW)] = (iw_handler) cfg80211_wext_siwtxpower, - [IW_IOCTL_IDX(SIOCGIWTXPOW)] = (iw_handler) cfg80211_wext_giwtxpower, - [IW_IOCTL_IDX(SIOCSIWRETRY)] = (iw_handler) cfg80211_wext_siwretry, - [IW_IOCTL_IDX(SIOCGIWRETRY)] = (iw_handler) cfg80211_wext_giwretry, - [IW_IOCTL_IDX(SIOCSIWENCODE)] = (iw_handler) cfg80211_wext_siwencode, - [IW_IOCTL_IDX(SIOCGIWENCODE)] = (iw_handler) cfg80211_wext_giwencode, - [IW_IOCTL_IDX(SIOCSIWPOWER)] = (iw_handler) cfg80211_wext_siwpower, - [IW_IOCTL_IDX(SIOCGIWPOWER)] = (iw_handler) cfg80211_wext_giwpower, - [IW_IOCTL_IDX(SIOCSIWGENIE)] = (iw_handler) cfg80211_wext_siwgenie, - [IW_IOCTL_IDX(SIOCSIWAUTH)] = (iw_handler) cfg80211_wext_siwauth, - [IW_IOCTL_IDX(SIOCGIWAUTH)] = (iw_handler) cfg80211_wext_giwauth, - [IW_IOCTL_IDX(SIOCSIWENCODEEXT)]= (iw_handler) cfg80211_wext_siwencodeext, - [IW_IOCTL_IDX(SIOCSIWPMKSA)] = (iw_handler) cfg80211_wext_siwpmksa, + [IW_IOCTL_IDX(SIOCGIWNAME)] = __cfg80211_wext_giwname, + [IW_IOCTL_IDX(SIOCSIWFREQ)] = __cfg80211_wext_siwfreq, + [IW_IOCTL_IDX(SIOCGIWFREQ)] = __cfg80211_wext_giwfreq, + [IW_IOCTL_IDX(SIOCSIWMODE)] = __cfg80211_wext_siwmode, + [IW_IOCTL_IDX(SIOCGIWMODE)] = __cfg80211_wext_giwmode, + 
[IW_IOCTL_IDX(SIOCGIWRANGE)] = __cfg80211_wext_giwrange, + [IW_IOCTL_IDX(SIOCSIWAP)] = __cfg80211_wext_siwap, + [IW_IOCTL_IDX(SIOCGIWAP)] = __cfg80211_wext_giwap, + [IW_IOCTL_IDX(SIOCSIWMLME)] = __cfg80211_wext_siwmlme, + [IW_IOCTL_IDX(SIOCSIWSCAN)] = cfg80211_wext_siwscan, + [IW_IOCTL_IDX(SIOCGIWSCAN)] = __cfg80211_wext_giwscan, + [IW_IOCTL_IDX(SIOCSIWESSID)] = __cfg80211_wext_siwessid, + [IW_IOCTL_IDX(SIOCGIWESSID)] = __cfg80211_wext_giwessid, + [IW_IOCTL_IDX(SIOCSIWRATE)] = __cfg80211_wext_siwrate, + [IW_IOCTL_IDX(SIOCGIWRATE)] = __cfg80211_wext_giwrate, + [IW_IOCTL_IDX(SIOCSIWRTS)] = __cfg80211_wext_siwrts, + [IW_IOCTL_IDX(SIOCGIWRTS)] = __cfg80211_wext_giwrts, + [IW_IOCTL_IDX(SIOCSIWFRAG)] = __cfg80211_wext_siwfrag, + [IW_IOCTL_IDX(SIOCGIWFRAG)] = __cfg80211_wext_giwfrag, + [IW_IOCTL_IDX(SIOCSIWTXPOW)] = cfg80211_wext_siwtxpower, + [IW_IOCTL_IDX(SIOCGIWTXPOW)] = cfg80211_wext_giwtxpower, + [IW_IOCTL_IDX(SIOCSIWRETRY)] = __cfg80211_wext_siwretry, + [IW_IOCTL_IDX(SIOCGIWRETRY)] = __cfg80211_wext_giwretry, + [IW_IOCTL_IDX(SIOCSIWENCODE)] = __cfg80211_wext_siwencode, + [IW_IOCTL_IDX(SIOCGIWENCODE)] = __cfg80211_wext_giwencode, + [IW_IOCTL_IDX(SIOCSIWPOWER)] = __cfg80211_wext_siwpower, + [IW_IOCTL_IDX(SIOCGIWPOWER)] = __cfg80211_wext_giwpower, + [IW_IOCTL_IDX(SIOCSIWGENIE)] = __cfg80211_wext_siwgenie, + [IW_IOCTL_IDX(SIOCSIWAUTH)] = __cfg80211_wext_siwauth, + [IW_IOCTL_IDX(SIOCGIWAUTH)] = __cfg80211_wext_giwauth, + [IW_IOCTL_IDX(SIOCSIWENCODEEXT)]= __cfg80211_wext_siwencodeext, + [IW_IOCTL_IDX(SIOCSIWPMKSA)] = __cfg80211_wext_siwpmksa, }; const struct iw_handler_def cfg80211_wext_handler = { diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c index f92073f3cb11..57a81100c5da 100644 --- a/net/x25/x25_link.c +++ b/net/x25/x25_link.c @@ -58,11 +58,6 @@ static inline void x25_stop_t20timer(struct x25_neigh *nb) del_timer(&nb->t20timer); } -static inline int x25_t20timer_pending(struct x25_neigh *nb) -{ - return timer_pending(&nb->t20timer); -} - /* * This handles all restart and diagnostic frames. 
*/ @@ -70,17 +65,20 @@ void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb, unsigned short frametype) { struct sk_buff *skbn; - int confirm; switch (frametype) { case X25_RESTART_REQUEST: switch (nb->state) { + case X25_LINK_STATE_0: + /* This can happen when the x25 module just gets loaded + * and doesn't know layer 2 has already connected + */ + nb->state = X25_LINK_STATE_3; + x25_transmit_restart_confirmation(nb); + break; case X25_LINK_STATE_2: - confirm = !x25_t20timer_pending(nb); x25_stop_t20timer(nb); nb->state = X25_LINK_STATE_3; - if (confirm) - x25_transmit_restart_confirmation(nb); break; case X25_LINK_STATE_3: /* clear existing virtual calls */ @@ -94,13 +92,8 @@ void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb, case X25_RESTART_CONFIRMATION: switch (nb->state) { case X25_LINK_STATE_2: - if (x25_t20timer_pending(nb)) { - x25_stop_t20timer(nb); - nb->state = X25_LINK_STATE_3; - } else { - x25_transmit_restart_request(nb); - x25_start_t20timer(nb); - } + x25_stop_t20timer(nb); + nb->state = X25_LINK_STATE_3; break; case X25_LINK_STATE_3: /* clear existing virtual calls */ diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 56c46e5f57bc..310cfc68875a 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -212,6 +212,14 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len, return 0; } +static bool xsk_tx_writeable(struct xdp_sock *xs) +{ + if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2) + return false; + + return true; +} + static bool xsk_is_bound(struct xdp_sock *xs) { if (READ_ONCE(xs->state) == XSK_BOUND) { @@ -298,7 +306,8 @@ void xsk_tx_release(struct xsk_buff_pool *pool) rcu_read_lock(); list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) { __xskq_cons_release(xs->tx); - xs->sk.sk_write_space(&xs->sk); + if (xsk_tx_writeable(xs)) + xs->sk.sk_write_space(&xs->sk); } rcu_read_unlock(); } @@ -495,7 +504,8 @@ static int xsk_generic_xmit(struct sock *sk) out: if (sent_frame) - sk->sk_write_space(sk); + if (xsk_tx_writeable(xs)) + sk->sk_write_space(sk); mutex_unlock(&xs->mutex); return err; @@ -577,11 +587,13 @@ static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int fl static __poll_t xsk_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait) { - __poll_t mask = datagram_poll(file, sock, wait); + __poll_t mask = 0; struct sock *sk = sock->sk; struct xdp_sock *xs = xdp_sk(sk); struct xsk_buff_pool *pool; + sock_poll_wait(file, sock, wait); + if (unlikely(!xsk_is_bound(xs))) return mask; @@ -597,7 +609,7 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock, if (xs->rx && !xskq_prod_is_empty(xs->rx)) mask |= EPOLLIN | EPOLLRDNORM; - if (xs->tx && !xskq_cons_is_full(xs->tx)) + if (xs->tx && xsk_tx_writeable(xs)) mask |= EPOLLOUT | EPOLLWRNORM; return mask; diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c index 556d82d03687..67a4494d63b6 100644 --- a/net/xdp/xsk_buff_pool.c +++ b/net/xdp/xsk_buff_pool.c @@ -174,6 +174,7 @@ static int __xp_assign_dev(struct xsk_buff_pool *pool, if (!pool->dma_pages) { WARN(1, "Driver did not DMA map zero-copy buffers"); + err = -EINVAL; goto err_unreg_xsk; } pool->umem->zc = true; diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h index b936c46b1e16..4a9663aa7afe 100644 --- a/net/xdp/xsk_queue.h +++ b/net/xdp/xsk_queue.h @@ -307,6 +307,12 @@ static inline bool xskq_cons_is_full(struct xsk_queue *q) q->nentries; } +static inline u32 xskq_cons_present_entries(struct xsk_queue *q) +{ + /* No barriers needed since data 
is not accessed */ + return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer); +} + /* Functions for producers */ static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max) diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c index e28f0c9ecd6a..d8e8a11ca845 100644 --- a/net/xfrm/xfrm_compat.c +++ b/net/xfrm/xfrm_compat.c @@ -234,6 +234,7 @@ static int xfrm_xlate64_attr(struct sk_buff *dst, const struct nlattr *src) case XFRMA_PAD: /* Ignore */ return 0; + case XFRMA_UNSPEC: case XFRMA_ALG_AUTH: case XFRMA_ALG_CRYPT: case XFRMA_ALG_COMP: @@ -387,7 +388,7 @@ static int xfrm_attr_cpy32(void *dst, size_t *pos, const struct nlattr *src, memcpy(nla, src, nla_attr_size(copy_len)); nla->nla_len = nla_attr_size(payload); - *pos += nla_attr_size(payload); + *pos += nla_attr_size(copy_len); nlmsg->nlmsg_len += nla->nla_len; memset(dst + *pos, 0, payload - copy_len); @@ -563,7 +564,7 @@ static struct nlmsghdr *xfrm_user_rcv_msg_compat(const struct nlmsghdr *h32, return NULL; len += NLMSG_HDRLEN; - h64 = kvmalloc(len, GFP_KERNEL | __GFP_ZERO); + h64 = kvmalloc(len, GFP_KERNEL); if (!h64) return ERR_PTR(-ENOMEM); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index a77da7aae6fe..2f1517827995 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -2382,8 +2382,10 @@ int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, int optlen) if (in_compat_syscall()) { struct xfrm_translator *xtr = xfrm_get_translator(); - if (!xtr) + if (!xtr) { + kfree(data); return -EOPNOTSUPP; + } err = xtr->xlate_user_policy_sockptr(&data, optlen); xfrm_put_translator(xtr); diff --git a/scripts/Makefile.build b/scripts/Makefile.build index ae647379b579..4c058f12dd73 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -252,6 +252,9 @@ objtool_dep = $(objtool_obj) \ ifdef CONFIG_TRIM_UNUSED_KSYMS cmd_gen_ksymdeps = \ $(CONFIG_SHELL) $(srctree)/scripts/gen_ksymdeps.sh $@ >> $(dot-target).cmd + +# List module undefined symbols +undefined_syms = $(NM) $< | $(AWK) '$$1 == "U" { printf("%s%s", x++ ? 
" " : "", $$2) }'; endif define rule_cc_o_c @@ -271,13 +274,6 @@ define rule_as_o_S $(call cmd,modversions_S) endef -# List module undefined symbols (or empty line if not enabled) -ifdef CONFIG_TRIM_UNUSED_KSYMS -cmd_undef_syms = $(NM) $< | sed -n 's/^ *U //p' | xargs echo -else -cmd_undef_syms = echo -endif - # Built-in and composite module parts $(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_dep) FORCE $(call if_changed_rule,cc_o_c) @@ -285,7 +281,7 @@ $(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_dep) FORCE cmd_mod = { \ echo $(if $($*-objs)$($*-y)$($*-m), $(addprefix $(obj)/, $($*-objs) $($*-y) $($*-m)), $(@:.mod=.o)); \ - $(cmd_undef_syms); \ + $(undefined_syms) echo; \ } > $@ $(obj)/%.mod: $(obj)/%.o FORCE diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn index 95e4cdb94fe9..6baee1200615 100644 --- a/scripts/Makefile.extrawarn +++ b/scripts/Makefile.extrawarn @@ -60,7 +60,6 @@ endif # ifneq ($(findstring 2, $(KBUILD_EXTRA_WARN)),) -KBUILD_CFLAGS += -Wcast-align KBUILD_CFLAGS += -Wdisabled-optimization KBUILD_CFLAGS += -Wnested-externs KBUILD_CFLAGS += -Wshadow @@ -80,6 +79,7 @@ endif ifneq ($(findstring 3, $(KBUILD_EXTRA_WARN)),) KBUILD_CFLAGS += -Wbad-function-cast +KBUILD_CFLAGS += -Wcast-align KBUILD_CFLAGS += -Wcast-qual KBUILD_CFLAGS += -Wconversion KBUILD_CFLAGS += -Wpacked diff --git a/scripts/lld-version.sh b/scripts/lld-version.sh new file mode 100755 index 000000000000..d70edb4d8a4f --- /dev/null +++ b/scripts/lld-version.sh @@ -0,0 +1,20 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 +# +# Usage: $ ./scripts/lld-version.sh ld.lld +# +# Print the linker version of `ld.lld' in a 5 or 6-digit form +# such as `100001' for ld.lld 10.0.1 etc. + +linker_string="$($* --version)" + +if ! ( echo $linker_string | grep -q LLD ); then + echo 0 + exit 1 +fi + +VERSION=$(echo $linker_string | cut -d ' ' -f 2) +MAJOR=$(echo $VERSION | cut -d . -f 1) +MINOR=$(echo $VERSION | cut -d . -f 2) +PATCHLEVEL=$(echo $VERSION | cut -d . -f 3) +printf "%d%02d%02d\\n" $MAJOR $MINOR $PATCHLEVEL diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c index 27007c18e754..e377f52dbfa3 100644 --- a/scripts/mod/devicetable-offsets.c +++ b/scripts/mod/devicetable-offsets.c @@ -243,5 +243,8 @@ int main(void) DEVID(mhi_device_id); DEVID_FIELD(mhi_device_id, chan); + DEVID(auxiliary_device_id); + DEVID_FIELD(auxiliary_device_id, name); + return 0; } diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 2417dd1dee33..fb4827027536 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -1364,6 +1364,13 @@ static int do_mhi_entry(const char *filename, void *symval, char *alias) { DEF_FIELD_ADDR(symval, mhi_device_id, chan); sprintf(alias, MHI_DEVICE_MODALIAS_FMT, *chan); + return 1; +} + +static int do_auxiliary_entry(const char *filename, void *symval, char *alias) +{ + DEF_FIELD_ADDR(symval, auxiliary_device_id, name); + sprintf(alias, AUXILIARY_MODULE_PREFIX "%s", *name); return 1; } @@ -1442,6 +1449,7 @@ static const struct devtable devtable[] = { {"tee", SIZE_tee_client_device_id, do_tee_entry}, {"wmi", SIZE_wmi_device_id, do_wmi_entry}, {"mhi", SIZE_mhi_device_id, do_mhi_entry}, + {"auxiliary", SIZE_auxiliary_device_id, do_auxiliary_entry}, }; /* Create MODULE_ALIAS() statements. 
diff --git a/tools/arch/x86/include/asm/insn.h b/tools/arch/x86/include/asm/insn.h index 568854b14d0a..52c6262e6bfd 100644 --- a/tools/arch/x86/include/asm/insn.h +++ b/tools/arch/x86/include/asm/insn.h @@ -201,6 +201,21 @@ static inline int insn_offset_immediate(struct insn *insn) return insn_offset_displacement(insn) + insn->displacement.nbytes; } +/** + * for_each_insn_prefix() -- Iterate prefixes in the instruction + * @insn: Pointer to struct insn. + * @idx: Index storage. + * @prefix: Prefix byte. + * + * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix + * and the index is stored in @idx (note that this @idx is just for a cursor, + * do not change it.) + * Since prefixes.nbytes can be bigger than 4 if some prefixes + * are repeated, it cannot be used for looping over the prefixes. + */ +#define for_each_insn_prefix(insn, idx, prefix) \ + for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++) + #define POP_SS_OPCODE 0x1f #define MOV_SREG_OPCODE 0x8e diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c index df7d8ec76036..477e55d59c34 100644 --- a/tools/bpf/bpftool/pids.c +++ b/tools/bpf/bpftool/pids.c @@ -89,9 +89,9 @@ libbpf_print_none(__maybe_unused enum libbpf_print_level level, int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type) { - char buf[4096]; - struct pid_iter_bpf *skel; struct pid_iter_entry *e; + char buf[4096 / sizeof(*e) * sizeof(*e)]; + struct pid_iter_bpf *skel; int err, ret, fd = -1, i; libbpf_print_fn_t default_print; diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 1233f14f659f..7e9931b6c735 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3977,8 +3977,8 @@ union bpf_attr { FN(seq_printf_btf), \ FN(skb_cgroup_classid), \ FN(redirect_neigh), \ - FN(bpf_per_cpu_ptr), \ - FN(bpf_this_cpu_ptr), \ + FN(per_cpu_ptr), \ + FN(this_cpu_ptr), \ FN(redirect_peer), \ FN(task_storage_get), \ FN(task_storage_delete), \ diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c index 5c6522c89af1..98537ff2679e 100644 --- a/tools/lib/bpf/ringbuf.c +++ b/tools/lib/bpf/ringbuf.c @@ -278,7 +278,7 @@ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms) err = ringbuf_process_ring(ring); if (err < 0) return err; - res += cnt; + res += err; } return cnt < 0 ? 
-errno : res; } diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index cb16d2aac51c..54188ee16c48 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -2040,7 +2040,7 @@ sub reboot_to { if ($reboot_type eq "grub") { run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'"; - } elsif ($reboot_type eq "grub2") { + } elsif (($reboot_type eq "grub2") or ($reboot_type eq "grub2bls")) { run_ssh "$grub_reboot $grub_number"; } elsif ($reboot_type eq "syslinux") { run_ssh "$syslinux --once \\\"$syslinux_label\\\" $syslinux_path"; diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c index 52414058a627..5861446d0777 100644 --- a/tools/testing/selftests/bpf/prog_tests/align.c +++ b/tools/testing/selftests/bpf/prog_tests/align.c @@ -456,10 +456,10 @@ static struct bpf_align_test tests[] = { */ {7, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"}, /* Checked s>=0 */ - {9, "R5=inv(id=0,umin_value=2,umax_value=9223372034707292158,var_off=(0x2; 0x7fffffff7ffffffc)"}, + {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, /* packet pointer + nonnegative (4n+2) */ - {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372034707292158,var_off=(0x2; 0x7fffffff7ffffffc)"}, - {13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372034707292158,var_off=(0x2; 0x7fffffff7ffffffc)"}, + {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, + {13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine. * We checked the bounds, but it might have been able * to overflow if the packet pointer started in the @@ -467,7 +467,7 @@ static struct bpf_align_test tests[] = { * So we did not get a 'range' on R6, and the access * attempt will fail. 
*/ - {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372034707292158,var_off=(0x2; 0x7fffffff7ffffffc)"}, + {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, } }, { diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c index c1650548433c..fddbc5db5d6a 100644 --- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c +++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c @@ -217,9 +217,15 @@ void test_ringbuf(void) if (CHECK(err, "join_bg", "err %d\n", err)) goto cleanup; - if (CHECK(bg_ret != 1, "bg_ret", "epoll_wait result: %ld", bg_ret)) + if (CHECK(bg_ret <= 0, "bg_ret", "epoll_wait result: %ld", bg_ret)) goto cleanup; + /* due to timing variations, there could still be non-notified + * samples, so consume them here to collect all the samples + */ + err = ring_buffer__consume(ringbuf); + CHECK(err < 0, "rb_consume", "failed: %d\b", err); + /* 3 rounds, 2 samples each */ cnt = atomic_xchg(&sample_cnt, 0); CHECK(cnt != 6, "cnt", "exp %d samples, got %d\n", 6, cnt); diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c index 78e450609803..d37161e59bb2 100644 --- a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c @@ -81,7 +81,7 @@ void test_ringbuf_multi(void) /* poll for samples, should get 2 ringbufs back */ err = ring_buffer__poll(ringbuf, -1); - if (CHECK(err != 4, "poll_res", "expected 4 records, got %d\n", err)) + if (CHECK(err != 2, "poll_res", "expected 2 records, got %d\n", err)) goto cleanup; /* expect extra polling to return nothing */ diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py index 43c9cda199b8..b99bb8ed3ed4 100755 --- a/tools/testing/selftests/bpf/test_offload.py +++ b/tools/testing/selftests/bpf/test_offload.py @@ -184,9 +184,7 @@ def bpftool_prog_list(expected=None, ns=""): def bpftool_map_list(expected=None, ns=""): _, maps = bpftool("map show", JSON=True, ns=ns, fail=True) # Remove the base maps - for m in base_maps: - if m in maps: - maps.remove(m) + maps = [m for m in maps if m not in base_maps and m.get('name') not in base_map_names] if expected is not None: if len(maps) != expected: fail(True, "%d BPF maps loaded, expected %d" % @@ -716,13 +714,11 @@ def test_multi_prog(simdev, sim, obj, modename, modeid): fail(ret == 0, "Replaced one of programs without -force") check_extack(err, "XDP program already attached.", args) - if modename == "" or modename == "drv": - othermode = "" if modename == "drv" else "drv" - start_test("Test multi-attachment XDP - detach...") - ret, _, err = sim.unset_xdp(othermode, force=True, - fail=False, include_stderr=True) - fail(ret == 0, "Removed program with a bad mode") - check_extack(err, "program loaded with different flags.", args) + start_test("Test multi-attachment XDP - remove without mode...") + ret, _, err = sim.unset_xdp("", force=True, + fail=False, include_stderr=True) + fail(ret == 0, "Removed program without a mode flag") + check_extack(err, "More than one program loaded, unset mode is ambiguous.", args) sim.unset_xdp("offload") xdp = sim.ip_link_show(xdp=True)["xdp"] @@ -772,6 +768,9 @@ ret, progs = bpftool("prog", fail=False) skip(ret != 0, "bpftool not installed") base_progs = progs _, base_maps = bpftool("map") +base_map_names = [ + 'pid_iter.rodata' # created on each bpftool invocation +] # Check 
netdevsim ret, out = cmd("modprobe netdevsim", fail=False) @@ -913,11 +912,18 @@ try: sim.tc_flush_filters() + start_test("Test TC offloads failure...") + sim.dfs["dev/bpf_bind_verifier_accept"] = 0 + ret, _, err = sim.cls_bpf_add_filter(obj, verbose=True, skip_sw=True, + fail=False, include_stderr=True) + fail(ret == 0, "TC filter did not reject with TC offloads enabled") + check_verifier_log(err, "[netdevsim] Hello from netdevsim!") + sim.dfs["dev/bpf_bind_verifier_accept"] = 1 + start_test("Test TC offloads work...") ret, _, err = sim.cls_bpf_add_filter(obj, verbose=True, skip_sw=True, fail=False, include_stderr=True) fail(ret != 0, "TC filter did not load with TC offloads enabled") - check_verifier_log(err, "[netdevsim] Hello from netdevsim!") start_test("Test TC offload basics...") dfs = simdev.dfs_get_bound_progs(expected=1) @@ -941,6 +947,7 @@ try: start_test("Test disabling TC offloads is rejected while filters installed...") ret, _ = sim.set_ethtool_tc_offloads(False, fail=False) fail(ret == 0, "Driver should refuse to disable TC offloads with filters installed...") + sim.set_ethtool_tc_offloads(True) start_test("Test qdisc removal frees things...") sim.tc_flush_filters() @@ -999,18 +1006,8 @@ try: fail=False, include_stderr=True) fail(ret == 0, "Replaced XDP program with a program in different mode") check_extack(err, - "native and generic XDP can't be active at the same time.", + "Native and generic XDP can't be active at the same time.", args) - ret, _, err = sim.set_xdp(obj, "", force=True, - fail=False, include_stderr=True) - fail(ret == 0, "Replaced XDP program with a program in different mode") - check_extack(err, "program loaded with different flags.", args) - - start_test("Test XDP prog remove with bad flags...") - ret, _, err = sim.unset_xdp("", force=True, - fail=False, include_stderr=True) - fail(ret == 0, "Removed program with a bad mode") - check_extack(err, "program loaded with different flags.", args) start_test("Test MTU restrictions...") ret, _ = sim.set_mtu(9000, fail=False) @@ -1040,10 +1037,19 @@ try: offload = bpf_pinned("/sys/fs/bpf/offload") ret, _, err = sim.set_xdp(offload, "drv", fail=False, include_stderr=True) fail(ret == 0, "attached offloaded XDP program to drv") - check_extack(err, "using device-bound program without HW_MODE flag is not supported.", args) + check_extack(err, "Using device-bound program without HW_MODE flag is not supported.", args) rm("/sys/fs/bpf/offload") sim.wait_for_flush() + start_test("Test XDP load failure...") + sim.dfs["dev/bpf_bind_verifier_accept"] = 0 + ret, _, err = bpftool_prog_load("sample_ret0.o", "/sys/fs/bpf/offload", + dev=sim['ifname'], fail=False, include_stderr=True) + fail(ret == 0, "verifier should fail on load") + check_verifier_log(err, "[netdevsim] Hello from netdevsim!") + sim.dfs["dev/bpf_bind_verifier_accept"] = 1 + sim.wait_for_flush() + start_test("Test XDP offload...") _, _, err = sim.set_xdp(obj, "offload", verbose=True, include_stderr=True) ipl = sim.ip_link_show(xdp=True) @@ -1051,7 +1057,6 @@ try: progs = bpftool_prog_list(expected=1) prog = progs[0] fail(link_xdp["id"] != prog["id"], "Loaded program has wrong ID") - check_verifier_log(err, "[netdevsim] Hello from netdevsim!") start_test("Test XDP offload is device bound...") dfs = simdev.dfs_get_bound_progs(expected=1) diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c index 1c4b1939f5a8..bed53b561e04 100644 --- a/tools/testing/selftests/bpf/verifier/array_access.c +++ 
b/tools/testing/selftests/bpf/verifier/array_access.c @@ -68,7 +68,7 @@ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1), + BPF_JMP32_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1), BPF_MOV32_IMM(BPF_REG_1, 0), BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1), diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c index dac40de3f868..57ed67b86074 100644 --- a/tools/testing/selftests/bpf/verifier/bounds.c +++ b/tools/testing/selftests/bpf/verifier/bounds.c @@ -703,3 +703,44 @@ .fixup_map_hash_8b = { 3 }, .result = ACCEPT, }, +{ + "bounds checks after 32-bit truncation. test 1", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + /* This used to reduce the max bound to 0x7fffffff */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0x7fffffff, 1), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr_unpriv = "R0 leaks addr", + .result_unpriv = REJECT, + .result = ACCEPT, +}, +{ + "bounds checks after 32-bit truncation. test 2", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSLT, BPF_REG_1, 1, 1), + BPF_JMP32_IMM(BPF_JSLT, BPF_REG_1, 0, 1), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr_unpriv = "R0 leaks addr", + .result_unpriv = REJECT, + .result = ACCEPT, +}, diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/q_in_vni_veto.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/q_in_vni_veto.sh new file mode 100755 index 000000000000..0231205a7147 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/q_in_vni_veto.sh @@ -0,0 +1,77 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +lib_dir=$(dirname $0)/../../../../net/forwarding + +VXPORT=4789 + +ALL_TESTS=" + create_dot1d_and_dot1ad_vxlans +" +NUM_NETIFS=2 +source $lib_dir/lib.sh + +setup_prepare() +{ + swp1=${NETIFS[p1]} + swp2=${NETIFS[p2]} + + ip link set dev $swp1 up + ip link set dev $swp2 up +} + +cleanup() +{ + pre_cleanup + + ip link set dev $swp2 down + ip link set dev $swp1 down +} + +create_dot1d_and_dot1ad_vxlans() +{ + RET=0 + + ip link add dev br0 type bridge vlan_filtering 1 vlan_protocol 802.1ad \ + vlan_default_pvid 0 mcast_snooping 0 + ip link set dev br0 up + + ip link add name vx100 type vxlan id 1000 local 192.0.2.17 dstport \ + "$VXPORT" nolearning noudpcsum tos inherit ttl 100 + ip link set dev vx100 up + + ip link set dev $swp1 master br0 + ip link set dev vx100 master br0 + bridge vlan add vid 100 dev vx100 pvid untagged + + ip link add dev br1 type bridge vlan_filtering 0 mcast_snooping 0 + ip link set dev br1 up + + ip link add name vx200 type vxlan id 2000 local 192.0.2.17 dstport \ + "$VXPORT" nolearning noudpcsum tos inherit ttl 100 + ip link set dev vx200 up + + 
ip link set dev $swp2 master br1 + ip link set dev vx200 master br1 2>/dev/null + check_fail $? "802.1d and 802.1ad VxLANs at the same time not rejected" + + ip link set dev vx200 master br1 2>&1 >/dev/null \ + | grep -q mlxsw_spectrum + check_err $? "802.1d and 802.1ad VxLANs at the same time rejected without extack" + + log_test "create 802.1d and 802.1ad VxLANs" + + ip link del dev vx200 + ip link del dev br1 + ip link del dev vx100 + ip link del dev br0 +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/q_in_vni_veto.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/q_in_vni_veto.sh new file mode 100755 index 000000000000..f0443b1b05b9 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/q_in_vni_veto.sh @@ -0,0 +1,66 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +lib_dir=$(dirname $0)/../../../../net/forwarding + +VXPORT=4789 + +ALL_TESTS=" + create_vxlan_on_top_of_8021ad_bridge +" +NUM_NETIFS=2 +source $lib_dir/lib.sh + +setup_prepare() +{ + swp1=${NETIFS[p1]} + swp2=${NETIFS[p2]} + + ip link set dev $swp1 up + ip link set dev $swp2 up +} + +cleanup() +{ + pre_cleanup + + ip link set dev $swp2 down + ip link set dev $swp1 down +} + +create_vxlan_on_top_of_8021ad_bridge() +{ + RET=0 + + ip link add dev br0 type bridge vlan_filtering 1 vlan_protocol 802.1ad \ + vlan_default_pvid 0 mcast_snooping 0 + ip link set dev br0 up + + ip link add name vx100 type vxlan id 1000 local 192.0.2.17 dstport \ + "$VXPORT" nolearning noudpcsum tos inherit ttl 100 + ip link set dev vx100 up + + ip link set dev $swp1 master br0 + ip link set dev vx100 master br0 + + bridge vlan add vid 100 dev vx100 pvid untagged 2>/dev/null + check_fail $? "802.1ad bridge with VxLAN in Spectrum-1 not rejected" + + bridge vlan add vid 100 dev vx100 pvid untagged 2>&1 >/dev/null \ + | grep -q mlxsw_spectrum + check_err $? "802.1ad bridge with VxLAN in Spectrum-1 rejected without extack" + + log_test "create VxLAN on top of 802.1ad bridge" + + ip link del dev vx100 + ip link del dev br0 +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh index fb5c55dd6df8..02b0b9ead40b 100755 --- a/tools/testing/selftests/net/fcnal-test.sh +++ b/tools/testing/selftests/net/fcnal-test.sh @@ -256,6 +256,28 @@ setup_cmd_nsb() fi } +setup_cmd_nsc() +{ + local cmd="$*" + local rc + + run_cmd_nsc ${cmd} + rc=$? + if [ $rc -ne 0 ]; then + # show user the command if not done so already + if [ "$VERBOSE" = "0" ]; then + echo "setup command: $cmd" + fi + echo "failed. 
stopping tests" + if [ "${PAUSE_ON_FAIL}" = "yes" ]; then + echo + echo "hit enter to continue" + read a + fi + exit $rc + fi +} + # set sysctl values in NS-A set_sysctl() { @@ -471,6 +493,36 @@ setup() sleep 1 } +setup_lla_only() +{ + # make sure we are starting with a clean slate + kill_procs + cleanup 2>/dev/null + + log_debug "Configuring network namespaces" + set -e + + create_ns ${NSA} "-" "-" + create_ns ${NSB} "-" "-" + create_ns ${NSC} "-" "-" + connect_ns ${NSA} ${NSA_DEV} "-" "-" \ + ${NSB} ${NSB_DEV} "-" "-" + connect_ns ${NSA} ${NSA_DEV2} "-" "-" \ + ${NSC} ${NSC_DEV} "-" "-" + + NSA_LINKIP6=$(get_linklocal ${NSA} ${NSA_DEV}) + NSB_LINKIP6=$(get_linklocal ${NSB} ${NSB_DEV}) + NSC_LINKIP6=$(get_linklocal ${NSC} ${NSC_DEV}) + + create_vrf ${NSA} ${VRF} ${VRF_TABLE} "-" "-" + ip -netns ${NSA} link set dev ${NSA_DEV} vrf ${VRF} + ip -netns ${NSA} link set dev ${NSA_DEV2} vrf ${VRF} + + set +e + + sleep 1 +} + ################################################################################ # IPv4 @@ -3787,10 +3839,53 @@ use_case_br() setup_cmd_nsb ip li del vlan100 2>/dev/null } +# VRF only. +# ns-A device is connected to both ns-B and ns-C on a single VRF but only has +# LLA on the interfaces +use_case_ping_lla_multi() +{ + setup_lla_only + # only want reply from ns-A + setup_cmd_nsb sysctl -qw net.ipv6.icmp.echo_ignore_multicast=1 + setup_cmd_nsc sysctl -qw net.ipv6.icmp.echo_ignore_multicast=1 + + log_start + run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV} + log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Pre cycle, ping out ns-B" + + run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV} + log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Pre cycle, ping out ns-C" + + # cycle/flap the first ns-A interface + setup_cmd ip link set ${NSA_DEV} down + setup_cmd ip link set ${NSA_DEV} up + sleep 1 + + log_start + run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV} + log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV}, ping out ns-B" + run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV} + log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV}, ping out ns-C" + + # cycle/flap the second ns-A interface + setup_cmd ip link set ${NSA_DEV2} down + setup_cmd ip link set ${NSA_DEV2} up + sleep 1 + + log_start + run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV} + log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV2}, ping out ns-B" + run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV} + log_test_addr ${MCAST}%${NSC_DEV} $? 
0 "Post cycle ${NSA} ${NSA_DEV2}, ping out ns-C" +} + use_cases() { log_section "Use cases" + log_subsection "Device enslaved to bridge" use_case_br + log_subsection "Ping LLA with multiple interfaces" + use_case_ping_lla_multi } ################################################################################ diff --git a/tools/testing/selftests/net/forwarding/q_in_vni.sh b/tools/testing/selftests/net/forwarding/q_in_vni.sh new file mode 100755 index 000000000000..4c50c0234bce --- /dev/null +++ b/tools/testing/selftests/net/forwarding/q_in_vni.sh @@ -0,0 +1,347 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# +-----------------------+ +------------------------+ +# | H1 (vrf) | | H2 (vrf) | +# | + $h1.10 | | + $h2.10 | +# | | 192.0.2.1/28 | | | 192.0.2.2/28 | +# | | | | | | +# | | + $h1.20 | | | + $h2.20 | +# | \ | 198.51.100.1/24 | | \ | 198.51.100.2/24 | +# | \| | | \| | +# | + $h1 | | + $h2 | +# +----|------------------+ +----|-------------------+ +# | | +# +----|--------------------------------------------------|-------------------+ +# | SW | | | +# | +--|--------------------------------------------------|-----------------+ | +# | | + $swp1 BR1 (802.1ad) + $swp2 | | +# | | vid 100 pvid untagged vid 100 pvid | | +# | | untagged | | +# | | + vx100 (vxlan) | | +# | | local 192.0.2.17 | | +# | | remote 192.0.2.34 192.0.2.50 | | +# | | id 1000 dstport $VXPORT | | +# | | vid 100 pvid untagged | | +# | +-----------------------------------------------------------------------+ | +# | | +# | 192.0.2.32/28 via 192.0.2.18 | +# | 192.0.2.48/28 via 192.0.2.18 | +# | | +# | + $rp1 | +# | | 192.0.2.17/28 | +# +----|----------------------------------------------------------------------+ +# | +# +----|--------------------------------------------------------+ +# | | VRP2 (vrf) | +# | + $rp2 | +# | 192.0.2.18/28 | +# | | (maybe) HW +# ============================================================================= +# | | (likely) SW +# | + v1 (veth) + v3 (veth) | +# | | 192.0.2.33/28 | 192.0.2.49/28 | +# +----|---------------------------------------|----------------+ +# | | +# +----|------------------------------+ +----|------------------------------+ +# | + v2 (veth) NS1 (netns) | | + v4 (veth) NS2 (netns) | +# | 192.0.2.34/28 | | 192.0.2.50/28 | +# | | | | +# | 192.0.2.16/28 via 192.0.2.33 | | 192.0.2.16/28 via 192.0.2.49 | +# | 192.0.2.50/32 via 192.0.2.33 | | 192.0.2.34/32 via 192.0.2.49 | +# | | | | +# | +-------------------------------+ | | +-------------------------------+ | +# | | BR2 (802.1ad) | | | | BR2 (802.1ad) | | +# | | + vx100 (vxlan) | | | | + vx100 (vxlan) | | +# | | local 192.0.2.34 | | | | local 192.0.2.50 | | +# | | remote 192.0.2.17 | | | | remote 192.0.2.17 | | +# | | remote 192.0.2.50 | | | | remote 192.0.2.34 | | +# | | id 1000 dstport $VXPORT | | | | id 1000 dstport $VXPORT | | +# | | vid 100 pvid untagged | | | | vid 100 pvid untagged | | +# | | | | | | | | +# | | + w1 (veth) | | | | + w1 (veth) | | +# | | | vid 100 pvid untagged | | | | | vid 100 pvid untagged | | +# | +--|----------------------------+ | | +--|----------------------------+ | +# | | | | | | +# | +--|----------------------------+ | | +--|----------------------------+ | +# | | | VW2 (vrf) | | | | | VW2 (vrf) | | +# | | + w2 (veth) | | | | + w2 (veth) | | +# | | |\ | | | | |\ | | +# | | | + w2.10 | | | | | + w2.10 | | +# | | | 192.0.2.3/28 | | | | | 192.0.2.4/28 | | +# | | | | | | | | | | +# | | + w2.20 | | | | + w2.20 | | +# | | 198.51.100.3/24 | | | | 198.51.100.4/24 | | +# | +-------------------------------+ 
| | +-------------------------------+ | +# +-----------------------------------+ +-----------------------------------+ + +: ${VXPORT:=4789} +export VXPORT + +: ${ALL_TESTS:=" + ping_ipv4 + "} + +NUM_NETIFS=6 +source lib.sh + +h1_create() +{ + simple_if_init $h1 + tc qdisc add dev $h1 clsact + vlan_create $h1 10 v$h1 192.0.2.1/28 + vlan_create $h1 20 v$h1 198.51.100.1/24 +} + +h1_destroy() +{ + vlan_destroy $h1 20 + vlan_destroy $h1 10 + tc qdisc del dev $h1 clsact + simple_if_fini $h1 +} + +h2_create() +{ + simple_if_init $h2 + tc qdisc add dev $h2 clsact + vlan_create $h2 10 v$h2 192.0.2.2/28 + vlan_create $h2 20 v$h2 198.51.100.2/24 +} + +h2_destroy() +{ + vlan_destroy $h2 20 + vlan_destroy $h2 10 + tc qdisc del dev $h2 clsact + simple_if_fini $h2 +} + +rp1_set_addr() +{ + ip address add dev $rp1 192.0.2.17/28 + + ip route add 192.0.2.32/28 nexthop via 192.0.2.18 + ip route add 192.0.2.48/28 nexthop via 192.0.2.18 +} + +rp1_unset_addr() +{ + ip route del 192.0.2.48/28 nexthop via 192.0.2.18 + ip route del 192.0.2.32/28 nexthop via 192.0.2.18 + + ip address del dev $rp1 192.0.2.17/28 +} + +switch_create() +{ + ip link add name br1 type bridge vlan_filtering 1 vlan_protocol 802.1ad \ + vlan_default_pvid 0 mcast_snooping 0 + # Make sure the bridge uses the MAC address of the local port and not + # that of the VxLAN's device. + ip link set dev br1 address $(mac_get $swp1) + ip link set dev br1 up + + ip link set dev $rp1 up + rp1_set_addr + + ip link add name vx100 type vxlan id 1000 \ + local 192.0.2.17 dstport "$VXPORT" \ + nolearning noudpcsum tos inherit ttl 100 + ip link set dev vx100 up + + ip link set dev vx100 master br1 + bridge vlan add vid 100 dev vx100 pvid untagged + + ip link set dev $swp1 master br1 + ip link set dev $swp1 up + bridge vlan add vid 100 dev $swp1 pvid untagged + + ip link set dev $swp2 master br1 + ip link set dev $swp2 up + bridge vlan add vid 100 dev $swp2 pvid untagged + + bridge fdb append dev vx100 00:00:00:00:00:00 dst 192.0.2.34 self + bridge fdb append dev vx100 00:00:00:00:00:00 dst 192.0.2.50 self +} + +switch_destroy() +{ + bridge fdb del dev vx100 00:00:00:00:00:00 dst 192.0.2.50 self + bridge fdb del dev vx100 00:00:00:00:00:00 dst 192.0.2.34 self + + bridge vlan del vid 100 dev $swp2 + ip link set dev $swp2 down + ip link set dev $swp2 nomaster + + bridge vlan del vid 100 dev $swp1 + ip link set dev $swp1 down + ip link set dev $swp1 nomaster + + ip link set dev vx100 nomaster + ip link set dev vx100 down + ip link del dev vx100 + + rp1_unset_addr + ip link set dev $rp1 down + + ip link set dev br1 down + ip link del dev br1 +} + +vrp2_create() +{ + simple_if_init $rp2 192.0.2.18/28 + __simple_if_init v1 v$rp2 192.0.2.33/28 + __simple_if_init v3 v$rp2 192.0.2.49/28 + tc qdisc add dev v1 clsact +} + +vrp2_destroy() +{ + tc qdisc del dev v1 clsact + __simple_if_fini v3 192.0.2.49/28 + __simple_if_fini v1 192.0.2.33/28 + simple_if_fini $rp2 192.0.2.18/28 +} + +ns_init_common() +{ + local in_if=$1; shift + local in_addr=$1; shift + local other_in_addr=$1; shift + local nh_addr=$1; shift + local host_addr1=$1; shift + local host_addr2=$1; shift + + ip link set dev $in_if up + ip address add dev $in_if $in_addr/28 + tc qdisc add dev $in_if clsact + + ip link add name br2 type bridge vlan_filtering 1 vlan_protocol 802.1ad \ + vlan_default_pvid 0 + ip link set dev br2 up + + ip link add name w1 type veth peer name w2 + + ip link set dev w1 master br2 + ip link set dev w1 up + bridge vlan add vid 100 dev w1 pvid untagged + + ip link add name vx100 type 
vxlan id 1000 local $in_addr \ + dstport "$VXPORT" + ip link set dev vx100 up + bridge fdb append dev vx100 00:00:00:00:00:00 dst 192.0.2.17 self + bridge fdb append dev vx100 00:00:00:00:00:00 dst $other_in_addr self + + ip link set dev vx100 master br2 + tc qdisc add dev vx100 clsact + + bridge vlan add vid 100 dev vx100 pvid untagged + + simple_if_init w2 + vlan_create w2 10 vw2 $host_addr1/28 + vlan_create w2 20 vw2 $host_addr2/24 + + ip route add 192.0.2.16/28 nexthop via $nh_addr + ip route add $other_in_addr/32 nexthop via $nh_addr +} +export -f ns_init_common + +ns1_create() +{ + ip netns add ns1 + ip link set dev v2 netns ns1 + in_ns ns1 \ + ns_init_common v2 192.0.2.34 192.0.2.50 192.0.2.33 \ + 192.0.2.3 198.51.100.3 +} + +ns1_destroy() +{ + ip netns exec ns1 ip link set dev v2 netns 1 + ip netns del ns1 +} + +ns2_create() +{ + ip netns add ns2 + ip link set dev v4 netns ns2 + in_ns ns2 \ + ns_init_common v4 192.0.2.50 192.0.2.34 192.0.2.49 \ + 192.0.2.4 198.51.100.4 +} + +ns2_destroy() +{ + ip netns exec ns2 ip link set dev v4 netns 1 + ip netns del ns2 +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + rp1=${NETIFS[p5]} + rp2=${NETIFS[p6]} + + vrf_prepare + forwarding_enable + + h1_create + h2_create + switch_create + + ip link add name v1 type veth peer name v2 + ip link add name v3 type veth peer name v4 + vrp2_create + ns1_create + ns2_create + + r1_mac=$(in_ns ns1 mac_get w2) + r2_mac=$(in_ns ns2 mac_get w2) + h2_mac=$(mac_get $h2) +} + +cleanup() +{ + pre_cleanup + + ns2_destroy + ns1_destroy + vrp2_destroy + ip link del dev v3 + ip link del dev v1 + + switch_destroy + h2_destroy + h1_destroy + + forwarding_restore + vrf_cleanup +} + +ping_ipv4() +{ + ping_test $h1 192.0.2.2 ": local->local" + ping_test $h1 192.0.2.3 ": local->remote 1" + ping_test $h1 192.0.2.4 ": local->remote 2" +} + +test_all() +{ + echo "Running tests with UDP port $VXPORT" + tests_run +} + +trap cleanup EXIT + +setup_prepare +setup_wait +test_all + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c index db3d4a8b5a4c..76a24052f4b4 100644 --- a/tools/testing/selftests/net/udpgso_bench_rx.c +++ b/tools/testing/selftests/net/udpgso_bench_rx.c @@ -113,6 +113,9 @@ static void do_poll(int fd, int timeout_ms) interrupted = true; break; } + + /* no events and more time to wait, do poll again */ + continue; } if (pfd.revents != POLLIN) error(1, errno, "poll: 0x%x expected 0x%x\n", diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index 30873b19d04b..691893afc15d 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile @@ -60,9 +60,13 @@ ifeq ($(CAN_BUILD_X86_64),1) TEST_GEN_FILES += $(BINARIES_64) endif else + +ifneq (,$(findstring $(ARCH),powerpc)) TEST_GEN_FILES += protection_keys endif +endif + ifneq (,$(filter $(MACHINE),arm64 ia64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sh64 sparc64 x86_64)) TEST_GEN_FILES += va_128TBswitch TEST_GEN_FILES += virtual_address_range diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c index 9b0912a01777..c4425597769a 100644 --- a/tools/testing/selftests/vm/userfaultfd.c +++ b/tools/testing/selftests/vm/userfaultfd.c @@ -206,19 +206,19 @@ static int hugetlb_release_pages(char *rel_area) return ret; } - static void hugetlb_allocate_area(void **alloc_area) { void *area_alias = NULL; char **alloc_area_alias; + *alloc_area = 
mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE, (map_shared ? MAP_SHARED : MAP_PRIVATE) | MAP_HUGETLB, huge_fd, *alloc_area == area_src ? 0 : nr_pages * page_size); if (*alloc_area == MAP_FAILED) { - fprintf(stderr, "mmap of hugetlbfs file failed\n"); - *alloc_area = NULL; + perror("mmap of hugetlbfs file failed"); + goto fail; } if (map_shared) { @@ -227,14 +227,11 @@ static void hugetlb_allocate_area(void **alloc_area) huge_fd, *alloc_area == area_src ? 0 : nr_pages * page_size); if (area_alias == MAP_FAILED) { - if (munmap(*alloc_area, nr_pages * page_size) < 0) { - perror("hugetlb munmap"); - exit(1); - } - *alloc_area = NULL; - return; + perror("mmap of hugetlb file alias failed"); + goto fail_munmap; } } + if (*alloc_area == area_src) { huge_fd_off0 = *alloc_area; alloc_area_alias = &area_src_alias; @@ -243,6 +240,16 @@ static void hugetlb_allocate_area(void **alloc_area) } if (area_alias) *alloc_area_alias = area_alias; + + return; + +fail_munmap: + if (munmap(*alloc_area, nr_pages * page_size) < 0) { + perror("hugetlb munmap"); + exit(1); + } +fail: + *alloc_area = NULL; } static void hugetlb_alias_mapping(__u64 *start, size_t len, unsigned long offset) |
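The userfaultfd.c hunk above replaces scattered error handling in hugetlb_allocate_area() with a single unwind path. A generic sketch of that goto-cleanup idiom, using illustrative heap allocations rather than the selftest's hugetlb mappings:

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *a, *b;

	a = malloc(64);
	if (!a)
		goto fail;

	b = malloc(64);
	if (!b)
		goto fail_free_a;   /* unwind only what was already acquired */

	/* ... use a and b ... */
	free(b);
	free(a);
	return 0;

fail_free_a:
	free(a);
fail:
	perror("setup");
	return -1;
}

int main(void)
{
	return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}

Each failure jumps to the label that releases exactly the resources acquired so far, so the cleanup order stays correct as steps are added; that is the property the patch restores for the hugetlb mapping and its alias.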