509 files changed, 15186 insertions, 5580 deletions
@@ -137,6 +137,7 @@ Filipe Lautert <[email protected]> Finn Thain <[email protected]> <[email protected]> Franck Bui-Huu <[email protected]> Frank Rowand <[email protected]> <[email protected]> +Frank Rowand <[email protected]> <[email protected]> Frank Rowand <[email protected]> <[email protected]> Frank Rowand <[email protected]> <[email protected]> Frank Zago <[email protected]> diff --git a/Documentation/ABI/testing/sysfs-devices-vfio-dev b/Documentation/ABI/testing/sysfs-devices-vfio-dev new file mode 100644 index 000000000000..e21424fd9666 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-devices-vfio-dev @@ -0,0 +1,8 @@ +What: /sys/.../<device>/vfio-dev/vfioX/ +Date: September 2022 +Contact: Yi Liu <[email protected]> +Description: + This directory is created when the device is bound to a + vfio driver. The layout under this directory matches what + exists for a standard 'struct device'. 'X' is a unique + index marking this device in vfio. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 69b1533c1f02..a465d5242774 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -6851,6 +6851,12 @@ Crash from Xen panic notifier, without executing late panic() code such as dumping handler. + xen_msr_safe= [X86,XEN] + Format: <bool> + Select whether to always use non-faulting (safe) MSR + access functions when running as Xen PV guest. The + default value is controlled by CONFIG_XEN_PV_MSR_SAFE. + xen_nopvspin [X86,XEN] Disables the qspinlock slowpath using Xen PV optimizations. This parameter is obsoleted by "nopvspin" parameter, which diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst index 835c8844bba4..98d1b198b2b4 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst @@ -65,6 +65,11 @@ combining the following values: 4 s3_beep = ======= +arch +==== + +The machine hardware name, the same output as ``uname -m`` +(e.g. ``x86_64`` or ``aarch64``). auto_msgmni =========== diff --git a/Documentation/dev-tools/checkpatch.rst b/Documentation/dev-tools/checkpatch.rst index b52452bc2963..c3389c6f3838 100644 --- a/Documentation/dev-tools/checkpatch.rst +++ b/Documentation/dev-tools/checkpatch.rst @@ -612,6 +612,13 @@ Commit message See: https://www.kernel.org/doc/html/latest/process/submitting-patches.html#describe-your-changes + **BAD_FIXES_TAG** + The Fixes: tag is malformed or does not follow the community conventions. + This can occur if the tag have been split into multiple lines (e.g., when + pasted in an email program with word wrapping enabled). + + See: https://www.kernel.org/doc/html/latest/process/submitting-patches.html#describe-your-changes + Comparison style ---------------- diff --git a/Documentation/dev-tools/kunit/run_wrapper.rst b/Documentation/dev-tools/kunit/run_wrapper.rst index 6b33caf6c8ab..dafe8eb28d30 100644 --- a/Documentation/dev-tools/kunit/run_wrapper.rst +++ b/Documentation/dev-tools/kunit/run_wrapper.rst @@ -251,14 +251,15 @@ command line arguments: compiling a kernel (using ``build`` or ``run`` commands). For example: to enable compiler warnings, we can pass ``--make_options W=1``. -- ``--alltests``: Builds a UML kernel with all config options enabled - using ``make allyesconfig``. This allows us to run as many tests as - possible. - - .. note:: It is slow and prone to breakage as new options are - added or modified. 
Instead, enable all tests - which have satisfied dependencies by adding - ``CONFIG_KUNIT_ALL_TESTS=y`` to your ``.kunitconfig``. +- ``--alltests``: Enable a predefined set of options in order to build + as many tests as possible. + + .. note:: The list of enabled options can be found in + ``tools/testing/kunit/configs/all_tests.config``. + + If you only want to enable all tests with otherwise satisfied + dependencies, instead add ``CONFIG_KUNIT_ALL_TESTS=y`` to your + ``.kunitconfig``. - ``--kunitconfig``: Specifies the path or the directory of the ``.kunitconfig`` file. For example: diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,mu-msi.yaml b/Documentation/devicetree/bindings/interrupt-controller/fsl,mu-msi.yaml new file mode 100644 index 000000000000..799ae5c3e32a --- /dev/null +++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,mu-msi.yaml @@ -0,0 +1,99 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/interrupt-controller/fsl,mu-msi.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Freescale/NXP i.MX Messaging Unit (MU) work as msi controller + +maintainers: + - Frank Li <[email protected]> + +description: | + The Messaging Unit module enables two processors within the SoC to + communicate and coordinate by passing messages (e.g. data, status + and control) through the MU interface. The MU also provides the ability + for one processor (A side) to signal the other processor (B side) using + interrupts. + + Because the MU manages the messaging between processors, the MU uses + different clocks (from each side of the different peripheral buses). + Therefore, the MU must synchronize the accesses from one side to the + other. The MU accomplishes synchronization using two sets of matching + registers (Processor A-side, Processor B-side). + + MU can work as msi interrupt controller to do doorbell + +allOf: + - $ref: /schemas/interrupt-controller/msi-controller.yaml# + +properties: + compatible: + enum: + - fsl,imx6sx-mu-msi + - fsl,imx7ulp-mu-msi + - fsl,imx8ulp-mu-msi + - fsl,imx8ulp-mu-msi-s4 + + reg: + items: + - description: a side register base address + - description: b side register base address + + reg-names: + items: + - const: processor-a-side + - const: processor-b-side + + interrupts: + description: a side interrupt number. 
+ maxItems: 1 + + clocks: + maxItems: 1 + + power-domains: + items: + - description: a side power domain + - description: b side power domain + + power-domain-names: + items: + - const: processor-a-side + - const: processor-b-side + + interrupt-controller: true + + msi-controller: true + + "#msi-cells": + const: 0 + +required: + - compatible + - reg + - interrupts + - interrupt-controller + - msi-controller + - "#msi-cells" + +additionalProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/arm-gic.h> + #include <dt-bindings/firmware/imx/rsrc.h> + + msi-controller@5d270000 { + compatible = "fsl,imx6sx-mu-msi"; + msi-controller; + #msi-cells = <0>; + interrupt-controller; + reg = <0x5d270000 0x10000>, /* A side */ + <0x5d300000 0x10000>; /* B side */ + reg-names = "processor-a-side", "processor-b-side"; + interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>; + power-domains = <&pd IMX_SC_R_MU_12A>, + <&pd IMX_SC_R_MU_12B>; + power-domain-names = "processor-a-side", "processor-b-side"; + }; diff --git a/Documentation/devicetree/bindings/interrupt-controller/realtek,rtl-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/realtek,rtl-intc.yaml index 9e76fff20323..13a893b18fb6 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/realtek,rtl-intc.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/realtek,rtl-intc.yaml @@ -6,6 +6,14 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Realtek RTL SoC interrupt controller devicetree bindings +description: + Interrupt controller and router for Realtek MIPS SoCs, allowing each SoC + interrupt to be routed to one parent CPU (hardware) interrupt, or left + disconnected. + All connected input lines from SoC peripherals can be masked individually, + and an interrupt status register is present to indicate which interrupts are + pending. + maintainers: - Birger Koblitz <[email protected]> - Bert Vermeulen <[email protected]> @@ -13,23 +21,33 @@ maintainers: properties: compatible: - const: realtek,rtl-intc + oneOf: + - items: + - enum: + - realtek,rtl8380-intc + - const: realtek,rtl-intc + - const: realtek,rtl-intc + deprecated: true "#interrupt-cells": + description: + SoC interrupt line index. const: 1 reg: maxItems: 1 interrupts: - maxItems: 1 + minItems: 1 + maxItems: 15 + description: + List of parent interrupts, in the order that they are connected to this + interrupt router's outputs, starting at the first output. 
interrupt-controller: true - "#address-cells": - const: 0 - interrupt-map: + deprecated: true description: Describes mapping from SoC interrupts to CPU interrupts required: @@ -37,21 +55,33 @@ required: - reg - "#interrupt-cells" - interrupt-controller - - "#address-cells" - - interrupt-map + +allOf: + - if: + properties: + compatible: + const: realtek,rtl-intc + then: + properties: + "#address-cells": + const: 0 + required: + - "#address-cells" + - interrupt-map + else: + required: + - interrupts additionalProperties: false examples: - | - intc: interrupt-controller@3000 { - compatible = "realtek,rtl-intc"; + interrupt-controller@3000 { + compatible = "realtek,rtl8380-intc", "realtek,rtl-intc"; #interrupt-cells = <1>; interrupt-controller; - reg = <0x3000 0x20>; - #address-cells = <0>; - interrupt-map = - <31 &cpuintc 2>, - <30 &cpuintc 1>, - <29 &cpuintc 5>; + reg = <0x3000 0x18>; + + interrupt-parent = <&cpuintc>; + interrupts = <2>, <3>, <4>, <5>, <6>; }; diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml index 620f01775e42..62fd47c88275 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml @@ -37,6 +37,7 @@ properties: - renesas,intc-ex-r8a77990 # R-Car E3 - renesas,intc-ex-r8a77995 # R-Car D3 - renesas,intc-ex-r8a779a0 # R-Car V3U + - renesas,intc-ex-r8a779g0 # R-Car V4H - const: renesas,irqc '#interrupt-cells': diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.yaml b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.yaml index 88c46e61732e..1151518859bd 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.yaml @@ -59,6 +59,9 @@ properties: interrupt-controller: true + '#interrupt-cells': + const: 0 + msi-controller: true ti,interrupt-ranges: diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.yaml b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.yaml index e12aee42b126..c99cc7323c71 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.yaml @@ -58,6 +58,9 @@ properties: 1 = If intr supports edge triggered interrupts. 4 = If intr supports level triggered interrupts. + reg: + maxItems: 1 + interrupt-controller: true '#interrupt-cells': diff --git a/Documentation/devicetree/bindings/watchdog/atmel,at91sam9-wdt.yaml b/Documentation/devicetree/bindings/watchdog/atmel,at91sam9-wdt.yaml new file mode 100644 index 000000000000..ad27bc518670 --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/atmel,at91sam9-wdt.yaml @@ -0,0 +1,127 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +# Copyright (C) 2022 Microchip Technology, Inc. and its subsidiaries +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/watchdog/atmel,at91sam9-wdt.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Atmel Watchdog Timers + +maintainers: + - Eugen Hristev <[email protected]> + +properties: + compatible: + const: atmel,at91sam9260-wdt + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + + interrupts: + maxItems: 1 + + atmel,max-heartbeat-sec: + description: + Should contain the maximum heartbeat value in seconds. This value + should be less or equal to 16. 
It is used to compute the WDV field. + maximum: 16 + + atmel,min-heartbeat-sec: + description: + Should contain the minimum heartbeat value in seconds. This value + must be smaller than the max-heartbeat-sec value. It is used to + compute the WDD field. + maximum: 16 + + atmel,watchdog-type: + $ref: /schemas/types.yaml#/definitions/string + description: | + Should be hardware or software. + oneOf: + - description: + Hardware watchdog uses the at91 watchdog reset. + const: hardware + - description: | + Software watchdog uses the watchdog interrupt + to trigger a software reset. + const: software + default: hardware + + atmel,reset-type: + $ref: /schemas/types.yaml#/definitions/string + description: | + Should be proc or all. This is valid only when using hardware watchdog. + oneOf: + - description: + Assert peripherals and processor reset signals. + const: all + - description: + Assert the processor reset signal. + const: proc + default: all + + atmel,disable: + $ref: /schemas/types.yaml#/definitions/flag + description: + Should be present if you want to stop the watchdog. + + atmel,idle-halt: + $ref: /schemas/types.yaml#/definitions/flag + description: | + Should be present if you want to stop the watchdog when + entering idle state. + CAUTION: This property should be used with care, it actually makes the + watchdog not counting when the CPU is in idle state, therefore the + watchdog reset time depends on mean CPU usage and will not reset at all + if the CPU stops working while it is in idle state, which is probably + not what you want. + + atmel,dbg-halt: + $ref: /schemas/types.yaml#/definitions/flag + description: | + Should be present if you want to stop the watchdog when + entering debug state. + +required: + - compatible + - reg + - clocks + +allOf: + - $ref: watchdog.yaml# + - if: + properties: + atmel,reset-type: + enum: + - all + - proc + then: + properties: + atmel,watchdog-type: + const: hardware + +dependencies: + atmel,reset-type: ['atmel,watchdog-type'] + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/irq.h> + + watchdog@fffffd40 { + compatible = "atmel,at91sam9260-wdt"; + reg = <0xfffffd40 0x10>; + interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>; + clocks = <&clk32k>; + timeout-sec = <15>; + atmel,watchdog-type = "hardware"; + atmel,reset-type = "all"; + atmel,dbg-halt; + atmel,idle-halt; + atmel,max-heartbeat-sec = <16>; + atmel,min-heartbeat-sec = <0>; + }; diff --git a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt deleted file mode 100644 index 711a880b3d3b..000000000000 --- a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt +++ /dev/null @@ -1,51 +0,0 @@ -* Atmel Watchdog Timers - -** at91sam9-wdt - -Required properties: -- compatible: must be "atmel,at91sam9260-wdt". -- reg: physical base address of the controller and length of memory mapped - region. -- clocks: phandle to input clock. - -Optional properties: -- timeout-sec: contains the watchdog timeout in seconds. -- interrupts : Should contain WDT interrupt. -- atmel,max-heartbeat-sec : Should contain the maximum heartbeat value in - seconds. This value should be less or equal to 16. It is used to - compute the WDV field. -- atmel,min-heartbeat-sec : Should contain the minimum heartbeat value in - seconds. This value must be smaller than the max-heartbeat-sec value. - It is used to compute the WDD field. -- atmel,watchdog-type : Should be "hardware" or "software". 
Hardware watchdog - use the at91 watchdog reset. Software watchdog use the watchdog - interrupt to trigger a software reset. -- atmel,reset-type : Should be "proc" or "all". - "all" : assert peripherals and processor reset signals - "proc" : assert the processor reset signal - This is valid only when using "hardware" watchdog. -- atmel,disable : Should be present if you want to disable the watchdog. -- atmel,idle-halt : Should be present if you want to stop the watchdog when - entering idle state. - CAUTION: This property should be used with care, it actually makes the - watchdog not counting when the CPU is in idle state, therefore the - watchdog reset time depends on mean CPU usage and will not reset at all - if the CPU stop working while it is in idle state, which is probably - not what you want. -- atmel,dbg-halt : Should be present if you want to stop the watchdog when - entering debug state. - -Example: - watchdog@fffffd40 { - compatible = "atmel,at91sam9260-wdt"; - reg = <0xfffffd40 0x10>; - interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>; - clocks = <&clk32k>; - timeout-sec = <15>; - atmel,watchdog-type = "hardware"; - atmel,reset-type = "all"; - atmel,dbg-halt; - atmel,idle-halt; - atmel,max-heartbeat-sec = <16>; - atmel,min-heartbeat-sec = <0>; - }; diff --git a/Documentation/devicetree/bindings/watchdog/mediatek,mt7621-wdt.yaml b/Documentation/devicetree/bindings/watchdog/mediatek,mt7621-wdt.yaml new file mode 100644 index 000000000000..b2b17fdf4e39 --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/mediatek,mt7621-wdt.yaml @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/watchdog/mediatek,mt7621-wdt.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Ralink Watchdog Timers + +maintainers: + - Sergio Paracuellos <[email protected]> + +allOf: + - $ref: watchdog.yaml# + +properties: + compatible: + const: mediatek,mt7621-wdt + + reg: + maxItems: 1 + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + watchdog@100 { + compatible = "mediatek,mt7621-wdt"; + reg = <0x100 0x100>; + }; diff --git a/Documentation/devicetree/bindings/watchdog/mt7621-wdt.txt b/Documentation/devicetree/bindings/watchdog/mt7621-wdt.txt deleted file mode 100644 index c15ef0ef609f..000000000000 --- a/Documentation/devicetree/bindings/watchdog/mt7621-wdt.txt +++ /dev/null @@ -1,12 +0,0 @@ -Ralink Watchdog Timers - -Required properties: -- compatible: must be "mediatek,mt7621-wdt" -- reg: physical base address of the controller and length of the register range - -Example: - - watchdog@100 { - compatible = "mediatek,mt7621-wdt"; - reg = <0x100 0x10>; - }; diff --git a/Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt b/Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt deleted file mode 100644 index c6ae9c9d5e3e..000000000000 --- a/Documentation/devicetree/bindings/watchdog/of-xilinx-wdt.txt +++ /dev/null @@ -1,26 +0,0 @@ -Xilinx AXI/PLB soft-core watchdog Device Tree Bindings ---------------------------------------------------------- - -Required properties: -- compatible : Should be "xlnx,xps-timebase-wdt-1.00.a" or - "xlnx,xps-timebase-wdt-1.01.a". -- reg : Physical base address and size - -Optional properties: -- clocks : Input clock specifier. Refer to common clock - bindings. 
-- clock-frequency : Frequency of clock in Hz -- xlnx,wdt-enable-once : 0 - Watchdog can be restarted - 1 - Watchdog can be enabled just once -- xlnx,wdt-interval : Watchdog timeout interval in 2^<val> clock cycles, - <val> is integer from 8 to 31. - -Example: -axi-timebase-wdt@40100000 { - clock-frequency = <50000000>; - compatible = "xlnx,xps-timebase-wdt-1.00.a"; - clocks = <&clkc 15>; - reg = <0x40100000 0x10000>; - xlnx,wdt-enable-once = <0x0>; - xlnx,wdt-interval = <0x1b>; -} ; diff --git a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml index a8d7dde5271b..26b1815a6753 100644 --- a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml +++ b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml @@ -33,6 +33,11 @@ properties: - items: - enum: + - renesas,r9a09g011-wdt # RZ/V2M + - const: renesas,rzv2m-wdt # RZ/V2M + + - items: + - enum: - renesas,r8a7742-wdt # RZ/G1H - renesas,r8a7743-wdt # RZ/G1M - renesas,r8a7744-wdt # RZ/G1N @@ -65,18 +70,35 @@ properties: - enum: - renesas,r8a779a0-wdt # R-Car V3U - renesas,r8a779f0-wdt # R-Car S4-8 + - renesas,r8a779g0-wdt # R-Car V4H - const: renesas,rcar-gen4-wdt # R-Car Gen4 reg: maxItems: 1 - interrupts: true - - interrupt-names: true - - clocks: true - - clock-names: true + interrupts: + minItems: 1 + items: + - description: Timeout + - description: Parity error + + interrupt-names: + minItems: 1 + items: + - const: wdt + - const: perrout + + clocks: + minItems: 1 + items: + - description: Register access clock + - description: Main clock + + clock-names: + minItems: 1 + items: + - const: pclk + - const: oscclk power-domains: maxItems: 1 @@ -89,6 +111,7 @@ properties: required: - compatible - reg + - interrupts - clocks allOf: @@ -113,31 +136,38 @@ allOf: contains: enum: - renesas,rzg2l-wdt + - renesas,rzv2m-wdt then: properties: - interrupts: - maxItems: 2 - interrupt-names: - items: - - const: wdt - - const: perrout clocks: - items: - - description: Register access clock - - description: Main clock + minItems: 2 clock-names: - items: - - const: pclk - - const: oscclk + minItems: 2 required: - clock-names + else: + properties: + clocks: + maxItems: 1 + + - if: + properties: + compatible: + contains: + enum: + - renesas,rzg2l-wdt + then: + properties: + interrupts: + minItems: 2 + interrupt-names: + minItems: 2 + required: - interrupt-names else: properties: interrupts: maxItems: 1 - clocks: - maxItems: 1 additionalProperties: false @@ -145,9 +175,11 @@ examples: - | #include <dt-bindings/clock/r8a7795-cpg-mssr.h> #include <dt-bindings/power/r8a7795-sysc.h> + #include <dt-bindings/interrupt-controller/arm-gic.h> wdt0: watchdog@e6020000 { compatible = "renesas,r8a7795-wdt", "renesas,rcar-gen3-wdt"; reg = <0xe6020000 0x0c>; + interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>; clocks = <&cpg CPG_MOD 402>; power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; resets = <&cpg 402>; diff --git a/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml b/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml index b08373336b16..8fb6656ba0c2 100644 --- a/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml +++ b/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml @@ -23,6 +23,7 @@ properties: - samsung,exynos5420-wdt # for Exynos5420 - samsung,exynos7-wdt # for Exynos7 - samsung,exynos850-wdt # for Exynos850 + - samsung,exynosautov9-wdt # for Exynosautov9 reg: maxItems: 1 @@ -67,6 +68,7 @@ allOf: - samsung,exynos5420-wdt - samsung,exynos7-wdt - 
samsung,exynos850-wdt + - samsung,exynosautov9-wdt then: required: - samsung,syscon-phandle @@ -76,6 +78,7 @@ allOf: contains: enum: - samsung,exynos850-wdt + - samsung,exynosautov9-wdt then: properties: clocks: diff --git a/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml b/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml index 6461eb4f4a27..92df6e453f64 100644 --- a/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml +++ b/Documentation/devicetree/bindings/watchdog/snps,dw-wdt.yaml @@ -20,6 +20,7 @@ properties: - enum: - rockchip,px30-wdt - rockchip,rk3066-wdt + - rockchip,rk3128-wdt - rockchip,rk3188-wdt - rockchip,rk3228-wdt - rockchip,rk3288-wdt diff --git a/Documentation/devicetree/bindings/watchdog/toshiba,visconti-wdt.yaml b/Documentation/devicetree/bindings/watchdog/toshiba,visconti-wdt.yaml index 690e19ce4b87..eba083822d1f 100644 --- a/Documentation/devicetree/bindings/watchdog/toshiba,visconti-wdt.yaml +++ b/Documentation/devicetree/bindings/watchdog/toshiba,visconti-wdt.yaml @@ -35,20 +35,16 @@ additionalProperties: false examples: - | + #include <dt-bindings/clock/toshiba,tmpv770x.h> + soc { #address-cells = <2>; #size-cells = <2>; - wdt_clk: wdt-clk { - compatible = "fixed-clock"; - clock-frequency = <150000000>; - #clock-cells = <0>; - }; - - watchdog@28330000 { + wdt: watchdog@28330000 { compatible = "toshiba,visconti-wdt"; reg = <0 0x28330000 0 0x1000>; - clocks = <&wdt_clk>; timeout-sec = <20>; + clocks = <&pismu TMPV770X_CLK_WDTCLK>; }; }; diff --git a/Documentation/devicetree/bindings/watchdog/xlnx,xps-timebase-wdt.yaml b/Documentation/devicetree/bindings/watchdog/xlnx,xps-timebase-wdt.yaml new file mode 100644 index 000000000000..493a1c954707 --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/xlnx,xps-timebase-wdt.yaml @@ -0,0 +1,68 @@ +# SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/watchdog/xlnx,xps-timebase-wdt.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Xilinx AXI/PLB softcore and window Watchdog Timer + +maintainers: + - Shubhrajyoti Datta <[email protected]> + - Srinivas Neeli <[email protected]> + +description: + The Timebase watchdog timer(WDT) is a free-running 32 bit counter. + WDT uses a dual-expiration architecture. After one expiration of + the timeout interval, an interrupt is generated and the WDT state + bit is set to one in the status register. If the state bit is not + cleared (by writing a one to the state bit) before the next + expiration of the timeout interval, a WDT reset is generated. + +allOf: + - $ref: watchdog.yaml# + +properties: + compatible: + enum: + - xlnx,xps-timebase-wdt-1.01.a + - xlnx,xps-timebase-wdt-1.00.a + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + + clock-frequency: + description: Frequency of clock in Hz + + xlnx,wdt-interval: + $ref: /schemas/types.yaml#/definitions/uint32 + description: Watchdog timeout interval + minimum: 8 + maximum: 32 + + xlnx,wdt-enable-once: + $ref: /schemas/types.yaml#/definitions/uint32 + enum: [0, 1] + description: If watchdog is configured as enable once, + then the watchdog cannot be disabled after + it has been enabled. + +required: + - compatible + - reg + +unevaluatedProperties: false + +examples: + - | + watchdog@40100000 { + compatible = "xlnx,xps-timebase-wdt-1.00.a"; + reg = <0x40100000 0x1000>; + clock-frequency = <50000000>; + clocks = <&clkc 15>; + xlnx,wdt-enable-once = <0x0>; + xlnx,wdt-interval = <0x1b>; + }; +... 
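Aside: the xlnx,wdt-interval property above encodes the watchdog period as 2^<val> clock cycles. As an illustration only (this is not the in-tree xilinx_wdt driver; the helper name is made up), a consumer could derive a timeout in seconds like this::

    #include <linux/clk.h>
    #include <linux/math.h>
    #include <linux/of.h>

    /* Illustrative sketch: convert xlnx,wdt-interval (timeout expressed
     * as 2^interval clock cycles) into whole seconds, rounding up. */
    static int xwdt_timeout_secs(struct device_node *np, struct clk *clk)
    {
            unsigned long rate = clk_get_rate(clk);
            u32 interval;
            int ret;

            ret = of_property_read_u32(np, "xlnx,wdt-interval", &interval);
            if (ret)
                    return ret;
            if (!rate || interval > 31)
                    return -EINVAL;

            return DIV_ROUND_UP_ULL(1ULL << interval, rate);
    }

With the binding example's values (interval 0x1b, a 50 MHz clock) this gives 2^27 / 50000000 ≈ 2.7, i.e. roughly a 3 second timeout.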
diff --git a/Documentation/driver-api/vfio-mediated-device.rst b/Documentation/driver-api/vfio-mediated-device.rst index f47dca6645aa..fdf7d69378ec 100644 --- a/Documentation/driver-api/vfio-mediated-device.rst +++ b/Documentation/driver-api/vfio-mediated-device.rst @@ -58,19 +58,19 @@ devices as examples, as these devices are the first devices to use this module:: | MDEV CORE | | MODULE | | mdev.ko | - | +-----------+ | mdev_register_device() +--------------+ + | +-----------+ | mdev_register_parent() +--------------+ | | | +<------------------------+ | | | | | | nvidia.ko |<-> physical | | | +------------------------>+ | device | | | | callbacks +--------------+ | | Physical | | - | | device | | mdev_register_device() +--------------+ + | | device | | mdev_register_parent() +--------------+ | | interface | |<------------------------+ | | | | | | i915.ko |<-> physical | | | +------------------------>+ | device | | | | callbacks +--------------+ | | | | - | | | | mdev_register_device() +--------------+ + | | | | mdev_register_parent() +--------------+ | | | +<------------------------+ | | | | | | ccw_device.ko|<-> physical | | | +------------------------>+ | device @@ -103,7 +103,8 @@ structure to represent a mediated device's driver:: struct mdev_driver { int (*probe) (struct mdev_device *dev); void (*remove) (struct mdev_device *dev); - struct attribute_group **supported_type_groups; + unsigned int (*get_available)(struct mdev_type *mtype); + ssize_t (*show_description)(struct mdev_type *mtype, char *buf); struct device_driver driver; }; @@ -125,8 +126,8 @@ vfio_device_ops. When a driver wants to add the GUID creation sysfs to an existing device it has probe'd to then it should call:: - int mdev_register_device(struct device *dev, - struct mdev_driver *mdev_driver); + int mdev_register_parent(struct mdev_parent *parent, struct device *dev, + struct mdev_driver *mdev_driver); This will provide the 'mdev_supported_types/XX/create' files which can then be used to trigger the creation of a mdev_device. The created mdev_device will be @@ -134,7 +135,7 @@ attached to the specified driver. When the driver needs to remove itself it calls:: - void mdev_unregister_device(struct device *dev); + void mdev_unregister_parent(struct mdev_parent *parent); Which will unbind and destroy all the created mdevs and remove the sysfs files. @@ -200,17 +201,14 @@ Directories and files under the sysfs for Each Physical Device sprintf(buf, "%s-%s", dev_driver_string(parent->dev), group->name); - (or using mdev_parent_dev(mdev) to arrive at the parent device outside - of the core mdev code) - * device_api - This attribute should show which device API is being created, for example, + This attribute shows which device API is being created, for example, "vfio-pci" for a PCI device. * available_instances - This attribute should show the number of devices of type <type-id> that can be + This attribute shows the number of devices of type <type-id> that can be created. * [device] @@ -220,11 +218,11 @@ Directories and files under the sysfs for Each Physical Device * name - This attribute should show human readable name. This is optional attribute. + This attribute shows a human readable name. * description - This attribute should show brief features/description of the type. This is + This attribute can show brief features/description of the type. This is an optional attribute. 
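To make the reworked mdev API above concrete, here is a minimal registration sketch. It follows the signatures shown in this document; the sample_* driver is entirely hypothetical, and error/teardown paths are trimmed::

    #include <linux/mdev.h>
    #include <linux/sysfs.h>

    /* Hypothetical mdev driver: all sample_* names are placeholders. */
    static int sample_probe(struct mdev_device *mdev)
    {
            /* allocate per-instance state, register a vfio_device */
            return 0;
    }

    static void sample_remove(struct mdev_device *mdev)
    {
            /* undo whatever sample_probe() set up */
    }

    static unsigned int sample_get_available(struct mdev_type *mtype)
    {
            return 1; /* instances of this type still creatable */
    }

    static ssize_t sample_show_description(struct mdev_type *mtype, char *buf)
    {
            return sysfs_emit(buf, "sample mediated device\n");
    }

    static struct mdev_driver sample_driver = {
            .probe            = sample_probe,
            .remove           = sample_remove,
            .get_available    = sample_get_available,
            .show_description = sample_show_description,
            .driver = {
                    .name = "sample_mdev",
            },
    };

    static struct mdev_parent sample_parent;

    /* Called from the physical device's probe path; pair with
     * mdev_unregister_parent(&sample_parent) on removal. */
    static int sample_register(struct device *dev)
    {
            return mdev_register_parent(&sample_parent, dev, &sample_driver);
    }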
Directories and Files Under the sysfs for Each mdev Device diff --git a/Documentation/fault-injection/notifier-error-inject.rst b/Documentation/fault-injection/notifier-error-inject.rst index 1668b6e48d3a..fdf2dc433ead 100644 --- a/Documentation/fault-injection/notifier-error-inject.rst +++ b/Documentation/fault-injection/notifier-error-inject.rst @@ -91,8 +91,8 @@ For more usage examples There are tools/testing/selftests using the notifier error injection features for CPU and memory notifiers. - * tools/testing/selftests/cpu-hotplug/on-off-test.sh - * tools/testing/selftests/memory-hotplug/on-off-test.sh + * tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh + * tools/testing/selftests/memory-hotplug/mem-on-off-test.sh These scripts first do simple online and offline tests and then do fault injection tests if notifier error injection module is available. diff --git a/Documentation/filesystems/ceph.rst b/Documentation/filesystems/ceph.rst index 4942e018db85..76ce938e7024 100644 --- a/Documentation/filesystems/ceph.rst +++ b/Documentation/filesystems/ceph.rst @@ -203,7 +203,6 @@ For more information on Ceph, see the home page at The Linux kernel client source tree is available at - https://github.com/ceph/ceph-client.git - - git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git and the source for the full system is at https://github.com/ceph/ceph.git diff --git a/Documentation/filesystems/ubifs.rst b/Documentation/filesystems/ubifs.rst index e6ee99762534..ced2f7679ddb 100644 --- a/Documentation/filesystems/ubifs.rst +++ b/Documentation/filesystems/ubifs.rst @@ -59,7 +59,7 @@ differences. * JFFS2 is a write-through file-system, while UBIFS supports write-back, which makes UBIFS much faster on writes. -Similarly to JFFS2, UBIFS supports on-the-flight compression which makes +Similarly to JFFS2, UBIFS supports on-the-fly compression which makes it possible to fit quite a lot of data to the flash. Similarly to JFFS2, UBIFS is tolerant of unclean reboots and power-cuts. diff --git a/Documentation/mm/page_owner.rst b/Documentation/mm/page_owner.rst index f18fd8907049..127514955a5e 100644 --- a/Documentation/mm/page_owner.rst +++ b/Documentation/mm/page_owner.rst @@ -38,22 +38,10 @@ not affect to allocation performance, especially if the static keys jump label patching functionality is available. Following is the kernel's code size change due to this facility. -- Without page owner:: - - text data bss dec hex filename - 48392 2333 644 51369 c8a9 mm/page_alloc.o - -- With page owner:: - - text data bss dec hex filename - 48800 2445 644 51889 cab1 mm/page_alloc.o - 6662 108 29 6799 1a8f mm/page_owner.o - 1025 8 8 1041 411 mm/page_ext.o - -Although, roughly, 8 KB code is added in total, page_alloc.o increase by -520 bytes and less than half of it is in hotpath. Building the kernel with -page owner and turning it on if needed would be great option to debug -kernel memory problem. +Although enabling page owner increases kernel size by several kilobytes, +most of this code is outside page allocator and its hot path. Building +the kernel with page owner and turning it on if needed would be great +option to debug kernel memory problem. There is one notice that is caused by implementation detail. page owner stores information into the memory from struct page extension. 
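The page_owner text above (together with its updated zh_CN translation later in this diff) notes that /sys/kernel/debug/page_owner can be fseek'd to a starting pfn. A minimal userspace reader along those lines, with error handling trimmed::

    #include <stdio.h>

    /* Dump page_owner records beginning at pfn_start; the file supports
     * fseek(fp, pfn, SEEK_SET), as documented. */
    int dump_page_owner(long pfn_start)
    {
            char buf[4096];
            size_t n;
            FILE *fp = fopen("/sys/kernel/debug/page_owner", "r");

            if (!fp)
                    return -1;
            if (fseek(fp, pfn_start, SEEK_SET) == 0)
                    while ((n = fread(buf, 1, sizeof(buf), fp)) > 0)
                            fwrite(buf, 1, n, stdout);
            fclose(fp);
            return 0;
    }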
This memory diff --git a/Documentation/networking/phy.rst b/Documentation/networking/phy.rst index 06f4fcdb58b6..d11329a08984 100644 --- a/Documentation/networking/phy.rst +++ b/Documentation/networking/phy.rst @@ -120,7 +120,7 @@ required delays, as defined per the RGMII standard, several options may be available: * Some SoCs may offer a pin pad/mux/controller capable of configuring a given - set of pins'strength, delays, and voltage; and it may be a suitable + set of pins' strength, delays, and voltage; and it may be a suitable option to insert the expected 2ns RGMII delay. * Modifying the PCB design to include a fixed delay (e.g: using a specifically diff --git a/Documentation/process/howto.rst b/Documentation/process/howto.rst index cd6997a9d203..bd15c393ba3c 100644 --- a/Documentation/process/howto.rst +++ b/Documentation/process/howto.rst @@ -379,7 +379,7 @@ to subscribe and unsubscribe from the list can be found at: There are archives of the mailing list on the web in many different places. Use a search engine to find these archives. For example: - http://dir.gmane.org/gmane.linux.kernel + https://lore.kernel.org/lkml/ It is highly recommended that you search the archives about the topic you want to bring up, before you post it to the list. A lot of things diff --git a/Documentation/s390/vfio-ap.rst b/Documentation/s390/vfio-ap.rst index 61a0a3c6c7b4..00f4a04f6d4c 100644 --- a/Documentation/s390/vfio-ap.rst +++ b/Documentation/s390/vfio-ap.rst @@ -297,7 +297,7 @@ of the VFIO AP mediated device driver:: | MDEV CORE | | MODULE | | mdev.ko | - | +---------+ | mdev_register_device() +--------------+ + | +---------+ | mdev_register_parent() +--------------+ | |Physical | +<-----------------------+ | | | device | | | vfio_ap.ko |<-> matrix | |interface| +----------------------->+ | device diff --git a/Documentation/s390/vfio-ccw.rst b/Documentation/s390/vfio-ccw.rst index 8aad08a8b8a5..ea928a3806f4 100644 --- a/Documentation/s390/vfio-ccw.rst +++ b/Documentation/s390/vfio-ccw.rst @@ -156,7 +156,7 @@ Below is a high Level block diagram:: | MDEV CORE | | MODULE | | mdev.ko | - | +---------+ | mdev_register_device() +--------------+ + | +---------+ | mdev_register_parent() +--------------+ | |Physical | +<-----------------------+ | | | device | | | vfio_ccw.ko |<-> subchannel | |interface| +----------------------->+ | device diff --git a/Documentation/tools/rtla/rtla-timerlat-top.rst b/Documentation/tools/rtla/rtla-timerlat-top.rst index 1c321de1c171..7c4e4b109493 100644 --- a/Documentation/tools/rtla/rtla-timerlat-top.rst +++ b/Documentation/tools/rtla/rtla-timerlat-top.rst @@ -39,7 +39,7 @@ higher than *30 us*. It is also set to stop the session if a *Thread* timer latency higher than *30 us* is hit. Finally, it is set to save the trace buffer if the stop condition is hit:: - [root@alien ~]# rtla timerlat top -s 30 -t 30 -T + [root@alien ~]# rtla timerlat top -s 30 -T 30 -t Timer Latency 0 00:00:59 | IRQ Timer Latency (us) | Thread Timer Latency (us) CPU COUNT | cur min avg max | cur min avg max diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst index b37dc19e4d40..60bceb018d6a 100644 --- a/Documentation/trace/ftrace.rst +++ b/Documentation/trace/ftrace.rst @@ -564,7 +564,7 @@ of ftrace. 
Here is a list of some of the key files: start:: - trace_fd = open("trace_marker", WR_ONLY); + trace_fd = open("trace_marker", O_WRONLY); Note: Writing into the trace_marker file can also initiate triggers that are written into /sys/kernel/tracing/events/ftrace/print/trigger diff --git a/Documentation/translations/it_IT/process/howto.rst b/Documentation/translations/it_IT/process/howto.rst index 16ad5622d549..15c08aea1dfe 100644 --- a/Documentation/translations/it_IT/process/howto.rst +++ b/Documentation/translations/it_IT/process/howto.rst @@ -394,7 +394,7 @@ trovati al sito: Ci sono diversi archivi della lista di discussione. Usate un qualsiasi motore di ricerca per trovarli. Per esempio: - http://dir.gmane.org/gmane.linux.kernel + https://lore.kernel.org/lkml/ É caldamente consigliata una ricerca in questi archivi sul tema che volete sollevare, prima di pubblicarlo sulla lista. Molte cose sono già state diff --git a/Documentation/translations/ja_JP/howto.rst b/Documentation/translations/ja_JP/howto.rst index 649e2ff2a407..b47a682d8ded 100644 --- a/Documentation/translations/ja_JP/howto.rst +++ b/Documentation/translations/ja_JP/howto.rst @@ -410,7 +410,7 @@ https://bugzilla.kernel.org に行ってください。もし今後のバグレ� このメーリングリストのアーカイブは web 上の多数の場所に存在します。こ れらのアーカイブを探すにはサーチエンジンを使いましょう。例えば- - http://dir.gmane.org/gmane.linux.kernel + https://lore.kernel.org/lkml/ リストに投稿する前にすでにその話題がアーカイブに存在するかどうかを検索 することを是非やってください。多数の事がすでに詳細に渡って議論されてお diff --git a/Documentation/translations/ko_KR/howto.rst b/Documentation/translations/ko_KR/howto.rst index e43970584ca4..df53fafd1b10 100644 --- a/Documentation/translations/ko_KR/howto.rst +++ b/Documentation/translations/ko_KR/howto.rst @@ -386,7 +386,7 @@ https://bugzilla.kernel.org 를 체크하고자 할 수도 있다; 소수의 커 웹상의 많은 다른 곳에도 메일링 리스트의 아카이브들이 있다. 이러한 아카이브들을 찾으려면 검색 엔진을 사용하라. 예를 들어: - http://dir.gmane.org/gmane.linux.kernel + https://lore.kernel.org/lkml/ 여러분이 새로운 문제에 관해 리스트에 올리기 전에 말하고 싶은 주제에 관한 것을 아카이브에서 먼저 찾아보기를 강력히 권장한다. 이미 상세하게 토론된 많은 diff --git a/Documentation/translations/zh_CN/arch.rst b/Documentation/translations/zh_CN/arch.rst new file mode 100644 index 000000000000..690e173d8b2a --- /dev/null +++ b/Documentation/translations/zh_CN/arch.rst @@ -0,0 +1,29 @@ +.. SPDX-License-Identifier: GPL-2.0 + +处理器体系结构 +============== + +以下文档提供了具体架构实现的编程细节。 + +.. toctree:: + :maxdepth: 2 + + mips/index + arm64/index + riscv/index + openrisc/index + parisc/index + loongarch/index + +TODOList: + +* arm/index +* ia64/index +* m68k/index +* nios2/index +* powerpc/index +* s390/index +* sh/index +* sparc/index +* x86/index +* xtensa/index diff --git a/Documentation/translations/zh_CN/devicetree/changesets.rst b/Documentation/translations/zh_CN/devicetree/changesets.rst index 2ace05f3c377..3df1b03c5695 100644 --- a/Documentation/translations/zh_CN/devicetree/changesets.rst +++ b/Documentation/translations/zh_CN/devicetree/changesets.rst @@ -1,7 +1,7 @@ .. SPDX-License-Identifier: GPL-2.0 .. include:: ../disclaimer-zh_CN.rst -:Original: Documentation/Devicetree/changesets.rst +:Original: Documentation/devicetree/changesets.rst :翻译: diff --git a/Documentation/translations/zh_CN/devicetree/dynamic-resolution-notes.rst b/Documentation/translations/zh_CN/devicetree/dynamic-resolution-notes.rst index 115190341305..6dfd946d7093 100644 --- a/Documentation/translations/zh_CN/devicetree/dynamic-resolution-notes.rst +++ b/Documentation/translations/zh_CN/devicetree/dynamic-resolution-notes.rst @@ -1,7 +1,7 @@ .. SPDX-License-Identifier: GPL-2.0 .. 
include:: ../disclaimer-zh_CN.rst -:Original: Documentation/Devicetree/dynamic-resolution-notes.rst +:Original: Documentation/devicetree/dynamic-resolution-notes.rst :翻译: diff --git a/Documentation/translations/zh_CN/devicetree/kernel-api.rst b/Documentation/translations/zh_CN/devicetree/kernel-api.rst index 6aa3b685494e..2fb729368b40 100644 --- a/Documentation/translations/zh_CN/devicetree/kernel-api.rst +++ b/Documentation/translations/zh_CN/devicetree/kernel-api.rst @@ -1,7 +1,7 @@ .. SPDX-License-Identifier: GPL-2.0 .. include:: ../disclaimer-zh_CN.rst -:Original: Documentation/Devicetree/kernel-api.rst +:Original: Documentation/devicetree/kernel-api.rst :翻译: diff --git a/Documentation/translations/zh_CN/devicetree/overlay-notes.rst b/Documentation/translations/zh_CN/devicetree/overlay-notes.rst index 1bd482cb0a1b..43e3c0bc5a9f 100644 --- a/Documentation/translations/zh_CN/devicetree/overlay-notes.rst +++ b/Documentation/translations/zh_CN/devicetree/overlay-notes.rst @@ -1,7 +1,7 @@ .. SPDX-License-Identifier: GPL-2.0 .. include:: ../disclaimer-zh_CN.rst -:Original: Documentation/Devicetree/overlay-notes.rst +:Original: Documentation/devicetree/overlay-notes.rst :翻译: diff --git a/Documentation/translations/zh_CN/index.rst b/Documentation/translations/zh_CN/index.rst index 2fc60e60feb4..ec99ef5fe990 100644 --- a/Documentation/translations/zh_CN/index.rst +++ b/Documentation/translations/zh_CN/index.rst @@ -26,165 +26,100 @@ 顺便说下,中文文档也需要遵守内核编码风格,风格中中文和英文的主要不同就是中文 的字符标点占用两个英文字符宽度, 所以,当英文要求不要超过每行100个字符时, 中文就不要超过50个字符。另外,也要注意'-','=' 等符号与相关标题的对齐。在将 -补丁提交到社区之前,一定要进行必要的checkpatch.pl检查和编译测试。 +补丁提交到社区之前,一定要进行必要的 ``checkpatch.pl`` 检查和编译测试。 -许可证文档 ----------- - -下面的文档介绍了Linux内核源代码的许可证(GPLv2)、如何在源代码树中正确标记 -单个文件的许可证、以及指向完整许可证文本的链接。 +与Linux 内核社区一起工作 +------------------------ -* Documentation/translations/zh_CN/process/license-rules.rst - -用户文档 --------- - -下面的手册是为内核用户编写的——即那些试图让它在给定系统上以最佳方式工作的 -用户。 +与内核开发社区进行协作并将工作推向上游的基本指南。 .. toctree:: - :maxdepth: 2 + :maxdepth: 1 - admin-guide/index - -TODOList: - -* kbuild/index + process/development-process + process/submitting-patches + 行为准则 <process/code-of-conduct> + maintainer/index + 完整开发流程文档 <process/index> -固件相关文档 ------------- +内部API文档 +----------- -下列文档描述了内核需要的平台固件相关信息。 +开发人员使用的内核内部交互接口手册。 .. toctree:: - :maxdepth: 2 + :maxdepth: 1 - devicetree/index + core-api/index + driver-api/index + 内核中的锁 <locking/index> TODOList: -* firmware-guide/index - -应用程序开发人员文档 --------------------- - -用户空间API手册涵盖了描述应用程序开发人员可见内核接口方面的文档。 +* subsystem-apis -TODOlist: +开发工具和流程 +-------------- -* userspace-api/index - -内核开发简介 ------------- - -这些手册包含有关如何开发内核的整体信息。内核社区非常庞大,一年下来有数千名 -开发人员做出贡献。与任何大型社区一样,知道如何完成任务将使得更改合并的过程 -变得更加容易。 +为所有内核开发人员提供有用信息的各种其他手册。 .. toctree:: - :maxdepth: 2 + :maxdepth: 1 - process/index - dev-tools/index + process/license-rules doc-guide/index + dev-tools/index + dev-tools/testing-overview kernel-hacking/index - maintainer/index TODOList: * trace/index * fault-injection/index * livepatch/index -* rust/index -内核API文档 ------------ +面向用户的文档 +-------------- -以下手册从内核开发人员的角度详细介绍了特定的内核子系统是如何工作的。这里的 -大部分信息都是直接从内核源代码获取的,并根据需要添加补充材料(或者至少是在 -我们设法添加的时候——可能不是所有的都是有需要的)。 +下列手册针对 +希望内核在给定系统上以最佳方式工作的*用户*, +和查找内核用户空间API信息的程序开发人员。 .. 
toctree:: - :maxdepth: 2 + :maxdepth: 1 - core-api/index - driver-api/index - locking/index - accounting/index - cpu-freq/index - iio/index - infiniband/index - power/index - virt/index - sound/index - filesystems/index - scheduler/index - mm/index - peci/index - PCI/index + admin-guide/index + admin-guide/reporting-issues.rst TODOList: -* block/index -* cdrom/index -* ide/index -* fb/index -* fpga/index -* hid/index -* i2c/index -* isdn/index -* leds/index -* netlabel/index -* networking/index -* pcmcia/index -* target/index -* timers/index -* spi/index -* w1/index -* watchdog/index -* input/index -* hwmon/index -* gpu/index -* security/index -* crypto/index -* bpf/index -* usb/index -* scsi/index -* misc-devices/index -* mhi/index - -体系结构无关文档 ----------------- +* 内核构建系统 <kbuild/index> +* 用户空间工具 <tools/index> +* userspace-api/index -TODOList: +也可参考独立于内核文档的 `Linux 手册页 <https://www.kernel.org/doc/man-pages/>`_ 。 -* asm-annotations +固件相关文档 +------------ -特定体系结构文档 ----------------- +下列文档描述了内核需要的平台固件相关信息。 .. toctree:: :maxdepth: 2 - mips/index - arm64/index - riscv/index - openrisc/index - parisc/index - loongarch/index + devicetree/index TODOList: -* arm/index -* ia64/index -* m68k/index -* nios2/index -* powerpc/index -* s390/index -* sh/index -* sparc/index -* x86/index -* xtensa/index +* firmware-guide/index + +体系结构文档 +------------ + +.. toctree:: + :maxdepth: 2 + + arch 其他文档 -------- @@ -195,9 +130,9 @@ TODOList: TODOList: * staging/index -* watch_queue -目录和表格 + +索引和表格 ---------- * :ref:`genindex` diff --git a/Documentation/translations/zh_CN/mm/ksm.rst b/Documentation/translations/zh_CN/mm/ksm.rst index d1f82e857ad7..f0f458753d0c 100644 --- a/Documentation/translations/zh_CN/mm/ksm.rst +++ b/Documentation/translations/zh_CN/mm/ksm.rst @@ -30,7 +30,7 @@ KSM的用户空间的接口在Documentation/translations/zh_CN/admin-guide/mm/ks KSM维护着稳定树中的KSM页的逆映射信息。 当KSM页面的共享数小于 ``max_page_sharing`` 的虚拟内存区域(VMAs)时,则代表了 -KSM页的稳定树其中的节点指向了一个rmap_item结构体类型的列表。同时,这个KSM页 +KSM页的稳定树其中的节点指向了一个ksm_rmap_item结构体类型的列表。同时,这个KSM页 的 ``page->mapping`` 指向了该稳定树节点。 如果共享数超过了阈值,KSM将给稳定树添加第二个维度。稳定树就变成链接一个或多 diff --git a/Documentation/translations/zh_CN/mm/page_owner.rst b/Documentation/translations/zh_CN/mm/page_owner.rst index b7f81d7a6589..21a6a0837d42 100644 --- a/Documentation/translations/zh_CN/mm/page_owner.rst +++ b/Documentation/translations/zh_CN/mm/page_owner.rst @@ -74,15 +74,19 @@ page owner在默认情况下是禁用的。所以,如果你想使用它,你� cat /sys/kernel/debug/page_owner > page_owner_full.txt ./page_owner_sort page_owner_full.txt sorted_page_owner.txt - ``page_owner_full.txt`` 的一般输出情况如下(输出信息无翻译价值):: + ``page_owner_full.txt`` 的一般输出情况如下:: Page allocated via order XXX, ... PFN XXX ... - // Detailed stack + // 栈详情 Page allocated via order XXX, ... PFN XXX ... 
- // Detailed stack + // 栈详情 + 默认情况下,它将以一个给定的pfn开始,做完整的pfn转储,且page_owner支持fseek。 + + FILE *fp = fopen("/sys/kernel/debug/page_owner", "r"); + fseek(fp, pfn_start, SEEK_SET); ``page_owner_sort`` 工具忽略了 ``PFN`` 行,将剩余的行放在buf中,使用regexp提 取页序值,计算buf的次数和页数,最后根据参数进行排序。 diff --git a/Documentation/translations/zh_CN/process/howto.rst b/Documentation/translations/zh_CN/process/howto.rst index 1455190dc087..5bf953146929 100644 --- a/Documentation/translations/zh_CN/process/howto.rst +++ b/Documentation/translations/zh_CN/process/howto.rst @@ -306,7 +306,7 @@ bugzilla.kernel.org是Linux内核开发者们用来跟踪内核Bug的网站。� 网上很多地方都有这个邮件列表的存档(archive)。可以使用搜索引擎来找到这些 存档。比如: - http://dir.gmane.org/gmane.linux.kernel + https://lore.kernel.org/lkml/ 在发信之前,我们强烈建议你先在存档中搜索你想要讨论的问题。很多已经被详细 讨论过的问题只在邮件列表的存档中可以找到。 diff --git a/Documentation/translations/zh_CN/process/index.rst b/Documentation/translations/zh_CN/process/index.rst index a683dbea0c83..a1a35f88f4ae 100644 --- a/Documentation/translations/zh_CN/process/index.rst +++ b/Documentation/translations/zh_CN/process/index.rst @@ -10,6 +10,7 @@ .. _cn_process_index: +======================== 与Linux 内核社区一起工作 ======================== diff --git a/Documentation/translations/zh_TW/process/howto.rst b/Documentation/translations/zh_TW/process/howto.rst index 68ae4411285b..86b0d4c6d6f9 100644 --- a/Documentation/translations/zh_TW/process/howto.rst +++ b/Documentation/translations/zh_TW/process/howto.rst @@ -309,7 +309,7 @@ bugzilla.kernel.org是Linux內核開發者們用來跟蹤內核Bug的網站。� 網上很多地方都有這個郵件列表的存檔(archive)。可以使用搜尋引擎來找到這些 存檔。比如: - http://dir.gmane.org/gmane.linux.kernel + https://lore.kernel.org/lkml/ 在發信之前,我們強烈建議你先在存檔中搜索你想要討論的問題。很多已經被詳細 討論過的問題只在郵件列表的存檔中可以找到。 diff --git a/MAINTAINERS b/MAINTAINERS index a497bd2b0ea6..fb0a565082fc 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -18662,6 +18662,7 @@ F: drivers/misc/sgi-xp/ SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS M: Karsten Graul <[email protected]> M: Wenjia Zhang <[email protected]> +M: Jan Karcher <[email protected]> S: Supported W: http://www.ibm.com/developerworks/linux/linux390/ @@ -21550,6 +21551,7 @@ R: Cornelia Huck <[email protected]> S: Maintained T: git git://github.com/awilliam/linux-vfio.git +F: Documentation/ABI/testing/sysfs-devices-vfio-dev F: Documentation/driver-api/vfio.rst F: drivers/vfio/ F: include/linux/vfio.h @@ -21730,6 +21732,10 @@ F: include/linux/virtio*.h F: include/uapi/linux/virtio_*.h F: tools/virtio/ +IFCVF VIRTIO DATA PATH ACCELERATOR +R: Zhu Lingshan <[email protected]> +F: drivers/vdpa/ifcvf/ + VIRTIO BALLOON M: "Michael S. Tsirkin" <[email protected]> M: David Hildenbrand <[email protected]> diff --git a/arch/alpha/configs/defconfig b/arch/alpha/configs/defconfig index 7e9336930880..6a39fe8ce9e5 100644 --- a/arch/alpha/configs/defconfig +++ b/arch/alpha/configs/defconfig @@ -65,7 +65,7 @@ CONFIG_NFSD=m CONFIG_NLS_CODEPAGE_437=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_ALPHA_LEGACY_START_ADDRESS=y CONFIG_MATHEMU=y CONFIG_CRYPTO_HMAC=y diff --git a/arch/alpha/include/asm/processor.h b/arch/alpha/include/asm/processor.h index 43e234c518b1..714abe494e5f 100644 --- a/arch/alpha/include/asm/processor.h +++ b/arch/alpha/include/asm/processor.h @@ -36,8 +36,6 @@ extern void start_thread(struct pt_regs *, unsigned long, unsigned long); /* Free all resources held by a thread. 
*/ struct task_struct; -extern void release_thread(struct task_struct *); - unsigned long __get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index e2e25f8b5e76..dbf1bc5e2ad2 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -225,11 +225,6 @@ flush_thread(void) current_thread_info()->pcb.unique = 0; } -void -release_thread(struct task_struct *dead_task) -{ -} - /* * Copy architecture-specific thread state */ diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index b4fbbba30aa2..33bf3a627002 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c @@ -491,9 +491,9 @@ setup_arch(char **cmdline_p) boot flags depending on the boot mode, we need some shorthand. This should do for installation. */ if (strcmp(COMMAND_LINE, "INSTALL") == 0) { - strlcpy(command_line, "root=/dev/fd0 load_ramdisk=1", sizeof command_line); + strscpy(command_line, "root=/dev/fd0 load_ramdisk=1", sizeof(command_line)); } else { - strlcpy(command_line, COMMAND_LINE, sizeof command_line); + strscpy(command_line, COMMAND_LINE, sizeof(command_line)); } strcpy(boot_command_line, command_line); *cmdline_p = command_line; diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig index d93b65008d4a..4a94d1684ed6 100644 --- a/arch/arc/configs/tb10x_defconfig +++ b/arch/arc/configs/tb10x_defconfig @@ -90,7 +90,7 @@ CONFIG_TMPFS=y CONFIG_CONFIGFS_FS=y # CONFIG_MISC_FILESYSTEMS is not set # CONFIG_NETWORK_FILESYSTEMS is not set -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_STRIP_ASM_SYMS=y CONFIG_DEBUG_FS=y CONFIG_HEADERS_INSTALL=y diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h index 54db9d7bb562..fb844fce1ab6 100644 --- a/arch/arc/include/asm/processor.h +++ b/arch/arc/include/asm/processor.h @@ -43,9 +43,6 @@ struct task_struct; #define task_pt_regs(p) \ ((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1) -/* Free all resources held by a thread */ -#define release_thread(thread) do { } while (0) - /* * A lot of busy-wait loops in SMP are based off of non-volatile data otherwise * get optimised away by gcc diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index bdc35c0e8dfb..326864f79d18 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -81,9 +81,6 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset, /* Forward declaration, a strange C thing */ struct task_struct; -/* Free all resources held by a thread. 
*/ -extern void release_thread(struct task_struct *); - unsigned long __get_wchan(struct task_struct *p); #define task_pt_regs(p) \ diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 96f3fbd51764..fc30df88ffbe 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -232,10 +232,6 @@ void flush_thread(void) thread_notify(THREAD_NOTIFY_FLUSH, thread); } -void release_thread(struct task_struct *dead_task) -{ -} - asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 61883518fc50..445aa3af3b76 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -323,9 +323,6 @@ static inline bool is_ttbr1_addr(unsigned long addr) /* Forward declaration, a strange C thing */ struct task_struct; -/* Free all resources held by a thread. */ -extern void release_thread(struct task_struct *); - unsigned long __get_wchan(struct task_struct *p); void update_sctlr_el1(u64 sctlr); diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 92bcc1768f0b..9015f49c206e 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -279,10 +279,6 @@ void flush_thread(void) flush_tagged_addr_state(); } -void release_thread(struct task_struct *dead_task) -{ -} - void arch_release_task_struct(struct task_struct *tsk) { fpsimd_release_task(tsk); diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h index 9638206bc44f..63ad71fab30d 100644 --- a/arch/csky/include/asm/processor.h +++ b/arch/csky/include/asm/processor.h @@ -69,11 +69,6 @@ do { \ /* Forward declaration, a strange C thing */ struct task_struct; -/* Free all resources held by a thread. */ -static inline void release_thread(struct task_struct *dead_task) -{ -} - /* Prepare to copy thread state - unlazy all lazy status */ #define prepare_to_copy(tsk) do { } while (0) diff --git a/arch/hexagon/include/asm/processor.h b/arch/hexagon/include/asm/processor.h index 615f7e49968e..0cd39c2cdf8f 100644 --- a/arch/hexagon/include/asm/processor.h +++ b/arch/hexagon/include/asm/processor.h @@ -60,10 +60,6 @@ struct thread_struct { #define KSTK_EIP(tsk) (pt_elr(task_pt_regs(tsk))) #define KSTK_ESP(tsk) (pt_psp(task_pt_regs(tsk))) -/* Free all resources held by a thread; defined in process.c */ -extern void release_thread(struct task_struct *dead_task); - -/* Get wait channel for task P. */ extern unsigned long __get_wchan(struct task_struct *p); /* The following stuff is pretty HEXAGON specific. 
*/ diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c index f0552f98a7ba..e15eeaebd785 100644 --- a/arch/hexagon/kernel/process.c +++ b/arch/hexagon/kernel/process.c @@ -113,13 +113,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) } /* - * Release any architecture-specific resources locked by thread - */ -void release_thread(struct task_struct *dead_task) -{ -} - -/* * Some archs flush debug and FPU info here */ void flush_thread(void) diff --git a/arch/ia64/configs/bigsur_defconfig b/arch/ia64/configs/bigsur_defconfig index a3724882295c..3e1337aceb37 100644 --- a/arch/ia64/configs/bigsur_defconfig +++ b/arch/ia64/configs/bigsur_defconfig @@ -20,7 +20,6 @@ CONFIG_UNIX=y CONFIG_INET=y # CONFIG_IPV6 is not set CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=m CONFIG_ATA=m @@ -91,7 +90,6 @@ CONFIG_NFS_V4=m CONFIG_NFSD=m CONFIG_NFSD_V4=y CONFIG_CIFS=m -CONFIG_CIFS_STATS=y CONFIG_CIFS_XATTR=y CONFIG_CIFS_POSIX=y CONFIG_NLS_CODEPAGE_437=y diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig index a3dff482a3d7..f8033bacea89 100644 --- a/arch/ia64/configs/generic_defconfig +++ b/arch/ia64/configs/generic_defconfig @@ -39,7 +39,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_CONNECTOR=y # CONFIG_PNP_DEBUG_MESSAGES is not set CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_SGI_XP=m @@ -91,7 +90,6 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y # CONFIG_HW_RANDOM is not set CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_EFI=y -CONFIG_RAW_DRIVER=m CONFIG_HPET=y CONFIG_AGP=m CONFIG_AGP_I460=m diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig index 4cd46105b020..ffebe6c503f5 100644 --- a/arch/ia64/configs/gensparse_defconfig +++ b/arch/ia64/configs/gensparse_defconfig @@ -31,11 +31,9 @@ CONFIG_IP_MULTICAST=y CONFIG_SYN_COOKIES=y # CONFIG_IPV6 is not set CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_ATA=y -CONFIG_BLK_DEV_IDECD=y CONFIG_ATA_GENERIC=y CONFIG_PATA_CMD64X=y CONFIG_ATA_PIIX=y @@ -81,7 +79,6 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y # CONFIG_HW_RANDOM is not set CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_EFI=y -CONFIG_RAW_DRIVER=m CONFIG_HPET=y CONFIG_AGP=m CONFIG_AGP_I460=m diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig index a2045d73adfa..45f5d6e2da0a 100644 --- a/arch/ia64/configs/tiger_defconfig +++ b/arch/ia64/configs/tiger_defconfig @@ -36,7 +36,6 @@ CONFIG_IP_MULTICAST=y CONFIG_SYN_COOKIES=y # CONFIG_IPV6 is not set CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_ATA=y @@ -85,7 +84,6 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y # CONFIG_HW_RANDOM is not set CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_EFI=y -CONFIG_RAW_DRIVER=m CONFIG_HPET=y CONFIG_AGP=m CONFIG_AGP_I460=m diff --git a/arch/ia64/configs/zx1_defconfig b/arch/ia64/configs/zx1_defconfig index 99f8b2a0332b..ed104550d0d5 100644 --- a/arch/ia64/configs/zx1_defconfig +++ b/arch/ia64/configs/zx1_defconfig @@ -30,7 +30,6 @@ CONFIG_PATA_CMD64X=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y -CONFIG_CHR_DEV_OSST=y CONFIG_BLK_DEV_SR=y CONFIG_CHR_DEV_SG=y CONFIG_SCSI_CONSTANTS=y diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h index 757c2f6d8d4b..d1978e004054 100644 --- a/arch/ia64/include/asm/processor.h +++ b/arch/ia64/include/asm/processor.h @@ -318,13 +318,6 @@ struct thread_struct { 
 struct mm_struct;
 struct task_struct;
 
-/*
- * Free all resources held by a thread. This is called after the
- * parent of DEAD_TASK has collected the exit status of the task via
- * wait().
- */
-#define release_thread(dead_task)
-
 /* Get wait channel for task P. */
 extern unsigned long __get_wchan (struct task_struct *p);
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index c62a66710ad6..92ede80d17fe 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1793,7 +1793,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
 	p->parent = p->real_parent = p->group_leader = p;
 	INIT_LIST_HEAD(&p->children);
 	INIT_LIST_HEAD(&p->sibling);
-	strncpy(p->comm, type, sizeof(p->comm)-1);
+	strscpy(p->comm, type, sizeof(p->comm)-1);
 }
 
 /* Caller prevents this from being called after init */
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index fd6301eafa9d..c05728044272 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -552,7 +552,7 @@ setup_arch (char **cmdline_p)
 	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
 
 	*cmdline_p = __va(ia64_boot_param->command_line);
-	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
+	strscpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
 
 	efi_init();
 	io_port_init();
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index e14db25146c2..215bf3f8cb20 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -166,3 +166,29 @@ ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, u
 	force_successful_syscall_return();
 	return addr;
 }
+
+asmlinkage long
+ia64_clock_getres(const clockid_t which_clock, struct __kernel_timespec __user *tp)
+{
+	/*
+	 * ia64's clock_gettime() syscall is implemented as a vdso call
+	 * fsys_clock_gettime(). Currently it handles only
+	 * CLOCK_REALTIME and CLOCK_MONOTONIC. Both are based on the
+	 * 'ar.itc' counter, which gets incremented at a constant
+	 * frequency. It's usually 400MHz, ~2.5x slower than the CPU
+	 * clock frequency, which is almost a 1ns hrtimer, but not quite.
+	 *
+	 * Let's special-case these timers to report correct precision
+	 * based on ITC frequency and not HZ frequency for supported
+	 * clocks.
+ */ + switch (which_clock) { + case CLOCK_REALTIME: + case CLOCK_MONOTONIC: + s64 tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, local_cpu_data->itc_freq); + struct timespec64 rtn_tp = ns_to_timespec64(tick_ns); + return put_timespec64(&rtn_tp, tp); + } + + return sys_clock_getres(which_clock, tp); +} diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl index 78b1d03e86e1..72c929d9902b 100644 --- a/arch/ia64/kernel/syscalls/syscall.tbl +++ b/arch/ia64/kernel/syscalls/syscall.tbl @@ -240,7 +240,7 @@ 228 common timer_delete sys_timer_delete 229 common clock_settime sys_clock_settime 230 common clock_gettime sys_clock_gettime -231 common clock_getres sys_clock_getres +231 common clock_getres ia64_clock_getres 232 common clock_nanosleep sys_clock_nanosleep 233 common fstatfs64 sys_fstatfs64 234 common statfs64 sys_statfs64 diff --git a/arch/loongarch/Kbuild b/arch/loongarch/Kbuild index ab5373d0a24f..b01f5cdb27e0 100644 --- a/arch/loongarch/Kbuild +++ b/arch/loongarch/Kbuild @@ -1,5 +1,6 @@ obj-y += kernel/ obj-y += mm/ +obj-y += net/ obj-y += vdso/ # for cleaning diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index e83789b34861..903096bd87f8 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -50,6 +50,7 @@ config LOONGARCH select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF select ARCH_USE_QUEUED_RWLOCKS + select ARCH_USE_QUEUED_SPINLOCKS select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT select ARCH_WANT_LD_ORPHAN_WARN select ARCH_WANTS_NO_INSTR @@ -61,6 +62,7 @@ config LOONGARCH select GENERIC_CPU_AUTOPROBE select GENERIC_ENTRY select GENERIC_GETTIMEOFDAY + select GENERIC_IOREMAP if !ARCH_IOREMAP select GENERIC_IRQ_MULTI_HANDLER select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW @@ -69,6 +71,7 @@ config LOONGARCH select GENERIC_LIB_CMPDI2 select GENERIC_LIB_LSHRDI3 select GENERIC_LIB_UCMPDI2 + select GENERIC_LIB_DEVMEM_IS_ALLOWED select GENERIC_PCI_IOMAP select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD @@ -83,6 +86,7 @@ config LOONGARCH select HAVE_CONTEXT_TRACKING_USER select HAVE_DEBUG_STACKOVERFLOW select HAVE_DMA_CONTIGUOUS + select HAVE_EBPF_JIT select HAVE_EXIT_THREAD select HAVE_FAST_GUP select HAVE_GENERIC_VDSO @@ -93,6 +97,8 @@ config LOONGARCH select HAVE_NMI select HAVE_PCI select HAVE_PERF_EVENTS + select HAVE_PERF_REGS + select HAVE_PERF_USER_STACK_DUMP select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RSEQ select HAVE_SETUP_PER_CPU_AREA if NUMA @@ -136,6 +142,14 @@ config CPU_HAS_PREFETCH bool default y +config GENERIC_BUG + def_bool y + depends on BUG + +config GENERIC_BUG_RELATIVE_POINTERS + def_bool y + depends on GENERIC_BUG + config GENERIC_CALIBRATE_DELAY def_bool y @@ -157,7 +171,7 @@ config STACKTRACE_SUPPORT bool default y -# MACH_LOONGSON32 and MACH_LOONGSON64 are delibrately carried over from the +# MACH_LOONGSON32 and MACH_LOONGSON64 are deliberately carried over from the # MIPS Loongson code, to preserve Loongson-specific code paths in drivers that # are shared between architectures, and specifically expecting the symbols. config MACH_LOONGSON32 @@ -166,6 +180,9 @@ config MACH_LOONGSON32 config MACH_LOONGSON64 def_bool 64BIT +config FIX_EARLYCON_MEM + def_bool y + config PAGE_SIZE_4KB bool @@ -194,6 +211,9 @@ config SCHED_OMIT_FRAME_POINTER bool default y +config AS_HAS_EXPLICIT_RELOCS + def_bool $(as-instr,x:pcalau12i \$t0$(comma)%pc_hi20(x)) + menu "Kernel type and options" source "kernel/Kconfig.hz" @@ -399,6 +419,46 @@ config ARCH_FORCE_MAX_ORDER The page size is not necessarily 4KB. 
	  Keep this in mind when choosing a value for this option.
 
+config ARCH_IOREMAP
+	bool "Enable LoongArch DMW-based ioremap()"
+	help
+	  We use generic TLB-based ioremap() by default since it has page
+	  protection support. However, you can enable LoongArch DMW-based
+	  ioremap() for better performance.
+
+config KEXEC
+	bool "Kexec system call"
+	select KEXEC_CORE
+	help
+	  kexec is a system call that implements the ability to shut down your
+	  current kernel and start another kernel. It is like a reboot but is
+	  independent of the system firmware, and like a reboot you can start
+	  any kernel with it, not just Linux.
+
+	  The name comes from the similarity to the exec system call.
+
+config CRASH_DUMP
+	bool "Build kdump crash kernel"
+	help
+	  Generate a crash dump after being started by kexec. This should
+	  normally only be set in special crash dump kernels which are
+	  loaded in the main kernel with kexec-tools into a specially
+	  reserved region and then later executed after a crash by
+	  kdump/kexec.
+
+	  For more details see Documentation/admin-guide/kdump/kdump.rst
+
+config PHYSICAL_START
+	hex "Physical address where the kernel is loaded"
+	default "0x90000000a0000000"
+	depends on CRASH_DUMP
+	help
+	  This gives the XKPRANGE address where the kernel is loaded.
+	  If you plan to use the kernel for capturing a crash dump, change
+	  this value to the start of the reserved region (the "X" value as
+	  specified in the "crashkernel=YM@XM" command line boot parameter
+	  passed to the panicked kernel).
+
 config SECCOMP
 	bool "Enable seccomp to safely compute untrusted bytecode"
 	depends on PROC_FS
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index d592b9df95c4..f4cb54d5afd6 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -43,15 +43,37 @@ endif
 cflags-y			+= -G0 -pipe -msoft-float
 LDFLAGS_vmlinux			+= -G0 -static -n -nostdlib
+
+# When the assembler supports explicit relocation hints, we must use them.
+# GCC may have -mexplicit-relocs off by default if it was built with an old
+# assembler, so we force it via an option.
+#
+# When the assembler does not support explicit relocation hints, we can't use
+# them, so disable them if the compiler supports doing so.
+#
+# If you've seen an "unknown reloc hint" message while building the kernel and
+# you are now wondering why "-mexplicit-relocs" is not wrapped with cc-option:
+# the combination of a "new" assembler and an "old" compiler is not supported.
+# Either upgrade the compiler or downgrade the assembler.
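A quick way to observe the behaviour the comment above describes is to compare compiler output with the option toggled. This is an illustrative demo, not part of the patch; the file name and the loongarch64-linux-gnu cross-toolchain triple are assumptions:

/* reloc_demo.c: compare the assembly generated with and without
 * explicit relocation hints, e.g.
 *
 *   loongarch64-linux-gnu-gcc -O2 -S -mexplicit-relocs reloc_demo.c
 *   loongarch64-linux-gnu-gcc -O2 -S -mno-explicit-relocs reloc_demo.c
 *
 * With -mexplicit-relocs the compiler itself emits pcalau12i with
 * %pc_hi20/%pc_lo12 relocation hints for the symbol access below;
 * without it, it emits "la"-family macro instructions and leaves the
 * expansion to the assembler (hence the -Wa,-mla-global-with-* flags
 * in the fallback branch below).
 */
extern int external_symbol;

int read_symbol(void)
{
	return external_symbol;
}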
+ifdef CONFIG_AS_HAS_EXPLICIT_RELOCS +cflags-y += -mexplicit-relocs +KBUILD_CFLAGS_KERNEL += -mdirect-extern-access +else +cflags-y += $(call cc-option,-mno-explicit-relocs) KBUILD_AFLAGS_KERNEL += -Wa,-mla-global-with-pcrel KBUILD_CFLAGS_KERNEL += -Wa,-mla-global-with-pcrel KBUILD_AFLAGS_MODULE += -Wa,-mla-global-with-abs KBUILD_CFLAGS_MODULE += -fplt -Wa,-mla-global-with-abs,-mla-local-with-abs +endif cflags-y += -ffreestanding cflags-y += $(call cc-option, -mno-check-zero-division) +ifndef CONFIG_PHYSICAL_START load-y = 0x9000000000200000 +else +load-y = $(CONFIG_PHYSICAL_START) +endif bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) drivers-$(CONFIG_PCI) += arch/loongarch/pci/ diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 3712552e18d3..3540e9c0a631 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -4,6 +4,7 @@ CONFIG_POSIX_MQUEUE=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y CONFIG_PREEMPT=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y @@ -45,6 +46,7 @@ CONFIG_SMP=y CONFIG_HOTPLUG_CPU=y CONFIG_NR_CPUS=64 CONFIG_NUMA=y +CONFIG_KEXEC=y CONFIG_PAGE_SIZE_16KB=y CONFIG_HZ_250=y CONFIG_ACPI=y @@ -55,6 +57,7 @@ CONFIG_ACPI_DOCK=y CONFIG_ACPI_IPMI=m CONFIG_ACPI_PCI_SLOT=y CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_EFI_ZBOOT=y CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y CONFIG_EFI_CAPSULE_LOADER=m CONFIG_EFI_TEST=m @@ -65,6 +68,8 @@ CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_BLK_DEV_THROTTLING=y CONFIG_PARTITION_ADVANCED=y +CONFIG_BSD_DISKLABEL=y +CONFIG_UNIXWARE_DISKLABEL=y CONFIG_IOSCHED_BFQ=y CONFIG_BFQ_GROUP_IOSCHED=y CONFIG_BINFMT_MISC=m @@ -82,8 +87,11 @@ CONFIG_ZSMALLOC=m CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y CONFIG_XFRM_USER=y CONFIG_NET_KEY=y +CONFIG_XDP_SOCKETS=y CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y @@ -95,6 +103,7 @@ CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m CONFIG_IP_MROUTE=y CONFIG_INET_ESP=m CONFIG_INET_UDP_DIAG=y @@ -102,6 +111,7 @@ CONFIG_TCP_CONG_ADVANCED=y CONFIG_TCP_CONG_BBR=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_IPV6_ROUTE_INFO=y +CONFIG_INET6_ESP=m CONFIG_IPV6_MROUTE=y CONFIG_NETWORK_PHY_TIMESTAMPING=y CONFIG_NETFILTER=y @@ -112,10 +122,11 @@ CONFIG_NF_LOG_NETDEV=m CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_CT_NETLINK=m CONFIG_NF_TABLES=m -CONFIG_NFT_COUNTER=m CONFIG_NFT_CONNLIMIT=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -200,7 +211,6 @@ CONFIG_NF_TABLES_IPV4=y CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m CONFIG_NF_TABLES_ARP=y -CONFIG_NF_LOG_ARP=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -254,10 +264,14 @@ CONFIG_BPFILTER=y CONFIG_IP_SCTP=m CONFIG_RDS=y CONFIG_L2TP=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m CONFIG_BRIDGE=m CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC2=m CONFIG_NET_SCHED=y CONFIG_NET_SCH_HTB=m CONFIG_NET_SCH_PRIO=m @@ -282,9 +296,33 @@ CONFIG_VSOCKETS=m CONFIG_VIRTIO_VSOCKETS=m CONFIG_NETLINK_DIAG=y CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_STREAM_PARSER=y CONFIG_BT=m +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y CONFIG_BT_HCIBTUSB=m -# 
CONFIG_BT_HCIBTUSB_BCM is not set +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +CONFIG_BT_HCIBTUSB_MTK=y +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +CONFIG_BT_HCIUART_INTEL=y +CONFIG_BT_HCIUART_AG6XX=y +CONFIG_BT_HCIBCM203X=m +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIDTL1=m +CONFIG_BT_HCIBT3C=m +CONFIG_BT_HCIBLUECARD=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_ATH3K=m +CONFIG_BT_VIRTIO=m CONFIG_CFG80211=m CONFIG_CFG80211_WEXT=y CONFIG_MAC80211=m @@ -329,7 +367,6 @@ CONFIG_PARPORT_PC_FIFO=y CONFIG_ZRAM=m CONFIG_ZRAM_DEF_COMP_ZSTD=y CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_CRYPTOLOOP=y CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 @@ -486,6 +523,7 @@ CONFIG_PPP_FILTER=y CONFIG_PPP_MPPE=m CONFIG_PPP_MULTILINK=y CONFIG_PPPOE=m +CONFIG_PPTP=m CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m @@ -505,7 +543,6 @@ CONFIG_ATH9K_HTC=m CONFIG_IWLWIFI=m CONFIG_IWLDVM=m CONFIG_IWLMVM=m -CONFIG_IWLWIFI_BCAST_FILTERING=y CONFIG_HOSTAP=m CONFIG_MT7601U=m CONFIG_RT2X00=m @@ -521,6 +558,14 @@ CONFIG_RTL8821AE=m CONFIG_RTL8192CU=m # CONFIG_RTLWIFI_DEBUG is not set CONFIG_RTL8XXXU=m +CONFIG_RTW88=m +CONFIG_RTW88_8822BE=m +CONFIG_RTW88_8822CE=m +CONFIG_RTW88_8723DE=m +CONFIG_RTW88_8821CE=m +CONFIG_RTW89=m +CONFIG_RTW89_8852AE=m +CONFIG_RTW89_8852CE=m CONFIG_ZD1211RW=m CONFIG_USB_NET_RNDIS_WLAN=m CONFIG_INPUT_MOUSEDEV=y @@ -651,6 +696,11 @@ CONFIG_USB_SERIAL_FTDI_SIO=m CONFIG_USB_SERIAL_PL2303=m CONFIG_USB_SERIAL_OPTION=m CONFIG_USB_GADGET=y +CONFIG_TYPEC=m +CONFIG_TYPEC_TCPM=m +CONFIG_TYPEC_TCPCI=m +CONFIG_TYPEC_UCSI=m +CONFIG_UCSI_ACPI=m CONFIG_INFINIBAND=m CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_EFI=y @@ -688,7 +738,6 @@ CONFIG_COMEDI_NI_PCIDIO=m CONFIG_COMEDI_NI_PCIMIO=m CONFIG_STAGING=y CONFIG_R8188EU=m -# CONFIG_88EU_AP_MODE is not set CONFIG_PM_DEVFREQ=y CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y CONFIG_DEVFREQ_GOV_PERFORMANCE=y @@ -772,14 +821,12 @@ CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_VMAC=m -CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild index f2bcfcb4e311..77ad8e6f0906 100644 --- a/arch/loongarch/include/asm/Kbuild +++ b/arch/loongarch/include/asm/Kbuild @@ -1,12 +1,11 @@ # SPDX-License-Identifier: GPL-2.0 generic-y += dma-contiguous.h generic-y += export.h +generic-y += mcs_spinlock.h generic-y += parport.h generic-y += early_ioremap.h generic-y += qrwlock.h -generic-y += qrwlock_types.h -generic-y += spinlock.h -generic-y += spinlock_types.h +generic-y += qspinlock.h generic-y += rwsem.h generic-y += segment.h generic-y += user.h diff --git a/arch/loongarch/include/asm/bootinfo.h b/arch/loongarch/include/asm/bootinfo.h index 8e5881bc5ad1..ed0910e8b856 100644 --- a/arch/loongarch/include/asm/bootinfo.h +++ b/arch/loongarch/include/asm/bootinfo.h @@ -40,4 +40,9 @@ extern unsigned long fw_arg0, fw_arg1, fw_arg2; extern struct loongson_board_info b_info; extern struct loongson_system_configuration loongson_sysconf; +static inline bool io_master(int cpu) +{ + return test_bit(cpu, &loongson_sysconf.cores_io_master); +} + #endif /* _ASM_BOOTINFO_H */ diff --git a/arch/loongarch/include/asm/bug.h b/arch/loongarch/include/asm/bug.h index bda49108a76d..d4ca3ba25418 100644 --- a/arch/loongarch/include/asm/bug.h 
+++ b/arch/loongarch/include/asm/bug.h @@ -2,21 +2,59 @@ #ifndef __ASM_BUG_H #define __ASM_BUG_H -#include <linux/compiler.h> +#include <asm/break.h> +#include <linux/stringify.h> + +#ifndef CONFIG_DEBUG_BUGVERBOSE +#define _BUGVERBOSE_LOCATION(file, line) +#else +#define __BUGVERBOSE_LOCATION(file, line) \ + .pushsection .rodata.str, "aMS", @progbits, 1; \ + 10002: .string file; \ + .popsection; \ + \ + .long 10002b - .; \ + .short line; +#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line) +#endif -#ifdef CONFIG_BUG +#ifndef CONFIG_GENERIC_BUG +#define __BUG_ENTRY(flags) +#else +#define __BUG_ENTRY(flags) \ + .pushsection __bug_table, "aw"; \ + .align 2; \ + 10000: .long 10001f - .; \ + _BUGVERBOSE_LOCATION(__FILE__, __LINE__) \ + .short flags; \ + .popsection; \ + 10001: +#endif -#include <asm/break.h> +#define ASM_BUG_FLAGS(flags) \ + __BUG_ENTRY(flags) \ + break BRK_BUG -static inline void __noreturn BUG(void) -{ - __asm__ __volatile__("break %0" : : "i" (BRK_BUG)); - unreachable(); -} +#define ASM_BUG() ASM_BUG_FLAGS(0) -#define HAVE_ARCH_BUG +#define __BUG_FLAGS(flags) \ + asm_inline volatile (__stringify(ASM_BUG_FLAGS(flags))); -#endif +#define __WARN_FLAGS(flags) \ +do { \ + instrumentation_begin(); \ + __BUG_FLAGS(BUGFLAG_WARNING|(flags)); \ + instrumentation_end(); \ +} while (0) + +#define BUG() \ +do { \ + instrumentation_begin(); \ + __BUG_FLAGS(0); \ + unreachable(); \ +} while (0) + +#define HAVE_ARCH_BUG #include <asm-generic/bug.h> diff --git a/arch/loongarch/include/asm/cacheflush.h b/arch/loongarch/include/asm/cacheflush.h index 670900141b7c..0681788eb474 100644 --- a/arch/loongarch/include/asm/cacheflush.h +++ b/arch/loongarch/include/asm/cacheflush.h @@ -6,10 +6,33 @@ #define _ASM_CACHEFLUSH_H #include <linux/mm.h> -#include <asm/cpu-features.h> +#include <asm/cpu-info.h> #include <asm/cacheops.h> -extern void local_flush_icache_range(unsigned long start, unsigned long end); +static inline bool cache_present(struct cache_desc *cdesc) +{ + return cdesc->flags & CACHE_PRESENT; +} + +static inline bool cache_private(struct cache_desc *cdesc) +{ + return cdesc->flags & CACHE_PRIVATE; +} + +static inline bool cache_inclusive(struct cache_desc *cdesc) +{ + return cdesc->flags & CACHE_INCLUSIVE; +} + +static inline unsigned int cpu_last_level_cache_line_size(void) +{ + int cache_present = boot_cpu_data.cache_leaves_present; + + return boot_cpu_data.cache_leaves[cache_present - 1].linesz; +} + +asmlinkage void __flush_cache_all(void); +void local_flush_icache_range(unsigned long start, unsigned long end); #define flush_icache_range local_flush_icache_range #define flush_icache_user_range local_flush_icache_range @@ -35,44 +58,30 @@ extern void local_flush_icache_range(unsigned long start, unsigned long end); : \ : "i" (op), "ZC" (*(unsigned char *)(addr))) -static inline void flush_icache_line_indexed(unsigned long addr) -{ - cache_op(Index_Invalidate_I, addr); -} - -static inline void flush_dcache_line_indexed(unsigned long addr) -{ - cache_op(Index_Writeback_Inv_D, addr); -} - -static inline void flush_vcache_line_indexed(unsigned long addr) -{ - cache_op(Index_Writeback_Inv_V, addr); -} - -static inline void flush_scache_line_indexed(unsigned long addr) -{ - cache_op(Index_Writeback_Inv_S, addr); -} - -static inline void flush_icache_line(unsigned long addr) -{ - cache_op(Hit_Invalidate_I, addr); -} - -static inline void flush_dcache_line(unsigned long addr) -{ - cache_op(Hit_Writeback_Inv_D, addr); -} - -static inline void flush_vcache_line(unsigned 
long addr) -{ - cache_op(Hit_Writeback_Inv_V, addr); -} - -static inline void flush_scache_line(unsigned long addr) +static inline void flush_cache_line(int leaf, unsigned long addr) { - cache_op(Hit_Writeback_Inv_S, addr); + switch (leaf) { + case Cache_LEAF0: + cache_op(Index_Writeback_Inv_LEAF0, addr); + break; + case Cache_LEAF1: + cache_op(Index_Writeback_Inv_LEAF1, addr); + break; + case Cache_LEAF2: + cache_op(Index_Writeback_Inv_LEAF2, addr); + break; + case Cache_LEAF3: + cache_op(Index_Writeback_Inv_LEAF3, addr); + break; + case Cache_LEAF4: + cache_op(Index_Writeback_Inv_LEAF4, addr); + break; + case Cache_LEAF5: + cache_op(Index_Writeback_Inv_LEAF5, addr); + break; + default: + break; + } } #include <asm-generic/cacheflush.h> diff --git a/arch/loongarch/include/asm/cacheops.h b/arch/loongarch/include/asm/cacheops.h index dc280efecebd..0f4a86f8e2be 100644 --- a/arch/loongarch/include/asm/cacheops.h +++ b/arch/loongarch/include/asm/cacheops.h @@ -8,16 +8,18 @@ #define __ASM_CACHEOPS_H /* - * Most cache ops are split into a 2 bit field identifying the cache, and a 3 + * Most cache ops are split into a 3 bit field identifying the cache, and a 2 * bit field identifying the cache operation. */ -#define CacheOp_Cache 0x03 -#define CacheOp_Op 0x1c +#define CacheOp_Cache 0x07 +#define CacheOp_Op 0x18 -#define Cache_I 0x00 -#define Cache_D 0x01 -#define Cache_V 0x02 -#define Cache_S 0x03 +#define Cache_LEAF0 0x00 +#define Cache_LEAF1 0x01 +#define Cache_LEAF2 0x02 +#define Cache_LEAF3 0x03 +#define Cache_LEAF4 0x04 +#define Cache_LEAF5 0x05 #define Index_Invalidate 0x08 #define Index_Writeback_Inv 0x08 @@ -25,13 +27,17 @@ #define Hit_Writeback_Inv 0x10 #define CacheOp_User_Defined 0x18 -#define Index_Invalidate_I (Cache_I | Index_Invalidate) -#define Index_Writeback_Inv_D (Cache_D | Index_Writeback_Inv) -#define Index_Writeback_Inv_V (Cache_V | Index_Writeback_Inv) -#define Index_Writeback_Inv_S (Cache_S | Index_Writeback_Inv) -#define Hit_Invalidate_I (Cache_I | Hit_Invalidate) -#define Hit_Writeback_Inv_D (Cache_D | Hit_Writeback_Inv) -#define Hit_Writeback_Inv_V (Cache_V | Hit_Writeback_Inv) -#define Hit_Writeback_Inv_S (Cache_S | Hit_Writeback_Inv) +#define Index_Writeback_Inv_LEAF0 (Cache_LEAF0 | Index_Writeback_Inv) +#define Index_Writeback_Inv_LEAF1 (Cache_LEAF1 | Index_Writeback_Inv) +#define Index_Writeback_Inv_LEAF2 (Cache_LEAF2 | Index_Writeback_Inv) +#define Index_Writeback_Inv_LEAF3 (Cache_LEAF3 | Index_Writeback_Inv) +#define Index_Writeback_Inv_LEAF4 (Cache_LEAF4 | Index_Writeback_Inv) +#define Index_Writeback_Inv_LEAF5 (Cache_LEAF5 | Index_Writeback_Inv) +#define Hit_Writeback_Inv_LEAF0 (Cache_LEAF0 | Hit_Writeback_Inv) +#define Hit_Writeback_Inv_LEAF1 (Cache_LEAF1 | Hit_Writeback_Inv) +#define Hit_Writeback_Inv_LEAF2 (Cache_LEAF2 | Hit_Writeback_Inv) +#define Hit_Writeback_Inv_LEAF3 (Cache_LEAF3 | Hit_Writeback_Inv) +#define Hit_Writeback_Inv_LEAF4 (Cache_LEAF4 | Hit_Writeback_Inv) +#define Hit_Writeback_Inv_LEAF5 (Cache_LEAF5 | Hit_Writeback_Inv) #endif /* __ASM_CACHEOPS_H */ diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/asm/cmpxchg.h index ae19e33c7754..ecfa6cf79806 100644 --- a/arch/loongarch/include/asm/cmpxchg.h +++ b/arch/loongarch/include/asm/cmpxchg.h @@ -61,8 +61,8 @@ static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val, return (old32 & mask) >> shift; } -static inline unsigned long __xchg(volatile void *ptr, unsigned long x, - int size) +static __always_inline unsigned long +__xchg(volatile void *ptr, 
unsigned long x, int size) { switch (size) { case 1: @@ -159,8 +159,8 @@ static inline unsigned int __cmpxchg_small(volatile void *ptr, unsigned int old, return (old32 & mask) >> shift; } -static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, - unsigned long new, unsigned int size) +static __always_inline unsigned long +__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, unsigned int size) { switch (size) { case 1: diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h index a8d87c40a0eb..b07974218393 100644 --- a/arch/loongarch/include/asm/cpu-features.h +++ b/arch/loongarch/include/asm/cpu-features.h @@ -19,11 +19,6 @@ #define cpu_has_loongarch32 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_32BIT) #define cpu_has_loongarch64 (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT) -#define cpu_icache_line_size() cpu_data[0].icache.linesz -#define cpu_dcache_line_size() cpu_data[0].dcache.linesz -#define cpu_vcache_line_size() cpu_data[0].vcache.linesz -#define cpu_scache_line_size() cpu_data[0].scache.linesz - #ifdef CONFIG_32BIT # define cpu_has_64bits (cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT) # define cpu_vabits 31 diff --git a/arch/loongarch/include/asm/cpu-info.h b/arch/loongarch/include/asm/cpu-info.h index b6c4f96079df..cd73a6f57fe3 100644 --- a/arch/loongarch/include/asm/cpu-info.h +++ b/arch/loongarch/include/asm/cpu-info.h @@ -10,18 +10,28 @@ #include <asm/loongarch.h> +/* cache_desc->flags */ +enum { + CACHE_PRESENT = (1 << 0), + CACHE_PRIVATE = (1 << 1), /* core private cache */ + CACHE_INCLUSIVE = (1 << 2), /* include the inner level caches */ +}; + /* * Descriptor for a cache */ struct cache_desc { - unsigned int waysize; /* Bytes per way */ + unsigned char type; + unsigned char level; unsigned short sets; /* Number of lines per set */ unsigned char ways; /* Number of ways */ unsigned char linesz; /* Size of line in bytes */ - unsigned char waybit; /* Bits to select in a cache set */ unsigned char flags; /* Flags describing cache properties */ }; +#define CACHE_LEVEL_MAX 3 +#define CACHE_LEAVES_MAX 6 + struct cpuinfo_loongarch { u64 asid_cache; unsigned long asid_mask; @@ -40,11 +50,8 @@ struct cpuinfo_loongarch { int tlbsizemtlb; int tlbsizestlbsets; int tlbsizestlbways; - struct cache_desc icache; /* Primary I-cache */ - struct cache_desc dcache; /* Primary D or combined I/D cache */ - struct cache_desc vcache; /* Victim cache, between pcache and scache */ - struct cache_desc scache; /* Secondary cache */ - struct cache_desc tcache; /* Tertiary/split secondary cache */ + int cache_leaves_present; /* number of cache_leaves[] elements */ + struct cache_desc cache_leaves[CACHE_LEAVES_MAX]; int core; /* physical core number in package */ int package;/* physical package number */ int vabits; /* Virtual Address size in bits */ diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/elf.h index 5f3ff4781fda..7af0cebf28d7 100644 --- a/arch/loongarch/include/asm/elf.h +++ b/arch/loongarch/include/asm/elf.h @@ -74,6 +74,43 @@ #define R_LARCH_SUB64 56 #define R_LARCH_GNU_VTINHERIT 57 #define R_LARCH_GNU_VTENTRY 58 +#define R_LARCH_B16 64 +#define R_LARCH_B21 65 +#define R_LARCH_B26 66 +#define R_LARCH_ABS_HI20 67 +#define R_LARCH_ABS_LO12 68 +#define R_LARCH_ABS64_LO20 69 +#define R_LARCH_ABS64_HI12 70 +#define R_LARCH_PCALA_HI20 71 +#define R_LARCH_PCALA_LO12 72 +#define R_LARCH_PCALA64_LO20 73 +#define R_LARCH_PCALA64_HI12 74 +#define R_LARCH_GOT_PC_HI20 75 +#define 
R_LARCH_GOT_PC_LO12 76 +#define R_LARCH_GOT64_PC_LO20 77 +#define R_LARCH_GOT64_PC_HI12 78 +#define R_LARCH_GOT_HI20 79 +#define R_LARCH_GOT_LO12 80 +#define R_LARCH_GOT64_LO20 81 +#define R_LARCH_GOT64_HI12 82 +#define R_LARCH_TLS_LE_HI20 83 +#define R_LARCH_TLS_LE_LO12 84 +#define R_LARCH_TLS_LE64_LO20 85 +#define R_LARCH_TLS_LE64_HI12 86 +#define R_LARCH_TLS_IE_PC_HI20 87 +#define R_LARCH_TLS_IE_PC_LO12 88 +#define R_LARCH_TLS_IE64_PC_LO20 89 +#define R_LARCH_TLS_IE64_PC_HI12 90 +#define R_LARCH_TLS_IE_HI20 91 +#define R_LARCH_TLS_IE_LO12 92 +#define R_LARCH_TLS_IE64_LO20 93 +#define R_LARCH_TLS_IE64_HI12 94 +#define R_LARCH_TLS_LD_PC_HI20 95 +#define R_LARCH_TLS_LD_HI20 96 +#define R_LARCH_TLS_GD_PC_HI20 97 +#define R_LARCH_TLS_GD_HI20 98 +#define R_LARCH_32_PCREL 99 +#define R_LARCH_RELAX 100 #ifndef ELF_ARCH diff --git a/arch/loongarch/include/asm/fixmap.h b/arch/loongarch/include/asm/fixmap.h index b3541dfa2013..d2e55ae55bb9 100644 --- a/arch/loongarch/include/asm/fixmap.h +++ b/arch/loongarch/include/asm/fixmap.h @@ -10,4 +10,19 @@ #define NR_FIX_BTMAPS 64 +enum fixed_addresses { + FIX_HOLE, + FIX_EARLYCON_MEM_BASE, + __end_of_fixed_addresses +}; + +#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) +#define FIXMAP_PAGE_IO PAGE_KERNEL_SUC + +extern void __set_fixmap(enum fixed_addresses idx, + phys_addr_t phys, pgprot_t flags); + +#include <asm-generic/fixmap.h> + #endif diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h index 7b07cbb3188c..fce1843ceebb 100644 --- a/arch/loongarch/include/asm/inst.h +++ b/arch/loongarch/include/asm/inst.h @@ -8,6 +8,8 @@ #include <linux/types.h> #include <asm/asm.h> +#define INSN_BREAK 0x002a0000 + #define ADDR_IMMMASK_LU52ID 0xFFF0000000000000 #define ADDR_IMMMASK_LU32ID 0x000FFFFF00000000 #define ADDR_IMMMASK_ADDU16ID 0x00000000FFFF0000 @@ -18,9 +20,16 @@ #define ADDR_IMM(addr, INSN) ((addr & ADDR_IMMMASK_##INSN) >> ADDR_IMMSHIFT_##INSN) +enum reg0i26_op { + b_op = 0x14, + bl_op = 0x15, +}; + enum reg1i20_op { lu12iw_op = 0x0a, lu32id_op = 0x0b, + pcaddu12i_op = 0x0e, + pcaddu18i_op = 0x0f, }; enum reg1i21_op { @@ -28,10 +37,34 @@ enum reg1i21_op { bnez_op = 0x11, }; +enum reg2_op { + revb2h_op = 0x0c, + revb4h_op = 0x0d, + revb2w_op = 0x0e, + revbd_op = 0x0f, + revh2w_op = 0x10, + revhd_op = 0x11, +}; + +enum reg2i5_op { + slliw_op = 0x81, + srliw_op = 0x89, + sraiw_op = 0x91, +}; + +enum reg2i6_op { + sllid_op = 0x41, + srlid_op = 0x45, + sraid_op = 0x49, +}; + enum reg2i12_op { addiw_op = 0x0a, addid_op = 0x0b, lu52id_op = 0x0c, + andi_op = 0x0d, + ori_op = 0x0e, + xori_op = 0x0f, ldb_op = 0xa0, ldh_op = 0xa1, ldw_op = 0xa2, @@ -40,6 +73,20 @@ enum reg2i12_op { sth_op = 0xa5, stw_op = 0xa6, std_op = 0xa7, + ldbu_op = 0xa8, + ldhu_op = 0xa9, + ldwu_op = 0xaa, +}; + +enum reg2i14_op { + llw_op = 0x20, + scw_op = 0x21, + lld_op = 0x22, + scd_op = 0x23, + ldptrw_op = 0x24, + stptrw_op = 0x25, + ldptrd_op = 0x26, + stptrd_op = 0x27, }; enum reg2i16_op { @@ -52,6 +99,71 @@ enum reg2i16_op { bgeu_op = 0x1b, }; +enum reg2bstrd_op { + bstrinsd_op = 0x2, + bstrpickd_op = 0x3, +}; + +enum reg3_op { + addw_op = 0x20, + addd_op = 0x21, + subw_op = 0x22, + subd_op = 0x23, + nor_op = 0x28, + and_op = 0x29, + or_op = 0x2a, + xor_op = 0x2b, + orn_op = 0x2c, + andn_op = 0x2d, + sllw_op = 0x2e, + srlw_op = 0x2f, + sraw_op = 0x30, + slld_op = 0x31, + srld_op = 0x32, + srad_op = 0x33, + mulw_op = 0x38, + mulhw_op = 0x39, + mulhwu_op = 0x3a, + muld_op = 0x3b, + mulhd_op = 
0x3c, + mulhdu_op = 0x3d, + divw_op = 0x40, + modw_op = 0x41, + divwu_op = 0x42, + modwu_op = 0x43, + divd_op = 0x44, + modd_op = 0x45, + divdu_op = 0x46, + moddu_op = 0x47, + ldxb_op = 0x7000, + ldxh_op = 0x7008, + ldxw_op = 0x7010, + ldxd_op = 0x7018, + stxb_op = 0x7020, + stxh_op = 0x7028, + stxw_op = 0x7030, + stxd_op = 0x7038, + ldxbu_op = 0x7040, + ldxhu_op = 0x7048, + ldxwu_op = 0x7050, + amswapw_op = 0x70c0, + amswapd_op = 0x70c1, + amaddw_op = 0x70c2, + amaddd_op = 0x70c3, + amandw_op = 0x70c4, + amandd_op = 0x70c5, + amorw_op = 0x70c6, + amord_op = 0x70c7, + amxorw_op = 0x70c8, + amxord_op = 0x70c9, +}; + +enum reg3sa2_op { + alslw_op = 0x02, + alslwu_op = 0x03, + alsld_op = 0x16, +}; + struct reg0i26_format { unsigned int immediate_h : 10; unsigned int immediate_l : 16; @@ -71,6 +183,26 @@ struct reg1i21_format { unsigned int opcode : 6; }; +struct reg2_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int opcode : 22; +}; + +struct reg2i5_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int immediate : 5; + unsigned int opcode : 17; +}; + +struct reg2i6_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int immediate : 6; + unsigned int opcode : 16; +}; + struct reg2i12_format { unsigned int rd : 5; unsigned int rj : 5; @@ -78,6 +210,13 @@ struct reg2i12_format { unsigned int opcode : 10; }; +struct reg2i14_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int immediate : 14; + unsigned int opcode : 8; +}; + struct reg2i16_format { unsigned int rd : 5; unsigned int rj : 5; @@ -85,13 +224,43 @@ struct reg2i16_format { unsigned int opcode : 6; }; +struct reg2bstrd_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int lsbd : 6; + unsigned int msbd : 6; + unsigned int opcode : 10; +}; + +struct reg3_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int rk : 5; + unsigned int opcode : 17; +}; + +struct reg3sa2_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int rk : 5; + unsigned int immediate : 2; + unsigned int opcode : 15; +}; + union loongarch_instruction { unsigned int word; - struct reg0i26_format reg0i26_format; - struct reg1i20_format reg1i20_format; - struct reg1i21_format reg1i21_format; - struct reg2i12_format reg2i12_format; - struct reg2i16_format reg2i16_format; + struct reg0i26_format reg0i26_format; + struct reg1i20_format reg1i20_format; + struct reg1i21_format reg1i21_format; + struct reg2_format reg2_format; + struct reg2i5_format reg2i5_format; + struct reg2i6_format reg2i6_format; + struct reg2i12_format reg2i12_format; + struct reg2i14_format reg2i14_format; + struct reg2i16_format reg2i16_format; + struct reg2bstrd_format reg2bstrd_format; + struct reg3_format reg3_format; + struct reg3sa2_format reg3sa2_format; }; #define LOONGARCH_INSN_SIZE sizeof(union loongarch_instruction) @@ -166,4 +335,235 @@ u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm); u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm); u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned long pc, unsigned long dest); +static inline bool signed_imm_check(long val, unsigned int bit) +{ + return -(1L << (bit - 1)) <= val && val < (1L << (bit - 1)); +} + +static inline bool unsigned_imm_check(unsigned long val, unsigned int bit) +{ + return val < (1UL << bit); +} + +#define DEF_EMIT_REG0I26_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + int offset) \ +{ \ + unsigned int immediate_l, 
immediate_h; \ + \ + immediate_l = offset & 0xffff; \ + offset >>= 16; \ + immediate_h = offset & 0x3ff; \ + \ + insn->reg0i26_format.opcode = OP; \ + insn->reg0i26_format.immediate_l = immediate_l; \ + insn->reg0i26_format.immediate_h = immediate_h; \ +} + +DEF_EMIT_REG0I26_FORMAT(b, b_op) + +#define DEF_EMIT_REG1I20_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, int imm) \ +{ \ + insn->reg1i20_format.opcode = OP; \ + insn->reg1i20_format.immediate = imm; \ + insn->reg1i20_format.rd = rd; \ +} + +DEF_EMIT_REG1I20_FORMAT(lu12iw, lu12iw_op) +DEF_EMIT_REG1I20_FORMAT(lu32id, lu32id_op) +DEF_EMIT_REG1I20_FORMAT(pcaddu18i, pcaddu18i_op) + +#define DEF_EMIT_REG2_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj) \ +{ \ + insn->reg2_format.opcode = OP; \ + insn->reg2_format.rd = rd; \ + insn->reg2_format.rj = rj; \ +} + +DEF_EMIT_REG2_FORMAT(revb2h, revb2h_op) +DEF_EMIT_REG2_FORMAT(revb2w, revb2w_op) +DEF_EMIT_REG2_FORMAT(revbd, revbd_op) + +#define DEF_EMIT_REG2I5_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj, \ + int imm) \ +{ \ + insn->reg2i5_format.opcode = OP; \ + insn->reg2i5_format.immediate = imm; \ + insn->reg2i5_format.rd = rd; \ + insn->reg2i5_format.rj = rj; \ +} + +DEF_EMIT_REG2I5_FORMAT(slliw, slliw_op) +DEF_EMIT_REG2I5_FORMAT(srliw, srliw_op) +DEF_EMIT_REG2I5_FORMAT(sraiw, sraiw_op) + +#define DEF_EMIT_REG2I6_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj, \ + int imm) \ +{ \ + insn->reg2i6_format.opcode = OP; \ + insn->reg2i6_format.immediate = imm; \ + insn->reg2i6_format.rd = rd; \ + insn->reg2i6_format.rj = rj; \ +} + +DEF_EMIT_REG2I6_FORMAT(sllid, sllid_op) +DEF_EMIT_REG2I6_FORMAT(srlid, srlid_op) +DEF_EMIT_REG2I6_FORMAT(sraid, sraid_op) + +#define DEF_EMIT_REG2I12_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj, \ + int imm) \ +{ \ + insn->reg2i12_format.opcode = OP; \ + insn->reg2i12_format.immediate = imm; \ + insn->reg2i12_format.rd = rd; \ + insn->reg2i12_format.rj = rj; \ +} + +DEF_EMIT_REG2I12_FORMAT(addiw, addiw_op) +DEF_EMIT_REG2I12_FORMAT(addid, addid_op) +DEF_EMIT_REG2I12_FORMAT(lu52id, lu52id_op) +DEF_EMIT_REG2I12_FORMAT(andi, andi_op) +DEF_EMIT_REG2I12_FORMAT(ori, ori_op) +DEF_EMIT_REG2I12_FORMAT(xori, xori_op) +DEF_EMIT_REG2I12_FORMAT(ldbu, ldbu_op) +DEF_EMIT_REG2I12_FORMAT(ldhu, ldhu_op) +DEF_EMIT_REG2I12_FORMAT(ldwu, ldwu_op) +DEF_EMIT_REG2I12_FORMAT(ldd, ldd_op) +DEF_EMIT_REG2I12_FORMAT(stb, stb_op) +DEF_EMIT_REG2I12_FORMAT(sth, sth_op) +DEF_EMIT_REG2I12_FORMAT(stw, stw_op) +DEF_EMIT_REG2I12_FORMAT(std, std_op) + +#define DEF_EMIT_REG2I14_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj, \ + int imm) \ +{ \ + insn->reg2i14_format.opcode = OP; \ + insn->reg2i14_format.immediate = imm; \ + insn->reg2i14_format.rd = rd; \ + insn->reg2i14_format.rj = rj; \ +} + +DEF_EMIT_REG2I14_FORMAT(llw, llw_op) +DEF_EMIT_REG2I14_FORMAT(scw, scw_op) +DEF_EMIT_REG2I14_FORMAT(lld, lld_op) +DEF_EMIT_REG2I14_FORMAT(scd, scd_op) +DEF_EMIT_REG2I14_FORMAT(ldptrw, ldptrw_op) +DEF_EMIT_REG2I14_FORMAT(stptrw, stptrw_op) +DEF_EMIT_REG2I14_FORMAT(ldptrd, ldptrd_op) 
+DEF_EMIT_REG2I14_FORMAT(stptrd, stptrd_op) + +#define DEF_EMIT_REG2I16_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rj, \ + enum loongarch_gpr rd, \ + int offset) \ +{ \ + insn->reg2i16_format.opcode = OP; \ + insn->reg2i16_format.immediate = offset; \ + insn->reg2i16_format.rj = rj; \ + insn->reg2i16_format.rd = rd; \ +} + +DEF_EMIT_REG2I16_FORMAT(beq, beq_op) +DEF_EMIT_REG2I16_FORMAT(bne, bne_op) +DEF_EMIT_REG2I16_FORMAT(blt, blt_op) +DEF_EMIT_REG2I16_FORMAT(bge, bge_op) +DEF_EMIT_REG2I16_FORMAT(bltu, bltu_op) +DEF_EMIT_REG2I16_FORMAT(bgeu, bgeu_op) +DEF_EMIT_REG2I16_FORMAT(jirl, jirl_op) + +#define DEF_EMIT_REG2BSTRD_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj, \ + int msbd, \ + int lsbd) \ +{ \ + insn->reg2bstrd_format.opcode = OP; \ + insn->reg2bstrd_format.msbd = msbd; \ + insn->reg2bstrd_format.lsbd = lsbd; \ + insn->reg2bstrd_format.rj = rj; \ + insn->reg2bstrd_format.rd = rd; \ +} + +DEF_EMIT_REG2BSTRD_FORMAT(bstrpickd, bstrpickd_op) + +#define DEF_EMIT_REG3_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj, \ + enum loongarch_gpr rk) \ +{ \ + insn->reg3_format.opcode = OP; \ + insn->reg3_format.rd = rd; \ + insn->reg3_format.rj = rj; \ + insn->reg3_format.rk = rk; \ +} + +DEF_EMIT_REG3_FORMAT(addd, addd_op) +DEF_EMIT_REG3_FORMAT(subd, subd_op) +DEF_EMIT_REG3_FORMAT(muld, muld_op) +DEF_EMIT_REG3_FORMAT(divdu, divdu_op) +DEF_EMIT_REG3_FORMAT(moddu, moddu_op) +DEF_EMIT_REG3_FORMAT(and, and_op) +DEF_EMIT_REG3_FORMAT(or, or_op) +DEF_EMIT_REG3_FORMAT(xor, xor_op) +DEF_EMIT_REG3_FORMAT(sllw, sllw_op) +DEF_EMIT_REG3_FORMAT(slld, slld_op) +DEF_EMIT_REG3_FORMAT(srlw, srlw_op) +DEF_EMIT_REG3_FORMAT(srld, srld_op) +DEF_EMIT_REG3_FORMAT(sraw, sraw_op) +DEF_EMIT_REG3_FORMAT(srad, srad_op) +DEF_EMIT_REG3_FORMAT(ldxbu, ldxbu_op) +DEF_EMIT_REG3_FORMAT(ldxhu, ldxhu_op) +DEF_EMIT_REG3_FORMAT(ldxwu, ldxwu_op) +DEF_EMIT_REG3_FORMAT(ldxd, ldxd_op) +DEF_EMIT_REG3_FORMAT(stxb, stxb_op) +DEF_EMIT_REG3_FORMAT(stxh, stxh_op) +DEF_EMIT_REG3_FORMAT(stxw, stxw_op) +DEF_EMIT_REG3_FORMAT(stxd, stxd_op) +DEF_EMIT_REG3_FORMAT(amaddw, amaddw_op) +DEF_EMIT_REG3_FORMAT(amaddd, amaddd_op) +DEF_EMIT_REG3_FORMAT(amandw, amandw_op) +DEF_EMIT_REG3_FORMAT(amandd, amandd_op) +DEF_EMIT_REG3_FORMAT(amorw, amorw_op) +DEF_EMIT_REG3_FORMAT(amord, amord_op) +DEF_EMIT_REG3_FORMAT(amxorw, amxorw_op) +DEF_EMIT_REG3_FORMAT(amxord, amxord_op) +DEF_EMIT_REG3_FORMAT(amswapw, amswapw_op) +DEF_EMIT_REG3_FORMAT(amswapd, amswapd_op) + +#define DEF_EMIT_REG3SA2_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rd, \ + enum loongarch_gpr rj, \ + enum loongarch_gpr rk, \ + int imm) \ +{ \ + insn->reg3sa2_format.opcode = OP; \ + insn->reg3sa2_format.immediate = imm; \ + insn->reg3sa2_format.rd = rd; \ + insn->reg3sa2_format.rj = rj; \ + insn->reg3sa2_format.rk = rk; \ +} + +DEF_EMIT_REG3SA2_FORMAT(alsld, alsld_op) + #endif /* _ASM_INST_H */ diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h index 999944ea1cea..402a7d9e3a53 100644 --- a/arch/loongarch/include/asm/io.h +++ b/arch/loongarch/include/asm/io.h @@ -27,71 +27,38 @@ extern void __init early_iounmap(void __iomem *addr, unsigned long size); #define early_memremap early_ioremap #define early_memunmap early_iounmap +#ifdef CONFIG_ARCH_IOREMAP + static inline 
void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
					 unsigned long prot_val)
 {
-	if (prot_val == _CACHE_CC)
+	if (prot_val & _CACHE_CC)
 		return (void __iomem *)(unsigned long)(CACHE_BASE + offset);
 	else
 		return (void __iomem *)(unsigned long)(UNCACHE_BASE + offset);
 }
 
-/*
- * ioremap - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
- *
- * ioremap performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- */
-#define ioremap(offset, size)		\
-	ioremap_prot((offset), (size), _CACHE_SUC)
+#define ioremap(offset, size)		\
+	ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_SUC))
 
-/*
- * ioremap_wc - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
- *
- * ioremap_wc performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- *
- * This version of ioremap ensures that the memory is marked uncachable
- * but accelerated by means of write-combining feature. It is specifically
- * useful for PCIe prefetchable windows, which may vastly improve a
- * communications performance. If it was determined on boot stage, what
- * CPU CCA doesn't support WUC, the method shall fall-back to the
- * _CACHE_SUC option (see cpu_probe() method).
- */
-#define ioremap_wc(offset, size)	\
-	ioremap_prot((offset), (size), _CACHE_WUC)
+#define iounmap(addr)			((void)(addr))
+
+#endif
 
 /*
- * ioremap_cache - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
+ * On LoongArch, ioremap() has two variants, ioremap_wc() and ioremap_cache().
+ * They map bus memory into CPU space; the mapped memory is marked uncachable
+ * (_CACHE_SUC), uncachable but accelerated by write-combining (_CACHE_WUC),
+ * and cachable (_CACHE_CC), respectively, for CPU access.
  *
- * ioremap_cache performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- *
- * This version of ioremap ensures that the memory is marked cachable by
- * the CPU. Also enables full write-combining. Useful for some
- * memory-like regions on I/O busses.
+ * @offset: bus address of the memory + * @size: size of the resource to map */ -#define ioremap_cache(offset, size) \ - ioremap_prot((offset), (size), _CACHE_CC) +#define ioremap_wc(offset, size) \ + ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_WUC)) -static inline void iounmap(const volatile void __iomem *addr) -{ -} +#define ioremap_cache(offset, size) \ + ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL)) #define mmiowb() asm volatile ("dbar 0" ::: "memory") @@ -107,4 +74,8 @@ extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t #include <asm-generic/io.h> +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE +extern int valid_phys_addr_range(phys_addr_t addr, size_t size); +extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); + #endif /* _ASM_IO_H */ diff --git a/arch/loongarch/include/asm/kexec.h b/arch/loongarch/include/asm/kexec.h new file mode 100644 index 000000000000..cf95cd3eb2de --- /dev/null +++ b/arch/loongarch/include/asm/kexec.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * kexec.h for kexec + * + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_KEXEC_H +#define _ASM_KEXEC_H + +#include <asm/stacktrace.h> +#include <asm/page.h> + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) + /* Maximum address we can use for the control code buffer */ +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) + +/* Reserve a page for the control code buffer */ +#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE + +/* The native architecture */ +#define KEXEC_ARCH KEXEC_ARCH_LOONGARCH + +static inline void crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) +{ + if (oldregs) + memcpy(newregs, oldregs, sizeof(*newregs)); + else + prepare_frametrace(newregs); +} + +#define ARCH_HAS_KIMAGE_ARCH + +struct kimage_arch { + unsigned long efi_boot; + unsigned long cmdline_ptr; + unsigned long systable_ptr; +}; + +typedef void (*do_kexec_t)(unsigned long efi_boot, + unsigned long cmdline_ptr, + unsigned long systable_ptr, + unsigned long start_addr, + unsigned long first_ind_entry); + +struct kimage; +extern const unsigned char relocate_new_kernel[]; +extern const size_t relocate_new_kernel_size; +extern void kexec_reboot(void); + +#ifdef CONFIG_SMP +extern atomic_t kexec_ready_to_reboot; +extern const unsigned char kexec_smp_wait[]; +#endif + +#endif /* !_ASM_KEXEC_H */ diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index 3ba4f7e87cd2..7f8d57a61c8b 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -187,36 +187,15 @@ static inline u32 read_cpucfg(u32 reg) #define CPUCFG16_L3_DINCL BIT(16) #define LOONGARCH_CPUCFG17 0x11 -#define CPUCFG17_L1I_WAYS_M GENMASK(15, 0) -#define CPUCFG17_L1I_SETS_M GENMASK(23, 16) -#define CPUCFG17_L1I_SIZE_M GENMASK(30, 24) -#define CPUCFG17_L1I_WAYS 0 -#define CPUCFG17_L1I_SETS 16 -#define CPUCFG17_L1I_SIZE 24 - #define LOONGARCH_CPUCFG18 0x12 -#define CPUCFG18_L1D_WAYS_M GENMASK(15, 0) -#define CPUCFG18_L1D_SETS_M GENMASK(23, 16) -#define CPUCFG18_L1D_SIZE_M GENMASK(30, 24) -#define CPUCFG18_L1D_WAYS 0 -#define CPUCFG18_L1D_SETS 16 -#define CPUCFG18_L1D_SIZE 24 - #define LOONGARCH_CPUCFG19 0x13 -#define CPUCFG19_L2_WAYS_M GENMASK(15, 0) -#define CPUCFG19_L2_SETS_M GENMASK(23, 16) -#define CPUCFG19_L2_SIZE_M GENMASK(30, 24) 
-#define CPUCFG19_L2_WAYS 0 -#define CPUCFG19_L2_SETS 16 -#define CPUCFG19_L2_SIZE 24 - #define LOONGARCH_CPUCFG20 0x14 -#define CPUCFG20_L3_WAYS_M GENMASK(15, 0) -#define CPUCFG20_L3_SETS_M GENMASK(23, 16) -#define CPUCFG20_L3_SIZE_M GENMASK(30, 24) -#define CPUCFG20_L3_WAYS 0 -#define CPUCFG20_L3_SETS 16 -#define CPUCFG20_L3_SIZE 24 +#define CPUCFG_CACHE_WAYS_M GENMASK(15, 0) +#define CPUCFG_CACHE_SETS_M GENMASK(23, 16) +#define CPUCFG_CACHE_LSIZE_M GENMASK(30, 24) +#define CPUCFG_CACHE_WAYS 0 +#define CPUCFG_CACHE_SETS 16 +#define CPUCFG_CACHE_LSIZE 24 #define LOONGARCH_CPUCFG48 0x30 #define CPUCFG48_MCSR_LCK BIT(0) diff --git a/arch/loongarch/include/asm/module.h b/arch/loongarch/include/asm/module.h index 9f6718df1854..b29b19a46f42 100644 --- a/arch/loongarch/include/asm/module.h +++ b/arch/loongarch/include/asm/module.h @@ -17,10 +17,15 @@ struct mod_section { }; struct mod_arch_specific { + struct mod_section got; struct mod_section plt; struct mod_section plt_idx; }; +struct got_entry { + Elf_Addr symbol_addr; +}; + struct plt_entry { u32 inst_lu12iw; u32 inst_lu32id; @@ -29,10 +34,16 @@ struct plt_entry { }; struct plt_idx_entry { - unsigned long symbol_addr; + Elf_Addr symbol_addr; }; -Elf_Addr module_emit_plt_entry(struct module *mod, unsigned long val); +Elf_Addr module_emit_got_entry(struct module *mod, Elf_Addr val); +Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Addr val); + +static inline struct got_entry emit_got_entry(Elf_Addr val) +{ + return (struct got_entry) { val }; +} static inline struct plt_entry emit_plt_entry(unsigned long val) { @@ -77,4 +88,16 @@ static inline struct plt_entry *get_plt_entry(unsigned long val, return plt + plt_idx; } +static inline struct got_entry *get_got_entry(Elf_Addr val, + const struct mod_section *sec) +{ + struct got_entry *got = (struct got_entry *)sec->shdr->sh_addr; + int i; + + for (i = 0; i < sec->num_entries; i++) + if (got[i].symbol_addr == val) + return &got[i]; + return NULL; +} + #endif /* _ASM_MODULE_H */ diff --git a/arch/loongarch/include/asm/module.lds.h b/arch/loongarch/include/asm/module.lds.h index 31c1c0db11a3..a3d1bc0fcc72 100644 --- a/arch/loongarch/include/asm/module.lds.h +++ b/arch/loongarch/include/asm/module.lds.h @@ -2,6 +2,7 @@ /* Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ SECTIONS { . = ALIGN(4); + .got : { BYTE(0) } .plt : { BYTE(0) } .plt.idx : { BYTE(0) } } diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h index 0bd6b0110198..ad8d88494554 100644 --- a/arch/loongarch/include/asm/percpu.h +++ b/arch/loongarch/include/asm/percpu.h @@ -8,6 +8,15 @@ #include <asm/cmpxchg.h> #include <asm/loongarch.h> +/* + * The "address" (in fact, offset from $r21) of a per-CPU variable is close to + * the loading address of main kernel image, but far from where the modules are + * loaded. Tell the compiler this fact when using explicit relocs. + */ +#if defined(MODULE) && defined(CONFIG_AS_HAS_EXPLICIT_RELOCS) +#define PER_CPU_ATTRIBUTES __attribute__((model("extreme"))) +#endif + /* Use r21 for fast access */ register unsigned long __my_cpu_offset __asm__("$r21"); diff --git a/arch/loongarch/include/asm/perf_event.h b/arch/loongarch/include/asm/perf_event.h index dcb3b17053a8..2a35a0bc2aaa 100644 --- a/arch/loongarch/include/asm/perf_event.h +++ b/arch/loongarch/include/asm/perf_event.h @@ -6,5 +6,7 @@ #ifndef __LOONGARCH_PERF_EVENT_H__ #define __LOONGARCH_PERF_EVENT_H__ -/* Nothing to show here; the file is required by linux/perf_event.h. 
*/ + +#define perf_arch_bpf_user_pt_regs(regs) (struct user_pt_regs *)regs + #endif /* __LOONGARCH_PERF_EVENT_H__ */ diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h index 9ca147a29bab..3d1e0a69975a 100644 --- a/arch/loongarch/include/asm/pgtable-bits.h +++ b/arch/loongarch/include/asm/pgtable-bits.h @@ -83,8 +83,11 @@ _PAGE_GLOBAL | _PAGE_KERN | _CACHE_SUC) #define PAGE_KERNEL_WUC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ _PAGE_GLOBAL | _PAGE_KERN | _CACHE_WUC) + #ifndef __ASSEMBLY__ +#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL_SUC) + #define pgprot_noncached pgprot_noncached static inline pgprot_t pgprot_noncached(pgprot_t _prot) diff --git a/arch/loongarch/include/asm/processor.h b/arch/loongarch/include/asm/processor.h index 1c4b4308378d..6954dc5d24e9 100644 --- a/arch/loongarch/include/asm/processor.h +++ b/arch/loongarch/include/asm/processor.h @@ -176,9 +176,6 @@ struct thread_struct { struct task_struct; -/* Free all resources held by a thread. */ -#define release_thread(thread) do { } while (0) - enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_HALT, IDLE_NOMWAIT, IDLE_POLL}; extern unsigned long boot_option_idle_override; diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h index 6d7d2a3e23dd..ca373f8e3c4d 100644 --- a/arch/loongarch/include/asm/setup.h +++ b/arch/loongarch/include/asm/setup.h @@ -13,7 +13,9 @@ extern unsigned long eentry; extern unsigned long tlbrentry; +extern void tlb_init(int cpu); extern void cpu_cache_init(void); +extern void cache_error_setup(void); extern void per_cpu_trap_init(int cpu); extern void set_handler(unsigned long offset, void *addr, unsigned long len); extern void set_merr_handler(unsigned long offset, void *addr, unsigned long len); diff --git a/arch/loongarch/include/asm/spinlock.h b/arch/loongarch/include/asm/spinlock.h new file mode 100644 index 000000000000..7cb3476999be --- /dev/null +++ b/arch/loongarch/include/asm/spinlock.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_SPINLOCK_H +#define _ASM_SPINLOCK_H + +#include <asm/processor.h> +#include <asm/qspinlock.h> +#include <asm/qrwlock.h> + +#endif /* _ASM_SPINLOCK_H */ diff --git a/arch/loongarch/include/asm/spinlock_types.h b/arch/loongarch/include/asm/spinlock_types.h new file mode 100644 index 000000000000..7458d036c161 --- /dev/null +++ b/arch/loongarch/include/asm/spinlock_types.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_SPINLOCK_TYPES_H +#define _ASM_SPINLOCK_TYPES_H + +#include <asm-generic/qspinlock_types.h> +#include <asm-generic/qrwlock_types.h> + +#endif diff --git a/arch/loongarch/include/uapi/asm/bpf_perf_event.h b/arch/loongarch/include/uapi/asm/bpf_perf_event.h new file mode 100644 index 000000000000..eb6e2fd2a1f0 --- /dev/null +++ b/arch/loongarch/include/uapi/asm/bpf_perf_event.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__ +#define _UAPI__ASM_BPF_PERF_EVENT_H__ + +#include <linux/ptrace.h> + +typedef struct user_pt_regs bpf_user_pt_regs_t; + +#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */ diff --git a/arch/loongarch/include/uapi/asm/perf_regs.h b/arch/loongarch/include/uapi/asm/perf_regs.h new file mode 100644 index 000000000000..29d69c00fc7a --- /dev/null +++ 
b/arch/loongarch/include/uapi/asm/perf_regs.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _ASM_LOONGARCH_PERF_REGS_H +#define _ASM_LOONGARCH_PERF_REGS_H + +enum perf_event_loongarch_regs { + PERF_REG_LOONGARCH_PC, + PERF_REG_LOONGARCH_R1, + PERF_REG_LOONGARCH_R2, + PERF_REG_LOONGARCH_R3, + PERF_REG_LOONGARCH_R4, + PERF_REG_LOONGARCH_R5, + PERF_REG_LOONGARCH_R6, + PERF_REG_LOONGARCH_R7, + PERF_REG_LOONGARCH_R8, + PERF_REG_LOONGARCH_R9, + PERF_REG_LOONGARCH_R10, + PERF_REG_LOONGARCH_R11, + PERF_REG_LOONGARCH_R12, + PERF_REG_LOONGARCH_R13, + PERF_REG_LOONGARCH_R14, + PERF_REG_LOONGARCH_R15, + PERF_REG_LOONGARCH_R16, + PERF_REG_LOONGARCH_R17, + PERF_REG_LOONGARCH_R18, + PERF_REG_LOONGARCH_R19, + PERF_REG_LOONGARCH_R20, + PERF_REG_LOONGARCH_R21, + PERF_REG_LOONGARCH_R22, + PERF_REG_LOONGARCH_R23, + PERF_REG_LOONGARCH_R24, + PERF_REG_LOONGARCH_R25, + PERF_REG_LOONGARCH_R26, + PERF_REG_LOONGARCH_R27, + PERF_REG_LOONGARCH_R28, + PERF_REG_LOONGARCH_R29, + PERF_REG_LOONGARCH_R30, + PERF_REG_LOONGARCH_R31, + PERF_REG_LOONGARCH_MAX, +}; +#endif /* _ASM_LOONGARCH_PERF_REGS_H */ diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile index 6c33b5c45573..42be564278fa 100644 --- a/arch/loongarch/kernel/Makefile +++ b/arch/loongarch/kernel/Makefile @@ -23,7 +23,14 @@ obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_NUMA) += numa.o +obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o + +obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o +obj-$(CONFIG_CRASH_DUMP) += crash_dump.o + obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o +obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o + CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS) diff --git a/arch/loongarch/kernel/cacheinfo.c b/arch/loongarch/kernel/cacheinfo.c index 4662b06269f4..c7988f757281 100644 --- a/arch/loongarch/kernel/cacheinfo.c +++ b/arch/loongarch/kernel/cacheinfo.c @@ -5,73 +5,34 @@ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ #include <linux/cacheinfo.h> +#include <linux/topology.h> #include <asm/bootinfo.h> #include <asm/cpu-info.h> -/* Populates leaf and increments to next leaf */ -#define populate_cache(cache, leaf, c_level, c_type) \ -do { \ - leaf->type = c_type; \ - leaf->level = c_level; \ - leaf->coherency_line_size = c->cache.linesz; \ - leaf->number_of_sets = c->cache.sets; \ - leaf->ways_of_associativity = c->cache.ways; \ - leaf->size = c->cache.linesz * c->cache.sets * \ - c->cache.ways; \ - if (leaf->level > 2) \ - leaf->size *= nodes_per_package; \ - leaf++; \ -} while (0) - int init_cache_level(unsigned int cpu) { - struct cpuinfo_loongarch *c = ¤t_cpu_data; + int cache_present = current_cpu_data.cache_leaves_present; struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); - int levels = 0, leaves = 0; - - /* - * If Dcache is not set, we assume the cache structures - * are not properly initialized. - */ - if (c->dcache.waysize) - levels += 1; - else - return -ENOENT; - - - leaves += (c->icache.waysize) ? 
2 : 1; - - if (c->vcache.waysize) { - levels++; - leaves++; - } - if (c->scache.waysize) { - levels++; - leaves++; - } + this_cpu_ci->num_levels = + current_cpu_data.cache_leaves[cache_present - 1].level; + this_cpu_ci->num_leaves = cache_present; - if (c->tcache.waysize) { - levels++; - leaves++; - } - - this_cpu_ci->num_levels = levels; - this_cpu_ci->num_leaves = leaves; return 0; } static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf, struct cacheinfo *sib_leaf) { - return !((this_leaf->level == 1) || (this_leaf->level == 2)); + return (!(*(unsigned char *)(this_leaf->priv) & CACHE_PRIVATE) + && !(*(unsigned char *)(sib_leaf->priv) & CACHE_PRIVATE)); } static void cache_cpumap_setup(unsigned int cpu) { - struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); - struct cacheinfo *this_leaf, *sib_leaf; unsigned int index; + struct cacheinfo *this_leaf, *sib_leaf; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); for (index = 0; index < this_cpu_ci->num_leaves; index++) { unsigned int i; @@ -85,8 +46,10 @@ static void cache_cpumap_setup(unsigned int cpu) for_each_online_cpu(i) { struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i); - if (i == cpu || !sib_cpu_ci->info_list) - continue;/* skip if itself or no cacheinfo */ + if (i == cpu || !sib_cpu_ci->info_list || + (cpu_to_node(i) != cpu_to_node(cpu))) + continue; + sib_leaf = sib_cpu_ci->info_list + index; if (cache_leaves_are_shared(this_leaf, sib_leaf)) { cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map); @@ -98,31 +61,24 @@ static void cache_cpumap_setup(unsigned int cpu) int populate_cache_leaves(unsigned int cpu) { - int level = 1, nodes_per_package = 1; - struct cpuinfo_loongarch *c = ¤t_cpu_data; + int i, cache_present = current_cpu_data.cache_leaves_present; struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); struct cacheinfo *this_leaf = this_cpu_ci->info_list; - - if (loongson_sysconf.nr_nodes > 1) - nodes_per_package = loongson_sysconf.cores_per_package - / loongson_sysconf.cores_per_node; - - if (c->icache.waysize) { - populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA); - populate_cache(icache, this_leaf, level++, CACHE_TYPE_INST); - } else { - populate_cache(dcache, this_leaf, level++, CACHE_TYPE_UNIFIED); + struct cache_desc *cd, *cdesc = current_cpu_data.cache_leaves; + + for (i = 0; i < cache_present; i++) { + cd = cdesc + i; + + this_leaf->type = cd->type; + this_leaf->level = cd->level; + this_leaf->coherency_line_size = cd->linesz; + this_leaf->number_of_sets = cd->sets; + this_leaf->ways_of_associativity = cd->ways; + this_leaf->size = cd->linesz * cd->sets * cd->ways; + this_leaf->priv = &cd->flags; + this_leaf++; } - if (c->vcache.waysize) - populate_cache(vcache, this_leaf, level++, CACHE_TYPE_UNIFIED); - - if (c->scache.waysize) - populate_cache(scache, this_leaf, level++, CACHE_TYPE_UNIFIED); - - if (c->tcache.waysize) - populate_cache(tcache, this_leaf, level++, CACHE_TYPE_UNIFIED); - cache_cpumap_setup(cpu); this_cpu_ci->cpu_map_populated = true; diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c index 529ab8f44ec6..255a09876ef2 100644 --- a/arch/loongarch/kernel/cpu-probe.c +++ b/arch/loongarch/kernel/cpu-probe.c @@ -187,7 +187,9 @@ static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int uint64_t *vendor = (void *)(&cpu_full_name[VENDOR_OFFSET]); uint64_t *cpuname = (void *)(&cpu_full_name[CPUNAME_OFFSET]); - __cpu_full_name[cpu] = cpu_full_name; + if (!__cpu_full_name[cpu]) + __cpu_full_name[cpu] = 
cpu_full_name; + *vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR); *cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME); diff --git a/arch/loongarch/kernel/crash_dump.c b/arch/loongarch/kernel/crash_dump.c new file mode 100644 index 000000000000..e559307c1092 --- /dev/null +++ b/arch/loongarch/kernel/crash_dump.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/crash_dump.h> +#include <linux/io.h> +#include <linux/uio.h> + +ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, + size_t csize, unsigned long offset) +{ + void *vaddr; + + if (!csize) + return 0; + + vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB); + if (!vaddr) + return -ENOMEM; + + csize = copy_to_iter(vaddr + offset, csize, iter); + + memunmap(vaddr); + + return csize; +} diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S index 7e57ae8741b1..97425779ce9f 100644 --- a/arch/loongarch/kernel/head.S +++ b/arch/loongarch/kernel/head.S @@ -8,6 +8,7 @@ #include <asm/addrspace.h> #include <asm/asm.h> #include <asm/asmmacro.h> +#include <asm/bug.h> #include <asm/regdef.h> #include <asm/loongarch.h> #include <asm/stackframe.h> @@ -20,7 +21,11 @@ _head: .word MZ_MAGIC /* "MZ", MS-DOS header */ - .org 0x3c /* 0x04 ~ 0x3b reserved */ + .org 0x8 + .dword kernel_entry /* Kernel entry point */ + .dword _end - _text /* Kernel image effective size */ + .quad 0 /* Kernel image load offset from start of RAM */ + .org 0x3c /* 0x20 ~ 0x3b reserved */ .long pe_header - _head /* Offset to the PE header */ pe_header: @@ -57,19 +62,19 @@ SYM_CODE_START(kernel_entry) # kernel entry point li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0 csrwr t0, LOONGARCH_CSR_EUEN - la t0, __bss_start # clear .bss + la.pcrel t0, __bss_start # clear .bss st.d zero, t0, 0 - la t1, __bss_stop - LONGSIZE + la.pcrel t1, __bss_stop - LONGSIZE 1: addi.d t0, t0, LONGSIZE st.d zero, t0, 0 bne t0, t1, 1b - la t0, fw_arg0 + la.pcrel t0, fw_arg0 st.d a0, t0, 0 # firmware arguments - la t0, fw_arg1 + la.pcrel t0, fw_arg1 st.d a1, t0, 0 - la t0, fw_arg2 + la.pcrel t0, fw_arg2 st.d a2, t0, 0 /* KSave3 used for percpu base, initialized as 0 */ @@ -77,7 +82,7 @@ SYM_CODE_START(kernel_entry) # kernel entry point /* GPR21 used for percpu base (runtime), initialized as 0 */ move u0, zero - la tp, init_thread_union + la.pcrel tp, init_thread_union /* Set the SP after an empty pt_regs. 
*/ PTR_LI sp, (_THREAD_SIZE - 32 - PT_SIZE) PTR_ADD sp, sp, tp @@ -85,6 +90,7 @@ SYM_CODE_START(kernel_entry) # kernel entry point PTR_ADDI sp, sp, -4 * SZREG # init stack pointer bl start_kernel + ASM_BUG() SYM_CODE_END(kernel_entry) @@ -116,6 +122,8 @@ SYM_CODE_START(smpboot_entry) ld.d tp, t0, CPU_BOOT_TINFO bl start_secondary + ASM_BUG() + SYM_CODE_END(smpboot_entry) #endif /* CONFIG_SMP */ diff --git a/arch/loongarch/kernel/machine_kexec.c b/arch/loongarch/kernel/machine_kexec.c new file mode 100644 index 000000000000..2dcb9e003657 --- /dev/null +++ b/arch/loongarch/kernel/machine_kexec.c @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * machine_kexec.c for kexec + * + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ +#include <linux/compiler.h> +#include <linux/cpu.h> +#include <linux/kexec.h> +#include <linux/crash_dump.h> +#include <linux/delay.h> +#include <linux/irq.h> +#include <linux/libfdt.h> +#include <linux/mm.h> +#include <linux/of_fdt.h> +#include <linux/reboot.h> +#include <linux/sched.h> +#include <linux/sched/task_stack.h> + +#include <asm/bootinfo.h> +#include <asm/cacheflush.h> +#include <asm/page.h> + +/* 0x100000 ~ 0x200000 is safe */ +#define KEXEC_CONTROL_CODE TO_CACHE(0x100000UL) +#define KEXEC_CMDLINE_ADDR TO_CACHE(0x108000UL) + +static unsigned long reboot_code_buffer; +static cpumask_t cpus_in_crash = CPU_MASK_NONE; + +#ifdef CONFIG_SMP +static void (*relocated_kexec_smp_wait)(void *); +atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0); +#endif + +static unsigned long efi_boot; +static unsigned long cmdline_ptr; +static unsigned long systable_ptr; +static unsigned long start_addr; +static unsigned long first_ind_entry; + +static void kexec_image_info(const struct kimage *kimage) +{ + unsigned long i; + + pr_debug("kexec kimage info:\n"); + pr_debug("\ttype: %d\n", kimage->type); + pr_debug("\tstart: %lx\n", kimage->start); + pr_debug("\thead: %lx\n", kimage->head); + pr_debug("\tnr_segments: %lu\n", kimage->nr_segments); + + for (i = 0; i < kimage->nr_segments; i++) { + pr_debug("\t segment[%lu]: %016lx - %016lx", i, + kimage->segment[i].mem, + kimage->segment[i].mem + kimage->segment[i].memsz); + pr_debug("\t\t0x%lx bytes, %lu pages\n", + (unsigned long)kimage->segment[i].memsz, + (unsigned long)kimage->segment[i].memsz / PAGE_SIZE); + } +} + +int machine_kexec_prepare(struct kimage *kimage) +{ + int i; + char *bootloader = "kexec"; + void *cmdline_ptr = (void *)KEXEC_CMDLINE_ADDR; + + kexec_image_info(kimage); + + kimage->arch.efi_boot = fw_arg0; + kimage->arch.systable_ptr = fw_arg2; + + /* Find the command line */ + for (i = 0; i < kimage->nr_segments; i++) { + if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) { + if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE)) + kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr; + break; + } + } + + if (!kimage->arch.cmdline_ptr) { + pr_err("Command line not included in the provided image\n"); + return -EINVAL; + } + + /* kexec/kdump need a safe page to save reboot_code_buffer */ + kimage->control_code_page = virt_to_page((void *)KEXEC_CONTROL_CODE); + + reboot_code_buffer = (unsigned long)page_address(kimage->control_code_page); + memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size); + +#ifdef CONFIG_SMP + /* All secondary cpus now may jump to kexec_smp_wait cycle */ + relocated_kexec_smp_wait = reboot_code_buffer + (void *)(kexec_smp_wait - relocate_new_kernel); +#endif + + return 0; +} + 
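machine_kexec_prepare() above locates the new kernel's command line by scanning every kimage segment for a buffer that begins with the literal "kexec". A user-space model of that scan (hypothetical struct segment standing in for the kernel's struct kexec_segment and its copy_from_user() path):

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the one kexec_segment field used here. */
struct segment { const char *buf; };

/* Return the index of the first segment whose buffer starts with the
 * bootloader marker "kexec", mirroring the strncmp() scan in
 * machine_kexec_prepare(); -1 when no command line was passed. */
static int find_cmdline_segment(const struct segment *segs, int n)
{
	const char *bootloader = "kexec";
	int i;

	for (i = 0; i < n; i++)
		if (!strncmp(bootloader, segs[i].buf, strlen(bootloader)))
			return i;
	return -1;
}

int main(void)
{
	struct segment segs[] = {
		{ "\177ELF..." },				/* kernel image */
		{ "kexec console=ttyS0 root=/dev/sda1" },	/* command line */
	};

	printf("command line in segment %d\n", find_cmdline_segment(segs, 2));
	return 0;
}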
+void machine_kexec_cleanup(struct kimage *kimage) +{ +} + +void kexec_reboot(void) +{ + do_kexec_t do_kexec = NULL; + + /* + * We know we were online, and there will be no incoming IPIs at + * this point. Mark online again before rebooting so that the crash + * analysis tool will see us correctly. + */ + set_cpu_online(smp_processor_id(), true); + + /* Ensure remote CPUs observe that we're online before rebooting. */ + smp_mb__after_atomic(); + + /* + * Make sure we get correct instructions written by the + * machine_kexec_prepare() CPU. + */ + __asm__ __volatile__ ("\tibar 0\n"::); + +#ifdef CONFIG_SMP + /* All secondary cpus go to kexec_smp_wait */ + if (smp_processor_id() > 0) { + relocated_kexec_smp_wait(NULL); + unreachable(); + } +#endif + + do_kexec = (void *)reboot_code_buffer; + do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry); + + unreachable(); +} + + +#ifdef CONFIG_SMP +static void kexec_shutdown_secondary(void *regs) +{ + int cpu = smp_processor_id(); + + if (!cpu_online(cpu)) + return; + + /* We won't be sent IPIs any more. */ + set_cpu_online(cpu, false); + + local_irq_disable(); + while (!atomic_read(&kexec_ready_to_reboot)) + cpu_relax(); + + kexec_reboot(); +} + +static void crash_shutdown_secondary(void *passed_regs) +{ + int cpu = smp_processor_id(); + struct pt_regs *regs = passed_regs; + + /* + * If we are passed registers, use those. Otherwise get the + * regs from the last interrupt, which should be correct, as + * we are in an interrupt. But if the regs are not there, + * pull them from the top of the stack. They are probably + * wrong, but we need something to keep from crashing again. + */ + if (!regs) + regs = get_irq_regs(); + if (!regs) + regs = task_pt_regs(current); + + if (!cpu_online(cpu)) + return; + + /* We won't be sent IPIs any more. */ + set_cpu_online(cpu, false); + + local_irq_disable(); + if (!cpumask_test_cpu(cpu, &cpus_in_crash)) + crash_save_cpu(regs, cpu); + cpumask_set_cpu(cpu, &cpus_in_crash); + + while (!atomic_read(&kexec_ready_to_reboot)) + cpu_relax(); + + kexec_reboot(); +} + +void crash_smp_send_stop(void) +{ + unsigned int ncpus; + unsigned long timeout; + static int cpus_stopped; + + /* + * This function can be called twice in panic path, but obviously + * we should execute this only once. + */ + if (cpus_stopped) + return; + + cpus_stopped = 1; + + /* Excluding the panic cpu */ + ncpus = num_online_cpus() - 1; + + smp_call_function(crash_shutdown_secondary, NULL, 0); + smp_wmb(); + + /* + * The crash CPU sends an IPI and wait for other CPUs to + * respond. Delay of at least 10 seconds. 
+ */ + timeout = MSEC_PER_SEC * 10; + pr_emerg("Sending IPI to other cpus...\n"); + while ((cpumask_weight(&cpus_in_crash) < ncpus) && timeout--) { + mdelay(1); + cpu_relax(); + } +} +#endif /* defined(CONFIG_SMP) */ + +void machine_shutdown(void) +{ + int cpu; + + /* All CPUs go to reboot_code_buffer */ + for_each_possible_cpu(cpu) + if (!cpu_online(cpu)) + cpu_device_up(get_cpu_device(cpu)); + +#ifdef CONFIG_SMP + smp_call_function(kexec_shutdown_secondary, NULL, 0); +#endif +} + +void machine_crash_shutdown(struct pt_regs *regs) +{ + int crashing_cpu; + + local_irq_disable(); + + crashing_cpu = smp_processor_id(); + crash_save_cpu(regs, crashing_cpu); + +#ifdef CONFIG_SMP + crash_smp_send_stop(); +#endif + cpumask_set_cpu(crashing_cpu, &cpus_in_crash); + + pr_info("Starting crashdump kernel...\n"); +} + +void machine_kexec(struct kimage *image) +{ + unsigned long entry, *ptr; + struct kimage_arch *internal = &image->arch; + + efi_boot = internal->efi_boot; + cmdline_ptr = internal->cmdline_ptr; + systable_ptr = internal->systable_ptr; + + start_addr = (unsigned long)phys_to_virt(image->start); + + first_ind_entry = (image->type == KEXEC_TYPE_DEFAULT) ? + (unsigned long)phys_to_virt(image->head & PAGE_MASK) : 0; + + /* + * The generic kexec code builds a page list with physical + * addresses. they are directly accessible through XKPRANGE + * hence the phys_to_virt() call. + */ + for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); + ptr = (entry & IND_INDIRECTION) ? + phys_to_virt(entry & PAGE_MASK) : ptr + 1) { + if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION || + *ptr & IND_DESTINATION) + *ptr = (unsigned long) phys_to_virt(*ptr); + } + + /* Mark offline before disabling local irq. */ + set_cpu_online(smp_processor_id(), false); + + /* We do not want to be bothered. */ + local_irq_disable(); + + pr_notice("EFI boot flag 0x%lx\n", efi_boot); + pr_notice("Command line at 0x%lx\n", cmdline_ptr); + pr_notice("System table at 0x%lx\n", systable_ptr); + pr_notice("We will call new kernel at 0x%lx\n", start_addr); + pr_notice("Bye ...\n"); + + /* Make reboot code buffer available to the boot CPU. */ + flush_cache_all(); + +#ifdef CONFIG_SMP + atomic_set(&kexec_ready_to_reboot, 1); +#endif + + kexec_reboot(); +} diff --git a/arch/loongarch/kernel/mem.c b/arch/loongarch/kernel/mem.c index 7423361b0ebc..4a4107a6a965 100644 --- a/arch/loongarch/kernel/mem.c +++ b/arch/loongarch/kernel/mem.c @@ -58,7 +58,4 @@ void __init memblock_init(void) /* Reserve the kernel text/data/bss */ memblock_reserve(__pa_symbol(&_text), __pa_symbol(&_end) - __pa_symbol(&_text)); - - /* Reserve the initrd */ - reserve_initrd_mem(); } diff --git a/arch/loongarch/kernel/module-sections.c b/arch/loongarch/kernel/module-sections.c index 6d498288977d..d296a70b758f 100644 --- a/arch/loongarch/kernel/module-sections.c +++ b/arch/loongarch/kernel/module-sections.c @@ -7,7 +7,33 @@ #include <linux/kernel.h> #include <linux/module.h> -Elf_Addr module_emit_plt_entry(struct module *mod, unsigned long val) +Elf_Addr module_emit_got_entry(struct module *mod, Elf_Addr val) +{ + struct mod_section *got_sec = &mod->arch.got; + int i = got_sec->num_entries; + struct got_entry *got = get_got_entry(val, got_sec); + + if (got) + return (Elf_Addr)got; + + /* There is no GOT entry for val yet, create a new one. 
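machine_kexec() above rewrites the generic kexec page list in place, converting physical entries into XKPRANGE virtual addresses. A user-space model of the same list walk, with phys_to_virt() reduced to the identity, IND_* values as in <linux/kexec.h>, and 4 KiB pages assumed for the model:

#include <stdio.h>

#define IND_DESTINATION	0x1UL
#define IND_INDIRECTION	0x2UL
#define IND_DONE	0x4UL
#define IND_SOURCE	0x8UL
#define PAGE_MASK	(~0xfffUL)	/* 4 KiB pages assumed */

/* Walk a kexec indirection list: each entry carries a page address plus
 * a flag in the low bits; IND_INDIRECTION chains to the next page of
 * entries and IND_DONE terminates, as in machine_kexec()'s loop. */
static void walk_ind_list(unsigned long *head)
{
	unsigned long entry, *ptr;

	for (ptr = head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
		   (unsigned long *)(entry & PAGE_MASK) : ptr + 1)
		printf("page %#lx, flag %#lx\n",
		       entry & PAGE_MASK, entry & ~PAGE_MASK);
}

int main(void)
{
	/* Aligned so that masking with PAGE_MASK recovers the pointer,
	 * as it does for the kernel's page-aligned indirection pages. */
	static _Alignas(4096) unsigned long next[2];
	static unsigned long first[3];

	next[0] = 0x3000 | IND_SOURCE;
	next[1] = IND_DONE;
	first[0] = 0x1000 | IND_DESTINATION;
	first[1] = 0x2000 | IND_SOURCE;
	first[2] = (unsigned long)next | IND_INDIRECTION;

	walk_ind_list(first);
	return 0;
}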
*/ + got = (struct got_entry *)got_sec->shdr->sh_addr; + got[i] = emit_got_entry(val); + + got_sec->num_entries++; + if (got_sec->num_entries > got_sec->max_entries) { + /* + * This may happen when the module contains a GOT_HI20 without + * a paired GOT_LO12. Such a module is broken, reject it. + */ + pr_err("%s: module contains bad GOT relocation\n", mod->name); + return 0; + } + + return (Elf_Addr)&got[i]; +} + +Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Addr val) { int nr; struct mod_section *plt_sec = &mod->arch.plt; @@ -50,15 +76,25 @@ static bool duplicate_rela(const Elf_Rela *rela, int idx) return false; } -static void count_max_entries(Elf_Rela *relas, int num, unsigned int *plts) +static void count_max_entries(Elf_Rela *relas, int num, + unsigned int *plts, unsigned int *gots) { unsigned int i, type; for (i = 0; i < num; i++) { type = ELF_R_TYPE(relas[i].r_info); - if (type == R_LARCH_SOP_PUSH_PLT_PCREL) { + switch (type) { + case R_LARCH_SOP_PUSH_PLT_PCREL: + case R_LARCH_B26: if (!duplicate_rela(relas, i)) (*plts)++; + break; + case R_LARCH_GOT_PC_HI20: + if (!duplicate_rela(relas, i)) + (*gots)++; + break; + default: + break; /* Do nothing. */ } } } @@ -66,18 +102,24 @@ static void count_max_entries(Elf_Rela *relas, int num, unsigned int *plts) int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings, struct module *mod) { - unsigned int i, num_plts = 0; + unsigned int i, num_plts = 0, num_gots = 0; /* * Find the empty .plt sections. */ for (i = 0; i < ehdr->e_shnum; i++) { - if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt")) + if (!strcmp(secstrings + sechdrs[i].sh_name, ".got")) + mod->arch.got.shdr = sechdrs + i; + else if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt")) mod->arch.plt.shdr = sechdrs + i; else if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt.idx")) mod->arch.plt_idx.shdr = sechdrs + i; } + if (!mod->arch.got.shdr) { + pr_err("%s: module GOT section(s) missing\n", mod->name); + return -ENOEXEC; + } if (!mod->arch.plt.shdr) { pr_err("%s: module PLT section(s) missing\n", mod->name); return -ENOEXEC; @@ -100,9 +142,16 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, if (!(dst_sec->sh_flags & SHF_EXECINSTR)) continue; - count_max_entries(relas, num_rela, &num_plts); + count_max_entries(relas, num_rela, &num_plts, &num_gots); } + mod->arch.got.shdr->sh_type = SHT_NOBITS; + mod->arch.got.shdr->sh_flags = SHF_ALLOC; + mod->arch.got.shdr->sh_addralign = L1_CACHE_BYTES; + mod->arch.got.shdr->sh_size = (num_gots + 1) * sizeof(struct got_entry); + mod->arch.got.num_entries = 0; + mod->arch.got.max_entries = num_gots; + mod->arch.plt.shdr->sh_type = SHT_NOBITS; mod->arch.plt.shdr->sh_flags = SHF_EXECINSTR | SHF_ALLOC; mod->arch.plt.shdr->sh_addralign = L1_CACHE_BYTES; diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c index 638427ff0d51..097595b2fc14 100644 --- a/arch/loongarch/kernel/module.c +++ b/arch/loongarch/kernel/module.c @@ -18,16 +18,6 @@ #include <linux/string.h> #include <linux/kernel.h> -static inline bool signed_imm_check(long val, unsigned int bit) -{ - return -(1L << (bit - 1)) <= val && val < (1L << (bit - 1)); -} - -static inline bool unsigned_imm_check(unsigned long val, unsigned int bit) -{ - return val < (1UL << bit); -} - static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top) { if (*rela_stack_top >= RELA_STACK_DEPTH) @@ -281,6 +271,96 @@ static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v, } } +static int 
apply_r_larch_b26(struct module *mod, u32 *location, Elf_Addr v, + s64 *rela_stack, size_t *rela_stack_top, unsigned int type) +{ + ptrdiff_t offset = (void *)v - (void *)location; + union loongarch_instruction *insn = (union loongarch_instruction *)location; + + if (offset >= SZ_128M) + v = module_emit_plt_entry(mod, v); + + if (offset < -SZ_128M) + v = module_emit_plt_entry(mod, v); + + offset = (void *)v - (void *)location; + + if (offset & 3) { + pr_err("module %s: jump offset = 0x%llx unaligned! dangerous R_LARCH_B26 (%u) relocation\n", + mod->name, (long long)offset, type); + return -ENOEXEC; + } + + if (!signed_imm_check(offset, 28)) { + pr_err("module %s: jump offset = 0x%llx overflow! dangerous R_LARCH_B26 (%u) relocation\n", + mod->name, (long long)offset, type); + return -ENOEXEC; + } + + offset >>= 2; + insn->reg0i26_format.immediate_l = offset & 0xffff; + insn->reg0i26_format.immediate_h = (offset >> 16) & 0x3ff; + + return 0; +} + +static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v, + s64 *rela_stack, size_t *rela_stack_top, unsigned int type) +{ + union loongarch_instruction *insn = (union loongarch_instruction *)location; + /* Use s32 for a sign-extension deliberately. */ + s32 offset_hi20 = (void *)((v + 0x800) & ~0xfff) - + (void *)((Elf_Addr)location & ~0xfff); + Elf_Addr anchor = (((Elf_Addr)location) & ~0xfff) + offset_hi20; + ptrdiff_t offset_rem = (void *)v - (void *)anchor; + + switch (type) { + case R_LARCH_PCALA_LO12: + insn->reg2i12_format.immediate = v & 0xfff; + break; + case R_LARCH_PCALA_HI20: + v = offset_hi20 >> 12; + insn->reg1i20_format.immediate = v & 0xfffff; + break; + case R_LARCH_PCALA64_LO20: + v = offset_rem >> 32; + insn->reg1i20_format.immediate = v & 0xfffff; + break; + case R_LARCH_PCALA64_HI12: + v = offset_rem >> 52; + insn->reg2i12_format.immediate = v & 0xfff; + break; + default: + pr_err("%s: Unsupport relocation type %u\n", mod->name, type); + return -EINVAL; + } + + return 0; +} + +static int apply_r_larch_got_pc(struct module *mod, u32 *location, Elf_Addr v, + s64 *rela_stack, size_t *rela_stack_top, unsigned int type) +{ + Elf_Addr got = module_emit_got_entry(mod, v); + + if (!got) + return -EINVAL; + + switch (type) { + case R_LARCH_GOT_PC_LO12: + type = R_LARCH_PCALA_LO12; + break; + case R_LARCH_GOT_PC_HI20: + type = R_LARCH_PCALA_HI20; + break; + default: + pr_err("%s: Unsupport relocation type %u\n", mod->name, type); + return -EINVAL; + } + + return apply_r_larch_pcala(mod, location, got, rela_stack, rela_stack_top, type); +} + /* * reloc_handlers_rela() - Apply a particular relocation to a module * @mod: the module to apply the reloc to @@ -296,7 +376,7 @@ typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v, /* The handlers for known reloc types */ static reloc_rela_handler reloc_rela_handlers[] = { - [R_LARCH_NONE ... R_LARCH_SUB64] = apply_r_larch_error, + [R_LARCH_NONE ... R_LARCH_RELAX] = apply_r_larch_error, [R_LARCH_NONE] = apply_r_larch_none, [R_LARCH_32] = apply_r_larch_32, @@ -310,6 +390,9 @@ static reloc_rela_handler reloc_rela_handlers[] = { [R_LARCH_SOP_SUB ... R_LARCH_SOP_IF_ELSE] = apply_r_larch_sop, [R_LARCH_SOP_POP_32_S_10_5 ... R_LARCH_SOP_POP_32_U] = apply_r_larch_sop_imm_field, [R_LARCH_ADD32 ... 
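apply_r_larch_pcala() above depends on the usual hi20/lo12 split: biasing by 0x800 before taking the high part lets the hardware's sign-extended low 12 bits reconstruct the offset exactly. A self-contained check of that identity using portable arithmetic (the real handler additionally masks the results into the 20- and 12-bit instruction fields):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Split a PC-relative offset into the PCALA_HI20/PCALA_LO12 immediates.
 * lo12 is the sign-extended low 12 bits; hi20 is chosen so that
 * hi20 * 4096 + lo12 == off, which is what the +0x800 bias achieves. */
static void split_pcala(int32_t off, int32_t *hi20, int32_t *lo12)
{
	int32_t lo = off & 0xfff;

	if (lo >= 0x800)		/* hardware sign-extends the lo12 field */
		lo -= 0x1000;
	*lo12 = lo;
	*hi20 = (off - lo) / 4096;	/* exact: off - lo is 4 KiB aligned */
}

int main(void)
{
	int32_t off, hi20, lo12;

	for (off = -0x12345; off < 0x12345; off += 0x777) {
		split_pcala(off, &hi20, &lo12);
		assert(hi20 * 4096 + lo12 == off);	/* round trip */
	}
	printf("hi20/lo12 round trip OK\n");
	return 0;
}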
R_LARCH_SUB64] = apply_r_larch_add_sub, + [R_LARCH_B26] = apply_r_larch_b26, + [R_LARCH_PCALA_HI20...R_LARCH_PCALA64_HI12] = apply_r_larch_pcala, + [R_LARCH_GOT_PC_HI20...R_LARCH_GOT_PC_LO12] = apply_r_larch_got_pc, }; int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c new file mode 100644 index 000000000000..707bd32e5c4f --- /dev/null +++ b/arch/loongarch/kernel/perf_event.c @@ -0,0 +1,887 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Linux performance counter support for LoongArch. + * + * Copyright (C) 2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 2010 MIPS Technologies, Inc. + * Copyright (C) 2011 Cavium Networks, Inc. + * Author: Deng-Cheng Zhu + */ + +#include <linux/cpumask.h> +#include <linux/interrupt.h> +#include <linux/smp.h> +#include <linux/kernel.h> +#include <linux/perf_event.h> +#include <linux/uaccess.h> +#include <linux/sched/task_stack.h> + +#include <asm/irq.h> +#include <asm/irq_regs.h> +#include <asm/stacktrace.h> +#include <asm/unwind.h> + +/* + * Get the return address for a single stackframe and return a pointer to the + * next frame tail. + */ +static unsigned long +user_backtrace(struct perf_callchain_entry_ctx *entry, unsigned long fp) +{ + unsigned long err; + unsigned long __user *user_frame_tail; + struct stack_frame buftail; + + user_frame_tail = (unsigned long __user *)(fp - sizeof(struct stack_frame)); + + /* Also check accessibility of one struct frame_tail beyond */ + if (!access_ok(user_frame_tail, sizeof(buftail))) + return 0; + + pagefault_disable(); + err = __copy_from_user_inatomic(&buftail, user_frame_tail, sizeof(buftail)); + pagefault_enable(); + + if (err || (unsigned long)user_frame_tail >= buftail.fp) + return 0; + + perf_callchain_store(entry, buftail.ra); + + return buftail.fp; +} + +void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + unsigned long fp; + + if (perf_guest_state()) { + /* We don't support guest os callchain now */ + return; + } + + perf_callchain_store(entry, regs->csr_era); + + fp = regs->regs[22]; + + while (entry->nr < entry->max_stack && fp && !((unsigned long)fp & 0xf)) + fp = user_backtrace(entry, fp); +} + +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + struct unwind_state state; + unsigned long addr; + + for (unwind_start(&state, current, regs); + !unwind_done(&state); unwind_next_frame(&state)) { + addr = unwind_get_return_address(&state); + if (!addr || perf_callchain_store(entry, addr)) + return; + } +} + +#define LOONGARCH_MAX_HWEVENTS 32 + +struct cpu_hw_events { + /* Array of events on this cpu. */ + struct perf_event *events[LOONGARCH_MAX_HWEVENTS]; + + /* + * Set the bit (indexed by the counter number) when the counter + * is used for an event. + */ + unsigned long used_mask[BITS_TO_LONGS(LOONGARCH_MAX_HWEVENTS)]; + + /* + * Software copy of the control register for each performance counter. + */ + unsigned int saved_ctrl[LOONGARCH_MAX_HWEVENTS]; +}; +static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { + .saved_ctrl = {0}, +}; + +/* The description of LoongArch performance events. 
*/ +struct loongarch_perf_event { + unsigned int event_id; +}; + +static struct loongarch_perf_event raw_event; +static DEFINE_MUTEX(raw_event_mutex); + +#define C(x) PERF_COUNT_HW_CACHE_##x +#define HW_OP_UNSUPPORTED 0xffffffff +#define CACHE_OP_UNSUPPORTED 0xffffffff + +#define PERF_MAP_ALL_UNSUPPORTED \ + [0 ... PERF_COUNT_HW_MAX - 1] = {HW_OP_UNSUPPORTED} + +#define PERF_CACHE_MAP_ALL_UNSUPPORTED \ +[0 ... C(MAX) - 1] = { \ + [0 ... C(OP_MAX) - 1] = { \ + [0 ... C(RESULT_MAX) - 1] = {CACHE_OP_UNSUPPORTED}, \ + }, \ +} + +struct loongarch_pmu { + u64 max_period; + u64 valid_count; + u64 overflow; + const char *name; + unsigned int num_counters; + u64 (*read_counter)(unsigned int idx); + void (*write_counter)(unsigned int idx, u64 val); + const struct loongarch_perf_event *(*map_raw_event)(u64 config); + const struct loongarch_perf_event (*general_event_map)[PERF_COUNT_HW_MAX]; + const struct loongarch_perf_event (*cache_event_map) + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; +}; + +static struct loongarch_pmu loongarch_pmu; + +#define M_PERFCTL_EVENT(event) (event & CSR_PERFCTRL_EVENT) + +#define M_PERFCTL_COUNT_EVENT_WHENEVER (CSR_PERFCTRL_PLV0 | \ + CSR_PERFCTRL_PLV1 | \ + CSR_PERFCTRL_PLV2 | \ + CSR_PERFCTRL_PLV3 | \ + CSR_PERFCTRL_IE) + +#define M_PERFCTL_CONFIG_MASK 0x1f0000 + +static void pause_local_counters(void); +static void resume_local_counters(void); + +static u64 loongarch_pmu_read_counter(unsigned int idx) +{ + u64 val = -1; + + switch (idx) { + case 0: + val = read_csr_perfcntr0(); + break; + case 1: + val = read_csr_perfcntr1(); + break; + case 2: + val = read_csr_perfcntr2(); + break; + case 3: + val = read_csr_perfcntr3(); + break; + default: + WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); + return 0; + } + + return val; +} + +static void loongarch_pmu_write_counter(unsigned int idx, u64 val) +{ + switch (idx) { + case 0: + write_csr_perfcntr0(val); + return; + case 1: + write_csr_perfcntr1(val); + return; + case 2: + write_csr_perfcntr2(val); + return; + case 3: + write_csr_perfcntr3(val); + return; + default: + WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); + return; + } +} + +static unsigned int loongarch_pmu_read_control(unsigned int idx) +{ + unsigned int val = -1; + + switch (idx) { + case 0: + val = read_csr_perfctrl0(); + break; + case 1: + val = read_csr_perfctrl1(); + break; + case 2: + val = read_csr_perfctrl2(); + break; + case 3: + val = read_csr_perfctrl3(); + break; + default: + WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); + return 0; + } + + return val; +} + +static void loongarch_pmu_write_control(unsigned int idx, unsigned int val) +{ + switch (idx) { + case 0: + write_csr_perfctrl0(val); + return; + case 1: + write_csr_perfctrl1(val); + return; + case 2: + write_csr_perfctrl2(val); + return; + case 3: + write_csr_perfctrl3(val); + return; + default: + WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); + return; + } +} + +static int loongarch_pmu_alloc_counter(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc) +{ + int i; + + for (i = 0; i < loongarch_pmu.num_counters; i++) { + if (!test_and_set_bit(i, cpuc->used_mask)) + return i; + } + + return -EAGAIN; +} + +static void loongarch_pmu_enable_event(struct hw_perf_event *evt, int idx) +{ + unsigned int cpu; + struct perf_event *event = container_of(evt, struct perf_event, hw); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + WARN_ON(idx < 0 || idx >= 
loongarch_pmu.num_counters); + + /* Make sure interrupt enabled. */ + cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) | + (evt->config_base & M_PERFCTL_CONFIG_MASK) | CSR_PERFCTRL_IE; + + cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id(); + + /* + * We do not actually let the counter run. Leave it until start(). + */ + pr_debug("Enabling perf counter for CPU%d\n", cpu); +} + +static void loongarch_pmu_disable_event(int idx) +{ + unsigned long flags; + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters); + + local_irq_save(flags); + cpuc->saved_ctrl[idx] = loongarch_pmu_read_control(idx) & + ~M_PERFCTL_COUNT_EVENT_WHENEVER; + loongarch_pmu_write_control(idx, cpuc->saved_ctrl[idx]); + local_irq_restore(flags); +} + +static int loongarch_pmu_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, + int idx) +{ + int ret = 0; + u64 left = local64_read(&hwc->period_left); + u64 period = hwc->sample_period; + + if (unlikely((left + period) & (1ULL << 63))) { + /* left underflowed by more than period. */ + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } else if (unlikely((left + period) <= period)) { + /* left underflowed by less than period. */ + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (left > loongarch_pmu.max_period) { + left = loongarch_pmu.max_period; + local64_set(&hwc->period_left, left); + } + + local64_set(&hwc->prev_count, loongarch_pmu.overflow - left); + + loongarch_pmu.write_counter(idx, loongarch_pmu.overflow - left); + + perf_event_update_userpage(event); + + return ret; +} + +static void loongarch_pmu_event_update(struct perf_event *event, + struct hw_perf_event *hwc, + int idx) +{ + u64 delta; + u64 prev_raw_count, new_raw_count; + +again: + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = loongarch_pmu.read_counter(idx); + + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = new_raw_count - prev_raw_count; + + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); +} + +static void loongarch_pmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + + hwc->state = 0; + + /* Set the period for the event. */ + loongarch_pmu_event_set_period(event, hwc, hwc->idx); + + /* Enable the event. */ + loongarch_pmu_enable_event(hwc, hwc->idx); +} + +static void loongarch_pmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!(hwc->state & PERF_HES_STOPPED)) { + /* We are working on a local event. */ + loongarch_pmu_disable_event(hwc->idx); + barrier(); + loongarch_pmu_event_update(event, hwc, hwc->idx); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; + } +} + +static int loongarch_pmu_add(struct perf_event *event, int flags) +{ + int idx, err = 0; + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + + perf_pmu_disable(event->pmu); + + /* To look for a free counter for this event. */ + idx = loongarch_pmu_alloc_counter(cpuc, hwc); + if (idx < 0) { + err = idx; + goto out; + } + + /* + * If there is an event in the counter we are going to use then + * make sure it is disabled. 
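loongarch_pmu_event_set_period() above arms a counter by preloading overflow - left, so bit 63 fires after exactly "left" events. The preload arithmetic in isolation, with the constants init_hw_perf_events() installs:

#include <stdint.h>
#include <stdio.h>

#define MAX_PERIOD	((1ULL << 63) - 1)
#define OVERFLOW_BIT	(1ULL << 63)

/* Preload value for a counter that must overflow after "left" events,
 * mirroring the write_counter() argument computed above. */
static uint64_t preload_for(uint64_t left)
{
	if (left > MAX_PERIOD)		/* clamp, as the driver does */
		left = MAX_PERIOD;
	return OVERFLOW_BIT - left;
}

int main(void)
{
	uint64_t start = preload_for(1000);

	printf("preload %#llx, bit 63 set after %llu increments\n",
	       (unsigned long long)start,
	       (unsigned long long)(OVERFLOW_BIT - start));
	return 0;
}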
+ */ + event->hw.idx = idx; + loongarch_pmu_disable_event(idx); + cpuc->events[idx] = event; + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + if (flags & PERF_EF_START) + loongarch_pmu_start(event, PERF_EF_RELOAD); + + /* Propagate our changes to the userspace mapping. */ + perf_event_update_userpage(event); + +out: + perf_pmu_enable(event->pmu); + return err; +} + +static void loongarch_pmu_del(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters); + + loongarch_pmu_stop(event, PERF_EF_UPDATE); + cpuc->events[idx] = NULL; + clear_bit(idx, cpuc->used_mask); + + perf_event_update_userpage(event); +} + +static void loongarch_pmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + /* Don't read disabled counters! */ + if (hwc->idx < 0) + return; + + loongarch_pmu_event_update(event, hwc, hwc->idx); +} + +static void loongarch_pmu_enable(struct pmu *pmu) +{ + resume_local_counters(); +} + +static void loongarch_pmu_disable(struct pmu *pmu) +{ + pause_local_counters(); +} + +static DEFINE_MUTEX(pmu_reserve_mutex); +static atomic_t active_events = ATOMIC_INIT(0); + +static int get_pmc_irq(void) +{ + struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY); + + if (d) + return irq_create_mapping(d, EXCCODE_PMC - EXCCODE_INT_START); + + return -EINVAL; +} + +static void reset_counters(void *arg); +static int __hw_perf_event_init(struct perf_event *event); + +static void hw_perf_event_destroy(struct perf_event *event) +{ + if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) { + on_each_cpu(reset_counters, NULL, 1); + free_irq(get_pmc_irq(), &loongarch_pmu); + mutex_unlock(&pmu_reserve_mutex); + } +} + +static void handle_associated_event(struct cpu_hw_events *cpuc, int idx, + struct perf_sample_data *data, struct pt_regs *regs) +{ + struct perf_event *event = cpuc->events[idx]; + struct hw_perf_event *hwc = &event->hw; + + loongarch_pmu_event_update(event, hwc, idx); + data->period = event->hw.last_period; + if (!loongarch_pmu_event_set_period(event, hwc, idx)) + return; + + if (perf_event_overflow(event, data, regs)) + loongarch_pmu_disable_event(idx); +} + +static irqreturn_t pmu_handle_irq(int irq, void *dev) +{ + int n; + int handled = IRQ_NONE; + uint64_t counter; + struct pt_regs *regs; + struct perf_sample_data data; + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + /* + * First we pause the local counters, so that when we are locked + * here, the counters are all paused. When it gets locked due to + * perf_disable(), the timer interrupt handler will be delayed. + * + * See also loongarch_pmu_start(). + */ + pause_local_counters(); + + regs = get_irq_regs(); + + perf_sample_data_init(&data, 0, 0); + + for (n = 0; n < loongarch_pmu.num_counters; n++) { + if (test_bit(n, cpuc->used_mask)) { + counter = loongarch_pmu.read_counter(n); + if (counter & loongarch_pmu.overflow) { + handle_associated_event(cpuc, n, &data, regs); + handled = IRQ_HANDLED; + } + } + } + + resume_local_counters(); + + /* + * Do all the work for the pending perf events. We can do this + * in here because the performance counter interrupt is a regular + * interrupt, not NMI. 
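loongarch_pmu_event_update() above relies on unsigned 64-bit subtraction wrapping modulo 2^64, so new_raw_count - prev_raw_count is the true delta even when the hardware counter wrapped between two reads. A minimal demonstration:

#include <stdint.h>
#include <stdio.h>

static uint64_t prev_count;	/* models hwc->prev_count */
static uint64_t event_count;	/* models event->count */

/* Unsigned 64-bit subtraction wraps modulo 2^64, so the delta is correct
 * whenever fewer than 2^64 events occurred between the two reads. */
static void event_update(uint64_t new_raw)
{
	uint64_t delta = new_raw - prev_count;

	prev_count = new_raw;
	event_count += delta;
}

int main(void)
{
	prev_count = UINT64_MAX - 5;	/* counter just before wrapping */
	event_update(4);		/* raw value read after the wrap */
	printf("accumulated %llu events\n",	/* prints 10 */
	       (unsigned long long)event_count);
	return 0;
}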
+ */ + if (handled == IRQ_HANDLED) + irq_work_run(); + + return handled; +} + +static int loongarch_pmu_event_init(struct perf_event *event) +{ + int r, irq; + unsigned long flags; + + /* does not support taken branch sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + + switch (event->attr.type) { + case PERF_TYPE_RAW: + case PERF_TYPE_HARDWARE: + case PERF_TYPE_HW_CACHE: + break; + + default: + /* Init it to avoid false validate_group */ + event->hw.event_base = 0xffffffff; + return -ENOENT; + } + + if (event->cpu >= 0 && !cpu_online(event->cpu)) + return -ENODEV; + + irq = get_pmc_irq(); + flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_NO_SUSPEND | IRQF_SHARED; + if (!atomic_inc_not_zero(&active_events)) { + mutex_lock(&pmu_reserve_mutex); + if (atomic_read(&active_events) == 0) { + r = request_irq(irq, pmu_handle_irq, flags, "Perf_PMU", &loongarch_pmu); + if (r < 0) { + mutex_unlock(&pmu_reserve_mutex); + pr_warn("PMU IRQ request failed\n"); + return -ENODEV; + } + } + atomic_inc(&active_events); + mutex_unlock(&pmu_reserve_mutex); + } + + return __hw_perf_event_init(event); +} + +static struct pmu pmu = { + .pmu_enable = loongarch_pmu_enable, + .pmu_disable = loongarch_pmu_disable, + .event_init = loongarch_pmu_event_init, + .add = loongarch_pmu_add, + .del = loongarch_pmu_del, + .start = loongarch_pmu_start, + .stop = loongarch_pmu_stop, + .read = loongarch_pmu_read, +}; + +static unsigned int loongarch_pmu_perf_event_encode(const struct loongarch_perf_event *pev) +{ + return (pev->event_id & 0xff); +} + +static const struct loongarch_perf_event *loongarch_pmu_map_general_event(int idx) +{ + const struct loongarch_perf_event *pev; + + pev = &(*loongarch_pmu.general_event_map)[idx]; + + if (pev->event_id == HW_OP_UNSUPPORTED) + return ERR_PTR(-ENOENT); + + return pev; +} + +static const struct loongarch_perf_event *loongarch_pmu_map_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result; + const struct loongarch_perf_event *pev; + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return ERR_PTR(-EINVAL); + + cache_op = (config >> 8) & 0xff; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return ERR_PTR(-EINVAL); + + cache_result = (config >> 16) & 0xff; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return ERR_PTR(-EINVAL); + + pev = &((*loongarch_pmu.cache_event_map) + [cache_type] + [cache_op] + [cache_result]); + + if (pev->event_id == CACHE_OP_UNSUPPORTED) + return ERR_PTR(-ENOENT); + + return pev; +} + +static int validate_group(struct perf_event *event) +{ + struct cpu_hw_events fake_cpuc; + struct perf_event *sibling, *leader = event->group_leader; + + memset(&fake_cpuc, 0, sizeof(fake_cpuc)); + + if (loongarch_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0) + return -EINVAL; + + for_each_sibling_event(sibling, leader) { + if (loongarch_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0) + return -EINVAL; + } + + if (loongarch_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0) + return -EINVAL; + + return 0; +} + +static void reset_counters(void *arg) +{ + int n; + int counters = loongarch_pmu.num_counters; + + for (n = 0; n < counters; n++) { + loongarch_pmu_write_control(n, 0); + loongarch_pmu.write_counter(n, 0); + } +} + +static const struct loongarch_perf_event loongson_event_map[PERF_COUNT_HW_MAX] = { + PERF_MAP_ALL_UNSUPPORTED, + [PERF_COUNT_HW_CPU_CYCLES] = { 0x00 }, + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01 }, + [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x08 }, + 
[PERF_COUNT_HW_CACHE_MISSES] = { 0x09 }, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02 }, + [PERF_COUNT_HW_BRANCH_MISSES] = { 0x03 }, +}; + +static const struct loongarch_perf_event loongson_cache_map + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { +PERF_CACHE_MAP_ALL_UNSUPPORTED, +[C(L1D)] = { + /* + * Like some other architectures (e.g. ARM), the performance + * counters don't differentiate between read and write + * accesses/misses, so this isn't strictly correct, but it's the + * best we can do. Writes and reads get combined. + */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x8 }, + [C(RESULT_MISS)] = { 0x9 }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x8 }, + [C(RESULT_MISS)] = { 0x9 }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { 0xaa }, + [C(RESULT_MISS)] = { 0xa9 }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x6 }, + [C(RESULT_MISS)] = { 0x7 }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0xc }, + [C(RESULT_MISS)] = { 0xd }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0xc }, + [C(RESULT_MISS)] = { 0xd }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_MISS)] = { 0x3b }, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x4 }, + [C(RESULT_MISS)] = { 0x3c }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x4 }, + [C(RESULT_MISS)] = { 0x3c }, + }, +}, +[C(BPU)] = { + /* Using the same code for *HW_BRANCH* */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x02 }, + [C(RESULT_MISS)] = { 0x03 }, + }, +}, +}; + +static int __hw_perf_event_init(struct perf_event *event) +{ + int err; + struct hw_perf_event *hwc = &event->hw; + struct perf_event_attr *attr = &event->attr; + const struct loongarch_perf_event *pev; + + /* Returning LoongArch event descriptor for generic perf event. */ + if (PERF_TYPE_HARDWARE == event->attr.type) { + if (event->attr.config >= PERF_COUNT_HW_MAX) + return -EINVAL; + pev = loongarch_pmu_map_general_event(event->attr.config); + } else if (PERF_TYPE_HW_CACHE == event->attr.type) { + pev = loongarch_pmu_map_cache_event(event->attr.config); + } else if (PERF_TYPE_RAW == event->attr.type) { + /* We are working on the global raw event. */ + mutex_lock(&raw_event_mutex); + pev = loongarch_pmu.map_raw_event(event->attr.config); + } else { + /* The event type is not (yet) supported. */ + return -EOPNOTSUPP; + } + + if (IS_ERR(pev)) { + if (PERF_TYPE_RAW == event->attr.type) + mutex_unlock(&raw_event_mutex); + return PTR_ERR(pev); + } + + /* + * We allow max flexibility on how each individual counter shared + * by the single CPU operates (the mode exclusion and the range). + */ + hwc->config_base = CSR_PERFCTRL_IE; + + hwc->event_base = loongarch_pmu_perf_event_encode(pev); + if (PERF_TYPE_RAW == event->attr.type) + mutex_unlock(&raw_event_mutex); + + if (!attr->exclude_user) { + hwc->config_base |= CSR_PERFCTRL_PLV3; + hwc->config_base |= CSR_PERFCTRL_PLV2; + } + if (!attr->exclude_kernel) { + hwc->config_base |= CSR_PERFCTRL_PLV0; + } + if (!attr->exclude_hv) { + hwc->config_base |= CSR_PERFCTRL_PLV1; + } + + hwc->config_base &= M_PERFCTL_CONFIG_MASK; + /* + * The event can belong to another cpu. We do not assign a local + * counter for it for now. 
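loongarch_pmu_map_cache_event() above unpacks attr.config into three one-byte fields, the standard perf encoding for PERF_TYPE_HW_CACHE events. A short decode sketch (enum values taken from the perf UAPI):

#include <stdint.h>
#include <stdio.h>

/* attr.config layout for PERF_TYPE_HW_CACHE events:
 * bits 0-7 cache id, bits 8-15 op id, bits 16-23 result id. */
static void decode_cache_config(uint64_t config)
{
	unsigned int type   = (config >> 0)  & 0xff;
	unsigned int op     = (config >> 8)  & 0xff;
	unsigned int result = (config >> 16) & 0xff;

	printf("type %u, op %u, result %u\n", type, op, result);
}

int main(void)
{
	/* L1D read misses: L1D = 0, OP_READ = 0, RESULT_MISS = 1. */
	decode_cache_config(0 | (0 << 8) | (1UL << 16));
	return 0;
}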
+ */ + hwc->idx = -1; + hwc->config = 0; + + if (!hwc->sample_period) { + hwc->sample_period = loongarch_pmu.max_period; + hwc->last_period = hwc->sample_period; + local64_set(&hwc->period_left, hwc->sample_period); + } + + err = 0; + if (event->group_leader != event) + err = validate_group(event); + + event->destroy = hw_perf_event_destroy; + + if (err) + event->destroy(event); + + return err; +} + +static void pause_local_counters(void) +{ + unsigned long flags; + int ctr = loongarch_pmu.num_counters; + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + local_irq_save(flags); + do { + ctr--; + cpuc->saved_ctrl[ctr] = loongarch_pmu_read_control(ctr); + loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] & + ~M_PERFCTL_COUNT_EVENT_WHENEVER); + } while (ctr > 0); + local_irq_restore(flags); +} + +static void resume_local_counters(void) +{ + int ctr = loongarch_pmu.num_counters; + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + do { + ctr--; + loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]); + } while (ctr > 0); +} + +static const struct loongarch_perf_event *loongarch_pmu_map_raw_event(u64 config) +{ + raw_event.event_id = config & 0xff; + + return &raw_event; +} + +static int __init init_hw_perf_events(void) +{ + int counters; + + if (!cpu_has_pmp) + return -ENODEV; + + pr_info("Performance counters: "); + counters = ((read_cpucfg(LOONGARCH_CPUCFG6) & CPUCFG6_PMNUM) >> 4) + 1; + + loongarch_pmu.num_counters = counters; + loongarch_pmu.max_period = (1ULL << 63) - 1; + loongarch_pmu.valid_count = (1ULL << 63) - 1; + loongarch_pmu.overflow = 1ULL << 63; + loongarch_pmu.name = "loongarch/loongson64"; + loongarch_pmu.read_counter = loongarch_pmu_read_counter; + loongarch_pmu.write_counter = loongarch_pmu_write_counter; + loongarch_pmu.map_raw_event = loongarch_pmu_map_raw_event; + loongarch_pmu.general_event_map = &loongson_event_map; + loongarch_pmu.cache_event_map = &loongson_cache_map; + + on_each_cpu(reset_counters, NULL, 1); + + pr_cont("%s PMU enabled, %d %d-bit counters available to each CPU.\n", + loongarch_pmu.name, counters, 64); + + perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); + + return 0; +} +early_initcall(init_hw_perf_events); diff --git a/arch/loongarch/kernel/perf_regs.c b/arch/loongarch/kernel/perf_regs.c new file mode 100644 index 000000000000..263ac4ab5af6 --- /dev/null +++ b/arch/loongarch/kernel/perf_regs.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Loongson Technology Corporation Limited + * + * Derived from MIPS: + * Copyright (C) 2013 Cavium, Inc. 
+ */ + +#include <linux/perf_event.h> + +#include <asm/ptrace.h> + +#ifdef CONFIG_32BIT +u64 perf_reg_abi(struct task_struct *tsk) +{ + return PERF_SAMPLE_REGS_ABI_32; +} +#else /* Must be CONFIG_64BIT */ +u64 perf_reg_abi(struct task_struct *tsk) +{ + if (test_tsk_thread_flag(tsk, TIF_32BIT_REGS)) + return PERF_SAMPLE_REGS_ABI_32; + else + return PERF_SAMPLE_REGS_ABI_64; +} +#endif /* CONFIG_32BIT */ + +int perf_reg_validate(u64 mask) +{ + if (!mask) + return -EINVAL; + if (mask & ~((1ull << PERF_REG_LOONGARCH_MAX) - 1)) + return -EINVAL; + return 0; +} + +u64 perf_reg_value(struct pt_regs *regs, int idx) +{ + if (WARN_ON_ONCE((u32)idx >= PERF_REG_LOONGARCH_MAX)) + return 0; + + if ((u32)idx == PERF_REG_LOONGARCH_PC) + return regs->csr_era; + + return regs->regs[idx]; +} + +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs) +{ + regs_user->regs = task_pt_regs(current); + regs_user->abi = perf_reg_abi(current); +} diff --git a/arch/loongarch/kernel/relocate_kernel.S b/arch/loongarch/kernel/relocate_kernel.S new file mode 100644 index 000000000000..d13252553a7c --- /dev/null +++ b/arch/loongarch/kernel/relocate_kernel.S @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * relocate_kernel.S for kexec + * + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ + +#include <linux/kexec.h> + +#include <asm/asm.h> +#include <asm/asmmacro.h> +#include <asm/regdef.h> +#include <asm/loongarch.h> +#include <asm/stackframe.h> +#include <asm/addrspace.h> + +SYM_CODE_START(relocate_new_kernel) + /* + * a0: EFI boot flag for the new kernel + * a1: Command line pointer for the new kernel + * a2: System table pointer for the new kernel + * a3: Start address to jump to after relocation + * a4: Pointer to the current indirection page entry + */ + move s0, a4 + + /* + * In case of a kdump/crash kernel, the indirection page is not + * populated as the kernel is directly copied to a reserved location + */ + beqz s0, done + +process_entry: + PTR_L s1, s0, 0 + PTR_ADDI s0, s0, SZREG + + /* destination page */ + andi s2, s1, IND_DESTINATION + beqz s2, 1f + li.w t0, ~0x1 + and s3, s1, t0 /* store destination addr in s3 */ + b process_entry + +1: + /* indirection page, update s0 */ + andi s2, s1, IND_INDIRECTION + beqz s2, 1f + li.w t0, ~0x2 + and s0, s1, t0 + b process_entry + +1: + /* done page */ + andi s2, s1, IND_DONE + beqz s2, 1f + b done + +1: + /* source page */ + andi s2, s1, IND_SOURCE + beqz s2, process_entry + li.w t0, ~0x8 + and s1, s1, t0 + li.w s5, (1 << _PAGE_SHIFT) / SZREG + +copy_word: + /* copy page word by word */ + REG_L s4, s1, 0 + REG_S s4, s3, 0 + PTR_ADDI s3, s3, SZREG + PTR_ADDI s1, s1, SZREG + LONG_ADDI s5, s5, -1 + beqz s5, process_entry + b copy_word + b process_entry + +done: + ibar 0 + dbar 0 + + /* + * Jump to the new kernel, + * make sure the values of a0, a1, a2 and a3 are not changed. + */ + jr a3 +SYM_CODE_END(relocate_new_kernel) + +#ifdef CONFIG_SMP +/* + * Other CPUs should wait until code is relocated and + * then start at the entry point from LOONGARCH_IOCSR_MBUF0. 
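perf_reg_validate() above accepts a sample-regs mask only when it is non-zero and confined to bits below PERF_REG_LOONGARCH_MAX, i.e. 32 registers (PC plus R1-R31). The same check in isolation:

#include <stdint.h>
#include <stdio.h>

#define NUM_REGS 32	/* PERF_REG_LOONGARCH_MAX: PC and R1..R31 */

/* A register sample mask is valid iff it is non-zero and only bits
 * 0..NUM_REGS-1 are set, exactly as perf_reg_validate() tests. */
static int reg_mask_valid(uint64_t mask)
{
	if (!mask)
		return 0;
	return !(mask & ~((1ULL << NUM_REGS) - 1));
}

int main(void)
{
	printf("%d %d %d\n",
	       reg_mask_valid(0),		/* 0: empty mask */
	       reg_mask_valid(0x7),		/* 1: PC, R1, R2 */
	       reg_mask_valid(1ULL << 40));	/* 0: out of range */
	return 0;
}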
+ */ +SYM_CODE_START(kexec_smp_wait) +1: li.w t0, 0x100 /* wait for init loop */ +2: addi.w t0, t0, -1 /* limit mailbox access */ + bnez t0, 2b + li.w t1, LOONGARCH_IOCSR_MBUF0 + iocsrrd.w s0, t1 /* check PC as an indicator */ + beqz s0, 1b + iocsrrd.d s0, t1 /* get PC via mailbox */ + + li.d t0, CACHE_BASE + or s0, s0, t0 /* s0 = TO_CACHE(s0) */ + jr s0 /* jump to initial PC */ +SYM_CODE_END(kexec_smp_wait) +#endif + +relocate_new_kernel_end: + +SYM_DATA_START(relocate_new_kernel_size) + PTR relocate_new_kernel_end - relocate_new_kernel +SYM_DATA_END(relocate_new_kernel_size) diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 5b49c78c23f4..1eb63fa9bc81 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -19,6 +19,8 @@ #include <linux/memblock.h> #include <linux/initrd.h> #include <linux/ioport.h> +#include <linux/kexec.h> +#include <linux/crash_dump.h> #include <linux/root_dev.h> #include <linux/console.h> #include <linux/pfn.h> @@ -185,8 +187,70 @@ static int __init early_parse_mem(char *p) } early_param("mem", early_parse_mem); +static void __init arch_reserve_vmcore(void) +{ +#ifdef CONFIG_PROC_VMCORE + u64 i; + phys_addr_t start, end; + + if (!is_kdump_kernel()) + return; + + if (!elfcorehdr_size) { + for_each_mem_range(i, &start, &end) { + if (elfcorehdr_addr >= start && elfcorehdr_addr < end) { + /* + * Reserve from the elf core header to the end of + * the memory segment, that should all be kdump + * reserved memory. + */ + elfcorehdr_size = end - elfcorehdr_addr; + break; + } + } + } + + if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) { + pr_warn("elfcorehdr is overlapped\n"); + return; + } + + memblock_reserve(elfcorehdr_addr, elfcorehdr_size); + + pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n", + elfcorehdr_size >> 10, elfcorehdr_addr); +#endif +} + +static void __init arch_parse_crashkernel(void) +{ +#ifdef CONFIG_KEXEC + int ret; + unsigned long long start; + unsigned long long total_mem; + unsigned long long crash_base, crash_size; + + total_mem = memblock_phys_mem_size(); + ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base); + if (ret < 0 || crash_size <= 0) + return; + + start = memblock_phys_alloc_range(crash_size, 1, crash_base, crash_base + crash_size); + if (start != crash_base) { + pr_warn("Invalid memory region reserved for crash kernel\n"); + return; + } + + crashk_res.start = crash_base; + crashk_res.end = crash_base + crash_size - 1; +#endif +} + void __init platform_init(void) { + arch_reserve_vmcore(); + arch_parse_crashkernel(); + #ifdef CONFIG_ACPI_TABLE_UPGRADE acpi_table_upgrade(); #endif @@ -289,6 +353,15 @@ static void __init resource_init(void) request_resource(res, &data_resource); request_resource(res, &bss_resource); } + +#ifdef CONFIG_KEXEC + if (crashk_res.start < crashk_res.end) { + insert_resource(&iomem_resource, &crashk_res); + pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n", + (unsigned long)((crashk_res.end - crashk_res.start + 1) >> 20), + (unsigned long)(crashk_res.start >> 20)); + } +#endif } static int __init reserve_memblock_reserved_regions(void) @@ -348,10 +421,11 @@ void __init setup_arch(char **cmdline_p) init_environ(); efi_init(); memblock_init(); + pagetable_init(); parse_early_param(); + reserve_initrd_mem(); platform_init(); - pagetable_init(); arch_mem_init(cmdline_p); resource_init(); diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index b5fab308dcf2..781a4d4bdddc 
100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -240,11 +240,6 @@ void loongson3_smp_finish(void) #ifdef CONFIG_HOTPLUG_CPU -static bool io_master(int cpu) -{ - return test_bit(cpu, &loongson_sysconf.cores_io_master); -} - int loongson3_cpu_disable(void) { unsigned long flags; diff --git a/arch/loongarch/kernel/sysrq.c b/arch/loongarch/kernel/sysrq.c new file mode 100644 index 000000000000..366baef72d29 --- /dev/null +++ b/arch/loongarch/kernel/sysrq.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * LoongArch specific sysrq operations. + * + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#include <linux/init.h> +#include <linux/smp.h> +#include <linux/spinlock.h> +#include <linux/sysrq.h> +#include <linux/workqueue.h> + +#include <asm/cpu-features.h> +#include <asm/tlb.h> + +/* + * Dump TLB entries on all CPUs. + */ + +static DEFINE_SPINLOCK(show_lock); + +static void sysrq_tlbdump_single(void *dummy) +{ + unsigned long flags; + + spin_lock_irqsave(&show_lock, flags); + + pr_info("CPU%d:\n", smp_processor_id()); + dump_tlb_regs(); + pr_info("\n"); + dump_tlb_all(); + pr_info("\n"); + + spin_unlock_irqrestore(&show_lock, flags); +} + +#ifdef CONFIG_SMP +static void sysrq_tlbdump_othercpus(struct work_struct *dummy) +{ + smp_call_function(sysrq_tlbdump_single, NULL, 0); +} + +static DECLARE_WORK(sysrq_tlbdump, sysrq_tlbdump_othercpus); +#endif + +static void sysrq_handle_tlbdump(int key) +{ + sysrq_tlbdump_single(NULL); +#ifdef CONFIG_SMP + schedule_work(&sysrq_tlbdump); +#endif +} + +static struct sysrq_key_op sysrq_tlbdump_op = { + .handler = sysrq_handle_tlbdump, + .help_msg = "show-tlbs(x)", + .action_msg = "Show TLB entries", + .enable_mask = SYSRQ_ENABLE_DUMP, +}; + +static int __init loongarch_sysrq_init(void) +{ + return register_sysrq_key('x', &sysrq_tlbdump_op); +} +arch_initcall(loongarch_sysrq_init); diff --git a/arch/loongarch/kernel/topology.c b/arch/loongarch/kernel/topology.c index ab1a75c0b5a6..caa7cd859078 100644 --- a/arch/loongarch/kernel/topology.c +++ b/arch/loongarch/kernel/topology.c @@ -5,6 +5,7 @@ #include <linux/node.h> #include <linux/nodemask.h> #include <linux/percpu.h> +#include <asm/bootinfo.h> static DEFINE_PER_CPU(struct cpu, cpu_devices); @@ -40,7 +41,7 @@ static int __init topology_init(void) for_each_present_cpu(i) { struct cpu *c = &per_cpu(cpu_devices, i); - c->hotpluggable = !!i; + c->hotpluggable = !io_master(i); ret = register_cpu(c, i); if (ret < 0) pr_warn("topology_init: register_cpu %d failed (%d)\n", i, ret); diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c index 5010e95cef84..1a4dce84ebc6 100644 --- a/arch/loongarch/kernel/traps.c +++ b/arch/loongarch/kernel/traps.c @@ -10,6 +10,7 @@ #include <linux/entry-common.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/kexec.h> #include <linux/module.h> #include <linux/extable.h> #include <linux/mm.h> @@ -246,6 +247,9 @@ void __noreturn die(const char *str, struct pt_regs *regs) oops_exit(); + if (regs && kexec_should_crash(current)) + crash_kexec(regs); + if (in_interrupt()) panic("Fatal exception in interrupt"); @@ -374,6 +378,29 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs) irqentry_exit(regs, state); } +#ifdef CONFIG_GENERIC_BUG +int is_valid_bugaddr(unsigned long addr) +{ + return 1; +} +#endif /* CONFIG_GENERIC_BUG */ + +static void bug_handler(struct pt_regs *regs) +{ + switch (report_bug(regs->csr_era, regs)) { + case BUG_TRAP_TYPE_BUG: + case BUG_TRAP_TYPE_NONE: + 
die_if_kernel("Oops - BUG", regs); + force_sig(SIGTRAP); + break; + + case BUG_TRAP_TYPE_WARN: + /* Skip the BUG instruction and continue */ + regs->csr_era += LOONGARCH_INSN_SIZE; + break; + } +} + asmlinkage void noinstr do_bp(struct pt_regs *regs) { bool user = user_mode(regs); @@ -427,8 +454,7 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs) switch (bcode) { case BRK_BUG: - die_if_kernel("Kernel bug detected", regs); - force_sig(SIGTRAP); + bug_handler(regs); break; case BRK_DIVZERO: die_if_kernel("Break instruction in kernel code", regs); @@ -620,9 +646,6 @@ asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp) irqentry_exit(regs, state); } -extern void tlb_init(int cpu); -extern void cache_error_setup(void); - unsigned long eentry; unsigned long tlbrentry; diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S index e5890bec2bf6..b3309a5e695b 100644 --- a/arch/loongarch/kernel/vmlinux.lds.S +++ b/arch/loongarch/kernel/vmlinux.lds.S @@ -55,6 +55,10 @@ SECTIONS EXCEPTION_TABLE(16) + .got : ALIGN(16) { *(.got) } + .plt : ALIGN(16) { *(.plt) } + .got.plt : ALIGN(16) { *(.got.plt) } + . = ALIGN(PECOFF_SEGMENT_ALIGN); __init_begin = .; __inittext_begin = .; diff --git a/arch/loongarch/mm/cache.c b/arch/loongarch/mm/cache.c index e8c68dcf6ab2..72685a48eaf0 100644 --- a/arch/loongarch/mm/cache.c +++ b/arch/loongarch/mm/cache.c @@ -6,8 +6,8 @@ * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle ([email protected]) * Copyright (C) 2007 MIPS Technologies, Inc. */ +#include <linux/cacheinfo.h> #include <linux/export.h> -#include <linux/fcntl.h> #include <linux/fs.h> #include <linux/highmem.h> #include <linux/kernel.h> @@ -16,14 +16,21 @@ #include <linux/sched.h> #include <linux/syscalls.h> +#include <asm/bootinfo.h> #include <asm/cacheflush.h> #include <asm/cpu.h> #include <asm/cpu-features.h> -#include <asm/dma.h> #include <asm/loongarch.h> +#include <asm/numa.h> #include <asm/processor.h> #include <asm/setup.h> +void cache_error_setup(void) +{ + extern char __weak except_vec_cex; + set_merr_handler(0x0, &except_vec_cex, 0x80); +} + /* * LoongArch maintains ICache/DCache coherency by hardware, * we just need "ibar" to avoid instruction hazard here. 
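bug_handler() in the traps.c hunk above recovers from WARN-class traps by stepping csr_era past the 4-byte break instruction, while real BUGs remain fatal. A user-space model of that dispatch (stand-in enum values; the kernel's report_bug() supplies the actual classification):

#include <stdio.h>

enum bug_trap_type { BUG_TRAP_TYPE_NONE, BUG_TRAP_TYPE_WARN, BUG_TRAP_TYPE_BUG };

#define INSN_SIZE 4	/* LOONGARCH_INSN_SIZE: all instructions are 4 bytes */

/* Return the exception return address to resume at: a WARN skips the
 * break instruction and continues, anything else would die in-kernel. */
static unsigned long handle_bug(enum bug_trap_type type, unsigned long era)
{
	switch (type) {
	case BUG_TRAP_TYPE_WARN:
		return era + INSN_SIZE;
	case BUG_TRAP_TYPE_BUG:
	case BUG_TRAP_TYPE_NONE:
	default:
		printf("Oops - BUG at %#lx\n", era);
		return era;	/* the kernel calls die_if_kernel() here */
	}
}

int main(void)
{
	printf("resume at %#lx\n", handle_bug(BUG_TRAP_TYPE_WARN, 0x1000));
	return 0;
}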
@@ -34,109 +41,121 @@ void local_flush_icache_range(unsigned long start, unsigned long end) } EXPORT_SYMBOL(local_flush_icache_range); -void cache_error_setup(void) -{ - extern char __weak except_vec_cex; - set_merr_handler(0x0, &except_vec_cex, 0x80); -} - -static unsigned long icache_size __read_mostly; -static unsigned long dcache_size __read_mostly; -static unsigned long vcache_size __read_mostly; -static unsigned long scache_size __read_mostly; - -static char *way_string[] = { NULL, "direct mapped", "2-way", - "3-way", "4-way", "5-way", "6-way", "7-way", "8-way", - "9-way", "10-way", "11-way", "12-way", - "13-way", "14-way", "15-way", "16-way", -}; - -static void probe_pcache(void) +static void flush_cache_leaf(unsigned int leaf) { - struct cpuinfo_loongarch *c = ¤t_cpu_data; - unsigned int lsize, sets, ways; - unsigned int config; - - config = read_cpucfg(LOONGARCH_CPUCFG17); - lsize = 1 << ((config & CPUCFG17_L1I_SIZE_M) >> CPUCFG17_L1I_SIZE); - sets = 1 << ((config & CPUCFG17_L1I_SETS_M) >> CPUCFG17_L1I_SETS); - ways = ((config & CPUCFG17_L1I_WAYS_M) >> CPUCFG17_L1I_WAYS) + 1; - - c->icache.linesz = lsize; - c->icache.sets = sets; - c->icache.ways = ways; - icache_size = sets * ways * lsize; - c->icache.waysize = icache_size / c->icache.ways; - - config = read_cpucfg(LOONGARCH_CPUCFG18); - lsize = 1 << ((config & CPUCFG18_L1D_SIZE_M) >> CPUCFG18_L1D_SIZE); - sets = 1 << ((config & CPUCFG18_L1D_SETS_M) >> CPUCFG18_L1D_SETS); - ways = ((config & CPUCFG18_L1D_WAYS_M) >> CPUCFG18_L1D_WAYS) + 1; - - c->dcache.linesz = lsize; - c->dcache.sets = sets; - c->dcache.ways = ways; - dcache_size = sets * ways * lsize; - c->dcache.waysize = dcache_size / c->dcache.ways; - - c->options |= LOONGARCH_CPU_PREFETCH; - - pr_info("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n", - icache_size >> 10, way_string[c->icache.ways], "VIPT", c->icache.linesz); - - pr_info("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n", - dcache_size >> 10, way_string[c->dcache.ways], "VIPT", "no aliases", c->dcache.linesz); + int i, j, nr_nodes; + uint64_t addr = CSR_DMW0_BASE; + struct cache_desc *cdesc = current_cpu_data.cache_leaves + leaf; + + nr_nodes = cache_private(cdesc) ? 
1 : loongson_sysconf.nr_nodes; + + do { + for (i = 0; i < cdesc->sets; i++) { + for (j = 0; j < cdesc->ways; j++) { + flush_cache_line(leaf, addr); + addr++; + } + + addr -= cdesc->ways; + addr += cdesc->linesz; + } + addr += (1ULL << NODE_ADDRSPACE_SHIFT); + } while (--nr_nodes > 0); } -static void probe_vcache(void) +asmlinkage __visible void __flush_cache_all(void) { - struct cpuinfo_loongarch *c = ¤t_cpu_data; - unsigned int lsize, sets, ways; - unsigned int config; - - config = read_cpucfg(LOONGARCH_CPUCFG19); - lsize = 1 << ((config & CPUCFG19_L2_SIZE_M) >> CPUCFG19_L2_SIZE); - sets = 1 << ((config & CPUCFG19_L2_SETS_M) >> CPUCFG19_L2_SETS); - ways = ((config & CPUCFG19_L2_WAYS_M) >> CPUCFG19_L2_WAYS) + 1; - - c->vcache.linesz = lsize; - c->vcache.sets = sets; - c->vcache.ways = ways; - vcache_size = lsize * sets * ways; - c->vcache.waysize = vcache_size / c->vcache.ways; - - pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n", - vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz); + int leaf; + struct cache_desc *cdesc = current_cpu_data.cache_leaves; + unsigned int cache_present = current_cpu_data.cache_leaves_present; + + leaf = cache_present - 1; + if (cache_inclusive(cdesc + leaf)) { + flush_cache_leaf(leaf); + return; + } + + for (leaf = 0; leaf < cache_present; leaf++) + flush_cache_leaf(leaf); } -static void probe_scache(void) -{ - struct cpuinfo_loongarch *c = ¤t_cpu_data; - unsigned int lsize, sets, ways; - unsigned int config; - - config = read_cpucfg(LOONGARCH_CPUCFG20); - lsize = 1 << ((config & CPUCFG20_L3_SIZE_M) >> CPUCFG20_L3_SIZE); - sets = 1 << ((config & CPUCFG20_L3_SETS_M) >> CPUCFG20_L3_SETS); - ways = ((config & CPUCFG20_L3_WAYS_M) >> CPUCFG20_L3_WAYS) + 1; - - c->scache.linesz = lsize; - c->scache.sets = sets; - c->scache.ways = ways; - /* 4 cores. 
scaches are shared */ - scache_size = lsize * sets * ways; - c->scache.waysize = scache_size / c->scache.ways; - - pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n", - scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); -} +#define L1IUPRE (1 << 0) +#define L1IUUNIFY (1 << 1) +#define L1DPRE (1 << 2) + +#define LXIUPRE (1 << 0) +#define LXIUUNIFY (1 << 1) +#define LXIUPRIV (1 << 2) +#define LXIUINCL (1 << 3) +#define LXDPRE (1 << 4) +#define LXDPRIV (1 << 5) +#define LXDINCL (1 << 6) + +#define populate_cache_properties(cfg0, cdesc, level, leaf) \ +do { \ + unsigned int cfg1; \ + \ + cfg1 = read_cpucfg(LOONGARCH_CPUCFG17 + leaf); \ + if (level == 1) { \ + cdesc->flags |= CACHE_PRIVATE; \ + } else { \ + if (cfg0 & LXIUPRIV) \ + cdesc->flags |= CACHE_PRIVATE; \ + if (cfg0 & LXIUINCL) \ + cdesc->flags |= CACHE_INCLUSIVE; \ + } \ + cdesc->level = level; \ + cdesc->flags |= CACHE_PRESENT; \ + cdesc->ways = ((cfg1 & CPUCFG_CACHE_WAYS_M) >> CPUCFG_CACHE_WAYS) + 1; \ + cdesc->sets = 1 << ((cfg1 & CPUCFG_CACHE_SETS_M) >> CPUCFG_CACHE_SETS); \ + cdesc->linesz = 1 << ((cfg1 & CPUCFG_CACHE_LSIZE_M) >> CPUCFG_CACHE_LSIZE); \ + cdesc++; leaf++; \ +} while (0) void cpu_cache_init(void) { - probe_pcache(); - probe_vcache(); - probe_scache(); - + unsigned int leaf = 0, level = 1; + unsigned int config = read_cpucfg(LOONGARCH_CPUCFG16); + struct cache_desc *cdesc = current_cpu_data.cache_leaves; + + if (config & L1IUPRE) { + if (config & L1IUUNIFY) + cdesc->type = CACHE_TYPE_UNIFIED; + else + cdesc->type = CACHE_TYPE_INST; + populate_cache_properties(config, cdesc, level, leaf); + } + + if (config & L1DPRE) { + cdesc->type = CACHE_TYPE_DATA; + populate_cache_properties(config, cdesc, level, leaf); + } + + config = config >> 3; + for (level = 2; level <= CACHE_LEVEL_MAX; level++) { + if (!config) + break; + + if (config & LXIUPRE) { + if (config & LXIUUNIFY) + cdesc->type = CACHE_TYPE_UNIFIED; + else + cdesc->type = CACHE_TYPE_INST; + populate_cache_properties(config, cdesc, level, leaf); + } + + if (config & LXDPRE) { + cdesc->type = CACHE_TYPE_DATA; + populate_cache_properties(config, cdesc, level, leaf); + } + + config = config >> 7; + } + + BUG_ON(leaf > CACHE_LEAVES_MAX); + + current_cpu_data.cache_leaves_present = leaf; + current_cpu_data.options |= LOONGARCH_CPU_PREFETCH; shm_align_mask = PAGE_SIZE - 1; } diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c index 0532ed5ba43d..080061793c85 100644 --- a/arch/loongarch/mm/init.c +++ b/arch/loongarch/mm/init.c @@ -152,6 +152,70 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); #endif #endif +static pte_t *fixmap_pte(unsigned long addr) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + + pgd = pgd_offset_k(addr); + p4d = p4d_offset(pgd, addr); + + if (pgd_none(*pgd)) { + pud_t *new __maybe_unused; + + new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + pgd_populate(&init_mm, pgd, new); +#ifndef __PAGETABLE_PUD_FOLDED + pud_init((unsigned long)new, (unsigned long)invalid_pmd_table); +#endif + } + + pud = pud_offset(p4d, addr); + if (pud_none(*pud)) { + pmd_t *new __maybe_unused; + + new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + pud_populate(&init_mm, pud, new); +#ifndef __PAGETABLE_PMD_FOLDED + pmd_init((unsigned long)new, (unsigned long)invalid_pte_table); +#endif + } + + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) { + pte_t *new __maybe_unused; + + new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + pmd_populate_kernel(&init_mm, pmd, new); + } + + return pte_offset_kernel(pmd, addr); +} + +void 
__init __set_fixmap(enum fixed_addresses idx,
+			phys_addr_t phys, pgprot_t flags)
+{
+	unsigned long addr = __fix_to_virt(idx);
+	pte_t *ptep;
+
+	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
+
+	ptep = fixmap_pte(addr);
+	if (!pte_none(*ptep)) {
+		pte_ERROR(*ptep);
+		return;
+	}
+
+	if (pgprot_val(flags))
+		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
+	else {
+		pte_clear(&init_mm, addr, ptep);
+		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+	}
+}
+
 /*
  * Align swapper_pg_dir in to 64K, allows its address to be loaded
  * with a single LUI instruction in the TLB handlers. If we used
diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c
index 381a569635a9..fbe1a4856fc4 100644
--- a/arch/loongarch/mm/mmap.c
+++ b/arch/loongarch/mm/mmap.c
@@ -3,6 +3,8 @@
  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  */
 #include <linux/export.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
@@ -116,3 +118,30 @@ int __virt_addr_valid(volatile void *kaddr)
 	return pfn_valid(PFN_DOWN(PHYSADDR(kaddr)));
 }
 EXPORT_SYMBOL_GPL(__virt_addr_valid);
+
+/*
+ * You really shouldn't be using read() or write() on /dev/mem. This might go
+ * away in the future.
+ */
+int valid_phys_addr_range(phys_addr_t addr, size_t size)
+{
+	/*
+	 * Check whether addr is covered by a memory region without the
+	 * MEMBLOCK_NOMAP attribute, and whether that region covers the
+	 * entire range. In theory, this could lead to false negatives
+	 * if the range is covered by distinct but adjacent memory regions
+	 * that only differ in other attributes. However, few such
+	 * attributes have been defined, and it is debatable whether it
+	 * follows that /dev/mem read() calls should be able to traverse
+	 * such boundaries.
+	 */
+	return memblock_is_region_memory(addr, size) && memblock_is_map_memory(addr);
+}
+
+/*
+ * Do not allow /dev/mem mappings beyond the supported physical range.
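+ *
+ * (Illustrative numbers, not part of the patch: with cpu_pabits == 47,
+ * GENMASK_ULL(47, 0) keeps bits 47..0, so the check below rejects any
+ * mapping whose end address (pfn << PAGE_SHIFT) + size sets bit 48 or
+ * above.)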
+ */ +int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) +{ + return !(((pfn << PAGE_SHIFT) + size) & ~(GENMASK_ULL(cpu_pabits, 0))); +} diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c index 9818ce11546b..da3681f131c8 100644 --- a/arch/loongarch/mm/tlb.c +++ b/arch/loongarch/mm/tlb.c @@ -258,7 +258,7 @@ extern long exception_handlers[VECSIZE * 128 / sizeof(long)]; void setup_tlb_handler(int cpu) { setup_ptwalker(); - output_pgtable_bits_defines(); + local_flush_tlb_all(); /* The tlb handlers are generated only once */ if (cpu == 0) { @@ -301,6 +301,7 @@ void tlb_init(int cpu) write_csr_pagesize(PS_DEFAULT_SIZE); write_csr_stlbpgsize(PS_DEFAULT_SIZE); write_csr_tlbrefill_pagesize(PS_DEFAULT_SIZE); + setup_tlb_handler(cpu); - local_flush_tlb_all(); + output_pgtable_bits_defines(); } diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S index 39743337999e..d8ee8fbc8c67 100644 --- a/arch/loongarch/mm/tlbex.S +++ b/arch/loongarch/mm/tlbex.S @@ -10,15 +10,20 @@ #include <asm/regdef.h> #include <asm/stackframe.h> +#define PTRS_PER_PGD_BITS (PAGE_SHIFT - 3) +#define PTRS_PER_PUD_BITS (PAGE_SHIFT - 3) +#define PTRS_PER_PMD_BITS (PAGE_SHIFT - 3) +#define PTRS_PER_PTE_BITS (PAGE_SHIFT - 3) + .macro tlb_do_page_fault, write SYM_FUNC_START(tlb_do_page_fault_\write) SAVE_ALL - csrrd a2, LOONGARCH_CSR_BADV - move a0, sp - REG_S a2, sp, PT_BVADDR - li.w a1, \write - la.abs t0, do_page_fault - jirl ra, t0, 0 + csrrd a2, LOONGARCH_CSR_BADV + move a0, sp + REG_S a2, sp, PT_BVADDR + li.w a1, \write + la.abs t0, do_page_fault + jirl ra, t0, 0 RESTORE_ALL_AND_RET SYM_FUNC_END(tlb_do_page_fault_\write) .endm @@ -29,133 +34,115 @@ SYM_FUNC_START(handle_tlb_protect) BACKUP_T0T1 SAVE_ALL - move a0, sp - move a1, zero - csrrd a2, LOONGARCH_CSR_BADV - REG_S a2, sp, PT_BVADDR - la.abs t0, do_page_fault - jirl ra, t0, 0 + move a0, sp + move a1, zero + csrrd a2, LOONGARCH_CSR_BADV + REG_S a2, sp, PT_BVADDR + la.abs t0, do_page_fault + jirl ra, t0, 0 RESTORE_ALL_AND_RET SYM_FUNC_END(handle_tlb_protect) SYM_FUNC_START(handle_tlb_load) - csrwr t0, EXCEPTION_KS0 - csrwr t1, EXCEPTION_KS1 - csrwr ra, EXCEPTION_KS2 + csrwr t0, EXCEPTION_KS0 + csrwr t1, EXCEPTION_KS1 + csrwr ra, EXCEPTION_KS2 /* * The vmalloc handling is not in the hotpath. */ - csrrd t0, LOONGARCH_CSR_BADV - bltz t0, vmalloc_load - csrrd t1, LOONGARCH_CSR_PGDL + csrrd t0, LOONGARCH_CSR_BADV + bltz t0, vmalloc_load + csrrd t1, LOONGARCH_CSR_PGDL vmalloc_done_load: /* Get PGD offset in bytes */ - srli.d t0, t0, PGDIR_SHIFT - andi t0, t0, (PTRS_PER_PGD - 1) - slli.d t0, t0, 3 - add.d t1, t1, t0 + bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT + alsl.d t1, ra, t1, 3 #if CONFIG_PGTABLE_LEVELS > 3 - csrrd t0, LOONGARCH_CSR_BADV - ld.d t1, t1, 0 - srli.d t0, t0, PUD_SHIFT - andi t0, t0, (PTRS_PER_PUD - 1) - slli.d t0, t0, 3 - add.d t1, t1, t0 + ld.d t1, t1, 0 + bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT + alsl.d t1, ra, t1, 3 #endif #if CONFIG_PGTABLE_LEVELS > 2 - csrrd t0, LOONGARCH_CSR_BADV - ld.d t1, t1, 0 - srli.d t0, t0, PMD_SHIFT - andi t0, t0, (PTRS_PER_PMD - 1) - slli.d t0, t0, 3 - add.d t1, t1, t0 + ld.d t1, t1, 0 + bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT + alsl.d t1, ra, t1, 3 #endif - ld.d ra, t1, 0 + ld.d ra, t1, 0 /* * For huge tlb entries, pmde doesn't contain an address but * instead contains the tlb pte. Check the PAGE_HUGE bit and * see if we need to jump to huge tlb processing. 
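 * (The rewritten check below uses rotri.d to park the _PAGE_HUGE bit
 * in bit 63, so a single bltz both tests it and avoids clobbering the
 * faulting address still held in t0; the non-huge path rotates the
 * PTE back before indexing the PTE table.)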
*/ - andi t0, ra, _PAGE_HUGE - bnez t0, tlb_huge_update_load + rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1 + bltz ra, tlb_huge_update_load - csrrd t0, LOONGARCH_CSR_BADV - srli.d t0, t0, PAGE_SHIFT - andi t0, t0, (PTRS_PER_PTE - 1) - slli.d t0, t0, _PTE_T_LOG2 - add.d t1, ra, t0 + rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) + bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT + alsl.d t1, t0, ra, _PTE_T_LOG2 #ifdef CONFIG_SMP smp_pgtable_change_load: -#endif -#ifdef CONFIG_SMP - ll.d t0, t1, 0 + ll.d t0, t1, 0 #else - ld.d t0, t1, 0 + ld.d t0, t1, 0 #endif - tlbsrch - - srli.d ra, t0, _PAGE_PRESENT_SHIFT - andi ra, ra, 1 - beqz ra, nopage_tlb_load + andi ra, t0, _PAGE_PRESENT + beqz ra, nopage_tlb_load - ori t0, t0, _PAGE_VALID + ori t0, t0, _PAGE_VALID #ifdef CONFIG_SMP - sc.d t0, t1, 0 - beqz t0, smp_pgtable_change_load + sc.d t0, t1, 0 + beqz t0, smp_pgtable_change_load #else - st.d t0, t1, 0 + st.d t0, t1, 0 #endif - ori t1, t1, 8 - xori t1, t1, 8 - ld.d t0, t1, 0 - ld.d t1, t1, 8 - csrwr t0, LOONGARCH_CSR_TLBELO0 - csrwr t1, LOONGARCH_CSR_TLBELO1 + tlbsrch + bstrins.d t1, zero, 3, 3 + ld.d t0, t1, 0 + ld.d t1, t1, 8 + csrwr t0, LOONGARCH_CSR_TLBELO0 + csrwr t1, LOONGARCH_CSR_TLBELO1 tlbwr -leave_load: - csrrd t0, EXCEPTION_KS0 - csrrd t1, EXCEPTION_KS1 - csrrd ra, EXCEPTION_KS2 + + csrrd t0, EXCEPTION_KS0 + csrrd t1, EXCEPTION_KS1 + csrrd ra, EXCEPTION_KS2 ertn + #ifdef CONFIG_64BIT vmalloc_load: - la.abs t1, swapper_pg_dir - b vmalloc_done_load + la.abs t1, swapper_pg_dir + b vmalloc_done_load #endif - /* - * This is the entry point when build_tlbchange_handler_head - * spots a huge page. - */ + /* This is the entry point of a huge page. */ tlb_huge_update_load: #ifdef CONFIG_SMP - ll.d t0, t1, 0 -#else - ld.d t0, t1, 0 + ll.d ra, t1, 0 #endif - srli.d ra, t0, _PAGE_PRESENT_SHIFT - andi ra, ra, 1 - beqz ra, nopage_tlb_load - tlbsrch + andi t0, ra, _PAGE_PRESENT + beqz t0, nopage_tlb_load - ori t0, t0, _PAGE_VALID #ifdef CONFIG_SMP - sc.d t0, t1, 0 - beqz t0, tlb_huge_update_load - ld.d t0, t1, 0 + ori t0, ra, _PAGE_VALID + sc.d t0, t1, 0 + beqz t0, tlb_huge_update_load + ori t0, ra, _PAGE_VALID #else - st.d t0, t1, 0 + rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) + ori t0, ra, _PAGE_VALID + st.d t0, t1, 0 #endif + tlbsrch addu16i.d t1, zero, -(CSR_TLBIDX_EHINV >> 16) addi.d ra, t1, 0 csrxchg ra, t1, LOONGARCH_CSR_TLBIDX tlbwr - csrxchg zero, t1, LOONGARCH_CSR_TLBIDX + csrxchg zero, t1, LOONGARCH_CSR_TLBIDX /* * A huge PTE describes an area the size of the @@ -167,21 +154,20 @@ tlb_huge_update_load: * address space. 
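 * (Worked example, assuming the default 16KB base pages: HPAGE_SHIFT
 * is then 25, one huge PTE spans 32MB, and each entrylo maps 16MB;
 * entrylo1 is formed below by adding 1 << (HPAGE_SHIFT - 1) to
 * entrylo0, i.e. the physical address advanced by half the huge page.)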
*/ /* Huge page: Move Global bit */ - xori t0, t0, _PAGE_HUGE - lu12i.w t1, _PAGE_HGLOBAL >> 12 - and t1, t0, t1 - srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) - or t0, t0, t1 + xori t0, t0, _PAGE_HUGE + lu12i.w t1, _PAGE_HGLOBAL >> 12 + and t1, t0, t1 + srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) + or t0, t0, t1 - addi.d ra, t0, 0 - csrwr t0, LOONGARCH_CSR_TLBELO0 - addi.d t0, ra, 0 + move ra, t0 + csrwr ra, LOONGARCH_CSR_TLBELO0 /* Convert to entrylo1 */ - addi.d t1, zero, 1 - slli.d t1, t1, (HPAGE_SHIFT - 1) - add.d t0, t0, t1 - csrwr t0, LOONGARCH_CSR_TLBELO1 + addi.d t1, zero, 1 + slli.d t1, t1, (HPAGE_SHIFT - 1) + add.d t0, t0, t1 + csrwr t0, LOONGARCH_CSR_TLBELO1 /* Set huge page tlb entry size */ addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) @@ -194,136 +180,120 @@ tlb_huge_update_load: addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) csrxchg t1, t0, LOONGARCH_CSR_TLBIDX + csrrd t0, EXCEPTION_KS0 + csrrd t1, EXCEPTION_KS1 + csrrd ra, EXCEPTION_KS2 + ertn + nopage_tlb_load: - dbar 0 - csrrd ra, EXCEPTION_KS2 - la.abs t0, tlb_do_page_fault_0 - jr t0 + dbar 0 + csrrd ra, EXCEPTION_KS2 + la.abs t0, tlb_do_page_fault_0 + jr t0 SYM_FUNC_END(handle_tlb_load) SYM_FUNC_START(handle_tlb_store) - csrwr t0, EXCEPTION_KS0 - csrwr t1, EXCEPTION_KS1 - csrwr ra, EXCEPTION_KS2 + csrwr t0, EXCEPTION_KS0 + csrwr t1, EXCEPTION_KS1 + csrwr ra, EXCEPTION_KS2 /* * The vmalloc handling is not in the hotpath. */ - csrrd t0, LOONGARCH_CSR_BADV - bltz t0, vmalloc_store - csrrd t1, LOONGARCH_CSR_PGDL + csrrd t0, LOONGARCH_CSR_BADV + bltz t0, vmalloc_store + csrrd t1, LOONGARCH_CSR_PGDL vmalloc_done_store: /* Get PGD offset in bytes */ - srli.d t0, t0, PGDIR_SHIFT - andi t0, t0, (PTRS_PER_PGD - 1) - slli.d t0, t0, 3 - add.d t1, t1, t0 - + bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT + alsl.d t1, ra, t1, 3 #if CONFIG_PGTABLE_LEVELS > 3 - csrrd t0, LOONGARCH_CSR_BADV - ld.d t1, t1, 0 - srli.d t0, t0, PUD_SHIFT - andi t0, t0, (PTRS_PER_PUD - 1) - slli.d t0, t0, 3 - add.d t1, t1, t0 + ld.d t1, t1, 0 + bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT + alsl.d t1, ra, t1, 3 #endif #if CONFIG_PGTABLE_LEVELS > 2 - csrrd t0, LOONGARCH_CSR_BADV - ld.d t1, t1, 0 - srli.d t0, t0, PMD_SHIFT - andi t0, t0, (PTRS_PER_PMD - 1) - slli.d t0, t0, 3 - add.d t1, t1, t0 + ld.d t1, t1, 0 + bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT + alsl.d t1, ra, t1, 3 #endif - ld.d ra, t1, 0 + ld.d ra, t1, 0 /* * For huge tlb entries, pmde doesn't contain an address but * instead contains the tlb pte. Check the PAGE_HUGE bit and * see if we need to jump to huge tlb processing. 
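 * (Unlike the load path, the store path then requires _PAGE_PRESENT
 * and _PAGE_WRITE together: the andi/xori pair below leaves zero only
 * when both bits are set, otherwise it branches to the page-fault
 * path.)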
*/ - andi t0, ra, _PAGE_HUGE - bnez t0, tlb_huge_update_store + rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1 + bltz ra, tlb_huge_update_store - csrrd t0, LOONGARCH_CSR_BADV - srli.d t0, t0, PAGE_SHIFT - andi t0, t0, (PTRS_PER_PTE - 1) - slli.d t0, t0, _PTE_T_LOG2 - add.d t1, ra, t0 + rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) + bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT + alsl.d t1, t0, ra, _PTE_T_LOG2 #ifdef CONFIG_SMP smp_pgtable_change_store: -#endif -#ifdef CONFIG_SMP - ll.d t0, t1, 0 + ll.d t0, t1, 0 #else - ld.d t0, t1, 0 + ld.d t0, t1, 0 #endif - tlbsrch - - srli.d ra, t0, _PAGE_PRESENT_SHIFT - andi ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT) - xori ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT) - bnez ra, nopage_tlb_store + andi ra, t0, _PAGE_PRESENT | _PAGE_WRITE + xori ra, ra, _PAGE_PRESENT | _PAGE_WRITE + bnez ra, nopage_tlb_store - ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) #ifdef CONFIG_SMP - sc.d t0, t1, 0 - beqz t0, smp_pgtable_change_store + sc.d t0, t1, 0 + beqz t0, smp_pgtable_change_store #else - st.d t0, t1, 0 + st.d t0, t1, 0 #endif - - ori t1, t1, 8 - xori t1, t1, 8 - ld.d t0, t1, 0 - ld.d t1, t1, 8 - csrwr t0, LOONGARCH_CSR_TLBELO0 - csrwr t1, LOONGARCH_CSR_TLBELO1 + tlbsrch + bstrins.d t1, zero, 3, 3 + ld.d t0, t1, 0 + ld.d t1, t1, 8 + csrwr t0, LOONGARCH_CSR_TLBELO0 + csrwr t1, LOONGARCH_CSR_TLBELO1 tlbwr -leave_store: - csrrd t0, EXCEPTION_KS0 - csrrd t1, EXCEPTION_KS1 - csrrd ra, EXCEPTION_KS2 + + csrrd t0, EXCEPTION_KS0 + csrrd t1, EXCEPTION_KS1 + csrrd ra, EXCEPTION_KS2 ertn + #ifdef CONFIG_64BIT vmalloc_store: - la.abs t1, swapper_pg_dir - b vmalloc_done_store + la.abs t1, swapper_pg_dir + b vmalloc_done_store #endif - /* - * This is the entry point when build_tlbchange_handler_head - * spots a huge page. - */ + /* This is the entry point of a huge page. */ tlb_huge_update_store: #ifdef CONFIG_SMP - ll.d t0, t1, 0 -#else - ld.d t0, t1, 0 + ll.d ra, t1, 0 #endif - srli.d ra, t0, _PAGE_PRESENT_SHIFT - andi ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT) - xori ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT) - bnez ra, nopage_tlb_store - - tlbsrch - ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + andi t0, ra, _PAGE_PRESENT | _PAGE_WRITE + xori t0, t0, _PAGE_PRESENT | _PAGE_WRITE + bnez t0, nopage_tlb_store #ifdef CONFIG_SMP - sc.d t0, t1, 0 - beqz t0, tlb_huge_update_store - ld.d t0, t1, 0 + ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + sc.d t0, t1, 0 + beqz t0, tlb_huge_update_store + ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) #else - st.d t0, t1, 0 + rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) + ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + st.d t0, t1, 0 #endif + tlbsrch addu16i.d t1, zero, -(CSR_TLBIDX_EHINV >> 16) addi.d ra, t1, 0 csrxchg ra, t1, LOONGARCH_CSR_TLBIDX tlbwr - csrxchg zero, t1, LOONGARCH_CSR_TLBIDX + csrxchg zero, t1, LOONGARCH_CSR_TLBIDX /* * A huge PTE describes an area the size of the * configured huge page size. This is twice the @@ -334,21 +304,20 @@ tlb_huge_update_store: * address space. 
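 * (Before the entrylo values are written below, the "Move Global bit"
 * sequence clears _PAGE_HUGE and shifts _PAGE_HGLOBAL down into the
 * regular _PAGE_GLOBAL position, since a huge PTE keeps the global
 * bit at a different offset than the entrylo format uses.)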
*/ /* Huge page: Move Global bit */ - xori t0, t0, _PAGE_HUGE - lu12i.w t1, _PAGE_HGLOBAL >> 12 - and t1, t0, t1 - srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) - or t0, t0, t1 + xori t0, t0, _PAGE_HUGE + lu12i.w t1, _PAGE_HGLOBAL >> 12 + and t1, t0, t1 + srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) + or t0, t0, t1 - addi.d ra, t0, 0 - csrwr t0, LOONGARCH_CSR_TLBELO0 - addi.d t0, ra, 0 + move ra, t0 + csrwr ra, LOONGARCH_CSR_TLBELO0 /* Convert to entrylo1 */ - addi.d t1, zero, 1 - slli.d t1, t1, (HPAGE_SHIFT - 1) - add.d t0, t0, t1 - csrwr t0, LOONGARCH_CSR_TLBELO1 + addi.d t1, zero, 1 + slli.d t1, t1, (HPAGE_SHIFT - 1) + add.d t0, t0, t1 + csrwr t0, LOONGARCH_CSR_TLBELO1 /* Set huge page tlb entry size */ addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) @@ -362,126 +331,110 @@ tlb_huge_update_store: addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) csrxchg t1, t0, LOONGARCH_CSR_TLBIDX + csrrd t0, EXCEPTION_KS0 + csrrd t1, EXCEPTION_KS1 + csrrd ra, EXCEPTION_KS2 + ertn + nopage_tlb_store: - dbar 0 - csrrd ra, EXCEPTION_KS2 - la.abs t0, tlb_do_page_fault_1 - jr t0 + dbar 0 + csrrd ra, EXCEPTION_KS2 + la.abs t0, tlb_do_page_fault_1 + jr t0 SYM_FUNC_END(handle_tlb_store) SYM_FUNC_START(handle_tlb_modify) - csrwr t0, EXCEPTION_KS0 - csrwr t1, EXCEPTION_KS1 - csrwr ra, EXCEPTION_KS2 + csrwr t0, EXCEPTION_KS0 + csrwr t1, EXCEPTION_KS1 + csrwr ra, EXCEPTION_KS2 /* * The vmalloc handling is not in the hotpath. */ - csrrd t0, LOONGARCH_CSR_BADV - bltz t0, vmalloc_modify - csrrd t1, LOONGARCH_CSR_PGDL + csrrd t0, LOONGARCH_CSR_BADV + bltz t0, vmalloc_modify + csrrd t1, LOONGARCH_CSR_PGDL vmalloc_done_modify: /* Get PGD offset in bytes */ - srli.d t0, t0, PGDIR_SHIFT - andi t0, t0, (PTRS_PER_PGD - 1) - slli.d t0, t0, 3 - add.d t1, t1, t0 + bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT + alsl.d t1, ra, t1, 3 #if CONFIG_PGTABLE_LEVELS > 3 - csrrd t0, LOONGARCH_CSR_BADV - ld.d t1, t1, 0 - srli.d t0, t0, PUD_SHIFT - andi t0, t0, (PTRS_PER_PUD - 1) - slli.d t0, t0, 3 - add.d t1, t1, t0 + ld.d t1, t1, 0 + bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT + alsl.d t1, ra, t1, 3 #endif #if CONFIG_PGTABLE_LEVELS > 2 - csrrd t0, LOONGARCH_CSR_BADV - ld.d t1, t1, 0 - srli.d t0, t0, PMD_SHIFT - andi t0, t0, (PTRS_PER_PMD - 1) - slli.d t0, t0, 3 - add.d t1, t1, t0 + ld.d t1, t1, 0 + bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT + alsl.d t1, ra, t1, 3 #endif - ld.d ra, t1, 0 + ld.d ra, t1, 0 /* * For huge tlb entries, pmde doesn't contain an address but * instead contains the tlb pte. Check the PAGE_HUGE bit and * see if we need to jump to huge tlb processing. 
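 * (handle_tlb_modify checks only _PAGE_WRITE below: a modify fault
 * means the entry is already present, so presence needs no retest
 * before the dirty and modified bits are set.)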
*/ - andi t0, ra, _PAGE_HUGE - bnez t0, tlb_huge_update_modify + rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1 + bltz ra, tlb_huge_update_modify - csrrd t0, LOONGARCH_CSR_BADV - srli.d t0, t0, PAGE_SHIFT - andi t0, t0, (PTRS_PER_PTE - 1) - slli.d t0, t0, _PTE_T_LOG2 - add.d t1, ra, t0 + rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) + bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT + alsl.d t1, t0, ra, _PTE_T_LOG2 #ifdef CONFIG_SMP smp_pgtable_change_modify: -#endif -#ifdef CONFIG_SMP - ll.d t0, t1, 0 + ll.d t0, t1, 0 #else - ld.d t0, t1, 0 + ld.d t0, t1, 0 #endif - tlbsrch - - srli.d ra, t0, _PAGE_WRITE_SHIFT - andi ra, ra, 1 - beqz ra, nopage_tlb_modify + andi ra, t0, _PAGE_WRITE + beqz ra, nopage_tlb_modify - ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) #ifdef CONFIG_SMP - sc.d t0, t1, 0 - beqz t0, smp_pgtable_change_modify + sc.d t0, t1, 0 + beqz t0, smp_pgtable_change_modify #else - st.d t0, t1, 0 + st.d t0, t1, 0 #endif - ori t1, t1, 8 - xori t1, t1, 8 - ld.d t0, t1, 0 - ld.d t1, t1, 8 - csrwr t0, LOONGARCH_CSR_TLBELO0 - csrwr t1, LOONGARCH_CSR_TLBELO1 + tlbsrch + bstrins.d t1, zero, 3, 3 + ld.d t0, t1, 0 + ld.d t1, t1, 8 + csrwr t0, LOONGARCH_CSR_TLBELO0 + csrwr t1, LOONGARCH_CSR_TLBELO1 tlbwr -leave_modify: - csrrd t0, EXCEPTION_KS0 - csrrd t1, EXCEPTION_KS1 - csrrd ra, EXCEPTION_KS2 + + csrrd t0, EXCEPTION_KS0 + csrrd t1, EXCEPTION_KS1 + csrrd ra, EXCEPTION_KS2 ertn + #ifdef CONFIG_64BIT vmalloc_modify: - la.abs t1, swapper_pg_dir - b vmalloc_done_modify + la.abs t1, swapper_pg_dir + b vmalloc_done_modify #endif - /* - * This is the entry point when - * build_tlbchange_handler_head spots a huge page. - */ + /* This is the entry point of a huge page. */ tlb_huge_update_modify: #ifdef CONFIG_SMP - ll.d t0, t1, 0 -#else - ld.d t0, t1, 0 + ll.d ra, t1, 0 #endif - - srli.d ra, t0, _PAGE_WRITE_SHIFT - andi ra, ra, 1 - beqz ra, nopage_tlb_modify - - tlbsrch - ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + andi t0, ra, _PAGE_WRITE + beqz t0, nopage_tlb_modify #ifdef CONFIG_SMP - sc.d t0, t1, 0 - beqz t0, tlb_huge_update_modify - ld.d t0, t1, 0 + ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + sc.d t0, t1, 0 + beqz t0, tlb_huge_update_modify + ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) #else - st.d t0, t1, 0 + rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1) + ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED) + st.d t0, t1, 0 #endif /* * A huge PTE describes an area the size of the @@ -493,21 +446,20 @@ tlb_huge_update_modify: * address space. 
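 * (Under CONFIG_SMP the PTE update above is an ll.d/sc.d sequence that
 * loops back to tlb_huge_update_modify when the store-conditional
 * fails, making the VALID/DIRTY/MODIFIED update atomic against racing
 * updaters.)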
*/ /* Huge page: Move Global bit */ - xori t0, t0, _PAGE_HUGE - lu12i.w t1, _PAGE_HGLOBAL >> 12 - and t1, t0, t1 - srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) - or t0, t0, t1 + xori t0, t0, _PAGE_HUGE + lu12i.w t1, _PAGE_HGLOBAL >> 12 + and t1, t0, t1 + srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) + or t0, t0, t1 - addi.d ra, t0, 0 - csrwr t0, LOONGARCH_CSR_TLBELO0 - addi.d t0, ra, 0 + move ra, t0 + csrwr ra, LOONGARCH_CSR_TLBELO0 /* Convert to entrylo1 */ - addi.d t1, zero, 1 - slli.d t1, t1, (HPAGE_SHIFT - 1) - add.d t0, t0, t1 - csrwr t0, LOONGARCH_CSR_TLBELO1 + addi.d t1, zero, 1 + slli.d t1, t1, (HPAGE_SHIFT - 1) + add.d t0, t0, t1 + csrwr t0, LOONGARCH_CSR_TLBELO1 /* Set huge page tlb entry size */ addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16) @@ -521,26 +473,31 @@ tlb_huge_update_modify: addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16)) csrxchg t1, t0, LOONGARCH_CSR_TLBIDX + csrrd t0, EXCEPTION_KS0 + csrrd t1, EXCEPTION_KS1 + csrrd ra, EXCEPTION_KS2 + ertn + nopage_tlb_modify: - dbar 0 - csrrd ra, EXCEPTION_KS2 - la.abs t0, tlb_do_page_fault_1 - jr t0 + dbar 0 + csrrd ra, EXCEPTION_KS2 + la.abs t0, tlb_do_page_fault_1 + jr t0 SYM_FUNC_END(handle_tlb_modify) SYM_FUNC_START(handle_tlb_refill) - csrwr t0, LOONGARCH_CSR_TLBRSAVE - csrrd t0, LOONGARCH_CSR_PGD - lddir t0, t0, 3 + csrwr t0, LOONGARCH_CSR_TLBRSAVE + csrrd t0, LOONGARCH_CSR_PGD + lddir t0, t0, 3 #if CONFIG_PGTABLE_LEVELS > 3 - lddir t0, t0, 2 + lddir t0, t0, 2 #endif #if CONFIG_PGTABLE_LEVELS > 2 - lddir t0, t0, 1 + lddir t0, t0, 1 #endif - ldpte t0, 0 - ldpte t0, 1 + ldpte t0, 0 + ldpte t0, 1 tlbfill - csrrd t0, LOONGARCH_CSR_TLBRSAVE + csrrd t0, LOONGARCH_CSR_TLBRSAVE ertn SYM_FUNC_END(handle_tlb_refill) diff --git a/arch/loongarch/net/Makefile b/arch/loongarch/net/Makefile new file mode 100644 index 000000000000..1ec12a0c324a --- /dev/null +++ b/arch/loongarch/net/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for arch/loongarch/net +# +# Copyright (C) 2022 Loongson Technology Corporation Limited +# +obj-$(CONFIG_BPF_JIT) += bpf_jit.o diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c new file mode 100644 index 000000000000..43f0a98efe38 --- /dev/null +++ b/arch/loongarch/net/bpf_jit.c @@ -0,0 +1,1179 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * BPF JIT compiler for LoongArch + * + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ +#include "bpf_jit.h" + +#define REG_TCC LOONGARCH_GPR_A6 +#define TCC_SAVED LOONGARCH_GPR_S5 + +#define SAVE_RA BIT(0) +#define SAVE_TCC BIT(1) + +static const int regmap[] = { + /* return value from in-kernel function, and exit value for eBPF program */ + [BPF_REG_0] = LOONGARCH_GPR_A5, + /* arguments from eBPF program to in-kernel function */ + [BPF_REG_1] = LOONGARCH_GPR_A0, + [BPF_REG_2] = LOONGARCH_GPR_A1, + [BPF_REG_3] = LOONGARCH_GPR_A2, + [BPF_REG_4] = LOONGARCH_GPR_A3, + [BPF_REG_5] = LOONGARCH_GPR_A4, + /* callee saved registers that in-kernel function will preserve */ + [BPF_REG_6] = LOONGARCH_GPR_S0, + [BPF_REG_7] = LOONGARCH_GPR_S1, + [BPF_REG_8] = LOONGARCH_GPR_S2, + [BPF_REG_9] = LOONGARCH_GPR_S3, + /* read-only frame pointer to access stack */ + [BPF_REG_FP] = LOONGARCH_GPR_S4, + /* temporary register for blinding constants */ + [BPF_REG_AX] = LOONGARCH_GPR_T0, +}; + +static void mark_call(struct jit_ctx *ctx) +{ + ctx->flags |= SAVE_RA; +} + +static void mark_tail_call(struct jit_ctx *ctx) +{ + ctx->flags |= SAVE_TCC; +} + +static bool seen_call(struct 
jit_ctx *ctx)
+{
+	return (ctx->flags & SAVE_RA);
+}
+
+static bool seen_tail_call(struct jit_ctx *ctx)
+{
+	return (ctx->flags & SAVE_TCC);
+}
+
+static u8 tail_call_reg(struct jit_ctx *ctx)
+{
+	if (seen_call(ctx))
+		return TCC_SAVED;
+
+	return REG_TCC;
+}
+
+/*
+ * eBPF prog stack layout:
+ *
+ *                                        high
+ * original $sp ------------> +-------------------------+ <--LOONGARCH_GPR_FP
+ *                            |           $ra           |
+ *                            +-------------------------+
+ *                            |           $fp           |
+ *                            +-------------------------+
+ *                            |           $s0           |
+ *                            +-------------------------+
+ *                            |           $s1           |
+ *                            +-------------------------+
+ *                            |           $s2           |
+ *                            +-------------------------+
+ *                            |           $s3           |
+ *                            +-------------------------+
+ *                            |           $s4           |
+ *                            +-------------------------+
+ *                            |           $s5           |
+ *                            +-------------------------+ <--BPF_REG_FP
+ *                            |  prog->aux->stack_depth |
+ *                            |        (optional)       |
+ * current $sp -------------> +-------------------------+
+ *                                        low
+ */
+static void build_prologue(struct jit_ctx *ctx)
+{
+	int stack_adjust = 0, store_offset, bpf_stack_adjust;
+
+	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
+
+	/* To store ra, fp, s0, s1, s2, s3, s4 and s5. */
+	stack_adjust += sizeof(long) * 8;
+
+	stack_adjust = round_up(stack_adjust, 16);
+	stack_adjust += bpf_stack_adjust;
+
+	/*
+	 * First instruction initializes the tail call count (TCC).
+	 * On tail call we skip this instruction, and the TCC is
+	 * passed in REG_TCC from the caller.
+	 */
+	emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT);
+
+	emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_adjust);
+
+	store_offset = stack_adjust - sizeof(long);
+	emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, store_offset);
+
+	store_offset -= sizeof(long);
+	emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, store_offset);
+
+	store_offset -= sizeof(long);
+	emit_insn(ctx, std, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, store_offset);
+
+	store_offset -= sizeof(long);
+	emit_insn(ctx, std, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, store_offset);
+
+	store_offset -= sizeof(long);
+	emit_insn(ctx, std, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, store_offset);
+
+	store_offset -= sizeof(long);
+	emit_insn(ctx, std, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, store_offset);
+
+	store_offset -= sizeof(long);
+	emit_insn(ctx, std, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, store_offset);
+
+	store_offset -= sizeof(long);
+	emit_insn(ctx, std, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, store_offset);
+
+	emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_adjust);
+
+	if (bpf_stack_adjust)
+		emit_insn(ctx, addid, regmap[BPF_REG_FP], LOONGARCH_GPR_SP, bpf_stack_adjust);
+
+	/*
+	 * Program contains calls and tail calls, so REG_TCC needs
+	 * to be saved across calls.
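+	 * (REG_TCC is $a6, a caller-clobbered argument register, so a
+	 * helper call would trash the live tail-call count; parking it in
+	 * the callee-saved $s5 keeps it valid across calls.)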
+ */ + if (seen_tail_call(ctx) && seen_call(ctx)) + move_reg(ctx, TCC_SAVED, REG_TCC); + + ctx->stack_size = stack_adjust; +} + +static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call) +{ + int stack_adjust = ctx->stack_size; + int load_offset; + + load_offset = stack_adjust - sizeof(long); + emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, load_offset); + + load_offset -= sizeof(long); + emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, load_offset); + + load_offset -= sizeof(long); + emit_insn(ctx, ldd, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, load_offset); + + load_offset -= sizeof(long); + emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, load_offset); + + load_offset -= sizeof(long); + emit_insn(ctx, ldd, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, load_offset); + + load_offset -= sizeof(long); + emit_insn(ctx, ldd, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, load_offset); + + load_offset -= sizeof(long); + emit_insn(ctx, ldd, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, load_offset); + + load_offset -= sizeof(long); + emit_insn(ctx, ldd, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, load_offset); + + emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_adjust); + + if (!is_tail_call) { + /* Set return value */ + move_reg(ctx, LOONGARCH_GPR_A0, regmap[BPF_REG_0]); + /* Return to the caller */ + emit_insn(ctx, jirl, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0); + } else { + /* + * Call the next bpf prog and skip the first instruction + * of TCC initialization. + */ + emit_insn(ctx, jirl, LOONGARCH_GPR_T3, LOONGARCH_GPR_ZERO, 1); + } +} + +static void build_epilogue(struct jit_ctx *ctx) +{ + __build_epilogue(ctx, false); +} + +bool bpf_jit_supports_kfunc_call(void) +{ + return true; +} + +/* initialized on the first pass of build_body() */ +static int out_offset = -1; +static int emit_bpf_tail_call(struct jit_ctx *ctx) +{ + int off; + u8 tcc = tail_call_reg(ctx); + u8 a1 = LOONGARCH_GPR_A1; + u8 a2 = LOONGARCH_GPR_A2; + u8 t1 = LOONGARCH_GPR_T1; + u8 t2 = LOONGARCH_GPR_T2; + u8 t3 = LOONGARCH_GPR_T3; + const int idx0 = ctx->idx; + +#define cur_offset (ctx->idx - idx0) +#define jmp_offset (out_offset - (cur_offset)) + + /* + * a0: &ctx + * a1: &array + * a2: index + * + * if (index >= array->map.max_entries) + * goto out; + */ + off = offsetof(struct bpf_array, map.max_entries); + emit_insn(ctx, ldwu, t1, a1, off); + /* bgeu $a2, $t1, jmp_offset */ + if (emit_tailcall_jmp(ctx, BPF_JGE, a2, t1, jmp_offset) < 0) + goto toofar; + + /* + * if (--TCC < 0) + * goto out; + */ + emit_insn(ctx, addid, REG_TCC, tcc, -1); + if (emit_tailcall_jmp(ctx, BPF_JSLT, REG_TCC, LOONGARCH_GPR_ZERO, jmp_offset) < 0) + goto toofar; + + /* + * prog = array->ptrs[index]; + * if (!prog) + * goto out; + */ + emit_insn(ctx, alsld, t2, a2, a1, 2); + off = offsetof(struct bpf_array, ptrs); + emit_insn(ctx, ldd, t2, t2, off); + /* beq $t2, $zero, jmp_offset */ + if (emit_tailcall_jmp(ctx, BPF_JEQ, t2, LOONGARCH_GPR_ZERO, jmp_offset) < 0) + goto toofar; + + /* goto *(prog->bpf_func + 4); */ + off = offsetof(struct bpf_prog, bpf_func); + emit_insn(ctx, ldd, t3, t2, off); + __build_epilogue(ctx, true); + + /* out: */ + if (out_offset == -1) + out_offset = cur_offset; + if (cur_offset != out_offset) { + pr_err_once("tail_call out_offset = %d, expected %d!\n", + cur_offset, out_offset); + return -1; + } + + return 0; + +toofar: + pr_info_once("tail_call: jump too far\n"); + return -1; +#undef cur_offset +#undef jmp_offset +} + +static void emit_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx) +{ + const u8 t1 = 
LOONGARCH_GPR_T1; + const u8 t2 = LOONGARCH_GPR_T2; + const u8 t3 = LOONGARCH_GPR_T3; + const u8 src = regmap[insn->src_reg]; + const u8 dst = regmap[insn->dst_reg]; + const s16 off = insn->off; + const s32 imm = insn->imm; + const bool isdw = BPF_SIZE(insn->code) == BPF_DW; + + move_imm(ctx, t1, off, false); + emit_insn(ctx, addd, t1, dst, t1); + move_reg(ctx, t3, src); + + switch (imm) { + /* lock *(size *)(dst + off) <op>= src */ + case BPF_ADD: + if (isdw) + emit_insn(ctx, amaddd, t2, t1, src); + else + emit_insn(ctx, amaddw, t2, t1, src); + break; + case BPF_AND: + if (isdw) + emit_insn(ctx, amandd, t2, t1, src); + else + emit_insn(ctx, amandw, t2, t1, src); + break; + case BPF_OR: + if (isdw) + emit_insn(ctx, amord, t2, t1, src); + else + emit_insn(ctx, amorw, t2, t1, src); + break; + case BPF_XOR: + if (isdw) + emit_insn(ctx, amxord, t2, t1, src); + else + emit_insn(ctx, amxorw, t2, t1, src); + break; + /* src = atomic_fetch_<op>(dst + off, src) */ + case BPF_ADD | BPF_FETCH: + if (isdw) { + emit_insn(ctx, amaddd, src, t1, t3); + } else { + emit_insn(ctx, amaddw, src, t1, t3); + emit_zext_32(ctx, src, true); + } + break; + case BPF_AND | BPF_FETCH: + if (isdw) { + emit_insn(ctx, amandd, src, t1, t3); + } else { + emit_insn(ctx, amandw, src, t1, t3); + emit_zext_32(ctx, src, true); + } + break; + case BPF_OR | BPF_FETCH: + if (isdw) { + emit_insn(ctx, amord, src, t1, t3); + } else { + emit_insn(ctx, amorw, src, t1, t3); + emit_zext_32(ctx, src, true); + } + break; + case BPF_XOR | BPF_FETCH: + if (isdw) { + emit_insn(ctx, amxord, src, t1, t3); + } else { + emit_insn(ctx, amxorw, src, t1, t3); + emit_zext_32(ctx, src, true); + } + break; + /* src = atomic_xchg(dst + off, src); */ + case BPF_XCHG: + if (isdw) { + emit_insn(ctx, amswapd, src, t1, t3); + } else { + emit_insn(ctx, amswapw, src, t1, t3); + emit_zext_32(ctx, src, true); + } + break; + /* r0 = atomic_cmpxchg(dst + off, r0, src); */ + case BPF_CMPXCHG: + u8 r0 = regmap[BPF_REG_0]; + + move_reg(ctx, t2, r0); + if (isdw) { + emit_insn(ctx, lld, r0, t1, 0); + emit_insn(ctx, bne, t2, r0, 4); + move_reg(ctx, t3, src); + emit_insn(ctx, scd, t3, t1, 0); + emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -4); + } else { + emit_insn(ctx, llw, r0, t1, 0); + emit_zext_32(ctx, t2, true); + emit_zext_32(ctx, r0, true); + emit_insn(ctx, bne, t2, r0, 4); + move_reg(ctx, t3, src); + emit_insn(ctx, scw, t3, t1, 0); + emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -6); + emit_zext_32(ctx, r0, true); + } + break; + } +} + +static bool is_signed_bpf_cond(u8 cond) +{ + return cond == BPF_JSGT || cond == BPF_JSLT || + cond == BPF_JSGE || cond == BPF_JSLE; +} + +static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool extra_pass) +{ + const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || + BPF_CLASS(insn->code) == BPF_JMP32; + const u8 code = insn->code; + const u8 cond = BPF_OP(code); + const u8 t1 = LOONGARCH_GPR_T1; + const u8 t2 = LOONGARCH_GPR_T2; + const u8 src = regmap[insn->src_reg]; + const u8 dst = regmap[insn->dst_reg]; + const s16 off = insn->off; + const s32 imm = insn->imm; + int jmp_offset; + int i = insn - ctx->prog->insnsi; + + switch (code) { + /* dst = src */ + case BPF_ALU | BPF_MOV | BPF_X: + case BPF_ALU64 | BPF_MOV | BPF_X: + move_reg(ctx, dst, src); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = imm */ + case BPF_ALU | BPF_MOV | BPF_K: + case BPF_ALU64 | BPF_MOV | BPF_K: + move_imm(ctx, dst, imm, is32); + break; + + /* dst = dst + src */ + case BPF_ALU | BPF_ADD | BPF_X: + case BPF_ALU64 | BPF_ADD | BPF_X: 
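+		/* one 64-bit addd serves both widths; for the 32-bit form,
+		 * emit_zext_32() below truncates the result as eBPF
+		 * semantics require
+		 */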
+ emit_insn(ctx, addd, dst, dst, src); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst + imm */ + case BPF_ALU | BPF_ADD | BPF_K: + case BPF_ALU64 | BPF_ADD | BPF_K: + if (is_signed_imm12(imm)) { + emit_insn(ctx, addid, dst, dst, imm); + } else { + move_imm(ctx, t1, imm, is32); + emit_insn(ctx, addd, dst, dst, t1); + } + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst - src */ + case BPF_ALU | BPF_SUB | BPF_X: + case BPF_ALU64 | BPF_SUB | BPF_X: + emit_insn(ctx, subd, dst, dst, src); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst - imm */ + case BPF_ALU | BPF_SUB | BPF_K: + case BPF_ALU64 | BPF_SUB | BPF_K: + if (is_signed_imm12(-imm)) { + emit_insn(ctx, addid, dst, dst, -imm); + } else { + move_imm(ctx, t1, imm, is32); + emit_insn(ctx, subd, dst, dst, t1); + } + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst * src */ + case BPF_ALU | BPF_MUL | BPF_X: + case BPF_ALU64 | BPF_MUL | BPF_X: + emit_insn(ctx, muld, dst, dst, src); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst * imm */ + case BPF_ALU | BPF_MUL | BPF_K: + case BPF_ALU64 | BPF_MUL | BPF_K: + move_imm(ctx, t1, imm, is32); + emit_insn(ctx, muld, dst, dst, t1); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst / src */ + case BPF_ALU | BPF_DIV | BPF_X: + case BPF_ALU64 | BPF_DIV | BPF_X: + emit_zext_32(ctx, dst, is32); + move_reg(ctx, t1, src); + emit_zext_32(ctx, t1, is32); + emit_insn(ctx, divdu, dst, dst, t1); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst / imm */ + case BPF_ALU | BPF_DIV | BPF_K: + case BPF_ALU64 | BPF_DIV | BPF_K: + move_imm(ctx, t1, imm, is32); + emit_zext_32(ctx, dst, is32); + emit_insn(ctx, divdu, dst, dst, t1); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst % src */ + case BPF_ALU | BPF_MOD | BPF_X: + case BPF_ALU64 | BPF_MOD | BPF_X: + emit_zext_32(ctx, dst, is32); + move_reg(ctx, t1, src); + emit_zext_32(ctx, t1, is32); + emit_insn(ctx, moddu, dst, dst, t1); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst % imm */ + case BPF_ALU | BPF_MOD | BPF_K: + case BPF_ALU64 | BPF_MOD | BPF_K: + move_imm(ctx, t1, imm, is32); + emit_zext_32(ctx, dst, is32); + emit_insn(ctx, moddu, dst, dst, t1); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = -dst */ + case BPF_ALU | BPF_NEG: + case BPF_ALU64 | BPF_NEG: + move_imm(ctx, t1, imm, is32); + emit_insn(ctx, subd, dst, LOONGARCH_GPR_ZERO, dst); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst & src */ + case BPF_ALU | BPF_AND | BPF_X: + case BPF_ALU64 | BPF_AND | BPF_X: + emit_insn(ctx, and, dst, dst, src); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst & imm */ + case BPF_ALU | BPF_AND | BPF_K: + case BPF_ALU64 | BPF_AND | BPF_K: + if (is_unsigned_imm12(imm)) { + emit_insn(ctx, andi, dst, dst, imm); + } else { + move_imm(ctx, t1, imm, is32); + emit_insn(ctx, and, dst, dst, t1); + } + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst | src */ + case BPF_ALU | BPF_OR | BPF_X: + case BPF_ALU64 | BPF_OR | BPF_X: + emit_insn(ctx, or, dst, dst, src); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst | imm */ + case BPF_ALU | BPF_OR | BPF_K: + case BPF_ALU64 | BPF_OR | BPF_K: + if (is_unsigned_imm12(imm)) { + emit_insn(ctx, ori, dst, dst, imm); + } else { + move_imm(ctx, t1, imm, is32); + emit_insn(ctx, or, dst, dst, t1); + } + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst ^ src */ + case BPF_ALU | BPF_XOR | BPF_X: + case BPF_ALU64 | BPF_XOR | BPF_X: + emit_insn(ctx, xor, dst, dst, src); + emit_zext_32(ctx, dst, is32); + break; + + /* dst = 
dst ^ imm */ + case BPF_ALU | BPF_XOR | BPF_K: + case BPF_ALU64 | BPF_XOR | BPF_K: + if (is_unsigned_imm12(imm)) { + emit_insn(ctx, xori, dst, dst, imm); + } else { + move_imm(ctx, t1, imm, is32); + emit_insn(ctx, xor, dst, dst, t1); + } + emit_zext_32(ctx, dst, is32); + break; + + /* dst = dst << src (logical) */ + case BPF_ALU | BPF_LSH | BPF_X: + emit_insn(ctx, sllw, dst, dst, src); + emit_zext_32(ctx, dst, is32); + break; + + case BPF_ALU64 | BPF_LSH | BPF_X: + emit_insn(ctx, slld, dst, dst, src); + break; + + /* dst = dst << imm (logical) */ + case BPF_ALU | BPF_LSH | BPF_K: + emit_insn(ctx, slliw, dst, dst, imm); + emit_zext_32(ctx, dst, is32); + break; + + case BPF_ALU64 | BPF_LSH | BPF_K: + emit_insn(ctx, sllid, dst, dst, imm); + break; + + /* dst = dst >> src (logical) */ + case BPF_ALU | BPF_RSH | BPF_X: + emit_insn(ctx, srlw, dst, dst, src); + emit_zext_32(ctx, dst, is32); + break; + + case BPF_ALU64 | BPF_RSH | BPF_X: + emit_insn(ctx, srld, dst, dst, src); + break; + + /* dst = dst >> imm (logical) */ + case BPF_ALU | BPF_RSH | BPF_K: + emit_insn(ctx, srliw, dst, dst, imm); + emit_zext_32(ctx, dst, is32); + break; + + case BPF_ALU64 | BPF_RSH | BPF_K: + emit_insn(ctx, srlid, dst, dst, imm); + break; + + /* dst = dst >> src (arithmetic) */ + case BPF_ALU | BPF_ARSH | BPF_X: + emit_insn(ctx, sraw, dst, dst, src); + emit_zext_32(ctx, dst, is32); + break; + + case BPF_ALU64 | BPF_ARSH | BPF_X: + emit_insn(ctx, srad, dst, dst, src); + break; + + /* dst = dst >> imm (arithmetic) */ + case BPF_ALU | BPF_ARSH | BPF_K: + emit_insn(ctx, sraiw, dst, dst, imm); + emit_zext_32(ctx, dst, is32); + break; + + case BPF_ALU64 | BPF_ARSH | BPF_K: + emit_insn(ctx, sraid, dst, dst, imm); + break; + + /* dst = BSWAP##imm(dst) */ + case BPF_ALU | BPF_END | BPF_FROM_LE: + switch (imm) { + case 16: + /* zero-extend 16 bits into 64 bits */ + emit_insn(ctx, bstrpickd, dst, dst, 15, 0); + break; + case 32: + /* zero-extend 32 bits into 64 bits */ + emit_zext_32(ctx, dst, is32); + break; + case 64: + /* do nothing */ + break; + } + break; + + case BPF_ALU | BPF_END | BPF_FROM_BE: + switch (imm) { + case 16: + emit_insn(ctx, revb2h, dst, dst); + /* zero-extend 16 bits into 64 bits */ + emit_insn(ctx, bstrpickd, dst, dst, 15, 0); + break; + case 32: + emit_insn(ctx, revb2w, dst, dst); + /* zero-extend 32 bits into 64 bits */ + emit_zext_32(ctx, dst, is32); + break; + case 64: + emit_insn(ctx, revbd, dst, dst); + break; + } + break; + + /* PC += off if dst cond src */ + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: + jmp_offset = bpf2la_offset(i, off, ctx); + move_reg(ctx, t1, dst); + move_reg(ctx, t2, src); + if (is_signed_bpf_cond(BPF_OP(code))) { + emit_sext_32(ctx, t1, is32); + emit_sext_32(ctx, t2, is32); + } else { + emit_zext_32(ctx, t1, is32); + emit_zext_32(ctx, t2, is32); + } + if (emit_cond_jmp(ctx, cond, t1, t2, jmp_offset) < 0) + goto toofar; + 
break; + + /* PC += off if dst cond imm */ + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: + u8 t7 = -1; + jmp_offset = bpf2la_offset(i, off, ctx); + if (imm) { + move_imm(ctx, t1, imm, false); + t7 = t1; + } else { + /* If imm is 0, simply use zero register. */ + t7 = LOONGARCH_GPR_ZERO; + } + move_reg(ctx, t2, dst); + if (is_signed_bpf_cond(BPF_OP(code))) { + emit_sext_32(ctx, t7, is32); + emit_sext_32(ctx, t2, is32); + } else { + emit_zext_32(ctx, t7, is32); + emit_zext_32(ctx, t2, is32); + } + if (emit_cond_jmp(ctx, cond, t2, t7, jmp_offset) < 0) + goto toofar; + break; + + /* PC += off if dst & src */ + case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_X: + jmp_offset = bpf2la_offset(i, off, ctx); + emit_insn(ctx, and, t1, dst, src); + emit_zext_32(ctx, t1, is32); + if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0) + goto toofar; + break; + + /* PC += off if dst & imm */ + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: + jmp_offset = bpf2la_offset(i, off, ctx); + move_imm(ctx, t1, imm, is32); + emit_insn(ctx, and, t1, dst, t1); + emit_zext_32(ctx, t1, is32); + if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0) + goto toofar; + break; + + /* PC += off */ + case BPF_JMP | BPF_JA: + jmp_offset = bpf2la_offset(i, off, ctx); + if (emit_uncond_jmp(ctx, jmp_offset) < 0) + goto toofar; + break; + + /* function call */ + case BPF_JMP | BPF_CALL: + int ret; + u64 func_addr; + bool func_addr_fixed; + + mark_call(ctx); + ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, + &func_addr, &func_addr_fixed); + if (ret < 0) + return ret; + + move_imm(ctx, t1, func_addr, is32); + emit_insn(ctx, jirl, t1, LOONGARCH_GPR_RA, 0); + move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0); + break; + + /* tail call */ + case BPF_JMP | BPF_TAIL_CALL: + mark_tail_call(ctx); + if (emit_bpf_tail_call(ctx) < 0) + return -EINVAL; + break; + + /* function return */ + case BPF_JMP | BPF_EXIT: + emit_sext_32(ctx, regmap[BPF_REG_0], true); + + if (i == ctx->prog->len - 1) + break; + + jmp_offset = epilogue_offset(ctx); + if (emit_uncond_jmp(ctx, jmp_offset) < 0) + goto toofar; + break; + + /* dst = imm64 */ + case BPF_LD | BPF_IMM | BPF_DW: + u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm; + + move_imm(ctx, dst, imm64, is32); + return 1; + + /* dst = *(size *)(src + off) */ + case BPF_LDX | BPF_MEM | BPF_B: + case BPF_LDX | BPF_MEM | BPF_H: + case BPF_LDX | BPF_MEM | BPF_W: + case BPF_LDX | BPF_MEM | BPF_DW: + switch (BPF_SIZE(code)) { + case BPF_B: + if (is_signed_imm12(off)) { + emit_insn(ctx, ldbu, dst, src, off); + } else { + move_imm(ctx, t1, off, is32); + emit_insn(ctx, ldxbu, dst, src, t1); + } + break; + case BPF_H: + if (is_signed_imm12(off)) { + emit_insn(ctx, ldhu, dst, src, off); + } else { + move_imm(ctx, t1, off, is32); + emit_insn(ctx, ldxhu, dst, src, t1); + } + break; 
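+		/* the W and DW sizes below can additionally use the
+		 * ldptr forms, whose 14-bit signed offsets reach further
+		 * than the 12-bit ld forms before an indexed load via
+		 * move_imm() is needed
+		 */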
+ case BPF_W: + if (is_signed_imm12(off)) { + emit_insn(ctx, ldwu, dst, src, off); + } else if (is_signed_imm14(off)) { + emit_insn(ctx, ldptrw, dst, src, off); + } else { + move_imm(ctx, t1, off, is32); + emit_insn(ctx, ldxwu, dst, src, t1); + } + break; + case BPF_DW: + if (is_signed_imm12(off)) { + emit_insn(ctx, ldd, dst, src, off); + } else if (is_signed_imm14(off)) { + emit_insn(ctx, ldptrd, dst, src, off); + } else { + move_imm(ctx, t1, off, is32); + emit_insn(ctx, ldxd, dst, src, t1); + } + break; + } + break; + + /* *(size *)(dst + off) = imm */ + case BPF_ST | BPF_MEM | BPF_B: + case BPF_ST | BPF_MEM | BPF_H: + case BPF_ST | BPF_MEM | BPF_W: + case BPF_ST | BPF_MEM | BPF_DW: + switch (BPF_SIZE(code)) { + case BPF_B: + move_imm(ctx, t1, imm, is32); + if (is_signed_imm12(off)) { + emit_insn(ctx, stb, t1, dst, off); + } else { + move_imm(ctx, t2, off, is32); + emit_insn(ctx, stxb, t1, dst, t2); + } + break; + case BPF_H: + move_imm(ctx, t1, imm, is32); + if (is_signed_imm12(off)) { + emit_insn(ctx, sth, t1, dst, off); + } else { + move_imm(ctx, t2, off, is32); + emit_insn(ctx, stxh, t1, dst, t2); + } + break; + case BPF_W: + move_imm(ctx, t1, imm, is32); + if (is_signed_imm12(off)) { + emit_insn(ctx, stw, t1, dst, off); + } else if (is_signed_imm14(off)) { + emit_insn(ctx, stptrw, t1, dst, off); + } else { + move_imm(ctx, t2, off, is32); + emit_insn(ctx, stxw, t1, dst, t2); + } + break; + case BPF_DW: + move_imm(ctx, t1, imm, is32); + if (is_signed_imm12(off)) { + emit_insn(ctx, std, t1, dst, off); + } else if (is_signed_imm14(off)) { + emit_insn(ctx, stptrd, t1, dst, off); + } else { + move_imm(ctx, t2, off, is32); + emit_insn(ctx, stxd, t1, dst, t2); + } + break; + } + break; + + /* *(size *)(dst + off) = src */ + case BPF_STX | BPF_MEM | BPF_B: + case BPF_STX | BPF_MEM | BPF_H: + case BPF_STX | BPF_MEM | BPF_W: + case BPF_STX | BPF_MEM | BPF_DW: + switch (BPF_SIZE(code)) { + case BPF_B: + if (is_signed_imm12(off)) { + emit_insn(ctx, stb, src, dst, off); + } else { + move_imm(ctx, t1, off, is32); + emit_insn(ctx, stxb, src, dst, t1); + } + break; + case BPF_H: + if (is_signed_imm12(off)) { + emit_insn(ctx, sth, src, dst, off); + } else { + move_imm(ctx, t1, off, is32); + emit_insn(ctx, stxh, src, dst, t1); + } + break; + case BPF_W: + if (is_signed_imm12(off)) { + emit_insn(ctx, stw, src, dst, off); + } else if (is_signed_imm14(off)) { + emit_insn(ctx, stptrw, src, dst, off); + } else { + move_imm(ctx, t1, off, is32); + emit_insn(ctx, stxw, src, dst, t1); + } + break; + case BPF_DW: + if (is_signed_imm12(off)) { + emit_insn(ctx, std, src, dst, off); + } else if (is_signed_imm14(off)) { + emit_insn(ctx, stptrd, src, dst, off); + } else { + move_imm(ctx, t1, off, is32); + emit_insn(ctx, stxd, src, dst, t1); + } + break; + } + break; + + case BPF_STX | BPF_ATOMIC | BPF_W: + case BPF_STX | BPF_ATOMIC | BPF_DW: + emit_atomic(insn, ctx); + break; + + default: + pr_err("bpf_jit: unknown opcode %02x\n", code); + return -EINVAL; + } + + return 0; + +toofar: + pr_info_once("bpf_jit: opcode %02x, jump too far\n", code); + return -E2BIG; +} + +static int build_body(struct jit_ctx *ctx, bool extra_pass) +{ + int i; + const struct bpf_prog *prog = ctx->prog; + + for (i = 0; i < prog->len; i++) { + const struct bpf_insn *insn = &prog->insnsi[i]; + int ret; + + if (ctx->image == NULL) + ctx->offset[i] = ctx->idx; + + ret = build_insn(insn, ctx, extra_pass); + if (ret > 0) { + i++; + if (ctx->image == NULL) + ctx->offset[i] = ctx->idx; + continue; + } + if (ret) + return ret; + } + + if 
(ctx->image == NULL)
+		ctx->offset[i] = ctx->idx;
+
+	return 0;
+}
+
+/* Fill space with break instructions */
+static void jit_fill_hole(void *area, unsigned int size)
+{
+	u32 *ptr;
+
+	/* We are guaranteed to have aligned memory */
+	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
+		*ptr++ = INSN_BREAK;
+}
+
+static int validate_code(struct jit_ctx *ctx)
+{
+	int i;
+	union loongarch_instruction insn;
+
+	for (i = 0; i < ctx->idx; i++) {
+		insn = ctx->image[i];
+		/* Check INSN_BREAK */
+		if (insn.word == INSN_BREAK)
+			return -1;
+	}
+
+	return 0;
+}
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+{
+	bool tmp_blinded = false, extra_pass = false;
+	u8 *image_ptr;
+	int image_size;
+	struct jit_ctx ctx;
+	struct jit_data *jit_data;
+	struct bpf_binary_header *header;
+	struct bpf_prog *tmp, *orig_prog = prog;
+
+	/*
+	 * If BPF JIT was not enabled then we must fall back to
+	 * the interpreter.
+	 */
+	if (!prog->jit_requested)
+		return orig_prog;
+
+	tmp = bpf_jit_blind_constants(prog);
+	/*
+	 * If blinding was requested and we failed during blinding,
+	 * we must fall back to the interpreter. Otherwise, we save
+	 * the new JITed code.
+	 */
+	if (IS_ERR(tmp))
+		return orig_prog;
+
+	if (tmp != prog) {
+		tmp_blinded = true;
+		prog = tmp;
+	}
+
+	jit_data = prog->aux->jit_data;
+	if (!jit_data) {
+		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+		if (!jit_data) {
+			prog = orig_prog;
+			goto out;
+		}
+		prog->aux->jit_data = jit_data;
+	}
+	if (jit_data->ctx.offset) {
+		ctx = jit_data->ctx;
+		image_ptr = jit_data->image;
+		header = jit_data->header;
+		extra_pass = true;
+		image_size = sizeof(u32) * ctx.idx;
+		goto skip_init_ctx;
+	}
+
+	memset(&ctx, 0, sizeof(ctx));
+	ctx.prog = prog;
+
+	ctx.offset = kvcalloc(prog->len + 1, sizeof(u32), GFP_KERNEL);
+	if (ctx.offset == NULL) {
+		prog = orig_prog;
+		goto out_offset;
+	}
+
+	/* 1. Initial fake pass to compute ctx->idx and set ctx->flags */
+	build_prologue(&ctx);
+	if (build_body(&ctx, extra_pass)) {
+		prog = orig_prog;
+		goto out_offset;
+	}
+	ctx.epilogue_offset = ctx.idx;
+	build_epilogue(&ctx);
+
+	/* Now we know the actual image size.
+	 * As each LoongArch instruction is 32 bits long, we translate
+	 * the number of JITed instructions into the size required to
+	 * store the JITed code.
+	 */
+	image_size = sizeof(u32) * ctx.idx;
+	/* Now we know the size of the structure to make */
+	header = bpf_jit_binary_alloc(image_size, &image_ptr,
+				      sizeof(u32), jit_fill_hole);
+	if (header == NULL) {
+		prog = orig_prog;
+		goto out_offset;
+	}
+
+	/* 2. Now, the actual pass to generate final JIT code */
+	ctx.image = (union loongarch_instruction *)image_ptr;
+
+skip_init_ctx:
+	ctx.idx = 0;
+
+	build_prologue(&ctx);
+	if (build_body(&ctx, extra_pass)) {
+		bpf_jit_binary_free(header);
+		prog = orig_prog;
+		goto out_offset;
+	}
+	build_epilogue(&ctx);
+
+	/* 3.
Extra pass to validate JITed code */ + if (validate_code(&ctx)) { + bpf_jit_binary_free(header); + prog = orig_prog; + goto out_offset; + } + + /* And we're done */ + if (bpf_jit_enable > 1) + bpf_jit_dump(prog->len, image_size, 2, ctx.image); + + /* Update the icache */ + flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx)); + + if (!prog->is_func || extra_pass) { + if (extra_pass && ctx.idx != jit_data->ctx.idx) { + pr_err_once("multi-func JIT bug %d != %d\n", + ctx.idx, jit_data->ctx.idx); + bpf_jit_binary_free(header); + prog->bpf_func = NULL; + prog->jited = 0; + prog->jited_len = 0; + goto out_offset; + } + bpf_jit_binary_lock_ro(header); + } else { + jit_data->ctx = ctx; + jit_data->image = image_ptr; + jit_data->header = header; + } + prog->jited = 1; + prog->jited_len = image_size; + prog->bpf_func = (void *)ctx.image; + + if (!prog->is_func || extra_pass) { + int i; + + /* offset[prog->len] is the size of program */ + for (i = 0; i <= prog->len; i++) + ctx.offset[i] *= LOONGARCH_INSN_SIZE; + bpf_prog_fill_jited_linfo(prog, ctx.offset + 1); + +out_offset: + kvfree(ctx.offset); + kfree(jit_data); + prog->aux->jit_data = NULL; + } + +out: + if (tmp_blinded) + bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog); + + out_offset = -1; + + return prog; +} diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h new file mode 100644 index 000000000000..e665ddb0aeb8 --- /dev/null +++ b/arch/loongarch/net/bpf_jit.h @@ -0,0 +1,282 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * BPF JIT compiler for LoongArch + * + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ +#include <linux/bpf.h> +#include <linux/filter.h> +#include <asm/cacheflush.h> +#include <asm/inst.h> + +struct jit_ctx { + const struct bpf_prog *prog; + unsigned int idx; + unsigned int flags; + unsigned int epilogue_offset; + u32 *offset; + union loongarch_instruction *image; + u32 stack_size; +}; + +struct jit_data { + struct bpf_binary_header *header; + u8 *image; + struct jit_ctx ctx; +}; + +#define emit_insn(ctx, func, ...) \ +do { \ + if (ctx->image != NULL) { \ + union loongarch_instruction *insn = &ctx->image[ctx->idx]; \ + emit_##func(insn, ##__VA_ARGS__); \ + } \ + ctx->idx++; \ +} while (0) + +#define is_signed_imm12(val) signed_imm_check(val, 12) +#define is_signed_imm14(val) signed_imm_check(val, 14) +#define is_signed_imm16(val) signed_imm_check(val, 16) +#define is_signed_imm26(val) signed_imm_check(val, 26) +#define is_signed_imm32(val) signed_imm_check(val, 32) +#define is_signed_imm52(val) signed_imm_check(val, 52) +#define is_unsigned_imm12(val) unsigned_imm_check(val, 12) + +static inline int bpf2la_offset(int bpf_insn, int off, const struct jit_ctx *ctx) +{ + /* BPF JMP offset is relative to the next instruction */ + bpf_insn++; + /* + * Whereas LoongArch branch instructions encode the offset + * from the branch itself, so we must subtract 1 from the + * instruction offset. 
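+	 * Illustrative example: the branch is the last native insn of the
+	 * current BPF insn, at index ctx->offset[bpf_insn] - 1; if that is
+	 * 90 and the target ctx->offset[bpf_insn + off] is 100, the
+	 * encoded offset is 10 instructions.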
+ */ + return (ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1)); +} + +static inline int epilogue_offset(const struct jit_ctx *ctx) +{ + int from = ctx->idx; + int to = ctx->epilogue_offset; + + return (to - from); +} + +/* Zero-extend 32 bits into 64 bits */ +static inline void emit_zext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32) +{ + if (!is32) + return; + + emit_insn(ctx, lu32id, reg, 0); +} + +/* Signed-extend 32 bits into 64 bits */ +static inline void emit_sext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32) +{ + if (!is32) + return; + + emit_insn(ctx, addiw, reg, reg, 0); +} + +static inline void move_imm(struct jit_ctx *ctx, enum loongarch_gpr rd, long imm, bool is32) +{ + long imm_11_0, imm_31_12, imm_51_32, imm_63_52, imm_51_0, imm_51_31; + + /* or rd, $zero, $zero */ + if (imm == 0) { + emit_insn(ctx, or, rd, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_ZERO); + return; + } + + /* addiw rd, $zero, imm_11_0 */ + if (is_signed_imm12(imm)) { + emit_insn(ctx, addiw, rd, LOONGARCH_GPR_ZERO, imm); + goto zext; + } + + /* ori rd, $zero, imm_11_0 */ + if (is_unsigned_imm12(imm)) { + emit_insn(ctx, ori, rd, LOONGARCH_GPR_ZERO, imm); + goto zext; + } + + /* lu52id rd, $zero, imm_63_52 */ + imm_63_52 = (imm >> 52) & 0xfff; + imm_51_0 = imm & 0xfffffffffffff; + if (imm_63_52 != 0 && imm_51_0 == 0) { + emit_insn(ctx, lu52id, rd, LOONGARCH_GPR_ZERO, imm_63_52); + return; + } + + /* lu12iw rd, imm_31_12 */ + imm_31_12 = (imm >> 12) & 0xfffff; + emit_insn(ctx, lu12iw, rd, imm_31_12); + + /* ori rd, rd, imm_11_0 */ + imm_11_0 = imm & 0xfff; + if (imm_11_0 != 0) + emit_insn(ctx, ori, rd, rd, imm_11_0); + + if (!is_signed_imm32(imm)) { + if (imm_51_0 != 0) { + /* + * If bit[51:31] is all 0 or all 1, + * it means bit[51:32] is sign extended by lu12iw, + * no need to call lu32id to do a new filled operation. 
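+	 * (Two illustrative cases: imm = 0x000fffff80000000 has bits 51:31
+	 * all 1, exactly what lu12iw's sign extension already produced,
+	 * which is the skippable case described above, while
+	 * imm = 0x7ff00000000 has mixed bits there and lu32id must
+	 * rewrite bits 51:32.)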
+ */ + imm_51_31 = (imm >> 31) & 0x1fffff; + if (imm_51_31 != 0 && imm_51_31 != 0x1fffff) { + /* lu32id rd, imm_51_32 */ + imm_51_32 = (imm >> 32) & 0xfffff; + emit_insn(ctx, lu32id, rd, imm_51_32); + } + } + + /* lu52id rd, rd, imm_63_52 */ + if (!is_signed_imm52(imm)) + emit_insn(ctx, lu52id, rd, rd, imm_63_52); + } + +zext: + emit_zext_32(ctx, rd, is32); +} + +static inline void move_reg(struct jit_ctx *ctx, enum loongarch_gpr rd, + enum loongarch_gpr rj) +{ + emit_insn(ctx, or, rd, rj, LOONGARCH_GPR_ZERO); +} + +static inline int invert_jmp_cond(u8 cond) +{ + switch (cond) { + case BPF_JEQ: + return BPF_JNE; + case BPF_JNE: + case BPF_JSET: + return BPF_JEQ; + case BPF_JGT: + return BPF_JLE; + case BPF_JGE: + return BPF_JLT; + case BPF_JLT: + return BPF_JGE; + case BPF_JLE: + return BPF_JGT; + case BPF_JSGT: + return BPF_JSLE; + case BPF_JSGE: + return BPF_JSLT; + case BPF_JSLT: + return BPF_JSGE; + case BPF_JSLE: + return BPF_JSGT; + } + return -1; +} + +static inline void cond_jmp_offset(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj, + enum loongarch_gpr rd, int jmp_offset) +{ + switch (cond) { + case BPF_JEQ: + /* PC += jmp_offset if rj == rd */ + emit_insn(ctx, beq, rj, rd, jmp_offset); + return; + case BPF_JNE: + case BPF_JSET: + /* PC += jmp_offset if rj != rd */ + emit_insn(ctx, bne, rj, rd, jmp_offset); + return; + case BPF_JGT: + /* PC += jmp_offset if rj > rd (unsigned) */ + emit_insn(ctx, bltu, rd, rj, jmp_offset); + return; + case BPF_JLT: + /* PC += jmp_offset if rj < rd (unsigned) */ + emit_insn(ctx, bltu, rj, rd, jmp_offset); + return; + case BPF_JGE: + /* PC += jmp_offset if rj >= rd (unsigned) */ + emit_insn(ctx, bgeu, rj, rd, jmp_offset); + return; + case BPF_JLE: + /* PC += jmp_offset if rj <= rd (unsigned) */ + emit_insn(ctx, bgeu, rd, rj, jmp_offset); + return; + case BPF_JSGT: + /* PC += jmp_offset if rj > rd (signed) */ + emit_insn(ctx, blt, rd, rj, jmp_offset); + return; + case BPF_JSLT: + /* PC += jmp_offset if rj < rd (signed) */ + emit_insn(ctx, blt, rj, rd, jmp_offset); + return; + case BPF_JSGE: + /* PC += jmp_offset if rj >= rd (signed) */ + emit_insn(ctx, bge, rj, rd, jmp_offset); + return; + case BPF_JSLE: + /* PC += jmp_offset if rj <= rd (signed) */ + emit_insn(ctx, bge, rd, rj, jmp_offset); + return; + } +} + +static inline void cond_jmp_offs26(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj, + enum loongarch_gpr rd, int jmp_offset) +{ + cond = invert_jmp_cond(cond); + cond_jmp_offset(ctx, cond, rj, rd, 2); + emit_insn(ctx, b, jmp_offset); +} + +static inline void uncond_jmp_offs26(struct jit_ctx *ctx, int jmp_offset) +{ + emit_insn(ctx, b, jmp_offset); +} + +static inline int emit_cond_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj, + enum loongarch_gpr rd, int jmp_offset) +{ + /* + * A large PC-relative jump offset may overflow the immediate field of + * the native conditional branch instruction, triggering a conversion + * to use an absolute jump instead; this jump sequence is particularly + * nasty. For now, use cond_jmp_offs26() directly to keep it simple. + * In the future we could add support for far branching, but the branch + * relaxation requires more than two passes to converge and the code + * seems too complex to understand; it is not clear whether it would be + * necessary or worth the extra pain. Anyway, just leave it as it is to + * enhance code readability now.
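+ * + * As an illustration (not part of the original patch): for BPF_JEQ, cond_jmp_offs26() emits "bne rj, rd, 2" followed by "b jmp_offset", so unequal operands hop over the unconditional branch while equal operands fall through to the 26-bit jump.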
+ */ + if (is_signed_imm26(jmp_offset)) { + cond_jmp_offs26(ctx, cond, rj, rd, jmp_offset); + return 0; + } + + return -EINVAL; +} + +static inline int emit_uncond_jmp(struct jit_ctx *ctx, int jmp_offset) +{ + if (is_signed_imm26(jmp_offset)) { + uncond_jmp_offs26(ctx, jmp_offset); + return 0; + } + + return -EINVAL; +} + +static inline int emit_tailcall_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj, + enum loongarch_gpr rd, int jmp_offset) +{ + if (is_signed_imm16(jmp_offset)) { + cond_jmp_offset(ctx, cond, rj, rd, jmp_offset); + return 0; + } + + return -EINVAL; +} diff --git a/arch/loongarch/pci/acpi.c b/arch/loongarch/pci/acpi.c index bf921487333c..8235ec92b41f 100644 --- a/arch/loongarch/pci/acpi.c +++ b/arch/loongarch/pci/acpi.c @@ -83,6 +83,69 @@ static int acpi_prepare_root_resources(struct acpi_pci_root_info *ci) } /* + * Create a PCI config space window + * - reserve mem region + * - alloc struct pci_config_window with space for all mappings + * - ioremap the config space + */ +static struct pci_config_window *arch_pci_ecam_create(struct device *dev, + struct resource *cfgres, struct resource *busr, const struct pci_ecam_ops *ops) +{ + int bsz, bus_range, err; + struct resource *conflict; + struct pci_config_window *cfg; + + if (busr->start > busr->end) + return ERR_PTR(-EINVAL); + + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) + return ERR_PTR(-ENOMEM); + + cfg->parent = dev; + cfg->ops = ops; + cfg->busr.start = busr->start; + cfg->busr.end = busr->end; + cfg->busr.flags = IORESOURCE_BUS; + bus_range = resource_size(cfgres) >> ops->bus_shift; + + bsz = 1 << ops->bus_shift; + + cfg->res.start = cfgres->start; + cfg->res.end = cfgres->end; + cfg->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; + cfg->res.name = "PCI ECAM"; + + conflict = request_resource_conflict(&iomem_resource, &cfg->res); + if (conflict) { + err = -EBUSY; + dev_err(dev, "can't claim ECAM area %pR: address conflict with %s %pR\n", + &cfg->res, conflict->name, conflict); + goto err_exit; + } + + cfg->win = pci_remap_cfgspace(cfgres->start, bus_range * bsz); + if (!cfg->win) + goto err_exit_iomap; + + if (ops->init) { + err = ops->init(cfg); + if (err) + goto err_exit; + } + dev_info(dev, "ECAM at %pR for %pR\n", &cfg->res, &cfg->busr); + + return cfg; + +err_exit_iomap: + err = -ENOMEM; + dev_err(dev, "ECAM ioremap failed\n"); +err_exit: + pci_ecam_free(cfg); + return ERR_PTR(err); +} + +/* * Lookup the bus range for the domain in MCFG, and set up config space * mapping. */ @@ -106,11 +169,16 @@ pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root) bus_shift = ecam_ops->bus_shift ? 
: 20; - cfgres.start = root->mcfg_addr + (bus_res->start << bus_shift); - cfgres.end = cfgres.start + (resource_size(bus_res) << bus_shift) - 1; - cfgres.flags = IORESOURCE_MEM; + if (bus_shift == 20) + cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops); + else { + cfgres.start = root->mcfg_addr + (bus_res->start << bus_shift); + cfgres.end = cfgres.start + (resource_size(bus_res) << bus_shift) - 1; + cfgres.end |= BIT(28) + (((PCI_CFG_SPACE_EXP_SIZE - 1) & 0xf00) << 16); + cfgres.flags = IORESOURCE_MEM; + cfg = arch_pci_ecam_create(dev, &cfgres, bus_res, ecam_ops); + } - cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops); if (IS_ERR(cfg)) { dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res, PTR_ERR(cfg)); return NULL; diff --git a/arch/loongarch/pci/pci.c b/arch/loongarch/pci/pci.c index e9b7c34d9b6d..2726639150bc 100644 --- a/arch/loongarch/pci/pci.c +++ b/arch/loongarch/pci/pci.c @@ -9,6 +9,7 @@ #include <linux/types.h> #include <linux/pci.h> #include <linux/vgaarb.h> +#include <asm/cacheflush.h> #include <asm/loongson.h> #define PCI_DEVICE_ID_LOONGSON_HOST 0x7a00 @@ -45,12 +46,10 @@ static int __init pcibios_init(void) unsigned int lsize; /* - * Set PCI cacheline size to that of the highest level in the + * Set PCI cacheline size to that of the last level in the * cache hierarchy. */ - lsize = cpu_dcache_line_size(); - lsize = cpu_vcache_line_size() ? : lsize; - lsize = cpu_scache_line_size() ? : lsize; + lsize = cpu_last_level_cache_line_size(); BUG_ON(!lsize); diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h index d86b4009880b..7a2da780830b 100644 --- a/arch/m68k/include/asm/processor.h +++ b/arch/m68k/include/asm/processor.h @@ -145,11 +145,6 @@ static inline void start_thread(struct pt_regs * regs, unsigned long pc, /* Forward declaration, a strange C thing */ struct task_struct; -/* Free all resources held by a thread. */ -static inline void release_thread(struct task_struct *dead_task) -{ -} - unsigned long __get_wchan(struct task_struct *p); void show_registers(struct pt_regs *regs); diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig index 51337fffb947..8150daf04a76 100644 --- a/arch/microblaze/configs/mmu_defconfig +++ b/arch/microblaze/configs/mmu_defconfig @@ -83,7 +83,7 @@ CONFIG_CIFS=y CONFIG_CIFS_STATS2=y CONFIG_ENCRYPTED_KEYS=y CONFIG_DMA_CMA=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_KGDB=y CONFIG_KGDB_TESTS=y CONFIG_KGDB_KDB=y diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h index 7e9e92670df3..4e193c7550df 100644 --- a/arch/microblaze/include/asm/processor.h +++ b/arch/microblaze/include/asm/processor.h @@ -63,11 +63,6 @@ struct thread_struct { .pgdir = swapper_pg_dir, \ } -/* Free all resources held by a thread. */ -static inline void release_thread(struct task_struct *dead_task) -{ -} - unsigned long __get_wchan(struct task_struct *p); /* The size allocated for kernel stacks. This _must_ be a power of two! 
*/ diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig index 91ce75edbfb4..22ffde722bb9 100644 --- a/arch/mips/configs/bcm47xx_defconfig +++ b/arch/mips/configs/bcm47xx_defconfig @@ -72,7 +72,7 @@ CONFIG_LEDS_TRIGGER_TIMER=y CONFIG_LEDS_TRIGGER_DEFAULT_ON=y CONFIG_CRC32_SARWATE=y CONFIG_PRINTK_TIME=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_DEBUG_INFO_REDUCED=y CONFIG_STRIP_ASM_SYMS=y CONFIG_DEBUG_FS=y diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig index a2311495af79..0bc2e3cc573b 100644 --- a/arch/mips/configs/cavium_octeon_defconfig +++ b/arch/mips/configs/cavium_octeon_defconfig @@ -161,7 +161,7 @@ CONFIG_CRYPTO_SHA1_OCTEON=m CONFIG_CRYPTO_SHA256_OCTEON=m CONFIG_CRYPTO_SHA512_OCTEON=m CONFIG_CRYPTO_DES=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y # CONFIG_SCHED_DEBUG is not set diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig index e1b49f77414a..11f08b6a3013 100644 --- a/arch/mips/configs/ci20_defconfig +++ b/arch/mips/configs/ci20_defconfig @@ -199,7 +199,7 @@ CONFIG_NLS_UTF8=y CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=32 CONFIG_PRINTK_TIME=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_STRIP_ASM_SYMS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y diff --git a/arch/mips/configs/cu1000-neo_defconfig b/arch/mips/configs/cu1000-neo_defconfig index 5bd55eb32fe5..1cbc9302e1d1 100644 --- a/arch/mips/configs/cu1000-neo_defconfig +++ b/arch/mips/configs/cu1000-neo_defconfig @@ -113,7 +113,7 @@ CONFIG_PRINTK_TIME=y CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15 CONFIG_CONSOLE_LOGLEVEL_QUIET=15 CONFIG_MESSAGE_LOGLEVEL_DEFAULT=7 -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_STRIP_ASM_SYMS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y diff --git a/arch/mips/configs/cu1830-neo_defconfig b/arch/mips/configs/cu1830-neo_defconfig index cc69688962e8..a0f73f3cd6ce 100644 --- a/arch/mips/configs/cu1830-neo_defconfig +++ b/arch/mips/configs/cu1830-neo_defconfig @@ -116,7 +116,7 @@ CONFIG_PRINTK_TIME=y CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15 CONFIG_CONSOLE_LOGLEVEL_QUIET=15 CONFIG_MESSAGE_LOGLEVEL_DEFAULT=7 -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_STRIP_ASM_SYMS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y diff --git a/arch/mips/configs/generic_defconfig b/arch/mips/configs/generic_defconfig index 48e4e251779b..c2cd2b181ef3 100644 --- a/arch/mips/configs/generic_defconfig +++ b/arch/mips/configs/generic_defconfig @@ -82,7 +82,7 @@ CONFIG_ROOT_NFS=y # CONFIG_XZ_DEC_ARMTHUMB is not set # CONFIG_XZ_DEC_SPARC is not set CONFIG_PRINTK_TIME=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_DEBUG_INFO_REDUCED=y CONFIG_DEBUG_FS=y # CONFIG_SCHED_DEBUG is not set diff --git a/arch/mips/configs/omega2p_defconfig b/arch/mips/configs/omega2p_defconfig index 9c34daf83563..91fe2822f897 100644 --- a/arch/mips/configs/omega2p_defconfig +++ b/arch/mips/configs/omega2p_defconfig @@ -113,7 +113,7 @@ CONFIG_CRYPTO_LZO=y CONFIG_CRC16=y CONFIG_XZ_DEC=y CONFIG_PRINTK_TIME=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_STRIP_ASM_SYMS=y CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y diff --git a/arch/mips/configs/qi_lb60_defconfig b/arch/mips/configs/qi_lb60_defconfig index b4448d0876d5..7e5d9741bd5d 100644 --- a/arch/mips/configs/qi_lb60_defconfig +++ b/arch/mips/configs/qi_lb60_defconfig @@ -166,7 +166,7 @@ CONFIG_NLS_UTF8=y 
CONFIG_FONTS=y CONFIG_FONT_SUN8x16=y CONFIG_PRINTK_TIME=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_STRIP_ASM_SYMS=y CONFIG_READABLE_ASM=y CONFIG_KGDB=y diff --git a/arch/mips/configs/vocore2_defconfig b/arch/mips/configs/vocore2_defconfig index 0722a3bf03c0..e47d4cc3353b 100644 --- a/arch/mips/configs/vocore2_defconfig +++ b/arch/mips/configs/vocore2_defconfig @@ -113,7 +113,7 @@ CONFIG_CRYPTO_LZO=y CONFIG_CRC16=y CONFIG_XZ_DEC=y CONFIG_PRINTK_TIME=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_STRIP_ASM_SYMS=y CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 4bb24579d12e..3fde1ff72bd1 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -344,9 +344,6 @@ struct thread_struct { struct task_struct; -/* Free all resources held by a thread. */ -#define release_thread(thread) do { } while(0) - /* * Do necessary setup to start up a newly executed thread. */ diff --git a/arch/nios2/configs/10m50_defconfig b/arch/nios2/configs/10m50_defconfig index a7967b4cfb6e..91c3fce4dc7f 100644 --- a/arch/nios2/configs/10m50_defconfig +++ b/arch/nios2/configs/10m50_defconfig @@ -74,4 +74,4 @@ CONFIG_NFS_FS=y CONFIG_NFS_V3_ACL=y CONFIG_ROOT_NFS=y CONFIG_SUNRPC_DEBUG=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y diff --git a/arch/nios2/configs/3c120_defconfig b/arch/nios2/configs/3c120_defconfig index 423a0c40a162..c42ad7e162a3 100644 --- a/arch/nios2/configs/3c120_defconfig +++ b/arch/nios2/configs/3c120_defconfig @@ -71,4 +71,4 @@ CONFIG_NFS_FS=y CONFIG_NFS_V3_ACL=y CONFIG_ROOT_NFS=y CONFIG_SUNRPC_DEBUG=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y diff --git a/arch/nios2/include/asm/processor.h b/arch/nios2/include/asm/processor.h index b8125dfbcad2..8916d93d5c2d 100644 --- a/arch/nios2/include/asm/processor.h +++ b/arch/nios2/include/asm/processor.h @@ -64,11 +64,6 @@ extern void start_thread(struct pt_regs *regs, unsigned long pc, struct task_struct; -/* Free all resources held by a thread. */ -static inline void release_thread(struct task_struct *dead_task) -{ -} - extern unsigned long __get_wchan(struct task_struct *p); #define task_pt_regs(p) \ diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h index aa1699c18add..ed9efb430afa 100644 --- a/arch/openrisc/include/asm/processor.h +++ b/arch/openrisc/include/asm/processor.h @@ -72,7 +72,6 @@ struct thread_struct { void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp); -void release_thread(struct task_struct *); unsigned long __get_wchan(struct task_struct *p); #define cpu_relax() barrier() diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c index 52dc983ddeba..f94b5ec06786 100644 --- a/arch/openrisc/kernel/process.c +++ b/arch/openrisc/kernel/process.c @@ -125,10 +125,6 @@ void show_regs(struct pt_regs *regs) show_registers(regs); } -void release_thread(struct task_struct *dead_task) -{ -} - /* * Copy the thread-specific (arch specific) info from the current * process to the new one p diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index 4621ceb51314..a608970b249a 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h @@ -266,9 +266,6 @@ on downward growing arches, it looks like this: struct mm_struct; -/* Free all resources held by a thread. 
*/ -extern void release_thread(struct task_struct *); - extern unsigned long __get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) ((tsk)->thread.regs.iaoq[0]) diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 7c37e09c92da..3db0e97e6c06 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -146,10 +146,6 @@ void flush_thread(void) */ } -void release_thread(struct task_struct *dead_task) -{ -} - /* * Idle thread support * diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 9bff4ab8242d..631802999d59 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -75,7 +75,6 @@ extern int _chrp_type; struct task_struct; void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp); -void release_thread(struct task_struct *); #define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET] #define TS_CKFPR(i) ckfp_state.fpr[i][TS_FPROFFSET] diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 37df0428e4fb..40834ef84f0c 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1655,11 +1655,6 @@ EXPORT_SYMBOL_GPL(set_thread_tidr); #endif /* CONFIG_PPC64 */ -void -release_thread(struct task_struct *t) -{ -} - /* * this gets called so that we can store coprocessor state into memory and * copy the current task into the new thread. diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index 19eedd4af4cd..94a0590c6971 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -65,11 +65,6 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset, extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp); -/* Free all resources held by a thread. */ -static inline void release_thread(struct task_struct *dead_task) -{ -} - extern unsigned long __get_wchan(struct task_struct *p); diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 4eb9b7875e13..87be3e855bf7 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -186,9 +186,6 @@ struct pt_regs; void show_registers(struct pt_regs *regs); void show_cacheinfo(struct seq_file *m); -/* Free all resources held by a thread. 
*/ -static inline void release_thread(struct task_struct *tsk) { } - /* Free guarded storage control block */ void guarded_storage_release(struct task_struct *tsk); void gs_load_bc_cb(struct pt_regs *regs); diff --git a/arch/sh/configs/apsh4a3a_defconfig b/arch/sh/configs/apsh4a3a_defconfig index 530498f18990..99931a13a74d 100644 --- a/arch/sh/configs/apsh4a3a_defconfig +++ b/arch/sh/configs/apsh4a3a_defconfig @@ -85,7 +85,7 @@ CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y # CONFIG_DEBUG_PREEMPT is not set # CONFIG_DEBUG_BUGVERBOSE is not set -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y # CONFIG_FTRACE is not set # CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig index 6abd9bd70106..d9fb124bf015 100644 --- a/arch/sh/configs/apsh4ad0a_defconfig +++ b/arch/sh/configs/apsh4ad0a_defconfig @@ -116,7 +116,7 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_SHIRQ=y CONFIG_DETECT_HUNG_TASK=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_DEBUG_VM=y CONFIG_DWARF_UNWINDER=y # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/edosk7760_defconfig b/arch/sh/configs/edosk7760_defconfig index d77f54e906fd..f427a95bcd21 100644 --- a/arch/sh/configs/edosk7760_defconfig +++ b/arch/sh/configs/edosk7760_defconfig @@ -107,7 +107,7 @@ CONFIG_DEBUG_SHIRQ=y CONFIG_DETECT_HUNG_TASK=y # CONFIG_SCHED_DEBUG is not set CONFIG_TIMER_STATS=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_CRYPTO=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y diff --git a/arch/sh/configs/magicpanelr2_defconfig b/arch/sh/configs/magicpanelr2_defconfig index 0989ed929540..ef1d98e35c91 100644 --- a/arch/sh/configs/magicpanelr2_defconfig +++ b/arch/sh/configs/magicpanelr2_defconfig @@ -84,7 +84,7 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y # CONFIG_SCHED_DEBUG is not set CONFIG_DEBUG_KOBJECT=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_FRAME_POINTER=y CONFIG_CRC_CCITT=m CONFIG_CRC16=m diff --git a/arch/sh/configs/polaris_defconfig b/arch/sh/configs/polaris_defconfig index 246408ec7462..f42e4867ddc1 100644 --- a/arch/sh/configs/polaris_defconfig +++ b/arch/sh/configs/polaris_defconfig @@ -79,5 +79,5 @@ CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_RT_MUTEXES=y CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_DEBUG_SPINLOCK_SLEEP=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_DEBUG_SG=y diff --git a/arch/sh/configs/r7780mp_defconfig b/arch/sh/configs/r7780mp_defconfig index f823cc6b18f9..e527cd60a191 100644 --- a/arch/sh/configs/r7780mp_defconfig +++ b/arch/sh/configs/r7780mp_defconfig @@ -101,7 +101,7 @@ CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y # CONFIG_DEBUG_PREEMPT is not set -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y diff --git a/arch/sh/configs/r7785rp_defconfig b/arch/sh/configs/r7785rp_defconfig index f96bc20d4b1a..a3f952a83d97 100644 --- a/arch/sh/configs/r7785rp_defconfig +++ b/arch/sh/configs/r7785rp_defconfig @@ -96,7 +96,7 @@ CONFIG_DEBUG_KERNEL=y # CONFIG_DEBUG_PREEMPT is not set CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_DEBUG_LOCKING_API_SELFTESTS=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_SH_STANDARD_BIOS=y CONFIG_DEBUG_STACK_USAGE=y CONFIG_4KSTACKS=y diff --git a/arch/sh/configs/rsk7203_defconfig b/arch/sh/configs/rsk7203_defconfig index 5a54e2b883f0..d00fafc021e1 100644 --- 
a/arch/sh/configs/rsk7203_defconfig +++ b/arch/sh/configs/rsk7203_defconfig @@ -112,7 +112,7 @@ CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_OBJECTS=y CONFIG_DEBUG_MUTEXES=y CONFIG_DEBUG_SPINLOCK_SLEEP=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_DEBUG_VM=y CONFIG_DEBUG_LIST=y CONFIG_DEBUG_SG=y diff --git a/arch/sh/configs/sdk7780_defconfig b/arch/sh/configs/sdk7780_defconfig index 7d6d32359848..41cb588ca99c 100644 --- a/arch/sh/configs/sdk7780_defconfig +++ b/arch/sh/configs/sdk7780_defconfig @@ -131,7 +131,7 @@ CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y # CONFIG_SCHED_DEBUG is not set CONFIG_TIMER_STATS=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_SH_STANDARD_BIOS=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_DES=y diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig index ee6d28ae08de..36356223d51c 100644 --- a/arch/sh/configs/se7712_defconfig +++ b/arch/sh/configs/se7712_defconfig @@ -93,7 +93,7 @@ CONFIG_CRAMFS=y CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_FRAME_POINTER=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m diff --git a/arch/sh/configs/se7721_defconfig b/arch/sh/configs/se7721_defconfig index bad921bc10f8..46c5a263a239 100644 --- a/arch/sh/configs/se7721_defconfig +++ b/arch/sh/configs/se7721_defconfig @@ -121,7 +121,7 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_932=y CONFIG_NLS_ISO8859_1=y CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_FRAME_POINTER=y # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRC_CCITT=y diff --git a/arch/sh/configs/sh2007_defconfig b/arch/sh/configs/sh2007_defconfig index 79f02f1c0dc8..259c69e3fa22 100644 --- a/arch/sh/configs/sh2007_defconfig +++ b/arch/sh/configs/sh2007_defconfig @@ -159,7 +159,7 @@ CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y # CONFIG_DETECT_SOFTLOCKUP is not set # CONFIG_SCHED_DEBUG is not set -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_FRAME_POINTER=y CONFIG_SH_STANDARD_BIOS=y CONFIG_CRYPTO_NULL=y diff --git a/arch/sh/configs/sh7757lcr_defconfig b/arch/sh/configs/sh7757lcr_defconfig index a2700ab165af..2579dc4bc0c8 100644 --- a/arch/sh/configs/sh7757lcr_defconfig +++ b/arch/sh/configs/sh7757lcr_defconfig @@ -80,6 +80,6 @@ CONFIG_NLS_ISO8859_1=y CONFIG_DEBUG_KERNEL=y # CONFIG_SCHED_DEBUG is not set # CONFIG_DEBUG_BUGVERBOSE is not set -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y # CONFIG_FTRACE is not set # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/sh7785lcr_32bit_defconfig b/arch/sh/configs/sh7785lcr_32bit_defconfig index 7eb3c10f28ad..781ff13227fc 100644 --- a/arch/sh/configs/sh7785lcr_32bit_defconfig +++ b/arch/sh/configs/sh7785lcr_32bit_defconfig @@ -141,7 +141,7 @@ CONFIG_DEBUG_KMEMLEAK=y CONFIG_DEBUG_SPINLOCK=y CONFIG_DEBUG_MUTEXES=y CONFIG_DEBUG_SPINLOCK_SLEEP=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_LATENCYTOP=y # CONFIG_FTRACE is not set CONFIG_CRYPTO_HMAC=y diff --git a/arch/sh/configs/urquell_defconfig b/arch/sh/configs/urquell_defconfig index be478f3148f2..8fc687c98fd1 100644 --- a/arch/sh/configs/urquell_defconfig +++ b/arch/sh/configs/urquell_defconfig @@ -138,7 +138,7 @@ CONFIG_PRINTK_TIME=y CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_FRAME_POINTER=y # CONFIG_FTRACE is not set # CONFIG_DUMP_CODE is not set diff --git 
a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index 45240ec6b85a..27aebf1e75a2 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -127,9 +127,6 @@ struct task_struct; extern void start_thread(struct pt_regs *regs, unsigned long new_pc, unsigned long new_sp); -/* Free all resources held by a thread. */ -extern void release_thread(struct task_struct *); - /* * FPU lazy state save handling. */ diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index a808843375e7..92b6649d4929 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -84,11 +84,6 @@ void flush_thread(void) #endif } -void release_thread(struct task_struct *dead_task) -{ - /* do nothing */ -} - asmlinkage void ret_from_fork(void); asmlinkage void ret_from_kernel_thread(void); diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h index b26c35336b51..ba8b70ffec08 100644 --- a/arch/sparc/include/asm/processor_32.h +++ b/arch/sparc/include/asm/processor_32.h @@ -80,9 +80,6 @@ static inline void start_thread(struct pt_regs * regs, unsigned long pc, : "memory"); } -/* Free all resources held by a thread. */ -#define release_thread(tsk) do { } while(0) - unsigned long __get_wchan(struct task_struct *); #define task_pt_regs(tsk) ((tsk)->thread.kregs) diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h index 89850dff6b03..2667f35d5ea5 100644 --- a/arch/sparc/include/asm/processor_64.h +++ b/arch/sparc/include/asm/processor_64.h @@ -176,9 +176,6 @@ do { \ regs->tstate &= ~TSTATE_PEF; \ } while (0) -/* Free all resources held by a thread. */ -#define release_thread(tsk) do { } while (0) - unsigned long __get_wchan(struct task_struct *task); #define task_pt_regs(tsk) (task_thread_info(tsk)->kregs) diff --git a/arch/um/configs/i386_defconfig b/arch/um/configs/i386_defconfig index fb51bd206dbe..c0162286d68b 100644 --- a/arch/um/configs/i386_defconfig +++ b/arch/um/configs/i386_defconfig @@ -69,5 +69,5 @@ CONFIG_JOLIET=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_NLS=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_DEBUG_KERNEL=y diff --git a/arch/um/configs/x86_64_defconfig b/arch/um/configs/x86_64_defconfig index 477b87317424..bec6e5d95687 100644 --- a/arch/um/configs/x86_64_defconfig +++ b/arch/um/configs/x86_64_defconfig @@ -67,6 +67,6 @@ CONFIG_JOLIET=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_NLS=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_FRAME_WARN=1024 CONFIG_DEBUG_KERNEL=y diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h index d0fc1862da95..bb5f06480da9 100644 --- a/arch/um/include/asm/processor-generic.h +++ b/arch/um/include/asm/processor-generic.h @@ -55,10 +55,6 @@ struct thread_struct { .request = { 0 } \ } -static inline void release_thread(struct task_struct *task) -{ -} - /* * User space process size: 3GB (default). */ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 356308c73951..67c9d73b31fa 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -587,9 +587,6 @@ static inline void load_sp0(unsigned long sp0) #endif /* CONFIG_PARAVIRT_XXL */ -/* Free all resources held by a thread. 
*/ -extern void release_thread(struct task_struct *); - unsigned long __get_wchan(struct task_struct *p); /* diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig index 85246dd9faa1..9b1ec5d8c99c 100644 --- a/arch/x86/xen/Kconfig +++ b/arch/x86/xen/Kconfig @@ -92,3 +92,12 @@ config XEN_DOM0 select X86_X2APIC if XEN_PVH && X86_64 help Support running as a Xen Dom0 guest. + +config XEN_PV_MSR_SAFE + bool "Always use safe MSR accesses in PV guests" + default y + depends on XEN_PV + help + Use safe (not faulting) MSR access functions even if the MSR access + should not fault anyway. + The default can be changed by using the "xen_msr_safe" boot parameter. diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c index 1c1ac418484b..c1cd28e915a3 100644 --- a/arch/x86/xen/enlighten_hvm.c +++ b/arch/x86/xen/enlighten_hvm.c @@ -212,7 +212,7 @@ static void __init xen_hvm_guest_init(void) return; if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) - virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc); + virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc); init_hvm_pv_info(); diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 9b1a58dda935..f82857e48815 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -108,11 +108,21 @@ struct tls_descs { */ static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc); +static __read_mostly bool xen_msr_safe = IS_ENABLED(CONFIG_XEN_PV_MSR_SAFE); + +static int __init parse_xen_msr_safe(char *str) +{ + if (str) + return strtobool(str, &xen_msr_safe); + return -EINVAL; +} +early_param("xen_msr_safe", parse_xen_msr_safe); + static void __init xen_pv_init_platform(void) { /* PV guests can't operate virtio devices without grants. */ if (IS_ENABLED(CONFIG_XEN_VIRTIO)) - virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc); + virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc); populate_extra_pte(fix_to_virt(FIX_PARAVIRT_BOOTMAP)); @@ -917,14 +927,18 @@ static void xen_write_cr4(unsigned long cr4) native_write_cr4(cr4); } -static u64 xen_read_msr_safe(unsigned int msr, int *err) +static u64 xen_do_read_msr(unsigned int msr, int *err) { - u64 val; + u64 val = 0; /* Avoid uninitialized value for safe variant. */ if (pmu_msr_read(msr, &val, err)) return val; - val = native_read_msr_safe(msr, err); + if (err) + val = native_read_msr_safe(msr, err); + else + val = native_read_msr(msr); + switch (msr) { case MSR_IA32_APICBASE: val &= ~X2APIC_ENABLE; @@ -933,23 +947,39 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err) return val; } -static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) +static void set_seg(unsigned int which, unsigned int low, unsigned int high, + int *err) { - int ret; - unsigned int which; - u64 base; + u64 base = ((u64)high << 32) | low; - ret = 0; + if (HYPERVISOR_set_segment_base(which, base) == 0) + return; + if (err) + *err = -EIO; + else + WARN(1, "Xen set_segment_base(%u, %llx) failed\n", which, base); +} + +/* + * Support write_msr_safe() and write_msr() semantics. + * With err == NULL write_msr() semantics are selected. + * Supplying an err pointer requires err to be pre-initialized with 0. 
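+ * + * For instance, xen_write_msr_safe() below passes a pre-zeroed &err and returns it, while xen_write_msr() passes &err only when xen_msr_safe is set and NULL otherwise, so in the latter case a failing access reaches the faulting native accessors instead of being silently swallowed.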
+ */ +static void xen_do_write_msr(unsigned int msr, unsigned int low, + unsigned int high, int *err) +{ switch (msr) { - case MSR_FS_BASE: which = SEGBASE_FS; goto set; - case MSR_KERNEL_GS_BASE: which = SEGBASE_GS_USER; goto set; - case MSR_GS_BASE: which = SEGBASE_GS_KERNEL; goto set; - - set: - base = ((u64)high << 32) | low; - if (HYPERVISOR_set_segment_base(which, base) != 0) - ret = -EIO; + case MSR_FS_BASE: + set_seg(SEGBASE_FS, low, high, err); + break; + + case MSR_KERNEL_GS_BASE: + set_seg(SEGBASE_GS_USER, low, high, err); + break; + + case MSR_GS_BASE: + set_seg(SEGBASE_GS_KERNEL, low, high, err); break; case MSR_STAR: @@ -965,31 +995,42 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) break; default: - if (!pmu_msr_write(msr, low, high, &ret)) - ret = native_write_msr_safe(msr, low, high); + if (!pmu_msr_write(msr, low, high, err)) { + if (err) + *err = native_write_msr_safe(msr, low, high); + else + native_write_msr(msr, low, high); + } } +} - return ret; +static u64 xen_read_msr_safe(unsigned int msr, int *err) +{ + return xen_do_read_msr(msr, err); +} + +static int xen_write_msr_safe(unsigned int msr, unsigned int low, + unsigned int high) +{ + int err = 0; + + xen_do_write_msr(msr, low, high, &err); + + return err; } static u64 xen_read_msr(unsigned int msr) { - /* - * This will silently swallow a #GP from RDMSR. It may be worth - * changing that. - */ int err; - return xen_read_msr_safe(msr, &err); + return xen_do_read_msr(msr, xen_msr_safe ? &err : NULL); } static void xen_write_msr(unsigned int msr, unsigned low, unsigned high) { - /* - * This will silently swallow a #GP from WRMSR. It may be worth - * changing that. - */ - xen_write_msr_safe(msr, low, high); + int err; + + xen_do_write_msr(msr, low, high, xen_msr_safe ? 
&err : NULL); } /* This is called once we have the cpu_possible_mask */ diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c index 21ecbe754cb2..68aff1382872 100644 --- a/arch/x86/xen/pmu.c +++ b/arch/x86/xen/pmu.c @@ -131,6 +131,10 @@ static inline uint32_t get_fam15h_addr(u32 addr) static inline bool is_amd_pmu_msr(unsigned int msr) { + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && + boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + return false; + if ((msr >= MSR_F15H_PERF_CTL && msr < MSR_F15H_PERF_CTR + (amd_num_counters * 2)) || (msr >= MSR_K7_EVNTSEL0 && @@ -140,10 +144,15 @@ static inline bool is_amd_pmu_msr(unsigned int msr) return false; } -static int is_intel_pmu_msr(u32 msr_index, int *type, int *index) +static bool is_intel_pmu_msr(u32 msr_index, int *type, int *index) { u32 msr_index_pmc; + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL && + boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR && + boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) + return false; + switch (msr_index) { case MSR_CORE_PERF_FIXED_CTR_CTRL: case MSR_IA32_DS_AREA: @@ -290,48 +299,52 @@ static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read) return false; } +static bool pmu_msr_chk_emulated(unsigned int msr, uint64_t *val, bool is_read, + bool *emul) +{ + int type, index; + + if (is_amd_pmu_msr(msr)) + *emul = xen_amd_pmu_emulate(msr, val, is_read); + else if (is_intel_pmu_msr(msr, &type, &index)) + *emul = xen_intel_pmu_emulate(msr, val, type, index, is_read); + else + return false; + + return true; +} + bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err) { - if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { - if (is_amd_pmu_msr(msr)) { - if (!xen_amd_pmu_emulate(msr, val, 1)) - *val = native_read_msr_safe(msr, err); - return true; - } - } else { - int type, index; + bool emulated; - if (is_intel_pmu_msr(msr, &type, &index)) { - if (!xen_intel_pmu_emulate(msr, val, type, index, 1)) - *val = native_read_msr_safe(msr, err); - return true; - } + if (!pmu_msr_chk_emulated(msr, val, true, &emulated)) + return false; + + if (!emulated) { + *val = err ? 
native_read_msr_safe(msr, err) + : native_read_msr(msr); } - return false; + return true; } bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err) { uint64_t val = ((uint64_t)high << 32) | low; + bool emulated; - if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { - if (is_amd_pmu_msr(msr)) { - if (!xen_amd_pmu_emulate(msr, &val, 0)) - *err = native_write_msr_safe(msr, low, high); - return true; - } - } else { - int type, index; + if (!pmu_msr_chk_emulated(msr, &val, false, &emulated)) + return false; - if (is_intel_pmu_msr(msr, &type, &index)) { - if (!xen_intel_pmu_emulate(msr, &val, type, index, 0)) - *err = native_write_msr_safe(msr, low, high); - return true; - } + if (!emulated) { + if (err) + *err = native_write_msr_safe(msr, low, high); + else + native_write_msr(msr, low, high); } - return false; + return true; } static unsigned long long xen_amd_read_pmc(int counter) diff --git a/arch/xtensa/configs/audio_kc705_defconfig b/arch/xtensa/configs/audio_kc705_defconfig index 3be62da8089b..ef0ebcfbccf9 100644 --- a/arch/xtensa/configs/audio_kc705_defconfig +++ b/arch/xtensa/configs/audio_kc705_defconfig @@ -120,7 +120,7 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_MAGIC_SYSRQ=y CONFIG_LOCKUP_DETECTOR=y # CONFIG_SCHED_DEBUG is not set diff --git a/arch/xtensa/configs/cadence_csp_defconfig b/arch/xtensa/configs/cadence_csp_defconfig index fc240737b14d..2665962d247a 100644 --- a/arch/xtensa/configs/cadence_csp_defconfig +++ b/arch/xtensa/configs/cadence_csp_defconfig @@ -100,7 +100,7 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_MAGIC_SYSRQ=y CONFIG_LOCKUP_DETECTOR=y # CONFIG_SCHED_DEBUG is not set diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig index e9d6b6f6eca1..236c7f23cc10 100644 --- a/arch/xtensa/configs/generic_kc705_defconfig +++ b/arch/xtensa/configs/generic_kc705_defconfig @@ -107,7 +107,7 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_MAGIC_SYSRQ=y CONFIG_LOCKUP_DETECTOR=y # CONFIG_SCHED_DEBUG is not set diff --git a/arch/xtensa/configs/nommu_kc705_defconfig b/arch/xtensa/configs/nommu_kc705_defconfig index fcb620ef3799..8263da9e078d 100644 --- a/arch/xtensa/configs/nommu_kc705_defconfig +++ b/arch/xtensa/configs/nommu_kc705_defconfig @@ -105,7 +105,7 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y # CONFIG_FRAME_POINTER is not set CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_VM=y diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig index a47c85638ec1..7bdffa3a69c6 100644 --- a/arch/xtensa/configs/smp_lx200_defconfig +++ b/arch/xtensa/configs/smp_lx200_defconfig @@ -111,7 +111,7 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_VM=y CONFIG_LOCKUP_DETECTOR=y diff --git a/arch/xtensa/configs/virt_defconfig b/arch/xtensa/configs/virt_defconfig index 6d1387dfa96f..98acb7191cb7 100644 --- a/arch/xtensa/configs/virt_defconfig +++ 
b/arch/xtensa/configs/virt_defconfig @@ -97,7 +97,7 @@ CONFIG_CRYPTO_DEV_VIRTIO=y CONFIG_FONTS=y CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_MAGIC_SYSRQ=y # CONFIG_SCHED_DEBUG is not set CONFIG_SCHEDSTATS=y diff --git a/arch/xtensa/configs/xip_kc705_defconfig b/arch/xtensa/configs/xip_kc705_defconfig index 062148e17135..1c3cebaaa71b 100644 --- a/arch/xtensa/configs/xip_kc705_defconfig +++ b/arch/xtensa/configs/xip_kc705_defconfig @@ -102,7 +102,7 @@ CONFIG_CRYPTO_LZO=y CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y -CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_MAGIC_SYSRQ=y CONFIG_DETECT_HUNG_TASK=y # CONFIG_SCHED_DEBUG is not set diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index b75ba16ec080..228e4dff5fb2 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h @@ -224,9 +224,6 @@ struct thread_struct { struct task_struct; struct mm_struct; -/* Free all resources held by a thread. */ -#define release_thread(thread) do { } while(0) - extern unsigned long __get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c index 296ea673d661..12b044151298 100644 --- a/drivers/base/platform-msi.c +++ b/drivers/base/platform-msi.c @@ -138,6 +138,7 @@ struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode, return domain; } +EXPORT_SYMBOL_GPL(platform_msi_create_irq_domain); static int platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec, irq_write_msi_msg_t write_msi_msg) diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c index fac8ff983aec..65fb9bad1577 100644 --- a/drivers/bcma/driver_gpio.c +++ b/drivers/bcma/driver_gpio.c @@ -115,7 +115,7 @@ static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id) return IRQ_NONE; for_each_set_bit(gpio, &irqs, gc->ngpio) - generic_handle_irq(irq_find_mapping(gc->irq.domain, gpio)); + generic_handle_domain_irq_safe(gc->irq.domain, gpio); bcma_chipco_gpio_polarity(cc, irqs, val & irqs); return IRQ_HANDLED; diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c index 64cb060d9d75..77a41151c921 100644 --- a/drivers/gpio/gpio-mlxbf2.c +++ b/drivers/gpio/gpio-mlxbf2.c @@ -273,10 +273,8 @@ static irqreturn_t mlxbf2_gpio_irq_handler(int irq, void *ptr) pending = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_CAUSE_EVTEN0); writel(pending, gs->gpio_io + YU_GPIO_CAUSE_OR_CLRCAUSE); - for_each_set_bit(level, &pending, gc->ngpio) { - int gpio_irq = irq_find_mapping(gc->irq.domain, level); - generic_handle_irq(gpio_irq); - } + for_each_set_bit(level, &pending, gc->ngpio) + generic_handle_domain_irq_safe(gc->irq.domain, level); return IRQ_RETVAL(pending); } diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 3b81a6d35a7b..076c779f776a 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -240,13 +240,13 @@ static void free_resource(struct intel_vgpu *vgpu) } static int alloc_resource(struct intel_vgpu *vgpu, - struct intel_vgpu_creation_params *param) + const struct intel_vgpu_config *conf) { struct intel_gvt *gvt = vgpu->gvt; unsigned long request, avail, max, taken; const char *item; - if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) { + if (!conf->low_mm || !conf->high_mm || !conf->fence) { gvt_vgpu_err("Invalid 
vGPU creation params\n"); return -EINVAL; } @@ -255,7 +255,7 @@ static int alloc_resource(struct intel_vgpu *vgpu, max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE; taken = gvt->gm.vgpu_allocated_low_gm_size; avail = max - taken; - request = MB_TO_BYTES(param->low_gm_sz); + request = conf->low_mm; if (request > avail) goto no_enough_resource; @@ -266,7 +266,7 @@ static int alloc_resource(struct intel_vgpu *vgpu, max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; taken = gvt->gm.vgpu_allocated_high_gm_size; avail = max - taken; - request = MB_TO_BYTES(param->high_gm_sz); + request = conf->high_mm; if (request > avail) goto no_enough_resource; @@ -277,16 +277,16 @@ static int alloc_resource(struct intel_vgpu *vgpu, max = gvt_fence_sz(gvt) - HOST_FENCE; taken = gvt->fence.vgpu_allocated_fence_num; avail = max - taken; - request = param->fence_sz; + request = conf->fence; if (request > avail) goto no_enough_resource; vgpu_fence_sz(vgpu) = request; - gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz); - gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz); - gvt->fence.vgpu_allocated_fence_num += param->fence_sz; + gvt->gm.vgpu_allocated_low_gm_size += conf->low_mm; + gvt->gm.vgpu_allocated_high_gm_size += conf->high_mm; + gvt->fence.vgpu_allocated_fence_num += conf->fence; return 0; no_enough_resource: @@ -340,11 +340,11 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu) * */ int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, - struct intel_vgpu_creation_params *param) + const struct intel_vgpu_config *conf) { int ret; - ret = alloc_resource(vgpu, param); + ret = alloc_resource(vgpu, conf); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 705689e64011..dbf8d7470b2c 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -36,6 +36,7 @@ #include <uapi/linux/pci_regs.h> #include <linux/kvm_host.h> #include <linux/vfio.h> +#include <linux/mdev.h> #include "i915_drv.h" #include "intel_gvt.h" @@ -172,6 +173,7 @@ struct intel_vgpu_submission { #define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries" struct intel_vgpu { + struct vfio_device vfio_device; struct intel_gvt *gvt; struct mutex vgpu_lock; int id; @@ -211,7 +213,6 @@ struct intel_vgpu { u32 scan_nonprivbb; - struct vfio_device vfio_device; struct vfio_region *region; int num_regions; struct eventfd_ctx *intx_trigger; @@ -294,15 +295,25 @@ struct intel_gvt_firmware { bool firmware_loaded; }; -#define NR_MAX_INTEL_VGPU_TYPES 20 -struct intel_vgpu_type { - char name[16]; - unsigned int avail_instance; - unsigned int low_gm_size; - unsigned int high_gm_size; +struct intel_vgpu_config { + unsigned int low_mm; + unsigned int high_mm; unsigned int fence; + + /* + * A vGPU with a weight of 8 will get twice as much GPU as a vGPU with + * a weight of 4 on a contended host; different vGPU types have + * different weights set. Legal weights range from 1 to 16.
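+ * + * For example (illustrative), one weight-8 vGPU sharing a contended host with two weight-4 vGPUs is scheduled in an 8:4:4 ratio, i.e. it receives about half of the GPU time.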
+ */ unsigned int weight; - enum intel_vgpu_edid resolution; + enum intel_vgpu_edid edid; + const char *name; +}; + +struct intel_vgpu_type { + struct mdev_type type; + char name[16]; + const struct intel_vgpu_config *conf; }; struct intel_gvt { @@ -326,6 +337,8 @@ struct intel_gvt { struct intel_gvt_workload_scheduler scheduler; struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES]; DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS); + struct mdev_parent parent; + struct mdev_type **mdev_types; struct intel_vgpu_type *types; unsigned int num_types; struct intel_vgpu *idle_vgpu; @@ -436,19 +449,8 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt); /* ring context size i.e. the first 0x50 dwords*/ #define RING_CTX_SIZE 320 -struct intel_vgpu_creation_params { - __u64 low_gm_sz; /* in MB */ - __u64 high_gm_sz; /* in MB */ - __u64 fence_sz; - __u64 resolution; - __s32 primary; - __u64 vgpu_id; - - __u32 weight; -}; - int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, - struct intel_vgpu_creation_params *param); + const struct intel_vgpu_config *conf); void intel_vgpu_reset_resource(struct intel_vgpu *vgpu); void intel_vgpu_free_resource(struct intel_vgpu *vgpu); void intel_vgpu_write_fence(struct intel_vgpu *vgpu, @@ -494,8 +496,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt); struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt); void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu); -struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, - struct intel_vgpu_type *type); +int intel_gvt_create_vgpu(struct intel_vgpu *vgpu, + const struct intel_vgpu_config *conf); void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); void intel_gvt_release_vgpu(struct intel_vgpu *vgpu); void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index e3cd58946477..7a45e5360caf 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -34,7 +34,6 @@ */ #include <linux/init.h> -#include <linux/device.h> #include <linux/mm.h> #include <linux/kthread.h> #include <linux/sched/mm.h> @@ -43,7 +42,6 @@ #include <linux/rbtree.h> #include <linux/spinlock.h> #include <linux/eventfd.h> -#include <linux/uuid.h> #include <linux/mdev.h> #include <linux/debugfs.h> @@ -115,117 +113,18 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot, struct kvm_page_track_notifier_node *node); -static ssize_t available_instances_show(struct mdev_type *mtype, - struct mdev_type_attribute *attr, - char *buf) +static ssize_t intel_vgpu_show_description(struct mdev_type *mtype, char *buf) { - struct intel_vgpu_type *type; - unsigned int num = 0; - struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt; - - type = &gvt->types[mtype_get_type_group_id(mtype)]; - if (!type) - num = 0; - else - num = type->avail_instance; - - return sprintf(buf, "%u\n", num); -} - -static ssize_t device_api_show(struct mdev_type *mtype, - struct mdev_type_attribute *attr, char *buf) -{ - return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING); -} - -static ssize_t description_show(struct mdev_type *mtype, - struct mdev_type_attribute *attr, char *buf) -{ - struct intel_vgpu_type *type; - struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt; - - type = &gvt->types[mtype_get_type_group_id(mtype)]; - if (!type) - return 0; + struct intel_vgpu_type *type = + container_of(mtype, struct intel_vgpu_type, type); return 
sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n" "fence: %d\nresolution: %s\n" "weight: %d\n", - BYTES_TO_MB(type->low_gm_size), - BYTES_TO_MB(type->high_gm_size), - type->fence, vgpu_edid_str(type->resolution), - type->weight); -} - -static ssize_t name_show(struct mdev_type *mtype, - struct mdev_type_attribute *attr, char *buf) -{ - struct intel_vgpu_type *type; - struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt; - - type = &gvt->types[mtype_get_type_group_id(mtype)]; - if (!type) - return 0; - - return sprintf(buf, "%s\n", type->name); -} - -static MDEV_TYPE_ATTR_RO(available_instances); -static MDEV_TYPE_ATTR_RO(device_api); -static MDEV_TYPE_ATTR_RO(description); -static MDEV_TYPE_ATTR_RO(name); - -static struct attribute *gvt_type_attrs[] = { - &mdev_type_attr_available_instances.attr, - &mdev_type_attr_device_api.attr, - &mdev_type_attr_description.attr, - &mdev_type_attr_name.attr, - NULL, -}; - -static struct attribute_group *gvt_vgpu_type_groups[] = { - [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL, -}; - -static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt) -{ - int i, j; - struct intel_vgpu_type *type; - struct attribute_group *group; - - for (i = 0; i < gvt->num_types; i++) { - type = &gvt->types[i]; - - group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL); - if (!group) - goto unwind; - - group->name = type->name; - group->attrs = gvt_type_attrs; - gvt_vgpu_type_groups[i] = group; - } - - return 0; - -unwind: - for (j = 0; j < i; j++) { - group = gvt_vgpu_type_groups[j]; - kfree(group); - } - - return -ENOMEM; -} - -static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt) -{ - int i; - struct attribute_group *group; - - for (i = 0; i < gvt->num_types; i++) { - group = gvt_vgpu_type_groups[i]; - gvt_vgpu_type_groups[i] = NULL; - kfree(group); - } + BYTES_TO_MB(type->conf->low_mm), + BYTES_TO_MB(type->conf->high_mm), + type->conf->fence, vgpu_edid_str(type->conf->edid), + type->conf->weight); } static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, @@ -1546,7 +1445,28 @@ static const struct attribute_group *intel_vgpu_groups[] = { NULL, }; +static int intel_vgpu_init_dev(struct vfio_device *vfio_dev) +{ + struct mdev_device *mdev = to_mdev_device(vfio_dev->dev); + struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); + struct intel_vgpu_type *type = + container_of(mdev->type, struct intel_vgpu_type, type); + + vgpu->gvt = kdev_to_i915(mdev->type->parent->dev)->gvt; + return intel_gvt_create_vgpu(vgpu, type->conf); +} + +static void intel_vgpu_release_dev(struct vfio_device *vfio_dev) +{ + struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); + + intel_gvt_destroy_vgpu(vgpu); + vfio_free_device(vfio_dev); +} + static const struct vfio_device_ops intel_vgpu_dev_ops = { + .init = intel_vgpu_init_dev, + .release = intel_vgpu_release_dev, .open_device = intel_vgpu_open_device, .close_device = intel_vgpu_close_device, .read = intel_vgpu_read, @@ -1558,35 +1478,28 @@ static const struct vfio_device_ops intel_vgpu_dev_ops = { static int intel_vgpu_probe(struct mdev_device *mdev) { - struct device *pdev = mdev_parent_dev(mdev); - struct intel_gvt *gvt = kdev_to_i915(pdev)->gvt; - struct intel_vgpu_type *type; struct intel_vgpu *vgpu; int ret; - type = &gvt->types[mdev_get_type_group_id(mdev)]; - if (!type) - return -EINVAL; - - vgpu = intel_gvt_create_vgpu(gvt, type); + vgpu = vfio_alloc_device(intel_vgpu, vfio_device, &mdev->dev, + &intel_vgpu_dev_ops); if (IS_ERR(vgpu)) { gvt_err("failed to create intel 
vgpu: %ld\n", PTR_ERR(vgpu)); return PTR_ERR(vgpu); } - vfio_init_group_dev(&vgpu->vfio_device, &mdev->dev, - &intel_vgpu_dev_ops); - dev_set_drvdata(&mdev->dev, vgpu); ret = vfio_register_emulated_iommu_dev(&vgpu->vfio_device); - if (ret) { - intel_gvt_destroy_vgpu(vgpu); - return ret; - } + if (ret) + goto out_put_vdev; gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", dev_name(mdev_dev(mdev))); return 0; + +out_put_vdev: + vfio_put_device(&vgpu->vfio_device); + return ret; } static void intel_vgpu_remove(struct mdev_device *mdev) @@ -1595,18 +1508,43 @@ static void intel_vgpu_remove(struct mdev_device *mdev) if (WARN_ON_ONCE(vgpu->attached)) return; - intel_gvt_destroy_vgpu(vgpu); + + vfio_unregister_group_dev(&vgpu->vfio_device); + vfio_put_device(&vgpu->vfio_device); +} + +static unsigned int intel_vgpu_get_available(struct mdev_type *mtype) +{ + struct intel_vgpu_type *type = + container_of(mtype, struct intel_vgpu_type, type); + struct intel_gvt *gvt = kdev_to_i915(mtype->parent->dev)->gvt; + unsigned int low_gm_avail, high_gm_avail, fence_avail; + + mutex_lock(&gvt->lock); + low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE - + gvt->gm.vgpu_allocated_low_gm_size; + high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE - + gvt->gm.vgpu_allocated_high_gm_size; + fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - + gvt->fence.vgpu_allocated_fence_num; + mutex_unlock(&gvt->lock); + + return min3(low_gm_avail / type->conf->low_mm, + high_gm_avail / type->conf->high_mm, + fence_avail / type->conf->fence); } static struct mdev_driver intel_vgpu_mdev_driver = { + .device_api = VFIO_DEVICE_API_PCI_STRING, .driver = { .name = "intel_vgpu_mdev", .owner = THIS_MODULE, .dev_groups = intel_vgpu_groups, }, - .probe = intel_vgpu_probe, - .remove = intel_vgpu_remove, - .supported_type_groups = gvt_vgpu_type_groups, + .probe = intel_vgpu_probe, + .remove = intel_vgpu_remove, + .get_available = intel_vgpu_get_available, + .show_description = intel_vgpu_show_description, }; int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) @@ -1904,8 +1842,7 @@ static void intel_gvt_clean_device(struct drm_i915_private *i915) if (drm_WARN_ON(&i915->drm, !gvt)) return; - mdev_unregister_device(i915->drm.dev); - intel_gvt_cleanup_vgpu_type_groups(gvt); + mdev_unregister_parent(&gvt->parent); intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); intel_gvt_clean_vgpu_types(gvt); @@ -2005,19 +1942,15 @@ static int intel_gvt_init_device(struct drm_i915_private *i915) intel_gvt_debugfs_init(gvt); - ret = intel_gvt_init_vgpu_type_groups(gvt); + ret = mdev_register_parent(&gvt->parent, i915->drm.dev, + &intel_vgpu_mdev_driver, + gvt->mdev_types, gvt->num_types); if (ret) goto out_destroy_idle_vgpu; - ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_driver); - if (ret) - goto out_cleanup_vgpu_type_groups; - gvt_dbg_core("gvt device initialization is done\n"); return 0; -out_cleanup_vgpu_type_groups: - intel_gvt_cleanup_vgpu_type_groups(gvt); out_destroy_idle_vgpu: intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); intel_gvt_debugfs_clean(gvt); diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 46da19b3225d..56c71474008a 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -73,24 +73,21 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE); } +/* + * vGPU type name is defined as GVTg_Vx_y which contains the physical GPU + * generation type (e.g V4 as BDW server, V5 as SKL 
server). + * + * Depending on the physical SKU resource, we might see vGPU types like + * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create different types of + * vGPU on the same physical GPU depending on the available resources. Each vGPU + * type will have a different number of avail_instance to indicate how + * many vGPU instances can be created for this type. + */ #define VGPU_MAX_WEIGHT 16 #define VGPU_WEIGHT(vgpu_num) \ (VGPU_MAX_WEIGHT / (vgpu_num)) -static const struct { - unsigned int low_mm; - unsigned int high_mm; - unsigned int fence; - - /* A vGPU with a weight of 8 will get twice as much GPU as a vGPU - * with a weight of 4 on a contended host, different vGPU type has - * different weight set. Legal weights range from 1 to 16. - */ - unsigned int weight; - enum intel_vgpu_edid edid; - const char *name; -} vgpu_types[] = { -/* Fixed vGPU type table */ +static const struct intel_vgpu_config intel_vgpu_configs[] = { { MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" }, { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" }, { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" }, @@ -106,102 +103,58 @@ static const struct { */ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) { - unsigned int num_types; - unsigned int i, low_avail, high_avail; - unsigned int min_low; - - /* vGPU type name is defined as GVTg_Vx_y which contains - * physical GPU generation type (e.g V4 as BDW server, V5 as - * SKL server). - * - * Depend on physical SKU resource, might see vGPU types like - * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create - * different types of vGPU on same physical GPU depending on - * available resource. Each vGPU type will have "avail_instance" - * to indicate how many vGPU instance can be created for this - * type. - * - */ - low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE; - high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; - num_types = ARRAY_SIZE(vgpu_types); + unsigned int low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE; + unsigned int high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; + unsigned int num_types = ARRAY_SIZE(intel_vgpu_configs); + unsigned int i; gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type), GFP_KERNEL); if (!gvt->types) return -ENOMEM; - min_low = MB_TO_BYTES(32); - for (i = 0; i < num_types; ++i) { - if (low_avail / vgpu_types[i].low_mm == 0) - break; - - gvt->types[i].low_gm_size = vgpu_types[i].low_mm; - gvt->types[i].high_gm_size = vgpu_types[i].high_mm; - gvt->types[i].fence = vgpu_types[i].fence; + gvt->mdev_types = kcalloc(num_types, sizeof(*gvt->mdev_types), + GFP_KERNEL); + if (!gvt->mdev_types) + goto out_free_types; - if (vgpu_types[i].weight < 1 || - vgpu_types[i].weight > VGPU_MAX_WEIGHT) - return -EINVAL; + for (i = 0; i < num_types; ++i) { + const struct intel_vgpu_config *conf = &intel_vgpu_configs[i]; - gvt->types[i].weight = vgpu_types[i].weight; - gvt->types[i].resolution = vgpu_types[i].edid; - gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm, - high_avail / vgpu_types[i].high_mm); + if (low_avail / conf->low_mm == 0) + break; + if (conf->weight < 1 || conf->weight > VGPU_MAX_WEIGHT) + goto out_free_mdev_types; - if (GRAPHICS_VER(gvt->gt->i915) == 8) - sprintf(gvt->types[i].name, "GVTg_V4_%s", - vgpu_types[i].name); - else if (GRAPHICS_VER(gvt->gt->i915) == 9) - sprintf(gvt->types[i].name, "GVTg_V5_%s", - vgpu_types[i].name); + sprintf(gvt->types[i].name, "GVTg_V%u_%s", + GRAPHICS_VER(gvt->gt->i915) == 8 ? 
4 : 5, conf->name); + gvt->types[i].conf = conf; gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n", i, gvt->types[i].name, - gvt->types[i].avail_instance, - gvt->types[i].low_gm_size, - gvt->types[i].high_gm_size, gvt->types[i].fence, - gvt->types[i].weight, - vgpu_edid_str(gvt->types[i].resolution)); + min(low_avail / conf->low_mm, + high_avail / conf->high_mm), + conf->low_mm, conf->high_mm, conf->fence, + conf->weight, vgpu_edid_str(conf->edid)); + + gvt->mdev_types[i] = &gvt->types[i].type; + gvt->mdev_types[i]->sysfs_name = gvt->types[i].name; } gvt->num_types = i; return 0; -} -void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt) -{ +out_free_mdev_types: + kfree(gvt->mdev_types); +out_free_types: kfree(gvt->types); + return -EINVAL; } -static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt) +void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt) { - int i; - unsigned int low_gm_avail, high_gm_avail, fence_avail; - unsigned int low_gm_min, high_gm_min, fence_min; - - /* Need to depend on maxium hw resource size but keep on - * static config for now. - */ - low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE - - gvt->gm.vgpu_allocated_low_gm_size; - high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE - - gvt->gm.vgpu_allocated_high_gm_size; - fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - - gvt->fence.vgpu_allocated_fence_num; - - for (i = 0; i < gvt->num_types; i++) { - low_gm_min = low_gm_avail / gvt->types[i].low_gm_size; - high_gm_min = high_gm_avail / gvt->types[i].high_gm_size; - fence_min = fence_avail / gvt->types[i].fence; - gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min), - fence_min); - - gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n", - i, gvt->types[i].name, - gvt->types[i].avail_instance, gvt->types[i].low_gm_size, - gvt->types[i].high_gm_size, gvt->types[i].fence); - } + kfree(gvt->mdev_types); + kfree(gvt->types); } /** @@ -298,12 +251,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) intel_vgpu_clean_mmio(vgpu); intel_vgpu_dmabuf_cleanup(vgpu); mutex_unlock(&vgpu->vgpu_lock); - - mutex_lock(&gvt->lock); - intel_gvt_update_vgpu_types(gvt); - mutex_unlock(&gvt->lock); - - vfree(vgpu); } #define IDLE_VGPU_IDR 0 @@ -363,42 +310,38 @@ void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu) vfree(vgpu); } -static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, - struct intel_vgpu_creation_params *param) +int intel_gvt_create_vgpu(struct intel_vgpu *vgpu, + const struct intel_vgpu_config *conf) { + struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *dev_priv = gvt->gt->i915; - struct intel_vgpu *vgpu; int ret; - gvt_dbg_core("low %llu MB high %llu MB fence %llu\n", - param->low_gm_sz, param->high_gm_sz, - param->fence_sz); - - vgpu = vzalloc(sizeof(*vgpu)); - if (!vgpu) - return ERR_PTR(-ENOMEM); + gvt_dbg_core("low %u MB high %u MB fence %u\n", + BYTES_TO_MB(conf->low_mm), BYTES_TO_MB(conf->high_mm), + conf->fence); + mutex_lock(&gvt->lock); ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU, GFP_KERNEL); if (ret < 0) - goto out_free_vgpu; + goto out_unlock; vgpu->id = ret; - vgpu->gvt = gvt; - vgpu->sched_ctl.weight = param->weight; + vgpu->sched_ctl.weight = conf->weight; mutex_init(&vgpu->vgpu_lock); mutex_init(&vgpu->dmabuf_lock); INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head); INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL); idr_init_base(&vgpu->object_idr, 1); - intel_vgpu_init_cfg_space(vgpu, param->primary); +
intel_vgpu_init_cfg_space(vgpu, 1); vgpu->d3_entered = false; ret = intel_vgpu_init_mmio(vgpu); if (ret) goto out_clean_idr; - ret = intel_vgpu_alloc_resource(vgpu, param); + ret = intel_vgpu_alloc_resource(vgpu, conf); if (ret) goto out_clean_vgpu_mmio; @@ -412,7 +355,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, if (ret) goto out_clean_gtt; - ret = intel_vgpu_init_display(vgpu, param->resolution); + ret = intel_vgpu_init_display(vgpu, conf->edid); if (ret) goto out_clean_opregion; @@ -437,7 +380,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, if (ret) goto out_clean_sched_policy; - return vgpu; + intel_gvt_update_reg_whitelist(vgpu); + mutex_unlock(&gvt->lock); + return 0; out_clean_sched_policy: intel_vgpu_clean_sched_policy(vgpu); @@ -455,48 +400,9 @@ out_clean_vgpu_mmio: intel_vgpu_clean_mmio(vgpu); out_clean_idr: idr_remove(&gvt->vgpu_idr, vgpu->id); -out_free_vgpu: - vfree(vgpu); - return ERR_PTR(ret); -} - -/** - * intel_gvt_create_vgpu - create a virtual GPU - * @gvt: GVT device - * @type: type of the vGPU to create - * - * This function is called when user wants to create a virtual GPU. - * - * Returns: - * pointer to intel_vgpu, error pointer if failed. - */ -struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, - struct intel_vgpu_type *type) -{ - struct intel_vgpu_creation_params param; - struct intel_vgpu *vgpu; - - param.primary = 1; - param.low_gm_sz = type->low_gm_size; - param.high_gm_sz = type->high_gm_size; - param.fence_sz = type->fence; - param.weight = type->weight; - param.resolution = type->resolution; - - /* XXX current param based on MB */ - param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz); - param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz); - - mutex_lock(&gvt->lock); - vgpu = __intel_gvt_create_vgpu(gvt, ¶m); - if (!IS_ERR(vgpu)) { - /* calculate left instance change for types */ - intel_gvt_update_vgpu_types(gvt); - intel_gvt_update_reg_whitelist(vgpu); - } +out_unlock: mutex_unlock(&gvt->lock); - - return vgpu; + return ret; } /** diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index eb5ea5b69cfa..7ef9f5e696d3 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -3,7 +3,7 @@ menu "IRQ chip support" config IRQCHIP def_bool y - depends on OF_IRQ + depends on (OF_IRQ || ACPI_GENERIC_GSI) config ARM_GIC bool @@ -481,6 +481,21 @@ config IMX_INTMUX help Support for the i.MX INTMUX interrupt multiplexer. +config IMX_MU_MSI + tristate "i.MX MU used as MSI controller" + depends on OF && HAS_IOMEM + depends on ARCH_MXC || COMPILE_TEST + default m if ARCH_MXC + select IRQ_DOMAIN + select IRQ_DOMAIN_HIERARCHY + select GENERIC_MSI_IRQ_DOMAIN + help + Provide a driver for the i.MX Messaging Unit block used as a + CPU-to-CPU MSI controller. This requires a specially crafted DT + to make use of this driver. 
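To make the resource arithmetic in the GVT-g type rework above concrete: each entry of the fixed type table consumes a slice of the low and high graphics memory left after the host reservation, and the number of creatable instances is the minimum of the two per-resource quotients. A minimal userspace sketch of that computation follows; the table values mirror the diff, but the leftover aperture sizes are hypothetical numbers chosen only for illustration.

#include <stdio.h>
#include <stdint.h>

#define MB_TO_BYTES(mb) ((uint64_t)(mb) << 20)

struct vgpu_config {
	uint64_t low_mm;   /* low (mappable) GM consumed per instance */
	uint64_t high_mm;  /* high (hidden) GM consumed per instance */
	const char *name;
};

static const struct vgpu_config configs[] = {
	{ MB_TO_BYTES(64),  MB_TO_BYTES(384),  "8" },
	{ MB_TO_BYTES(128), MB_TO_BYTES(512),  "4" },
	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), "2" },
};

int main(void)
{
	/* Hypothetical GM left after HOST_LOW_GM_SIZE/HOST_HIGH_GM_SIZE. */
	uint64_t low_avail  = MB_TO_BYTES(512);
	uint64_t high_avail = MB_TO_BYTES(2048);

	for (size_t i = 0; i < sizeof(configs) / sizeof(configs[0]); i++) {
		uint64_t lo = low_avail / configs[i].low_mm;
		uint64_t hi = high_avail / configs[i].high_mm;
		uint64_t avail = lo < hi ? lo : hi; /* min(), as in the driver */

		printf("GVTg_V4_%s: avail_instance = %llu\n",
		       configs[i].name, (unsigned long long)avail);
	}
	return 0;
}

With the assumed 512 MB / 2048 MB leftovers this prints 5, 4 and 2 instances: the high-GM quotient caps the "8" type, while the other two are limited equally by both resources.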
+ + If unsure, say N + config LS1X_IRQ bool "Loongson-1 Interrupt Controller" depends on MACH_LOONGSON32 diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index b6acbca2248b..87b49a10962c 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -99,6 +99,7 @@ obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o +obj-$(CONFIG_IMX_MU_MSI) += irq-imx-mu-msi.o obj-$(CONFIG_MADERA_IRQ) += irq-madera.o obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 262658fd5f9e..34d58567b78d 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -978,7 +978,7 @@ static int __gic_update_rdist_properties(struct redist_region *region, u64 typer = gic_read_typer(ptr + GICR_TYPER); u32 ctlr = readl_relaxed(ptr + GICR_CTLR); - /* Boot-time cleanip */ + /* Boot-time cleanup */ if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) { u64 val; diff --git a/drivers/irqchip/irq-imx-mu-msi.c b/drivers/irqchip/irq-imx-mu-msi.c new file mode 100644 index 000000000000..229039eda1b1 --- /dev/null +++ b/drivers/irqchip/irq-imx-mu-msi.c @@ -0,0 +1,453 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Freescale MU used as MSI controller + * + * Copyright (c) 2018 Pengutronix, Oleksij Rempel <[email protected]> + * Copyright 2022 NXP + * Frank Li <[email protected]> + * Peng Fan <[email protected]> + * + * Based on drivers/mailbox/imx-mailbox.c + */ + +#include <linux/clk.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/pm_runtime.h> +#include <linux/pm_domain.h> +#include <linux/spinlock.h> + +#define IMX_MU_CHANS 4 + +enum imx_mu_xcr { + IMX_MU_GIER, + IMX_MU_GCR, + IMX_MU_TCR, + IMX_MU_RCR, + IMX_MU_xCR_MAX, +}; + +enum imx_mu_xsr { + IMX_MU_SR, + IMX_MU_GSR, + IMX_MU_TSR, + IMX_MU_RSR, + IMX_MU_xSR_MAX +}; + +enum imx_mu_type { + IMX_MU_V2 = BIT(1), +}; + +/* Receive Interrupt Enable */ +#define IMX_MU_xCR_RIEn(data, x) ((data->cfg->type) & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x)))) +#define IMX_MU_xSR_RFn(data, x) ((data->cfg->type) & IMX_MU_V2 ? 
BIT(x) : BIT(24 + (3 - (x)))) + +struct imx_mu_dcfg { + enum imx_mu_type type; + u32 xTR; /* Transmit Register0 */ + u32 xRR; /* Receive Register0 */ + u32 xSR[IMX_MU_xSR_MAX]; /* Status Registers */ + u32 xCR[IMX_MU_xCR_MAX]; /* Control Registers */ +}; + +struct imx_mu_msi { + raw_spinlock_t lock; + struct irq_domain *msi_domain; + void __iomem *regs; + phys_addr_t msiir_addr; + const struct imx_mu_dcfg *cfg; + unsigned long used; + struct clk *clk; +}; + +static void imx_mu_write(struct imx_mu_msi *msi_data, u32 val, u32 offs) +{ + iowrite32(val, msi_data->regs + offs); +} + +static u32 imx_mu_read(struct imx_mu_msi *msi_data, u32 offs) +{ + return ioread32(msi_data->regs + offs); +} + +static u32 imx_mu_xcr_rmw(struct imx_mu_msi *msi_data, enum imx_mu_xcr type, u32 set, u32 clr) +{ + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&msi_data->lock, flags); + val = imx_mu_read(msi_data, msi_data->cfg->xCR[type]); + val &= ~clr; + val |= set; + imx_mu_write(msi_data, val, msi_data->cfg->xCR[type]); + raw_spin_unlock_irqrestore(&msi_data->lock, flags); + + return val; +} + +static void imx_mu_msi_parent_mask_irq(struct irq_data *data) +{ + struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data); + + imx_mu_xcr_rmw(msi_data, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(msi_data, data->hwirq)); +} + +static void imx_mu_msi_parent_unmask_irq(struct irq_data *data) +{ + struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data); + + imx_mu_xcr_rmw(msi_data, IMX_MU_RCR, IMX_MU_xCR_RIEn(msi_data, data->hwirq), 0); +} + +static void imx_mu_msi_parent_ack_irq(struct irq_data *data) +{ + struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data); + + imx_mu_read(msi_data, msi_data->cfg->xRR + data->hwirq * 4); +} + +static struct irq_chip imx_mu_msi_irq_chip = { + .name = "MU-MSI", + .irq_ack = irq_chip_ack_parent, +}; + +static struct msi_domain_ops imx_mu_msi_irq_ops = { +}; + +static struct msi_domain_info imx_mu_msi_domain_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS), + .ops = &imx_mu_msi_irq_ops, + .chip = &imx_mu_msi_irq_chip, +}; + +static void imx_mu_msi_parent_compose_msg(struct irq_data *data, + struct msi_msg *msg) +{ + struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(data); + u64 addr = msi_data->msiir_addr + 4 * data->hwirq; + + msg->address_hi = upper_32_bits(addr); + msg->address_lo = lower_32_bits(addr); + msg->data = data->hwirq; +} + +static int imx_mu_msi_parent_set_affinity(struct irq_data *irq_data, + const struct cpumask *mask, bool force) +{ + return -EINVAL; +} + +static struct irq_chip imx_mu_msi_parent_chip = { + .name = "MU", + .irq_mask = imx_mu_msi_parent_mask_irq, + .irq_unmask = imx_mu_msi_parent_unmask_irq, + .irq_ack = imx_mu_msi_parent_ack_irq, + .irq_compose_msi_msg = imx_mu_msi_parent_compose_msg, + .irq_set_affinity = imx_mu_msi_parent_set_affinity, +}; + +static int imx_mu_msi_domain_irq_alloc(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs, + void *args) +{ + struct imx_mu_msi *msi_data = domain->host_data; + unsigned long flags; + int pos, err = 0; + + WARN_ON(nr_irqs != 1); + + raw_spin_lock_irqsave(&msi_data->lock, flags); + pos = find_first_zero_bit(&msi_data->used, IMX_MU_CHANS); + if (pos < IMX_MU_CHANS) + __set_bit(pos, &msi_data->used); + else + err = -ENOSPC; + raw_spin_unlock_irqrestore(&msi_data->lock, flags); + + if (err) + return err; + + irq_domain_set_info(domain, virq, pos, + &imx_mu_msi_parent_chip, msi_data, + handle_edge_irq, NULL, NULL); + return 0; +} + +static void 
imx_mu_msi_domain_irq_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct imx_mu_msi *msi_data = irq_data_get_irq_chip_data(d); + unsigned long flags; + + raw_spin_lock_irqsave(&msi_data->lock, flags); + __clear_bit(d->hwirq, &msi_data->used); + raw_spin_unlock_irqrestore(&msi_data->lock, flags); +} + +static const struct irq_domain_ops imx_mu_msi_domain_ops = { + .alloc = imx_mu_msi_domain_irq_alloc, + .free = imx_mu_msi_domain_irq_free, +}; + +static void imx_mu_msi_irq_handler(struct irq_desc *desc) +{ + struct imx_mu_msi *msi_data = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + u32 status; + int i; + + status = imx_mu_read(msi_data, msi_data->cfg->xSR[IMX_MU_RSR]); + + chained_irq_enter(chip, desc); + for (i = 0; i < IMX_MU_CHANS; i++) { + if (status & IMX_MU_xSR_RFn(msi_data, i)) + generic_handle_domain_irq(msi_data->msi_domain, i); + } + chained_irq_exit(chip, desc); +} + +static int imx_mu_msi_domains_init(struct imx_mu_msi *msi_data, struct device *dev) +{ + struct fwnode_handle *fwnodes = dev_fwnode(dev); + struct irq_domain *parent; + + /* Initialize MSI domain parent */ + parent = irq_domain_create_linear(fwnodes, + IMX_MU_CHANS, + &imx_mu_msi_domain_ops, + msi_data); + if (!parent) { + dev_err(dev, "failed to create IRQ domain\n"); + return -ENOMEM; + } + + irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS); + + msi_data->msi_domain = platform_msi_create_irq_domain(fwnodes, + &imx_mu_msi_domain_info, + parent); + + if (!msi_data->msi_domain) { + dev_err(dev, "failed to create MSI domain\n"); + irq_domain_remove(parent); + return -ENOMEM; + } + + irq_domain_set_pm_device(msi_data->msi_domain, dev); + + return 0; +} + +/* Register offset of different version MU IP */ +static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = { + .type = 0, + .xTR = 0x0, + .xRR = 0x10, + .xSR = { + [IMX_MU_SR] = 0x20, + [IMX_MU_GSR] = 0x20, + [IMX_MU_TSR] = 0x20, + [IMX_MU_RSR] = 0x20, + }, + .xCR = { + [IMX_MU_GIER] = 0x24, + [IMX_MU_GCR] = 0x24, + [IMX_MU_TCR] = 0x24, + [IMX_MU_RCR] = 0x24, + }, +}; + +static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = { + .type = 0, + .xTR = 0x20, + .xRR = 0x40, + .xSR = { + [IMX_MU_SR] = 0x60, + [IMX_MU_GSR] = 0x60, + [IMX_MU_TSR] = 0x60, + [IMX_MU_RSR] = 0x60, + }, + .xCR = { + [IMX_MU_GIER] = 0x64, + [IMX_MU_GCR] = 0x64, + [IMX_MU_TCR] = 0x64, + [IMX_MU_RCR] = 0x64, + }, +}; + +static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = { + .type = IMX_MU_V2, + .xTR = 0x200, + .xRR = 0x280, + .xSR = { + [IMX_MU_SR] = 0xC, + [IMX_MU_GSR] = 0x118, + [IMX_MU_TSR] = 0x124, + [IMX_MU_RSR] = 0x12C, + }, + .xCR = { + [IMX_MU_GIER] = 0x110, + [IMX_MU_GCR] = 0x114, + [IMX_MU_TCR] = 0x120, + [IMX_MU_RCR] = 0x128 + }, +}; + +static int __init imx_mu_of_init(struct device_node *dn, + struct device_node *parent, + const struct imx_mu_dcfg *cfg) +{ + struct platform_device *pdev = of_find_device_by_node(dn); + struct device_link *pd_link_a; + struct device_link *pd_link_b; + struct imx_mu_msi *msi_data; + struct resource *res; + struct device *pd_a; + struct device *pd_b; + struct device *dev; + int ret; + int irq; + + dev = &pdev->dev; + + msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL); + if (!msi_data) + return -ENOMEM; + + msi_data->cfg = cfg; + + msi_data->regs = devm_platform_ioremap_resource_byname(pdev, "processor-a-side"); + if (IS_ERR(msi_data->regs)) { + dev_err(&pdev->dev, "failed to initialize 'regs'\n"); + return 
PTR_ERR(msi_data->regs); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "processor-b-side"); + if (!res) + return -EIO; + + msi_data->msiir_addr = res->start + msi_data->cfg->xTR; + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) + return -ENODEV; + + platform_set_drvdata(pdev, msi_data); + + msi_data->clk = devm_clk_get(dev, NULL); + if (IS_ERR(msi_data->clk)) + return PTR_ERR(msi_data->clk); + + pd_a = dev_pm_domain_attach_by_name(dev, "processor-a-side"); + if (IS_ERR(pd_a)) + return PTR_ERR(pd_a); + + pd_b = dev_pm_domain_attach_by_name(dev, "processor-b-side"); + if (IS_ERR(pd_b)) + return PTR_ERR(pd_b); + + pd_link_a = device_link_add(dev, pd_a, + DL_FLAG_STATELESS | + DL_FLAG_PM_RUNTIME | + DL_FLAG_RPM_ACTIVE); + + if (!pd_link_a) { + dev_err(dev, "Failed to add device_link to mu a.\n"); + goto err_pd_a; + } + + pd_link_b = device_link_add(dev, pd_b, + DL_FLAG_STATELESS | + DL_FLAG_PM_RUNTIME | + DL_FLAG_RPM_ACTIVE); + + + if (!pd_link_b) { + dev_err(dev, "Failed to add device_link to mu b.\n"); + goto err_pd_b; + } + + ret = imx_mu_msi_domains_init(msi_data, dev); + if (ret) + goto err_dm_init; + + pm_runtime_enable(dev); + + irq_set_chained_handler_and_data(irq, + imx_mu_msi_irq_handler, + msi_data); + + return 0; + +err_dm_init: + device_link_remove(dev, pd_b); +err_pd_b: + device_link_remove(dev, pd_a); +err_pd_a: + return -EINVAL; +} + +static int __maybe_unused imx_mu_runtime_suspend(struct device *dev) +{ + struct imx_mu_msi *priv = dev_get_drvdata(dev); + + clk_disable_unprepare(priv->clk); + + return 0; +} + +static int __maybe_unused imx_mu_runtime_resume(struct device *dev) +{ + struct imx_mu_msi *priv = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(priv->clk); + if (ret) + dev_err(dev, "failed to enable clock\n"); + + return ret; +} + +static const struct dev_pm_ops imx_mu_pm_ops = { + SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend, + imx_mu_runtime_resume, NULL) +}; + +static int __init imx_mu_imx7ulp_of_init(struct device_node *dn, + struct device_node *parent) +{ + return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx7ulp); +} + +static int __init imx_mu_imx6sx_of_init(struct device_node *dn, + struct device_node *parent) +{ + return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx6sx); +} + +static int __init imx_mu_imx8ulp_of_init(struct device_node *dn, + struct device_node *parent) +{ + return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx8ulp); +} + +IRQCHIP_PLATFORM_DRIVER_BEGIN(imx_mu_msi) +IRQCHIP_MATCH("fsl,imx7ulp-mu-msi", imx_mu_imx7ulp_of_init) +IRQCHIP_MATCH("fsl,imx6sx-mu-msi", imx_mu_imx6sx_of_init) +IRQCHIP_MATCH("fsl,imx8ulp-mu-msi", imx_mu_imx8ulp_of_init) +IRQCHIP_PLATFORM_DRIVER_END(imx_mu_msi, .pm = &imx_mu_pm_ops) + + +MODULE_AUTHOR("Frank Li <[email protected]>"); +MODULE_DESCRIPTION("Freescale MU MSI controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/irqchip/irq-ls-extirq.c b/drivers/irqchip/irq-ls-extirq.c index 853b3972dbe7..d8d48b1f7c29 100644 --- a/drivers/irqchip/irq-ls-extirq.c +++ b/drivers/irqchip/irq-ls-extirq.c @@ -6,8 +6,7 @@ #include <linux/irqchip.h> #include <linux/irqdomain.h> #include <linux/of.h> -#include <linux/mfd/syscon.h> -#include <linux/regmap.h> +#include <linux/of_address.h> #include <linux/slab.h> #include <dt-bindings/interrupt-controller/arm-gic.h> @@ -16,13 +15,41 @@ #define LS1021A_SCFGREVCR 0x200 struct ls_extirq_data { - struct regmap *syscon; - u32 intpcr; + void __iomem *intpcr; + raw_spinlock_t lock; + bool big_endian; bool is_ls1021a_or_ls1043a; u32 nirq; struct irq_fwspec
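The compose callback in the irq-imx-mu-msi driver above turns a hardware channel number into an MSI address/data pair: the doorbell is the physical address of the peer side's transmit register, with one 32-bit register per channel. A small standalone model of that encoding, using a made-up base address purely for illustration:

#include <stdint.h>
#include <stdio.h>

struct msi_msg {
	uint32_t address_hi;
	uint32_t address_lo;
	uint32_t data;
};

/* Mirrors imx_mu_msi_parent_compose_msg(): channel n is signalled by a
 * write to msiir_addr + 4 * n, and the payload is the channel number. */
static void compose_mu_msg(uint64_t msiir_addr, unsigned int hwirq,
			   struct msi_msg *msg)
{
	uint64_t addr = msiir_addr + 4ULL * hwirq;

	msg->address_hi = (uint32_t)(addr >> 32);
	msg->address_lo = (uint32_t)addr;
	msg->data = hwirq;
}

int main(void)
{
	struct msi_msg msg;

	compose_mu_msg(0x5d1b0200ULL, 2, &msg); /* hypothetical xTR base */
	printf("address %#x:%#x, data %u\n",
	       msg.address_hi, msg.address_lo, msg.data);
	return 0;
}

On the receive side the chained handler reads RSR to find pending channels, and imx_mu_msi_parent_ack_irq() acknowledges a channel simply by reading the matching receive register.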
map[MAXIRQ]; }; +static void ls_extirq_intpcr_rmw(struct ls_extirq_data *priv, u32 mask, + u32 value) +{ + u32 intpcr; + + /* + * Serialize concurrent calls to ls_extirq_set_type() from multiple + * IRQ descriptors, making sure the read-modify-write is atomic. + */ + raw_spin_lock(&priv->lock); + + if (priv->big_endian) + intpcr = ioread32be(priv->intpcr); + else + intpcr = ioread32(priv->intpcr); + + intpcr &= ~mask; + intpcr |= value; + + if (priv->big_endian) + iowrite32be(intpcr, priv->intpcr); + else + iowrite32(intpcr, priv->intpcr); + + raw_spin_unlock(&priv->lock); +} + static int ls_extirq_set_type(struct irq_data *data, unsigned int type) { @@ -51,7 +78,8 @@ ls_extirq_set_type(struct irq_data *data, unsigned int type) default: return -EINVAL; } - regmap_update_bits(priv->syscon, priv->intpcr, mask, value); + + ls_extirq_intpcr_rmw(priv, mask, value); return irq_chip_set_type_parent(data, type); } @@ -143,7 +171,6 @@ ls_extirq_parse_map(struct ls_extirq_data *priv, struct device_node *node) static int __init ls_extirq_of_init(struct device_node *node, struct device_node *parent) { - struct irq_domain *domain, *parent_domain; struct ls_extirq_data *priv; int ret; @@ -151,40 +178,52 @@ ls_extirq_of_init(struct device_node *node, struct device_node *parent) parent_domain = irq_find_host(parent); if (!parent_domain) { pr_err("Cannot find parent domain\n"); - return -ENODEV; + ret = -ENODEV; + goto err_irq_find_host; } priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - priv->syscon = syscon_node_to_regmap(node->parent); - if (IS_ERR(priv->syscon)) { - ret = PTR_ERR(priv->syscon); - pr_err("Failed to lookup parent regmap\n"); - goto out; + if (!priv) { + ret = -ENOMEM; + goto err_alloc_priv; } - ret = of_property_read_u32(node, "reg", &priv->intpcr); - if (ret) { - pr_err("Missing INTPCR offset value\n"); - goto out; + + /* + * All extirq OF nodes are under a scfg/syscon node with + * the 'ranges' property + */ + priv->intpcr = of_iomap(node, 0); + if (!priv->intpcr) { + pr_err("Cannot ioremap OF node %pOF\n", node); + ret = -ENOMEM; + goto err_iomap; } ret = ls_extirq_parse_map(priv, node); if (ret) - goto out; + goto err_parse_map; + priv->big_endian = of_device_is_big_endian(parent); priv->is_ls1021a_or_ls1043a = of_device_is_compatible(node, "fsl,ls1021a-extirq") || of_device_is_compatible(node, "fsl,ls1043a-extirq"); + raw_spin_lock_init(&priv->lock); domain = irq_domain_add_hierarchy(parent_domain, 0, priv->nirq, node, &extirq_domain_ops, priv); - if (!domain) + if (!domain) { ret = -ENOMEM; + goto err_add_hierarchy; + } -out: - if (ret) - kfree(priv); + return 0; + +err_add_hierarchy: +err_parse_map: + iounmap(priv->intpcr); +err_iomap: + kfree(priv); +err_alloc_priv: +err_irq_find_host: return ret; } diff --git a/drivers/irqchip/irq-realtek-rtl.c b/drivers/irqchip/irq-realtek-rtl.c index 56bf502d9c67..2a349082af81 100644 --- a/drivers/irqchip/irq-realtek-rtl.c +++ b/drivers/irqchip/irq-realtek-rtl.c @@ -21,11 +21,33 @@ #define RTL_ICTL_IRR2 0x10 #define RTL_ICTL_IRR3 0x14 +#define RTL_ICTL_NUM_INPUTS 32 + #define REG(x) (realtek_ictl_base + x) static DEFINE_RAW_SPINLOCK(irq_lock); static void __iomem *realtek_ictl_base; +/* + * IRR0-IRR3 store 4 bits per interrupt, but Realtek uses inverted numbering, + * placing IRQ 31 in the first four bits. A routing value of '0' means the + * interrupt is left disconnected. Routing values {1..15} connect to output + * lines {0..14}. 
+ */ +#define IRR_OFFSET(idx) (4 * (3 - (idx * 4) / 32)) +#define IRR_SHIFT(idx) ((idx * 4) % 32) + +static void write_irr(void __iomem *irr0, int idx, u32 value) +{ + unsigned int offset = IRR_OFFSET(idx); + unsigned int shift = IRR_SHIFT(idx); + u32 irr; + + irr = readl(irr0 + offset) & ~(0xf << shift); + irr |= (value & 0xf) << shift; + writel(irr, irr0 + offset); +} + static void realtek_ictl_unmask_irq(struct irq_data *i) { unsigned long flags; @@ -62,8 +84,14 @@ static struct irq_chip realtek_ictl_irq = { static int intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { + unsigned long flags; + irq_set_chip_and_handler(irq, &realtek_ictl_irq, handle_level_irq); + raw_spin_lock_irqsave(&irq_lock, flags); + write_irr(REG(RTL_ICTL_IRR0), hw, 1); + raw_spin_unlock_irqrestore(&irq_lock, flags); + return 0; } @@ -95,90 +123,50 @@ out: chained_irq_exit(chip, desc); } -/* - * SoC interrupts are cascaded to MIPS CPU interrupts according to the - * interrupt-map in the device tree. Each SoC interrupt gets 4 bits for - * the CPU interrupt in an Interrupt Routing Register. Max 32 SoC interrupts - * thus go into 4 IRRs. A routing value of '0' means the interrupt is left - * disconnected. Routing values {1..15} connect to output lines {0..14}. - */ -static int __init map_interrupts(struct device_node *node, struct irq_domain *domain) -{ - struct device_node *cpu_ictl; - const __be32 *imap; - u32 imaplen, soc_int, cpu_int, tmp, regs[4]; - int ret, i, irr_regs[] = { - RTL_ICTL_IRR3, - RTL_ICTL_IRR2, - RTL_ICTL_IRR1, - RTL_ICTL_IRR0, - }; - u8 mips_irqs_set; - - ret = of_property_read_u32(node, "#address-cells", &tmp); - if (ret || tmp) - return -EINVAL; - - imap = of_get_property(node, "interrupt-map", &imaplen); - if (!imap || imaplen % 3) - return -EINVAL; - - mips_irqs_set = 0; - memset(regs, 0, sizeof(regs)); - for (i = 0; i < imaplen; i += 3 * sizeof(u32)) { - soc_int = be32_to_cpup(imap); - if (soc_int > 31) - return -EINVAL; - - cpu_ictl = of_find_node_by_phandle(be32_to_cpup(imap + 1)); - if (!cpu_ictl) - return -EINVAL; - ret = of_property_read_u32(cpu_ictl, "#interrupt-cells", &tmp); - of_node_put(cpu_ictl); - if (ret || tmp != 1) - return -EINVAL; - - cpu_int = be32_to_cpup(imap + 2); - if (cpu_int > 7 || cpu_int < 2) - return -EINVAL; - - if (!(mips_irqs_set & BIT(cpu_int))) { - irq_set_chained_handler_and_data(cpu_int, realtek_irq_dispatch, - domain); - mips_irqs_set |= BIT(cpu_int); - } - - /* Use routing values (1..6) for CPU interrupts (2..7) */ - regs[(soc_int * 4) / 32] |= (cpu_int - 1) << (soc_int * 4) % 32; - imap += 3; - } - - for (i = 0; i < 4; i++) - writel(regs[i], REG(irr_regs[i])); - - return 0; -} - static int __init realtek_rtl_of_init(struct device_node *node, struct device_node *parent) { + struct of_phandle_args oirq; struct irq_domain *domain; - int ret; + unsigned int soc_irq; + int parent_irq; realtek_ictl_base = of_iomap(node, 0); if (!realtek_ictl_base) return -ENXIO; - /* Disable all cascaded interrupts */ + /* Disable all cascaded interrupts and clear routing */ writel(0, REG(RTL_ICTL_GIMR)); + for (soc_irq = 0; soc_irq < RTL_ICTL_NUM_INPUTS; soc_irq++) + write_irr(REG(RTL_ICTL_IRR0), soc_irq, 0); + + if (WARN_ON(!of_irq_count(node))) { + /* + * If DT contains no parent interrupts, assume MIPS CPU IRQ 2 + * (HW0) is connected to the first output. This is the case for + * all known hardware anyway. "interrupt-map" is deprecated, so + * don't bother trying to parse that. 
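The IRR_OFFSET()/IRR_SHIFT() macros above encode the inverted register layout described in that comment: four routing bits per SoC input, with inputs 0-7 living in IRR3 and inputs 24-31 in IRR0. A quick userspace check of the arithmetic; the macros are copied from the hunk and the sample indices are arbitrary:

#include <stdio.h>

/* Same arithmetic as the IRR_OFFSET()/IRR_SHIFT() macros in the hunk above. */
#define IRR_OFFSET(idx) (4 * (3 - ((idx) * 4) / 32))
#define IRR_SHIFT(idx)  (((idx) * 4) % 32)

int main(void)
{
	/* Inputs 0..7 resolve to byte offset 12 (IRR3), 8..15 to IRR2,
	 * 16..23 to IRR1 and 24..31 to offset 0 (IRR0). */
	const int samples[] = { 0, 7, 8, 24, 31 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("input %2d -> IRR offset %2d, shift %2d\n",
		       samples[i], IRR_OFFSET(samples[i]),
		       IRR_SHIFT(samples[i]));
	return 0;
}

write_irr() then masks out the old nibble at that offset/shift and ors in the new routing value, which is why the probe path can clear all 32 routes with a simple loop.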
+ */ + oirq.np = of_find_compatible_node(NULL, NULL, "mti,cpu-interrupt-controller"); + oirq.args_count = 1; + oirq.args[0] = 2; + + parent_irq = irq_create_of_mapping(&oirq); + + of_node_put(oirq.np); + } else { + parent_irq = of_irq_get(node, 0); + } - domain = irq_domain_add_simple(node, 32, 0, - &irq_domain_ops, NULL); + if (parent_irq < 0) + return parent_irq; + else if (!parent_irq) + return -ENODEV; - ret = map_interrupts(node, domain); - if (ret) { - pr_err("invalid interrupt map\n"); - return ret; - } + domain = irq_domain_add_linear(node, RTL_ICTL_NUM_INPUTS, &irq_domain_ops, NULL); + if (!domain) + return -ENOMEM; + + irq_set_chained_handler_and_data(parent_irq, realtek_irq_dispatch, domain); return 0; } diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index af17459c1a5c..e964a8dd8512 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -2345,8 +2345,7 @@ HFC_init(void) static void __exit HFC_cleanup(void) { - if (timer_pending(&hfc_tl)) - del_timer_sync(&hfc_tl); + del_timer_sync(&hfc_tl); pci_unregister_driver(&hfc_driver); } diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index ce89611a136e..54cd009aee50 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1140,8 +1140,12 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->blkdata; struct mmc_card *card = md->queue.card; + unsigned int arg = card->erase_arg; - mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, card->erase_arg); + if (mmc_card_broken_sd_discard(card)) + arg = SD_ERASE_ARG; + + mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, arg); } static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h index 99045e138ba4..cfdd1ff40b86 100644 --- a/drivers/mmc/core/card.h +++ b/drivers/mmc/core/card.h @@ -73,6 +73,7 @@ struct mmc_fixup { #define EXT_CSD_REV_ANY (-1u) #define CID_MANFID_SANDISK 0x2 +#define CID_MANFID_SANDISK_SD 0x3 #define CID_MANFID_ATP 0x9 #define CID_MANFID_TOSHIBA 0x11 #define CID_MANFID_MICRON 0x13 @@ -258,4 +259,9 @@ static inline int mmc_card_broken_hpi(const struct mmc_card *c) return c->quirks & MMC_QUIRK_BROKEN_HPI; } +static inline int mmc_card_broken_sd_discard(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_BROKEN_SD_DISCARD; +} + #endif diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h index be4393988086..29b9497936df 100644 --- a/drivers/mmc/core/quirks.h +++ b/drivers/mmc/core/quirks.h @@ -100,6 +100,12 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = { MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_TRIM_BROKEN), + /* + * Some SD cards report discard support while they don't + */ + MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd, + MMC_QUIRK_BROKEN_SD_DISCARD), + END_FIXUP }; diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 6edbf5c161ab..b970699743e0 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -128,6 +128,7 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host, struct clk *ref_clk = priv->clk; unsigned int freq, diff, best_freq = 0, diff_min = ~0; unsigned int new_clock, clkh_shift = 0; + unsigned int new_upper_limit; int i; /* @@ -153,13 +154,20 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host, * greater
than, new_clock. As we can divide by 1 << i for * any i in [0, 9] we want the input clock to be as close as * possible, but no greater than, new_clock << i. + * + * Add an upper limit of 1/1024 rate higher to the clock rate to fix + * clk rate jumping to lower rate due to rounding error (e.g. RZ/G2L has + * 3 clk sources 533.333333 MHz, 400 MHz and 266.666666 MHz. The request + * for 533.333333 MHz will select a slower 400 MHz due to rounding + * error (533333333 Hz / 4 * 4 = 533333332 Hz < 533333333 Hz)). */ for (i = min(9, ilog2(UINT_MAX / new_clock)); i >= 0; i--) { freq = clk_round_rate(ref_clk, new_clock << i); - if (freq > (new_clock << i)) { + new_upper_limit = (new_clock << i) + ((new_clock << i) >> 10); + if (freq > new_upper_limit) { /* Too fast; look for a slightly slower option */ freq = clk_round_rate(ref_clk, (new_clock << i) / 4 * 3); - if (freq > (new_clock << i)) + if (freq > new_upper_limit) continue; } @@ -181,6 +189,7 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host, static void renesas_sdhi_set_clock(struct tmio_mmc_host *host, unsigned int new_clock) { + unsigned int clk_margin; u32 clk = 0, clock; sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & @@ -194,7 +203,13 @@ static void renesas_sdhi_set_clock(struct tmio_mmc_host *host, host->mmc->actual_clock = renesas_sdhi_clk_update(host, new_clock); clock = host->mmc->actual_clock / 512; - for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1) + /* + * Add a margin of 1/1024 rate higher to the clock rate in order + * to avoid clk variable setting a value of 0 due to the margin + * provided for actual_clock in renesas_sdhi_clk_update(). + */ + clk_margin = new_clock >> 10; + for (clk = 0x80000080; new_clock + clk_margin >= (clock << 1); clk >>= 1) clock <<= 1; /* 1/1 clock is option */ diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c index 46c55ab4884c..b92a408f138d 100644 --- a/drivers/mmc/host/sdhci-sprd.c +++ b/drivers/mmc/host/sdhci-sprd.c @@ -309,7 +309,7 @@ static unsigned int sdhci_sprd_get_max_clock(struct sdhci_host *host) static unsigned int sdhci_sprd_get_min_clock(struct sdhci_host *host) { - return 400000; + return 100000; } static void sdhci_sprd_set_uhs_signaling(struct sdhci_host *host, diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index 2d2d8260c681..413925bce0ca 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -773,7 +773,7 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) dev_err(dev, "failed to set clk rate to %luHz: %d\n", host_clk, err); - tegra_host->curr_clk_rate = host_clk; + tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk); if (tegra_host->ddr_signaling) host->max_clk = host_clk; else diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h index 841da29cef93..f6c0938027ec 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h @@ -178,6 +178,8 @@ struct kvaser_usb_dev_cfg { extern const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops; extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops; +void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv); + int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len, int *actual_len); diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index 824cab80aa02..e91648ed7386 100644 ---
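The 1/1024 headroom in the renesas_sdhi hunks above exists because integer division can shave a few hertz off the requested rate, making the correct parent clock look "too fast". The comparison can be replayed in userspace with the RZ/G2L numbers from the comment; the values come straight from the diff:

#include <stdio.h>

int main(void)
{
	/* The target rate was derived via integer division, losing 1 Hz,
	 * while clk_round_rate() reports the parent's true 533333333 Hz. */
	unsigned int new_clock = 533333333 / 4 * 4; /* 533333332 Hz */
	unsigned int freq = 533333333;              /* rounded parent rate */

	unsigned int upper = new_clock + (new_clock >> 10); /* +1/1024 */

	printf("strict check rejects parent: %s\n",
	       freq > new_clock ? "yes" : "no");
	printf("margin check rejects parent: %s\n",
	       freq > upper ? "yes" : "no");
	return 0;
}

The strict comparison rejects the 533.33 MHz parent over a single hertz and falls back to 400 MHz; with the 1/1024 margin it is accepted.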
a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -477,7 +477,7 @@ static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv) /* This method might sleep. Do not call it in the atomic context * of URB completions. */ -static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) +void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) { usb_kill_anchored_urbs(&priv->tx_submitted); kvaser_usb_reset_tx_urb_contexts(priv); @@ -729,6 +729,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) init_usb_anchor(&priv->tx_submitted); init_completion(&priv->start_comp); init_completion(&priv->stop_comp); + init_completion(&priv->flush_comp); priv->can.ctrlmode_supported = 0; priv->dev = dev; diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c index 6871d474dabf..7b52fda73d82 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -1916,7 +1916,7 @@ static int kvaser_usb_hydra_flush_queue(struct kvaser_usb_net_priv *priv) { int err; - init_completion(&priv->flush_comp); + reinit_completion(&priv->flush_comp); err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_FLUSH_QUEUE, priv->channel); diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c index 07f687f29b34..50f2ac8319ff 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ -310,6 +310,38 @@ struct kvaser_cmd { } u; } __packed; +#define CMD_SIZE_ANY 0xff +#define kvaser_fsize(field) sizeof_field(struct kvaser_cmd, field) + +static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = { + [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple), + [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple), + [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo), + [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header), + [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.leaf.softinfo), + [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.leaf.rx_can), + [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can), + [CMD_LEAF_LOG_MESSAGE] = kvaser_fsize(u.leaf.log_message), + [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.leaf.chip_state_event), + [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event), + /* ignored events: */ + [CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY, +}; + +static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = { + [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple), + [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple), + [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo), + [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header), + [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.usbcan.softinfo), + [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can), + [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can), + [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event), + [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event), + /* ignored events: */ + [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY, +}; + /* Summary of a kvaser error event, for a unified Leaf/Usbcan error * handling. 
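The kvaser_usb_leaf_cmd_sizes_* tables above drive a defensive length check (the kvaser_usb_leaf_verify_size() helper appears a little further down in this diff): each command id maps to a minimum payload size, 0 means the id is unhandled, and CMD_SIZE_ANY marks events that are accepted at any length. A compact model of that lookup, with hypothetical command ids and a made-up 2-byte header:

#include <stdint.h>
#include <stdio.h>

#define CMD_SIZE_ANY   0xff
#define CMD_HEADER_LEN 2	/* hypothetical header length for this model */

/* Hypothetical ids: 0x0c needs a 4-byte payload, 0x32 is ignored. */
static const uint8_t min_sizes[0x40] = {
	[0x0c] = 4,
	[0x32] = CMD_SIZE_ANY,
};

/* Returns 0 if acceptable, -1 if too short, -2 if the id is unhandled. */
static int verify_size(uint8_t id, uint8_t len)
{
	uint8_t min = id < sizeof(min_sizes) ? min_sizes[id] : 0;

	if (min == CMD_SIZE_ANY)
		return 0;
	if (!min)
		return -2;
	return len >= min + CMD_HEADER_LEN ? 0 : -1;
}

int main(void)
{
	printf("%d %d %d\n",
	       verify_size(0x0c, 6),	/* long enough     ->  0 */
	       verify_size(0x0c, 3),	/* truncated       -> -1 */
	       verify_size(0x3f, 8));	/* unknown command -> -2 */
	return 0;
}

Running the check both in the synchronous wait path and in the URB completion handler means a malformed or truncated command from the device is dropped before any union member is dereferenced.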
Some discrepancies between the two families exist: * @@ -397,6 +429,43 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = { .bittiming_const = &kvaser_usb_flexc_bittiming_const, }; +static int kvaser_usb_leaf_verify_size(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + /* buffer size >= cmd->len ensured by caller */ + u8 min_size = 0; + + switch (dev->driver_info->family) { + case KVASER_LEAF: + if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_leaf)) + min_size = kvaser_usb_leaf_cmd_sizes_leaf[cmd->id]; + break; + case KVASER_USBCAN: + if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_usbcan)) + min_size = kvaser_usb_leaf_cmd_sizes_usbcan[cmd->id]; + break; + } + + if (min_size == CMD_SIZE_ANY) + return 0; + + if (min_size) { + min_size += CMD_HEADER_LEN; + if (cmd->len >= min_size) + return 0; + + dev_err_ratelimited(&dev->intf->dev, + "Received command %u too short (size %u, needed %u)", + cmd->id, cmd->len, min_size); + return -EIO; + } + + dev_warn_ratelimited(&dev->intf->dev, + "Unhandled command (%d, size %d)\n", + cmd->id, cmd->len); + return -EINVAL; +} + static void * kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv, const struct sk_buff *skb, int *cmd_len, @@ -502,6 +571,9 @@ static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id, end: kfree(buf); + if (err == 0) + err = kvaser_usb_leaf_verify_size(dev, cmd); + return err; } @@ -1133,6 +1205,9 @@ static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev, static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { + if (kvaser_usb_leaf_verify_size(dev, cmd) < 0) + return; + switch (cmd->id) { case CMD_START_CHIP_REPLY: kvaser_usb_leaf_start_chip_reply(dev, cmd); @@ -1351,9 +1426,13 @@ static int kvaser_usb_leaf_set_mode(struct net_device *netdev, switch (mode) { case CAN_MODE_START: + kvaser_usb_unlink_tx_urbs(priv); + err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP); if (err) return err; + + priv->can.state = CAN_STATE_ERROR_ACTIVE; break; default: return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c index aaee7c4248e6..1744d623999d 100644 --- a/drivers/net/ethernet/adi/adin1110.c +++ b/drivers/net/ethernet/adi/adin1110.c @@ -1169,6 +1169,11 @@ static int adin1110_port_bridge_leave(struct adin1110_port_priv *port_priv, return ret; } +static bool adin1110_port_dev_check(const struct net_device *dev) +{ + return dev->netdev_ops == &adin1110_netdev_ops; +} + static int adin1110_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr) { @@ -1177,6 +1182,9 @@ static int adin1110_netdevice_event(struct notifier_block *unused, struct netdev_notifier_changeupper_info *info = ptr; int ret = 0; + if (!adin1110_port_dev_check(dev)) + return NOTIFY_DONE; + switch (event) { case NETDEV_CHANGEUPPER: if (netif_is_bridge_master(info->upper_dev)) { @@ -1202,11 +1210,6 @@ static void adin1110_disconnect_phy(void *data) phy_disconnect(data); } -static bool adin1110_port_dev_check(const struct net_device *dev) -{ - return dev->netdev_ops == &adin1110_netdev_ops; -} - static int adin1110_port_set_forwarding_state(struct adin1110_port_priv *port_priv) { struct adin1110_priv *priv = port_priv->priv; diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile index 2e6c5f258a1f..0ddfb5b5d53c 100644 --- a/drivers/net/ethernet/broadcom/Makefile +++ b/drivers/net/ethernet/broadcom/Makefile @@ 
-17,8 +17,3 @@ obj-$(CONFIG_BGMAC_BCMA) += bgmac-bcma.o bgmac-bcma-mdio.o obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o obj-$(CONFIG_BNXT) += bnxt/ - -# FIXME: temporarily silence -Warray-bounds on non W=1+ builds -ifndef KBUILD_EXTRA_WARN -CFLAGS_tg3.o += -Wno-array-bounds -endif diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 16b73bb9acc7..5af16e5f9ad0 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -484,7 +484,7 @@ struct bcm_rsb { /* Number of Receive hardware descriptor words */ #define SP_NUM_HW_RX_DESC_WORDS 1024 -#define SP_LT_NUM_HW_RX_DESC_WORDS 256 +#define SP_LT_NUM_HW_RX_DESC_WORDS 512 /* Internal linked-list RAM size */ #define SP_NUM_TX_DESC 1536 diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index e6416332ec79..a842e1999122 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -7,7 +7,6 @@ #include <linux/math64.h> #include <linux/refcount.h> #include <net/pkt_cls.h> -#include <net/pkt_sched.h> #include <net/tc_act/tc_gate.h> static u16 enetc_get_max_gcl_len(struct enetc_hw *hw) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c index 5ba618aed6ad..4a343f853b28 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c @@ -1182,8 +1182,10 @@ static int mcs_register_interrupts(struct mcs *mcs) mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xff); mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries); - if (!mcs->tx_sa_active) + if (!mcs->tx_sa_active) { + ret = -ENOMEM; goto exit; + } return ret; exit: diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c index 64f3acd7f67b..9809f551fc2e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c @@ -133,7 +133,7 @@ static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir, default: ret = -EINVAL; goto fail; - }; + } mutex_unlock(&mbox->lock); @@ -284,7 +284,7 @@ static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf, sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox); if (!sc_req) { - return -ENOMEM; + ret = -ENOMEM; goto fail; } @@ -594,7 +594,7 @@ static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id, req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox); if (!req) { - return -ENOMEM; + ret = -ENOMEM; goto fail; } @@ -1653,6 +1653,7 @@ int cn10k_mcs_init(struct otx2_nic *pfvf) return 0; fail: dev_err(pfvf->dev, "Cannot notify PN wrapped event\n"); + mutex_unlock(&mbox->lock); return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 5803d7f9137c..892ca88e0cf4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -2810,7 +2810,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = register_netdev(netdev); if (err) { dev_err(dev, "Failed to register netdevice\n"); - goto err_del_mcam_entries; + goto err_mcs_free; } err = otx2_wq_init(pf); @@ -2849,6 +2849,8 @@ err_mcam_flow_del: otx2_mcam_flow_del(pf); err_unreg_netdev: 
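Several cn10k_macsec hunks above replace a bare return -ENOMEM with ret = -ENOMEM; goto fail;, because the function holds mbox->lock at that point and an early return would leave it held. The unwind idiom, reduced to a runnable sketch with a pthread mutex standing in for the kernel mutex and a fake allocator:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mbox_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for otx2_mbox_alloc_msg_*(): fails when asked to. */
static void *alloc_msg(int fail)
{
	return fail ? NULL : (void *)1;
}

static int write_sc_cam(int fail)
{
	int ret = 0;

	pthread_mutex_lock(&mbox_lock);

	if (!alloc_msg(fail)) {
		/* A bare 'return -ENOMEM' here would leak the lock. */
		ret = -ENOMEM;
		goto fail_unlock;
	}

	pthread_mutex_unlock(&mbox_lock);
	return 0;

fail_unlock:
	pthread_mutex_unlock(&mbox_lock);
	return ret;
}

int main(void)
{
	printf("ok path: %d, failure path: %d\n",
	       write_sc_cam(0), write_sc_cam(1));
	return 0;
}

The cn10k_mcs_init() hunk fixes the same class of bug from the other direction: its fail label now unlocks the mutex before returning.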
unregister_netdev(netdev); +err_mcs_free: + cn10k_mcs_free(pf); err_del_mcam_entries: otx2_mcam_flow_del(pf); err_ptp_destroy: diff --git a/drivers/net/ethernet/marvell/prestera/prestera_matchall.c b/drivers/net/ethernet/marvell/prestera/prestera_matchall.c index 6f2b95a5263e..1da9c1bc1ee9 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_matchall.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_matchall.c @@ -96,6 +96,8 @@ int prestera_mall_replace(struct prestera_flow_block *block, list_for_each_entry(binding, &block->binding_list, list) { err = prestera_span_rule_add(binding, port, block->ingress); + if (err == -EEXIST) + return err; if (err) goto rollback; } diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c index 4f65df0ae5e8..aa080dc57ff0 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c @@ -498,8 +498,8 @@ prestera_nexthop_group_get(struct prestera_switch *sw, refcount_inc(&nh_grp->refcount); } else { nh_grp = __prestera_nexthop_group_create(sw, key); - if (IS_ERR(nh_grp)) - return ERR_CAST(nh_grp); + if (!nh_grp) + return ERR_PTR(-ENOMEM); refcount_set(&nh_grp->refcount, 1); } @@ -651,7 +651,7 @@ prestera_fib_node_create(struct prestera_switch *sw, case PRESTERA_FIB_TYPE_UC_NH: fib_node->info.nh_grp = prestera_nexthop_group_get(sw, nh_grp_key); - if (!fib_node->info.nh_grp) + if (IS_ERR(fib_node->info.nh_grp)) goto err_nh_grp_get; grp_id = fib_node->info.nh_grp->grp_id; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.c b/drivers/net/ethernet/marvell/prestera/prestera_span.c index f0e9d6ea88c5..1005182ce3bc 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_span.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_span.c @@ -107,7 +107,7 @@ static int prestera_span_put(struct prestera_switch *sw, u8 span_id) entry = prestera_span_entry_find_by_id(sw->span, span_id); if (!entry) - return false; + return -ENOENT; if (!refcount_dec_and_test(&entry->ref_count)) return 0; @@ -151,6 +151,9 @@ int prestera_span_rule_del(struct prestera_flow_block_binding *binding, { int err; + if (binding->span_id == PRESTERA_SPAN_INVALID_ID) + return -ENOENT; + err = prestera_hw_span_unbind(binding->port, ingress); if (err) return err; diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile index fe66ba8793cf..45ba0970504a 100644 --- a/drivers/net/ethernet/mediatek/Makefile +++ b/drivers/net/ethernet/mediatek/Makefile @@ -11,8 +11,3 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o endif obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o - -# FIXME: temporarily silence -Warray-bounds on non W=1+ builds -ifndef KBUILD_EXTRA_WARN -CFLAGS_mtk_ppe.o += -Wno-array-bounds -endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c index a53e205f4a89..be74e1403328 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c @@ -115,6 +115,7 @@ mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev, struct mlx5e_flow_meters *flow_meters; u8 cir_man, cir_exp, cbs_man, cbs_exp; struct mlx5_aso_wqe *aso_wqe; + unsigned long expires; struct mlx5_aso *aso; u64 rate, burst; u8 ds_cnt; @@ -187,7 +188,12 @@ mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev, mlx5_aso_post_wqe(aso, 
true, &aso_wqe->ctrl); /* With newer FW, the wait for the first ASO WQE is more than 2us, put the wait 10ms. */ - err = mlx5_aso_poll_cq(aso, true, 10); + expires = jiffies + msecs_to_jiffies(10); + do { + err = mlx5_aso_poll_cq(aso, true); + if (err) + usleep_range(2, 10); + } while (err && time_is_after_jiffies(expires)); mutex_unlock(&flow_meters->aso_lock); return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c index 5da746da898d..41970067917b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c @@ -1405,7 +1405,7 @@ static int macsec_aso_set_arm_event(struct mlx5_core_dev *mdev, struct mlx5e_mac MLX5_ACCESS_ASO_OPC_MOD_MACSEC); macsec_aso_build_ctrl(aso, &aso_wqe->aso_ctrl, in); mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl); - err = mlx5_aso_poll_cq(maso, false, 10); + err = mlx5_aso_poll_cq(maso, false); mutex_unlock(&aso->aso_lock); return err; @@ -1430,7 +1430,7 @@ static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *mac macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL); mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl); - err = mlx5_aso_poll_cq(maso, false, 10); + err = mlx5_aso_poll_cq(maso, false); if (err) goto err_out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c index 21e14507ff5c..baa8092f335e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c @@ -381,20 +381,12 @@ void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data, WRITE_ONCE(doorbell_cseg, NULL); } -int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms) +int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data) { struct mlx5_aso_cq *cq = &aso->cq; struct mlx5_cqe64 *cqe; - unsigned long expires; cqe = mlx5_cqwq_get_cqe(&cq->wq); - - expires = jiffies + msecs_to_jiffies(interval_ms); - while (!cqe && time_is_after_jiffies(expires)) { - usleep_range(2, 10); - cqe = mlx5_cqwq_get_cqe(&cq->wq); - } - if (!cqe) return -ETIMEDOUT; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h index d854e01d7fc5..2d40dcf9d42e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h @@ -83,7 +83,7 @@ void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt, u32 obj_id, u32 opc_mode); void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data, struct mlx5_wqe_ctrl_seg *doorbell_cseg); -int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data, u32 interval_ms); +int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data); struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn); void mlx5_aso_destroy(struct mlx5_aso *aso); diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 3ab3e4536b99..8593cafa6368 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -373,10 +373,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, if (ipv6_tun) { key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6; key_size += - sizeof(struct nfp_flower_ipv6_udp_tun); + sizeof(struct nfp_flower_ipv6_gre_tun); } else { key_size += - sizeof(struct nfp_flower_ipv4_udp_tun); + sizeof(struct nfp_flower_ipv4_gre_tun); } if (enc_op.key) { 
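The meter.c hunk above moves the timeout out of mlx5_aso_poll_cq() and into the caller: poll once, and if no completion has arrived, sleep briefly and retry until a deadline passes. The same shape in a runnable userspace sketch; fake_poll_cq() is a stand-in that succeeds on the third attempt, and a one-second budget replaces the driver's 10 ms:

#include <stdio.h>
#include <time.h>

/* Stand-in for mlx5_aso_poll_cq(): the CQE "arrives" on the third poll. */
static int fake_poll_cq(void)
{
	static int calls;
	return ++calls < 3 ? -1 : 0;
}

int main(void)
{
	struct timespec now, deadline;
	int err;

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_sec += 1;	/* stand-in for the driver's 10 ms budget */

	/* Same do/while shape as the meter.c hunk above. */
	do {
		err = fake_poll_cq();
		if (err)
			nanosleep(&(struct timespec){ .tv_nsec = 2000 }, NULL);
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while (err && (now.tv_sec < deadline.tv_sec ||
			 (now.tv_sec == deadline.tv_sec &&
			  now.tv_nsec < deadline.tv_nsec)));

	printf("poll result: %d\n", err);
	return 0;
}

Pushing the wait to the caller lets the macsec users above poll exactly once (they pass no interval any more), while the meter path keeps its bounded retry loop.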
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 62deed210a95..91f10f746dff 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -2896,8 +2896,8 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, hpreg_res = devm_request_region(&pdev->dev, pci_resource_start(pdev, 0), pci_resource_len(pdev, 0), DRV_NAME); - if (IS_ERR(hpreg_res)) { - err = PTR_ERR(hpreg_res); + if (!hpreg_res) { + err = -EBUSY; dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n"); goto err_out_clear_quattro; } diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 3cbe4ec46234..7f86068f3ff6 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -2476,7 +2476,10 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common) port = am65_common_get_port(common, i); dl_port = &port->devlink_port; - attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + if (port->ndev) + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + else + attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED; attrs.phys.port_number = port->port_id; attrs.switch_id.id_len = sizeof(resource_size_t); memcpy(attrs.switch_id.id, common->switch_id, attrs.switch_id.id_len); diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 25b38a374e3c..dd5919ec408b 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -1051,7 +1051,8 @@ struct net_device_context { u32 vf_alloc; /* Serial number of the VF to team with */ u32 vf_serial; - + /* completion variable to confirm vf association */ + struct completion vf_add; /* Is the current data path through the VF NIC? */ bool data_path_is_vf; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index f066de0da492..9352dad58996 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -1580,6 +1580,10 @@ static void netvsc_send_vf(struct net_device *ndev, net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; + + if (net_device_ctx->vf_alloc) + complete(&net_device_ctx->vf_add); + netdev_info(ndev, "VF slot %u %s\n", net_device_ctx->vf_serial, net_device_ctx->vf_alloc ? 
"added" : "removed"); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 5f08482065ca..89eb4f179a3c 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2313,6 +2313,18 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev) } + /* Fallback path to check synthetic vf with + * help of mac addr + */ + list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { + ndev = hv_get_drvdata(ndev_ctx->device_ctx); + if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) { + netdev_notice(vf_netdev, + "falling back to mac addr based matching\n"); + return ndev; + } + } + netdev_notice(vf_netdev, "no netdev found for vf serial:%u\n", serial); return NULL; @@ -2409,6 +2421,11 @@ static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event) if (net_device_ctx->data_path_is_vf == vf_is_up) return NOTIFY_OK; + if (vf_is_up && !net_device_ctx->vf_alloc) { + netdev_info(ndev, "Waiting for the VF association from host\n"); + wait_for_completion(&net_device_ctx->vf_add); + } + ret = netvsc_switch_datapath(ndev, vf_is_up); if (ret) { @@ -2440,6 +2457,7 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) netvsc_vf_setxdp(vf_netdev, NULL); + reinit_completion(&net_device_ctx->vf_add); netdev_rx_handler_unregister(vf_netdev); netdev_upper_dev_unlink(vf_netdev, ndev); RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL); @@ -2479,6 +2497,7 @@ static int netvsc_probe(struct hv_device *dev, INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); + init_completion(&net_device_ctx->vf_add); spin_lock_init(&net_device_ctx->lock); INIT_LIST_HEAD(&net_device_ctx->reconfig_events); INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup); diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 713e3354cb2e..8f8f73099de8 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1192,7 +1192,7 @@ void macvlan_common_setup(struct net_device *dev) { ether_setup(dev); - dev->min_mtu = 0; + /* ether_setup() has set dev->min_mtu to ETH_MIN_MTU. 
*/ dev->max_mtu = ETH_MAX_MTU; dev->priv_flags &= ~IFF_TX_SKB_SHARING; netif_keep_dst(dev); diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 3757e069c486..54a17b576eac 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -1838,7 +1838,7 @@ static int ksz886x_cable_test_start(struct phy_device *phydev) return phy_clear_bits(phydev, MII_BMCR, BMCR_ANENABLE | BMCR_SPEED100); } -static int ksz886x_cable_test_result_trans(u16 status, u16 mask) +static __always_inline int ksz886x_cable_test_result_trans(u16 status, u16 mask) { switch (FIELD_GET(mask, status)) { case KSZ8081_LMD_STAT_NORMAL: @@ -1854,13 +1854,13 @@ static int ksz886x_cable_test_result_trans(u16 status, u16 mask) } } -static bool ksz886x_cable_test_failed(u16 status, u16 mask) +static __always_inline bool ksz886x_cable_test_failed(u16 status, u16 mask) { return FIELD_GET(mask, status) == KSZ8081_LMD_STAT_FAIL; } -static bool ksz886x_cable_test_fault_length_valid(u16 status, u16 mask) +static __always_inline bool ksz886x_cable_test_fault_length_valid(u16 status, u16 mask) { switch (FIELD_GET(mask, status)) { case KSZ8081_LMD_STAT_OPEN: @@ -1871,7 +1871,8 @@ static bool ksz886x_cable_test_fault_length_valid(u16 status, u16 mask) return false; } -static int ksz886x_cable_test_fault_length(struct phy_device *phydev, u16 status, u16 data_mask) +static __always_inline int ksz886x_cable_test_fault_length(struct phy_device *phydev, + u16 status, u16 data_mask) { int dt; diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 29e3fa86bac3..daac293e8ede 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -257,6 +257,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, case SFF8024_ECC_100GBASE_SR4_25GBASE_SR: phylink_set(modes, 100000baseSR4_Full); phylink_set(modes, 25000baseSR_Full); + __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces); break; case SFF8024_ECC_100GBASE_LR4_25GBASE_LR: case SFF8024_ECC_100GBASE_ER4_25GBASE_ER: @@ -268,6 +269,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, case SFF8024_ECC_25GBASE_CR_S: case SFF8024_ECC_25GBASE_CR_N: phylink_set(modes, 25000baseCR_Full); + __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces); break; case SFF8024_ECC_10GBASE_T_SFI: case SFF8024_ECC_10GBASE_T_SR: @@ -276,6 +278,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, break; case SFF8024_ECC_5GBASE_T: phylink_set(modes, 5000baseT_Full); + __set_bit(PHY_INTERFACE_MODE_5GBASER, interfaces); break; case SFF8024_ECC_2_5GBASE_T: phylink_set(modes, 2500baseT_Full); diff --git a/drivers/net/pse-pd/Kconfig b/drivers/net/pse-pd/Kconfig index 73d163704068..687dec49c1e1 100644 --- a/drivers/net/pse-pd/Kconfig +++ b/drivers/net/pse-pd/Kconfig @@ -14,6 +14,7 @@ if PSE_CONTROLLER config PSE_REGULATOR tristate "Regulator based PSE controller" + depends on REGULATOR || COMPILE_TEST help This module provides support for simple regulator based Ethernet Power Sourcing Equipment without automatic classification support. 
For diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 84d956ad4093..2d1e3fd9b526 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -2081,7 +2081,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, struct cfg80211_chan_def def; const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; enum nl80211_band band; - u16 *he_mcs_mask; + u16 he_mcs_mask[NL80211_HE_NSS_MAX]; u8 max_nss, he_mcs; u16 he_tx_mcs = 0, v = 0; int i, he_nss, nss_idx; @@ -2098,7 +2098,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar, return; band = def.chan->band; - he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs; + memcpy(he_mcs_mask, arvif->bitrate_mask.control[band].he_mcs, + sizeof(he_mcs_mask)); if (ath11k_peer_assoc_h_he_masked(he_mcs_mask)) return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index cc92706b3d16..cbd8053a9e35 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -384,6 +384,7 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, iwl_mvm_txq_from_tid(sta, tid); mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; + list_del_init(&mvmtxq->list); } /* Regardless if this is a reserved TXQ for a STA - mark it as false */ @@ -478,6 +479,7 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; + list_del_init(&mvmtxq->list); } mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */ diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index df51b5b1f171..a40636c90ec3 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -4973,6 +4973,8 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2, } rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]); + if (rx_status.rate_idx >= data2->hw->wiphy->bands[rx_status.band]->n_bitrates) + goto out; rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]); hdr = (void *)skb->data; diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 4901aa02b4fb..7378c4d1e156 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -696,10 +696,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) skb_reserve(skb, q->buf_offset); - if (q == &dev->q_rx[MT_RXQ_MCU]) { - u32 *rxfce = (u32 *)skb->cb; - *rxfce = info; - } + *(u32 *)skb->cb = info; __skb_put(skb, len); done++; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index d6aae60c440d..2ce1705c0f43 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -345,6 +345,7 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) u32 rxd1 = le32_to_cpu(rxd[1]); u32 rxd2 = le32_to_cpu(rxd[2]); u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM; + u32 csum_status = *(u32 *)skb->cb; bool unicast, hdr_trans, remove_pad, insert_ccmp_hdr = false; u16 hdr_gap; int phy_idx; @@ -394,7 +395,8 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) spin_unlock_bh(&dev->sta_poll_lock); } - if ((rxd0 & csum_mask) == csum_mask) + if (mt76_is_mmio(&dev->mt76) 
&& (rxd0 & csum_mask) == csum_mask && + !(csum_status & (BIT(0) | BIT(2) | BIT(3)))) skb->ip_summed = CHECKSUM_UNNECESSARY; if (rxd2 & MT_RXD2_NORMAL_FCS_ERR) @@ -610,14 +612,14 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) * When header translation failure is indicated, * the hardware will insert an extra 2-byte field * containing the data length after the protocol - * type field. + * type field. This happens either when the LLC-SNAP + * pattern did not match, or if a VLAN header was + * detected. */ pad_start = 12; if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q) pad_start += 4; - - if (get_unaligned_be16(skb->data + pad_start) != - skb->len - pad_start - 2) + else pad_start = 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index be97dede2634..a4bcc617c1a3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -233,6 +233,7 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) u8 remove_pad, amsdu_info; u8 mode = 0, qos_ctl = 0; struct mt7915_sta *msta = NULL; + u32 csum_status = *(u32 *)skb->cb; bool hdr_trans; u16 hdr_gap; u16 seq_ctrl = 0; @@ -288,7 +289,8 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) if (!sband->channels) return -EINVAL; - if ((rxd0 & csum_mask) == csum_mask) + if ((rxd0 & csum_mask) == csum_mask && + !(csum_status & (BIT(0) | BIT(2) | BIT(3)))) skb->ip_summed = CHECKSUM_UNNECESSARY; if (rxd1 & MT_RXD1_NORMAL_FCS_ERR) @@ -446,14 +448,14 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) * When header translation failure is indicated, * the hardware will insert an extra 2-byte field * containing the data length after the protocol - * type field. + * type field. This happens either when the LLC-SNAP + * pattern did not match, or if a VLAN header was + * detected. 
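 * (Editor's note: offset 12 is the EtherType position after the two
 * 6-byte MAC addresses; a VLAN tag pushes the inserted 2-byte length
 * field out by another 4 bytes, hence pad_start grows from 12 to 16.)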
*/ pad_start = 12; if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q) pad_start += 4; - - if (get_unaligned_be16(skb->data + pad_start) != - skb->len - pad_start - 2) + else pad_start = 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c index e4868c492bc0..650ab97ae052 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c @@ -230,6 +230,7 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) struct mt76_phy *mphy = &dev->mt76.phy; struct mt7921_phy *phy = &dev->phy; struct ieee80211_supported_band *sband; + u32 csum_status = *(u32 *)skb->cb; u32 rxd0 = le32_to_cpu(rxd[0]); u32 rxd1 = le32_to_cpu(rxd[1]); u32 rxd2 = le32_to_cpu(rxd[2]); @@ -290,7 +291,8 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) if (!sband->channels) return -EINVAL; - if ((rxd0 & csum_mask) == csum_mask) + if (mt76_is_mmio(&dev->mt76) && (rxd0 & csum_mask) == csum_mask && + !(csum_status & (BIT(0) | BIT(2) | BIT(3)))) skb->ip_summed = CHECKSUM_UNNECESSARY; if (rxd1 & MT_RXD1_NORMAL_FCS_ERR) diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index e67cc7909bce..6c054850363f 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -60,14 +60,20 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list) .skb = skb, .info = IEEE80211_SKB_CB(skb), }; + struct ieee80211_rate_status rs = {}; struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb); struct mt76_wcid *wcid; wcid = rcu_dereference(dev->wcid[cb->wcid]); if (wcid) { status.sta = wcid_to_sta(wcid); - status.rates = NULL; - status.n_rates = 0; + if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) { + rs.rate_idx = wcid->rate; + status.rates = &rs; + status.n_rates = 1; + } else { + status.n_rates = 0; + } } hw = mt76_tx_status_get_hw(dev, skb); diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index 689271c4245c..7378e2f3e525 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c @@ -521,24 +521,14 @@ static int pcifront_rescan_root(struct pcifront_device *pdev, int err; struct pci_bus *b; -#ifndef CONFIG_PCI_DOMAINS - if (domain != 0) { - dev_err(&pdev->xdev->dev, - "PCI Root in non-zero PCI Domain! domain=%d\n", domain); - dev_err(&pdev->xdev->dev, - "Please compile with CONFIG_PCI_DOMAINS\n"); - return -EINVAL; - } -#endif - - dev_info(&pdev->xdev->dev, "Rescanning PCI Frontend Bus %04x:%02x\n", - domain, bus); - b = pci_find_bus(domain, bus); if (!b) /* If the bus is unknown, create it. 
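 * (Editor's note: with the dev_info() moved below this check, the
 * "Rescanning" message is now only logged for buses that already
 * exist; a previously unknown bus takes the initial-scan path.)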
*/ return pcifront_scan_root(pdev, domain, bus); + dev_info(&pdev->xdev->dev, "Rescanning PCI Frontend Bus %04x:%02x\n", + domain, bus); + err = pcifront_scan_bus(pdev, domain, bus, b); /* Claim resources before going "live" with our devices */ @@ -819,76 +809,73 @@ out: return err; } -static int pcifront_try_connect(struct pcifront_device *pdev) +static void pcifront_connect(struct pcifront_device *pdev) { - int err = -EFAULT; + int err; int i, num_roots, len; char str[64]; unsigned int domain, bus; - - /* Only connect once */ - if (xenbus_read_driver_state(pdev->xdev->nodename) != - XenbusStateInitialised) - goto out; - - err = pcifront_connect_and_init_dma(pdev); - if (err && err != -EEXIST) { - xenbus_dev_fatal(pdev->xdev, err, - "Error setting up PCI Frontend"); - goto out; - } - err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, "root_num", "%d", &num_roots); if (err == -ENOENT) { xenbus_dev_error(pdev->xdev, err, "No PCI Roots found, trying 0000:00"); - err = pcifront_scan_root(pdev, 0, 0); + err = pcifront_rescan_root(pdev, 0, 0); if (err) { xenbus_dev_fatal(pdev->xdev, err, "Error scanning PCI root 0000:00"); - goto out; + return; } num_roots = 0; } else if (err != 1) { - if (err == 0) - err = -EINVAL; - xenbus_dev_fatal(pdev->xdev, err, + xenbus_dev_fatal(pdev->xdev, err >= 0 ? -EINVAL : err, "Error reading number of PCI roots"); - goto out; + return; } for (i = 0; i < num_roots; i++) { len = snprintf(str, sizeof(str), "root-%d", i); - if (unlikely(len >= (sizeof(str) - 1))) { - err = -ENOMEM; - goto out; - } + if (unlikely(len >= (sizeof(str) - 1))) + return; err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, "%x:%x", &domain, &bus); if (err != 2) { - if (err >= 0) - err = -EINVAL; - xenbus_dev_fatal(pdev->xdev, err, + xenbus_dev_fatal(pdev->xdev, err >= 0 ? 
-EINVAL : err, "Error reading PCI root %d", i); - goto out; + return; } - err = pcifront_scan_root(pdev, domain, bus); + err = pcifront_rescan_root(pdev, domain, bus); if (err) { xenbus_dev_fatal(pdev->xdev, err, "Error scanning PCI root %04x:%02x", domain, bus); - goto out; + return; } } - err = xenbus_switch_state(pdev->xdev, XenbusStateConnected); + xenbus_switch_state(pdev->xdev, XenbusStateConnected); +} -out: - return err; +static void pcifront_try_connect(struct pcifront_device *pdev) +{ + int err; + + /* Only connect once */ + if (xenbus_read_driver_state(pdev->xdev->nodename) != + XenbusStateInitialised) + return; + + err = pcifront_connect_and_init_dma(pdev); + if (err && err != -EEXIST) { + xenbus_dev_fatal(pdev->xdev, err, + "Error setting up PCI Frontend"); + return; + } + + pcifront_connect(pdev); } static int pcifront_try_disconnect(struct pcifront_device *pdev) @@ -914,80 +901,37 @@ out: return err; } -static int pcifront_attach_devices(struct pcifront_device *pdev) +static void pcifront_attach_devices(struct pcifront_device *pdev) { - int err = -EFAULT; - int i, num_roots, len; - unsigned int domain, bus; - char str[64]; - - if (xenbus_read_driver_state(pdev->xdev->nodename) != + if (xenbus_read_driver_state(pdev->xdev->nodename) == XenbusStateReconfiguring) - goto out; - - err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, - "root_num", "%d", &num_roots); - if (err == -ENOENT) { - xenbus_dev_error(pdev->xdev, err, - "No PCI Roots found, trying 0000:00"); - err = pcifront_rescan_root(pdev, 0, 0); - if (err) { - xenbus_dev_fatal(pdev->xdev, err, - "Error scanning PCI root 0000:00"); - goto out; - } - num_roots = 0; - } else if (err != 1) { - if (err == 0) - err = -EINVAL; - xenbus_dev_fatal(pdev->xdev, err, - "Error reading number of PCI roots"); - goto out; - } - - for (i = 0; i < num_roots; i++) { - len = snprintf(str, sizeof(str), "root-%d", i); - if (unlikely(len >= (sizeof(str) - 1))) { - err = -ENOMEM; - goto out; - } - - err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, - "%x:%x", &domain, &bus); - if (err != 2) { - if (err >= 0) - err = -EINVAL; - xenbus_dev_fatal(pdev->xdev, err, - "Error reading PCI root %d", i); - goto out; - } - - err = pcifront_rescan_root(pdev, domain, bus); - if (err) { - xenbus_dev_fatal(pdev->xdev, err, - "Error scanning PCI root %04x:%02x", - domain, bus); - goto out; - } - } - - xenbus_switch_state(pdev->xdev, XenbusStateConnected); - -out: - return err; + pcifront_connect(pdev); } static int pcifront_detach_devices(struct pcifront_device *pdev) { int err = 0; int i, num_devs; + enum xenbus_state state; unsigned int domain, bus, slot, func; struct pci_dev *pci_dev; char str[64]; - if (xenbus_read_driver_state(pdev->xdev->nodename) != - XenbusStateConnected) + state = xenbus_read_driver_state(pdev->xdev->nodename); + if (state == XenbusStateInitialised) { + dev_dbg(&pdev->xdev->dev, "Handle skipped connect.\n"); + /* We missed Connected and need to initialize. 
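 * (Editor's note: the DMA setup that pcifront_try_connect() would
 * normally have done in the Initialised state is caught up here,
 * before the switch to XenbusStateReconfiguring below.)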
*/ + err = pcifront_connect_and_init_dma(pdev); + if (err && err != -EEXIST) { + xenbus_dev_fatal(pdev->xdev, err, + "Error setting up PCI Frontend"); + goto out; + } + + goto out_switch_state; + } else if (state != XenbusStateConnected) { goto out; + } err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, "num_devs", "%d", &num_devs); @@ -1048,6 +992,7 @@ static int pcifront_detach_devices(struct pcifront_device *pdev) domain, bus, slot, func); } + out_switch_state: err = xenbus_switch_state(pdev->xdev, XenbusStateReconfiguring); out: diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index ecf65237b263..6be896871718 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -639,7 +639,7 @@ static bool do_amd_gpio_irq_handler(int irq, void *dev_id) if (!(regval & PIN_IRQ_PENDING) || !(regval & BIT(INTERRUPT_MASK_OFF))) continue; - generic_handle_domain_irq(gc->irq.domain, irqnr + i); + generic_handle_domain_irq_safe(gc->irq.domain, irqnr + i); /* Clear interrupt. * We must read the pin register again, in case the diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig index b437847b6237..dbd327712205 100644 --- a/drivers/platform/Kconfig +++ b/drivers/platform/Kconfig @@ -3,6 +3,8 @@ if MIPS source "drivers/platform/mips/Kconfig" endif +source "drivers/platform/loongarch/Kconfig" + source "drivers/platform/goldfish/Kconfig" source "drivers/platform/chrome/Kconfig" diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile index 4de08ef4ec9d..41640172975a 100644 --- a/drivers/platform/Makefile +++ b/drivers/platform/Makefile @@ -4,6 +4,7 @@ # obj-$(CONFIG_X86) += x86/ +obj-$(CONFIG_LOONGARCH) += loongarch/ obj-$(CONFIG_MELLANOX_PLATFORM) += mellanox/ obj-$(CONFIG_MIPS) += mips/ obj-$(CONFIG_OLPC_EC) += olpc/ diff --git a/drivers/platform/loongarch/Kconfig b/drivers/platform/loongarch/Kconfig new file mode 100644 index 000000000000..5633e4d73991 --- /dev/null +++ b/drivers/platform/loongarch/Kconfig @@ -0,0 +1,31 @@ +# +# LoongArch Platform Specific Drivers +# + +menuconfig LOONGARCH_PLATFORM_DEVICES + bool "LoongArch Platform Specific Device Drivers" + default y + depends on LOONGARCH + help + Say Y here to get to see options for device drivers of various + LoongArch platforms, including vendor-specific laptop/desktop + extension and hardware monitor drivers. This option itself does + not add any kernel code. + + If you say N, all options in this submenu will be skipped and disabled. + +if LOONGARCH_PLATFORM_DEVICES + +config LOONGSON_LAPTOP + tristate "Generic Loongson-3 Laptop Driver" + depends on ACPI + depends on BACKLIGHT_CLASS_DEVICE + depends on INPUT + depends on MACH_LOONGSON64 + select ACPI_VIDEO + select INPUT_SPARSEKMAP + default y + help + ACPI-based Loongson-3 family laptops generic driver. 
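Editor's aside: several hunks in this series (pinctrl-amd above, int0002 and ssb further down) replace generic_handle_irq()/irq_find_mapping() pairs with generic_handle_domain_irq_safe(). A rough, illustrative sketch of what the _safe variant adds, under a hypothetical name (my_handle_domain_irq_safe); conceptually it just makes the domain dispatch callable from contexts where hard IRQs may still be enabled:

```c
#include <linux/irq.h>
#include <linux/irqdesc.h>

static int my_handle_domain_irq_safe(struct irq_domain *domain,
				     unsigned int hwirq)
{
	unsigned long flags;
	int ret;

	/* Dispatch with local interrupts masked, as a hard IRQ would be */
	local_irq_save(flags);
	ret = generic_handle_domain_irq(domain, hwirq);
	local_irq_restore(flags);
	return ret;
}
```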
+ +endif # LOONGARCH_PLATFORM_DEVICES diff --git a/drivers/platform/loongarch/Makefile b/drivers/platform/loongarch/Makefile new file mode 100644 index 000000000000..f43ab03db1a2 --- /dev/null +++ b/drivers/platform/loongarch/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_LOONGSON_LAPTOP) += loongson-laptop.o diff --git a/drivers/platform/loongarch/loongson-laptop.c b/drivers/platform/loongarch/loongson-laptop.c new file mode 100644 index 000000000000..f0166ad5d2c2 --- /dev/null +++ b/drivers/platform/loongarch/loongson-laptop.c @@ -0,0 +1,624 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Generic Loongson processor based LAPTOP/ALL-IN-ONE driver + * + * Jianmin Lv <[email protected]> + * Huacai Chen <[email protected]> + * + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/acpi.h> +#include <linux/backlight.h> +#include <linux/device.h> +#include <linux/input.h> +#include <linux/input/sparse-keymap.h> +#include <linux/platform_device.h> +#include <linux/string.h> +#include <linux/types.h> +#include <acpi/video.h> + +/* 1. Driver-wide structs and misc. variables */ + +/* ACPI HIDs */ +#define LOONGSON_ACPI_EC_HID "PNP0C09" +#define LOONGSON_ACPI_HKEY_HID "LOON0000" + +#define ACPI_LAPTOP_NAME "loongson-laptop" +#define ACPI_LAPTOP_ACPI_EVENT_PREFIX "loongson" + +#define MAX_ACPI_ARGS 3 +#define GENERIC_HOTKEY_MAP_MAX 64 + +#define GENERIC_EVENT_TYPE_OFF 12 +#define GENERIC_EVENT_TYPE_MASK 0xF000 +#define GENERIC_EVENT_CODE_MASK 0x0FFF + +struct generic_sub_driver { + u32 type; + char *name; + acpi_handle *handle; + struct acpi_device *device; + struct platform_driver *driver; + int (*init)(struct generic_sub_driver *sub_driver); + void (*notify)(struct generic_sub_driver *sub_driver, u32 event); + u8 acpi_notify_installed; +}; + +static u32 input_device_registered; +static struct input_dev *generic_inputdev; + +static acpi_handle hotkey_handle; +static struct key_entry hotkey_keycode_map[GENERIC_HOTKEY_MAP_MAX]; + +int loongson_laptop_turn_on_backlight(void); +int loongson_laptop_turn_off_backlight(void); +static int loongson_laptop_backlight_update(struct backlight_device *bd); + +/* 2. ACPI Helpers and device model */ + +static int acpi_evalf(acpi_handle handle, int *res, char *method, char *fmt, ...) 
+{ + char res_type; + char *fmt0 = fmt; + va_list ap; + int success, quiet; + acpi_status status; + struct acpi_object_list params; + struct acpi_buffer result, *resultp; + union acpi_object in_objs[MAX_ACPI_ARGS], out_obj; + + if (!*fmt) { + pr_err("acpi_evalf() called with empty format\n"); + return 0; + } + + if (*fmt == 'q') { + quiet = 1; + fmt++; + } else + quiet = 0; + + res_type = *(fmt++); + + params.count = 0; + params.pointer = &in_objs[0]; + + va_start(ap, fmt); + while (*fmt) { + char c = *(fmt++); + switch (c) { + case 'd': /* int */ + in_objs[params.count].integer.value = va_arg(ap, int); + in_objs[params.count++].type = ACPI_TYPE_INTEGER; + break; + /* add more types as needed */ + default: + pr_err("acpi_evalf() called with invalid format character '%c'\n", c); + va_end(ap); + return 0; + } + } + va_end(ap); + + if (res_type != 'v') { + result.length = sizeof(out_obj); + result.pointer = &out_obj; + resultp = &result; + } else + resultp = NULL; + + status = acpi_evaluate_object(handle, method, ¶ms, resultp); + + switch (res_type) { + case 'd': /* int */ + success = (status == AE_OK && out_obj.type == ACPI_TYPE_INTEGER); + if (success && res) + *res = out_obj.integer.value; + break; + case 'v': /* void */ + success = status == AE_OK; + break; + /* add more types as needed */ + default: + pr_err("acpi_evalf() called with invalid format character '%c'\n", res_type); + return 0; + } + + if (!success && !quiet) + pr_err("acpi_evalf(%s, %s, ...) failed: %s\n", + method, fmt0, acpi_format_exception(status)); + + return success; +} + +static int hotkey_status_get(int *status) +{ + if (!acpi_evalf(hotkey_handle, status, "GSWS", "d")) + return -EIO; + + return 0; +} + +static void dispatch_acpi_notify(acpi_handle handle, u32 event, void *data) +{ + struct generic_sub_driver *sub_driver = data; + + if (!sub_driver || !sub_driver->notify) + return; + sub_driver->notify(sub_driver, event); +} + +static int __init setup_acpi_notify(struct generic_sub_driver *sub_driver) +{ + acpi_status status; + + if (!*sub_driver->handle) + return 0; + + sub_driver->device = acpi_fetch_acpi_dev(*sub_driver->handle); + if (!sub_driver->device) { + pr_err("acpi_fetch_acpi_dev(%s) failed\n", sub_driver->name); + return -ENODEV; + } + + sub_driver->device->driver_data = sub_driver; + sprintf(acpi_device_class(sub_driver->device), "%s/%s", + ACPI_LAPTOP_ACPI_EVENT_PREFIX, sub_driver->name); + + status = acpi_install_notify_handler(*sub_driver->handle, + sub_driver->type, dispatch_acpi_notify, sub_driver); + if (ACPI_FAILURE(status)) { + if (status == AE_ALREADY_EXISTS) { + pr_notice("Another device driver is already " + "handling %s events\n", sub_driver->name); + } else { + pr_err("acpi_install_notify_handler(%s) failed: %s\n", + sub_driver->name, acpi_format_exception(status)); + } + return -ENODEV; + } + sub_driver->acpi_notify_installed = 1; + + return 0; +} + +static int loongson_hotkey_suspend(struct device *dev) +{ + return 0; +} + +static int loongson_hotkey_resume(struct device *dev) +{ + int status = 0; + struct key_entry ke; + struct backlight_device *bd; + + /* + * Only if the firmware supports SW_LID event model, we can handle the + * event. This is for the consideration of development board without EC. + */ + if (test_bit(SW_LID, generic_inputdev->swbit)) { + if (hotkey_status_get(&status) < 0) + return -EIO; + /* + * The input device sw element records the last lid status. + * When the system is awakened by other wake-up sources, + * the lid event will also be reported. 
The judgment of + * adding SW_LID bit which in sw element can avoid this + * case. + * + * Input system will drop lid event when current lid event + * value and last lid status in the same. So laptop driver + * doesn't report repeated events. + * + * Lid status is generally 0, but hardware exception is + * considered. So add lid status confirmation. + */ + if (test_bit(SW_LID, generic_inputdev->sw) && !(status & (1 << SW_LID))) { + ke.type = KE_SW; + ke.sw.value = (u8)status; + ke.sw.code = SW_LID; + sparse_keymap_report_entry(generic_inputdev, &ke, 1, true); + } + } + + bd = backlight_device_get_by_type(BACKLIGHT_PLATFORM); + if (bd) { + loongson_laptop_backlight_update(bd) ? + pr_warn("Loongson_backlight: resume brightness failed") : + pr_info("Loongson_backlight: resume brightness %d\n", bd->props.brightness); + } + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(loongson_hotkey_pm, + loongson_hotkey_suspend, loongson_hotkey_resume); + +static int loongson_hotkey_probe(struct platform_device *pdev) +{ + hotkey_handle = ACPI_HANDLE(&pdev->dev); + + if (!hotkey_handle) + return -ENODEV; + + return 0; +} + +static const struct acpi_device_id loongson_device_ids[] = { + {LOONGSON_ACPI_HKEY_HID, 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, loongson_device_ids); + +static struct platform_driver loongson_hotkey_driver = { + .probe = loongson_hotkey_probe, + .driver = { + .name = "loongson-hotkey", + .owner = THIS_MODULE, + .pm = pm_ptr(&loongson_hotkey_pm), + .acpi_match_table = loongson_device_ids, + }, +}; + +static int hotkey_map(void) +{ + u32 index; + acpi_status status; + struct acpi_buffer buf; + union acpi_object *pack; + + buf.length = ACPI_ALLOCATE_BUFFER; + status = acpi_evaluate_object_typed(hotkey_handle, "KMAP", NULL, &buf, ACPI_TYPE_PACKAGE); + if (status != AE_OK) { + pr_err("ACPI exception: %s\n", acpi_format_exception(status)); + return -1; + } + pack = buf.pointer; + for (index = 0; index < pack->package.count; index++) { + union acpi_object *element, *sub_pack; + + sub_pack = &pack->package.elements[index]; + + element = &sub_pack->package.elements[0]; + hotkey_keycode_map[index].type = element->integer.value; + element = &sub_pack->package.elements[1]; + hotkey_keycode_map[index].code = element->integer.value; + element = &sub_pack->package.elements[2]; + hotkey_keycode_map[index].keycode = element->integer.value; + } + + return 0; +} + +static int hotkey_backlight_set(bool enable) +{ + if (!acpi_evalf(hotkey_handle, NULL, "VCBL", "vd", enable ? 
1 : 0)) + return -EIO; + + return 0; +} + +static int ec_get_brightness(void) +{ + int status = 0; + + if (!hotkey_handle) + return -ENXIO; + + if (!acpi_evalf(hotkey_handle, &status, "ECBG", "d")) + return -EIO; + + return status; +} + +static int ec_set_brightness(int level) +{ + + int ret = 0; + + if (!hotkey_handle) + return -ENXIO; + + if (!acpi_evalf(hotkey_handle, NULL, "ECBS", "vd", level)) + ret = -EIO; + + return ret; +} + +static int ec_backlight_level(u8 level) +{ + int status = 0; + + if (!hotkey_handle) + return -ENXIO; + + if (!acpi_evalf(hotkey_handle, &status, "ECLL", "d")) + return -EIO; + + if ((status < 0) || (level > status)) + return status; + + if (!acpi_evalf(hotkey_handle, &status, "ECSL", "d")) + return -EIO; + + if ((status < 0) || (level < status)) + return status; + + return level; +} + +static int loongson_laptop_backlight_update(struct backlight_device *bd) +{ + int lvl = ec_backlight_level(bd->props.brightness); + + if (lvl < 0) + return -EIO; + if (ec_set_brightness(lvl)) + return -EIO; + + return 0; +} + +static int loongson_laptop_get_brightness(struct backlight_device *bd) +{ + int level; + + level = ec_get_brightness(); + if (level < 0) + return -EIO; + + return level; +} + +static const struct backlight_ops backlight_laptop_ops = { + .update_status = loongson_laptop_backlight_update, + .get_brightness = loongson_laptop_get_brightness, +}; + +static int laptop_backlight_register(void) +{ + int status = 0; + struct backlight_properties props; + + memset(&props, 0, sizeof(props)); + + if (!acpi_evalf(hotkey_handle, &status, "ECLL", "d")) + return -EIO; + + props.brightness = 1; + props.max_brightness = status; + props.type = BACKLIGHT_PLATFORM; + + backlight_device_register("loongson_laptop", + NULL, NULL, &backlight_laptop_ops, &props); + + return 0; +} + +int loongson_laptop_turn_on_backlight(void) +{ + int status; + union acpi_object arg0 = { ACPI_TYPE_INTEGER }; + struct acpi_object_list args = { 1, &arg0 }; + + arg0.integer.value = 1; + status = acpi_evaluate_object(NULL, "\\BLSW", &args, NULL); + if (ACPI_FAILURE(status)) { + pr_info("Loongson lvds error: 0x%x\n", status); + return -ENODEV; + } + + return 0; +} + +int loongson_laptop_turn_off_backlight(void) +{ + int status; + union acpi_object arg0 = { ACPI_TYPE_INTEGER }; + struct acpi_object_list args = { 1, &arg0 }; + + arg0.integer.value = 0; + status = acpi_evaluate_object(NULL, "\\BLSW", &args, NULL); + if (ACPI_FAILURE(status)) { + pr_info("Loongson lvds error: 0x%x\n", status); + return -ENODEV; + } + + return 0; +} + +static int __init event_init(struct generic_sub_driver *sub_driver) +{ + int ret; + + ret = hotkey_map(); + if (ret < 0) { + pr_err("Failed to parse keymap from DSDT\n"); + return ret; + } + + ret = sparse_keymap_setup(generic_inputdev, hotkey_keycode_map, NULL); + if (ret < 0) { + pr_err("Failed to setup input device keymap\n"); + input_free_device(generic_inputdev); + + return ret; + } + + /* + * This hotkey driver handle backlight event when + * acpi_video_get_backlight_type() gets acpi_backlight_vendor + */ + if (acpi_video_get_backlight_type() == acpi_backlight_vendor) + hotkey_backlight_set(true); + else + hotkey_backlight_set(false); + + pr_info("ACPI: enabling firmware HKEY event interface...\n"); + + return ret; +} + +static void event_notify(struct generic_sub_driver *sub_driver, u32 event) +{ + int type, scan_code; + struct key_entry *ke = NULL; + + scan_code = event & GENERIC_EVENT_CODE_MASK; + type = (event & GENERIC_EVENT_TYPE_MASK) >> GENERIC_EVENT_TYPE_OFF; + 
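	/*
	 * (Editor's note, illustrative: per the masks above, the event
	 * word packs the type into bits 15:12 and the scan code into
	 * bits 11:0, so event 0x1001 decodes to type 0x1, code 0x001.)
	 */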
ke = sparse_keymap_entry_from_scancode(generic_inputdev, scan_code); + if (ke) { + if (type == KE_SW) { + int status = 0; + + if (hotkey_status_get(&status) < 0) + return; + + ke->sw.value = !!(status & (1 << ke->sw.code)); + } + sparse_keymap_report_entry(generic_inputdev, ke, 1, true); + } +} + +/* 3. Infrastructure */ + +static void generic_subdriver_exit(struct generic_sub_driver *sub_driver); + +static int __init generic_subdriver_init(struct generic_sub_driver *sub_driver) +{ + int ret; + + if (!sub_driver || !sub_driver->driver) + return -EINVAL; + + ret = platform_driver_register(sub_driver->driver); + if (ret) + return -EINVAL; + + if (sub_driver->init) + sub_driver->init(sub_driver); + + if (sub_driver->notify) { + ret = setup_acpi_notify(sub_driver); + if (ret == -ENODEV) { + ret = 0; + goto err_out; + } + if (ret < 0) + goto err_out; + } + + return 0; + +err_out: + generic_subdriver_exit(sub_driver); + return (ret < 0) ? ret : 0; +} + +static void generic_subdriver_exit(struct generic_sub_driver *sub_driver) +{ + + if (sub_driver->acpi_notify_installed) { + acpi_remove_notify_handler(*sub_driver->handle, + sub_driver->type, dispatch_acpi_notify); + sub_driver->acpi_notify_installed = 0; + } + platform_driver_unregister(sub_driver->driver); +} + +static struct generic_sub_driver generic_sub_drivers[] __refdata = { + { + .name = "hotkey", + .init = event_init, + .notify = event_notify, + .handle = &hotkey_handle, + .type = ACPI_DEVICE_NOTIFY, + .driver = &loongson_hotkey_driver, + }, +}; + +static int __init generic_acpi_laptop_init(void) +{ + bool ec_found; + int i, ret, status; + + if (acpi_disabled) + return -ENODEV; + + /* The EC device is required */ + ec_found = acpi_dev_found(LOONGSON_ACPI_EC_HID); + if (!ec_found) + return -ENODEV; + + /* Enable SCI for EC */ + acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); + + generic_inputdev = input_allocate_device(); + if (!generic_inputdev) { + pr_err("Unable to allocate input device\n"); + return -ENOMEM; + } + + /* Prepare input device, but don't register */ + generic_inputdev->name = + "Loongson Generic Laptop/All-in-One Extra Buttons"; + generic_inputdev->phys = ACPI_LAPTOP_NAME "/input0"; + generic_inputdev->id.bustype = BUS_HOST; + generic_inputdev->dev.parent = NULL; + + /* Init subdrivers */ + for (i = 0; i < ARRAY_SIZE(generic_sub_drivers); i++) { + ret = generic_subdriver_init(&generic_sub_drivers[i]); + if (ret < 0) { + input_free_device(generic_inputdev); + while (--i >= 0) + generic_subdriver_exit(&generic_sub_drivers[i]); + return ret; + } + } + + ret = input_register_device(generic_inputdev); + if (ret < 0) { + input_free_device(generic_inputdev); + while (--i >= 0) + generic_subdriver_exit(&generic_sub_drivers[i]); + pr_err("Unable to register input device\n"); + return ret; + } + + input_device_registered = 1; + + if (acpi_evalf(hotkey_handle, &status, "ECBG", "d")) { + pr_info("Loongson Laptop used, init brightness is 0x%x\n", status); + ret = laptop_backlight_register(); + if (ret < 0) + pr_err("Loongson Laptop: laptop-backlight device register failed\n"); + } + + return 0; +} + +static void __exit generic_acpi_laptop_exit(void) +{ + if (generic_inputdev) { + if (input_device_registered) + input_unregister_device(generic_inputdev); + else + input_free_device(generic_inputdev); + } +} + +module_init(generic_acpi_laptop_init); +module_exit(generic_acpi_laptop_exit); + +MODULE_AUTHOR("Jianmin Lv <[email protected]>"); +MODULE_AUTHOR("Huacai Chen <[email protected]>"); +MODULE_DESCRIPTION("Loongson 
Laptop/All-in-One ACPI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c index 617dbf98980e..97cfbc520a02 100644 --- a/drivers/platform/x86/intel/int0002_vgpio.c +++ b/drivers/platform/x86/intel/int0002_vgpio.c @@ -125,8 +125,7 @@ static irqreturn_t int0002_irq(int irq, void *data) if (!(gpe_sts_reg & GPE0A_PME_B0_STS_BIT)) return IRQ_NONE; - generic_handle_irq(irq_find_mapping(chip->irq.domain, - GPE0A_PME_B0_VIRT_GPIO_PIN)); + generic_handle_domain_irq_safe(chip->irq.domain, GPE0A_PME_B0_VIRT_GPIO_PIN); pm_wakeup_hard_event(chip->parent); diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index d36c3f597f77..a48d9b7d2921 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -3657,6 +3657,7 @@ ptp_ocp_detach_sysfs(struct ptp_ocp *bp) struct device *dev = &bp->dev; sysfs_remove_link(&dev->kobj, "ttyGNSS"); + sysfs_remove_link(&dev->kobj, "ttyGNSS2"); sysfs_remove_link(&dev->kobj, "ttyMAC"); sysfs_remove_link(&dev->kobj, "ptp"); sysfs_remove_link(&dev->kobj, "pps"); diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index 86d9e428357b..7f5402fe857a 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -12,7 +12,6 @@ #include <linux/module.h> #include <linux/init.h> -#include <linux/device.h> #include <linux/slab.h> #include <linux/mdev.h> @@ -142,7 +141,6 @@ static struct vfio_ccw_private *vfio_ccw_alloc_private(struct subchannel *sch) INIT_LIST_HEAD(&private->crw); INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo); INIT_WORK(&private->crw_work, vfio_ccw_crw_todo); - atomic_set(&private->avail, 1); private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1), GFP_KERNEL); @@ -203,7 +201,6 @@ static void vfio_ccw_free_private(struct vfio_ccw_private *private) mutex_destroy(&private->io_mutex); kfree(private); } - static int vfio_ccw_sch_probe(struct subchannel *sch) { struct pmcw *pmcw = &sch->schib.pmcw; @@ -222,7 +219,12 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) dev_set_drvdata(&sch->dev, private); - ret = mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver); + private->mdev_type.sysfs_name = "io"; + private->mdev_type.pretty_name = "I/O subchannel (Non-QDIO)"; + private->mdev_types[0] = &private->mdev_type; + ret = mdev_register_parent(&private->parent, &sch->dev, + &vfio_ccw_mdev_driver, + private->mdev_types, 1); if (ret) goto out_free; @@ -241,7 +243,7 @@ static void vfio_ccw_sch_remove(struct subchannel *sch) { struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); - mdev_unregister_device(&sch->dev); + mdev_unregister_parent(&private->parent); dev_set_drvdata(&sch->dev, NULL); diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index 4a806a2273b5..6ae4d012d800 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c @@ -11,7 +11,6 @@ */ #include <linux/vfio.h> -#include <linux/mdev.h> #include <linux/nospec.h> #include <linux/slab.h> @@ -45,47 +44,14 @@ static void vfio_ccw_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length) vfio_ccw_mdev_reset(private); } -static ssize_t name_show(struct mdev_type *mtype, - struct mdev_type_attribute *attr, char *buf) -{ - return sprintf(buf, "I/O subchannel (Non-QDIO)\n"); -} -static MDEV_TYPE_ATTR_RO(name); - -static ssize_t device_api_show(struct mdev_type *mtype, - struct mdev_type_attribute *attr, char *buf) -{ - return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING); -} -static 
MDEV_TYPE_ATTR_RO(device_api); - -static ssize_t available_instances_show(struct mdev_type *mtype, - struct mdev_type_attribute *attr, - char *buf) +static int vfio_ccw_mdev_init_dev(struct vfio_device *vdev) { struct vfio_ccw_private *private = - dev_get_drvdata(mtype_get_parent_dev(mtype)); + container_of(vdev, struct vfio_ccw_private, vdev); - return sprintf(buf, "%d\n", atomic_read(&private->avail)); + init_completion(&private->release_comp); + return 0; } -static MDEV_TYPE_ATTR_RO(available_instances); - -static struct attribute *mdev_types_attrs[] = { - &mdev_type_attr_name.attr, - &mdev_type_attr_device_api.attr, - &mdev_type_attr_available_instances.attr, - NULL, -}; - -static struct attribute_group mdev_type_group = { - .name = "io", - .attrs = mdev_types_attrs, -}; - -static struct attribute_group *mdev_type_groups[] = { - &mdev_type_group, - NULL, -}; static int vfio_ccw_mdev_probe(struct mdev_device *mdev) { @@ -95,12 +61,9 @@ static int vfio_ccw_mdev_probe(struct mdev_device *mdev) if (private->state == VFIO_CCW_STATE_NOT_OPER) return -ENODEV; - if (atomic_dec_if_positive(&private->avail) < 0) - return -EPERM; - - memset(&private->vdev, 0, sizeof(private->vdev)); - vfio_init_group_dev(&private->vdev, &mdev->dev, - &vfio_ccw_dev_ops); + ret = vfio_init_device(&private->vdev, &mdev->dev, &vfio_ccw_dev_ops); + if (ret) + return ret; VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: create\n", private->sch->schid.cssid, @@ -109,16 +72,32 @@ static int vfio_ccw_mdev_probe(struct mdev_device *mdev) ret = vfio_register_emulated_iommu_dev(&private->vdev); if (ret) - goto err_atomic; + goto err_put_vdev; dev_set_drvdata(&mdev->dev, private); return 0; -err_atomic: - vfio_uninit_group_dev(&private->vdev); - atomic_inc(&private->avail); +err_put_vdev: + vfio_put_device(&private->vdev); return ret; } +static void vfio_ccw_mdev_release_dev(struct vfio_device *vdev) +{ + struct vfio_ccw_private *private = + container_of(vdev, struct vfio_ccw_private, vdev); + + /* + * We cannot free vfio_ccw_private here because it includes + * parent info which must be free'ed by css driver. + * + * Use a workaround by memset'ing the core device part and + * then notifying the remove path that all active references + * to this device have been released. + */ + memset(vdev, 0, sizeof(*vdev)); + complete(&private->release_comp); +} + static void vfio_ccw_mdev_remove(struct mdev_device *mdev) { struct vfio_ccw_private *private = dev_get_drvdata(mdev->dev.parent); @@ -130,8 +109,16 @@ static void vfio_ccw_mdev_remove(struct mdev_device *mdev) vfio_unregister_group_dev(&private->vdev); - vfio_uninit_group_dev(&private->vdev); - atomic_inc(&private->avail); + vfio_put_device(&private->vdev); + /* + * Wait for all active references on mdev are released so it + * is safe to defer kfree() to a later point. + * + * TODO: the clean fix is to split parent/mdev info from ccw + * private structure so each can be managed in its own life + * cycle. 
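 * (Editor's note: release_comp is completed in
 * vfio_ccw_mdev_release_dev() once the last reference to the vfio
 * device is dropped, so the wait below pairs with that release path.)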
+ */ + wait_for_completion(&private->release_comp); } static int vfio_ccw_mdev_open_device(struct vfio_device *vdev) @@ -592,6 +579,8 @@ static void vfio_ccw_mdev_request(struct vfio_device *vdev, unsigned int count) } static const struct vfio_device_ops vfio_ccw_dev_ops = { + .init = vfio_ccw_mdev_init_dev, + .release = vfio_ccw_mdev_release_dev, .open_device = vfio_ccw_mdev_open_device, .close_device = vfio_ccw_mdev_close_device, .read = vfio_ccw_mdev_read, @@ -602,6 +591,8 @@ static const struct vfio_device_ops vfio_ccw_dev_ops = { }; struct mdev_driver vfio_ccw_mdev_driver = { + .device_api = VFIO_DEVICE_API_CCW_STRING, + .max_instances = 1, .driver = { .name = "vfio_ccw_mdev", .owner = THIS_MODULE, @@ -609,5 +600,4 @@ struct mdev_driver vfio_ccw_mdev_driver = { }, .probe = vfio_ccw_mdev_probe, .remove = vfio_ccw_mdev_remove, - .supported_type_groups = mdev_type_groups, }; diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h index cd24b7fada91..bd5fb81456af 100644 --- a/drivers/s390/cio/vfio_ccw_private.h +++ b/drivers/s390/cio/vfio_ccw_private.h @@ -18,6 +18,7 @@ #include <linux/workqueue.h> #include <linux/vfio_ccw.h> #include <linux/vfio.h> +#include <linux/mdev.h> #include <asm/crw.h> #include <asm/debug.h> @@ -72,7 +73,6 @@ struct vfio_ccw_crw { * @sch: pointer to the subchannel * @state: internal state of the device * @completion: synchronization helper of the I/O completion - * @avail: available for creating a mediated device * @io_region: MMIO region to input/output I/O arguments/results * @io_mutex: protect against concurrent update of I/O regions * @region: additional regions for other subchannel operations @@ -88,13 +88,14 @@ struct vfio_ccw_crw { * @req_trigger: eventfd ctx for signaling userspace to return device * @io_work: work for deferral process of I/O handling * @crw_work: work for deferral process of CRW handling + * @release_comp: synchronization helper for vfio device release + * @parent: parent data structures for mdevs created */ struct vfio_ccw_private { struct vfio_device vdev; struct subchannel *sch; int state; struct completion *completion; - atomic_t avail; struct ccw_io_region *io_region; struct mutex io_mutex; struct vfio_ccw_region *region; @@ -113,6 +114,12 @@ struct vfio_ccw_private { struct eventfd_ctx *req_trigger; struct work_struct io_work; struct work_struct crw_work; + + struct completion release_comp; + + struct mdev_parent parent; + struct mdev_type mdev_type; + struct mdev_type *mdev_types[1]; } __aligned(8); int vfio_ccw_sch_quiesce(struct subchannel *sch); diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index ee82207b4e60..0b4cc8c597ae 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -684,42 +684,41 @@ static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm, AP_DOMAINS); } -static int vfio_ap_mdev_probe(struct mdev_device *mdev) +static int vfio_ap_mdev_init_dev(struct vfio_device *vdev) { - struct ap_matrix_mdev *matrix_mdev; - int ret; - - if ((atomic_dec_if_positive(&matrix_dev->available_instances) < 0)) - return -EPERM; - - matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL); - if (!matrix_mdev) { - ret = -ENOMEM; - goto err_dec_available; - } - vfio_init_group_dev(&matrix_mdev->vdev, &mdev->dev, - &vfio_ap_matrix_dev_ops); + struct ap_matrix_mdev *matrix_mdev = + container_of(vdev, struct ap_matrix_mdev, vdev); - matrix_mdev->mdev = mdev; + matrix_mdev->mdev = to_mdev_device(vdev->dev); 
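	/*
	 * (Editor's note: vfio_alloc_device() allocates the containing
	 * ap_matrix_mdev together with its embedded struct vfio_device,
	 * so init recovers the private structure via container_of()
	 * rather than a separate kzalloc()/vfio_init_group_dev() pair.)
	 */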
vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix); matrix_mdev->pqap_hook = handle_pqap; vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb); hash_init(matrix_mdev->qtable.queues); + return 0; +} + +static int vfio_ap_mdev_probe(struct mdev_device *mdev) +{ + struct ap_matrix_mdev *matrix_mdev; + int ret; + + matrix_mdev = vfio_alloc_device(ap_matrix_mdev, vdev, &mdev->dev, + &vfio_ap_matrix_dev_ops); + if (IS_ERR(matrix_mdev)) + return PTR_ERR(matrix_mdev); + ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev); if (ret) - goto err_list; + goto err_put_vdev; dev_set_drvdata(&mdev->dev, matrix_mdev); mutex_lock(&matrix_dev->mdevs_lock); list_add(&matrix_mdev->node, &matrix_dev->mdev_list); mutex_unlock(&matrix_dev->mdevs_lock); return 0; -err_list: - vfio_uninit_group_dev(&matrix_mdev->vdev); - kfree(matrix_mdev); -err_dec_available: - atomic_inc(&matrix_dev->available_instances); +err_put_vdev: + vfio_put_device(&matrix_mdev->vdev); return ret; } @@ -766,6 +765,11 @@ static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev) } } +static void vfio_ap_mdev_release_dev(struct vfio_device *vdev) +{ + vfio_free_device(vdev); +} + static void vfio_ap_mdev_remove(struct mdev_device *mdev) { struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev); @@ -779,54 +783,9 @@ static void vfio_ap_mdev_remove(struct mdev_device *mdev) list_del(&matrix_mdev->node); mutex_unlock(&matrix_dev->mdevs_lock); mutex_unlock(&matrix_dev->guests_lock); - vfio_uninit_group_dev(&matrix_mdev->vdev); - kfree(matrix_mdev); - atomic_inc(&matrix_dev->available_instances); + vfio_put_device(&matrix_mdev->vdev); } -static ssize_t name_show(struct mdev_type *mtype, - struct mdev_type_attribute *attr, char *buf) -{ - return sprintf(buf, "%s\n", VFIO_AP_MDEV_NAME_HWVIRT); -} - -static MDEV_TYPE_ATTR_RO(name); - -static ssize_t available_instances_show(struct mdev_type *mtype, - struct mdev_type_attribute *attr, - char *buf) -{ - return sprintf(buf, "%d\n", - atomic_read(&matrix_dev->available_instances)); -} - -static MDEV_TYPE_ATTR_RO(available_instances); - -static ssize_t device_api_show(struct mdev_type *mtype, - struct mdev_type_attribute *attr, char *buf) -{ - return sprintf(buf, "%s\n", VFIO_DEVICE_API_AP_STRING); -} - -static MDEV_TYPE_ATTR_RO(device_api); - -static struct attribute *vfio_ap_mdev_type_attrs[] = { - &mdev_type_attr_name.attr, - &mdev_type_attr_device_api.attr, - &mdev_type_attr_available_instances.attr, - NULL, -}; - -static struct attribute_group vfio_ap_mdev_hwvirt_type_group = { - .name = VFIO_AP_MDEV_TYPE_HWVIRT, - .attrs = vfio_ap_mdev_type_attrs, -}; - -static struct attribute_group *vfio_ap_mdev_type_groups[] = { - &vfio_ap_mdev_hwvirt_type_group, - NULL, -}; - #define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \ "already assigned to %s" @@ -1824,6 +1783,8 @@ static const struct attribute_group vfio_queue_attr_group = { }; static const struct vfio_device_ops vfio_ap_matrix_dev_ops = { + .init = vfio_ap_mdev_init_dev, + .release = vfio_ap_mdev_release_dev, .open_device = vfio_ap_mdev_open_device, .close_device = vfio_ap_mdev_close_device, .ioctl = vfio_ap_mdev_ioctl, @@ -1831,6 +1792,8 @@ static const struct vfio_device_ops vfio_ap_matrix_dev_ops = { }; static struct mdev_driver vfio_ap_matrix_driver = { + .device_api = VFIO_DEVICE_API_AP_STRING, + .max_instances = MAX_ZDEV_ENTRIES_EXT, .driver = { .name = "vfio_ap_mdev", .owner = THIS_MODULE, @@ -1839,20 +1802,22 @@ static struct mdev_driver vfio_ap_matrix_driver = { 
}, .probe = vfio_ap_mdev_probe, .remove = vfio_ap_mdev_remove, - .supported_type_groups = vfio_ap_mdev_type_groups, }; int vfio_ap_mdev_register(void) { int ret; - atomic_set(&matrix_dev->available_instances, MAX_ZDEV_ENTRIES_EXT); - ret = mdev_register_driver(&vfio_ap_matrix_driver); if (ret) return ret; - ret = mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_driver); + matrix_dev->mdev_type.sysfs_name = VFIO_AP_MDEV_TYPE_HWVIRT; + matrix_dev->mdev_type.pretty_name = VFIO_AP_MDEV_NAME_HWVIRT; + matrix_dev->mdev_types[0] = &matrix_dev->mdev_type; + ret = mdev_register_parent(&matrix_dev->parent, &matrix_dev->device, + &vfio_ap_matrix_driver, + matrix_dev->mdev_types, 1); if (ret) goto err_driver; return 0; @@ -1864,7 +1829,7 @@ err_driver: void vfio_ap_mdev_unregister(void) { - mdev_unregister_device(&matrix_dev->device); + mdev_unregister_parent(&matrix_dev->parent); mdev_unregister_driver(&vfio_ap_matrix_driver); } diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h index d782cf463eab..2eddd5f34ed3 100644 --- a/drivers/s390/crypto/vfio_ap_private.h +++ b/drivers/s390/crypto/vfio_ap_private.h @@ -13,7 +13,6 @@ #define _VFIO_AP_PRIVATE_H_ #include <linux/types.h> -#include <linux/device.h> #include <linux/mdev.h> #include <linux/delay.h> #include <linux/mutex.h> @@ -30,7 +29,6 @@ * struct ap_matrix_dev - Contains the data for the matrix device. * * @device: generic device structure associated with the AP matrix device - * @available_instances: number of mediated matrix devices that can be created * @info: the struct containing the output from the PQAP(QCI) instruction * @mdev_list: the list of mediated matrix devices created * @mdevs_lock: mutex for locking the AP matrix device. This lock will be @@ -47,12 +45,14 @@ */ struct ap_matrix_dev { struct device device; - atomic_t available_instances; struct ap_config_info info; struct list_head mdev_list; struct mutex mdevs_lock; /* serializes access to each ap_matrix_mdev */ struct ap_driver *vfio_ap_drv; struct mutex guests_lock; /* serializes access to each KVM guest */ + struct mdev_parent parent; + struct mdev_type mdev_type; + struct mdev_type *mdev_types[]; }; extern struct ap_matrix_dev *matrix_dev; diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c index 2de3896489c8..897cb8db5084 100644 --- a/drivers/ssb/driver_gpio.c +++ b/drivers/ssb/driver_gpio.c @@ -132,7 +132,8 @@ static irqreturn_t ssb_gpio_irq_chipco_handler(int irq, void *dev_id) return IRQ_NONE; for_each_set_bit(gpio, &irqs, bus->gpio.ngpio) - generic_handle_irq(ssb_gpio_to_irq(&bus->gpio, gpio)); + generic_handle_domain_irq_safe(bus->irq_domain, gpio); + ssb_chipco_gpio_polarity(chipco, irqs, val & irqs); return IRQ_HANDLED; @@ -330,7 +331,8 @@ static irqreturn_t ssb_gpio_irq_extif_handler(int irq, void *dev_id) return IRQ_NONE; for_each_set_bit(gpio, &irqs, bus->gpio.ngpio) - generic_handle_irq(ssb_gpio_to_irq(&bus->gpio, gpio)); + generic_handle_domain_irq_safe(bus->irq_domain, gpio); + ssb_extif_gpio_polarity(extif, irqs, val & irqs); return IRQ_HANDLED; diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig index 6130d00252ed..86c381ceb9a1 100644 --- a/drivers/vfio/Kconfig +++ b/drivers/vfio/Kconfig @@ -3,6 +3,7 @@ menuconfig VFIO tristate "VFIO Non-Privileged userspace driver framework" select IOMMU_API select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64) + select INTERVAL_TREE help VFIO provides a framework for secure userspace device drivers. 
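Editor's aside: the vfio_ccw and vfio_ap conversions above follow the same new registration pattern, in which mediated-device types are described statically and handed to mdev_register_parent() instead of being exported through hand-rolled supported_type_groups attributes. A condensed sketch with hypothetical names (my_type, my_parent, my_register):

```c
#include <linux/mdev.h>

static struct mdev_type my_type = {
	.sysfs_name	= "io",
	.pretty_name	= "Example mediated device",
};
static struct mdev_type *my_types[] = { &my_type };
static struct mdev_parent my_parent;

static int my_register(struct device *dev, struct mdev_driver *drv)
{
	/* Replaces mdev_register_device(dev, drv) plus sysfs type groups */
	return mdev_register_parent(&my_parent, dev, drv, my_types,
				    ARRAY_SIZE(my_types));
}
```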
See Documentation/driver-api/vfio.rst for more details. diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile index 1a32357592e3..b693a1169286 100644 --- a/drivers/vfio/Makefile +++ b/drivers/vfio/Makefile @@ -1,9 +1,12 @@ # SPDX-License-Identifier: GPL-2.0 vfio_virqfd-y := virqfd.o -vfio-y += vfio_main.o - obj-$(CONFIG_VFIO) += vfio.o + +vfio-y += vfio_main.o \ + iova_bitmap.o \ + container.o + obj-$(CONFIG_VFIO_VIRQFD) += vfio_virqfd.o obj-$(CONFIG_VFIO_IOMMU_TYPE1) += vfio_iommu_type1.o obj-$(CONFIG_VFIO_IOMMU_SPAPR_TCE) += vfio_iommu_spapr_tce.o diff --git a/drivers/vfio/container.c b/drivers/vfio/container.c new file mode 100644 index 000000000000..d74164abbf40 --- /dev/null +++ b/drivers/vfio/container.c @@ -0,0 +1,680 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2012 Red Hat, Inc. All rights reserved. + * + * VFIO container (/dev/vfio/vfio) + */ +#include <linux/file.h> +#include <linux/slab.h> +#include <linux/fs.h> +#include <linux/capability.h> +#include <linux/iommu.h> +#include <linux/miscdevice.h> +#include <linux/vfio.h> +#include <uapi/linux/vfio.h> + +#include "vfio.h" + +struct vfio_container { + struct kref kref; + struct list_head group_list; + struct rw_semaphore group_lock; + struct vfio_iommu_driver *iommu_driver; + void *iommu_data; + bool noiommu; +}; + +static struct vfio { + struct list_head iommu_drivers_list; + struct mutex iommu_drivers_lock; +} vfio; + +#ifdef CONFIG_VFIO_NOIOMMU +bool vfio_noiommu __read_mostly; +module_param_named(enable_unsafe_noiommu_mode, + vfio_noiommu, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)"); +#endif + +static void *vfio_noiommu_open(unsigned long arg) +{ + if (arg != VFIO_NOIOMMU_IOMMU) + return ERR_PTR(-EINVAL); + if (!capable(CAP_SYS_RAWIO)) + return ERR_PTR(-EPERM); + + return NULL; +} + +static void vfio_noiommu_release(void *iommu_data) +{ +} + +static long vfio_noiommu_ioctl(void *iommu_data, + unsigned int cmd, unsigned long arg) +{ + if (cmd == VFIO_CHECK_EXTENSION) + return vfio_noiommu && (arg == VFIO_NOIOMMU_IOMMU) ? 1 : 0; + + return -ENOTTY; +} + +static int vfio_noiommu_attach_group(void *iommu_data, + struct iommu_group *iommu_group, enum vfio_group_type type) +{ + return 0; +} + +static void vfio_noiommu_detach_group(void *iommu_data, + struct iommu_group *iommu_group) +{ +} + +static const struct vfio_iommu_driver_ops vfio_noiommu_ops = { + .name = "vfio-noiommu", + .owner = THIS_MODULE, + .open = vfio_noiommu_open, + .release = vfio_noiommu_release, + .ioctl = vfio_noiommu_ioctl, + .attach_group = vfio_noiommu_attach_group, + .detach_group = vfio_noiommu_detach_group, +}; + +/* + * Only noiommu containers can use vfio-noiommu and noiommu containers can only + * use vfio-noiommu. 
+ */ +static bool vfio_iommu_driver_allowed(struct vfio_container *container, + const struct vfio_iommu_driver *driver) +{ + if (!IS_ENABLED(CONFIG_VFIO_NOIOMMU)) + return true; + return container->noiommu == (driver->ops == &vfio_noiommu_ops); +} + +/* + * IOMMU driver registration + */ +int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops) +{ + struct vfio_iommu_driver *driver, *tmp; + + if (WARN_ON(!ops->register_device != !ops->unregister_device)) + return -EINVAL; + + driver = kzalloc(sizeof(*driver), GFP_KERNEL); + if (!driver) + return -ENOMEM; + + driver->ops = ops; + + mutex_lock(&vfio.iommu_drivers_lock); + + /* Check for duplicates */ + list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) { + if (tmp->ops == ops) { + mutex_unlock(&vfio.iommu_drivers_lock); + kfree(driver); + return -EINVAL; + } + } + + list_add(&driver->vfio_next, &vfio.iommu_drivers_list); + + mutex_unlock(&vfio.iommu_drivers_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(vfio_register_iommu_driver); + +void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops) +{ + struct vfio_iommu_driver *driver; + + mutex_lock(&vfio.iommu_drivers_lock); + list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) { + if (driver->ops == ops) { + list_del(&driver->vfio_next); + mutex_unlock(&vfio.iommu_drivers_lock); + kfree(driver); + return; + } + } + mutex_unlock(&vfio.iommu_drivers_lock); +} +EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver); + +/* + * Container objects - containers are created when /dev/vfio/vfio is + * opened, but their lifecycle extends until the last user is done, so + * it's freed via kref. Must support container/group/device being + * closed in any order. + */ +static void vfio_container_release(struct kref *kref) +{ + struct vfio_container *container; + container = container_of(kref, struct vfio_container, kref); + + kfree(container); +} + +static void vfio_container_get(struct vfio_container *container) +{ + kref_get(&container->kref); +} + +static void vfio_container_put(struct vfio_container *container) +{ + kref_put(&container->kref, vfio_container_release); +} + +void vfio_device_container_register(struct vfio_device *device) +{ + struct vfio_iommu_driver *iommu_driver = + device->group->container->iommu_driver; + + if (iommu_driver && iommu_driver->ops->register_device) + iommu_driver->ops->register_device( + device->group->container->iommu_data, device); +} + +void vfio_device_container_unregister(struct vfio_device *device) +{ + struct vfio_iommu_driver *iommu_driver = + device->group->container->iommu_driver; + + if (iommu_driver && iommu_driver->ops->unregister_device) + iommu_driver->ops->unregister_device( + device->group->container->iommu_data, device); +} + +long vfio_container_ioctl_check_extension(struct vfio_container *container, + unsigned long arg) +{ + struct vfio_iommu_driver *driver; + long ret = 0; + + down_read(&container->group_lock); + + driver = container->iommu_driver; + + switch (arg) { + /* No base extensions yet */ + default: + /* + * If no driver is set, poll all registered drivers for + * extensions and return the first positive result. If + * a driver is already set, further queries will be passed + * only to that driver. 
+ */ + if (!driver) { + mutex_lock(&vfio.iommu_drivers_lock); + list_for_each_entry(driver, &vfio.iommu_drivers_list, + vfio_next) { + + if (!list_empty(&container->group_list) && + !vfio_iommu_driver_allowed(container, + driver)) + continue; + if (!try_module_get(driver->ops->owner)) + continue; + + ret = driver->ops->ioctl(NULL, + VFIO_CHECK_EXTENSION, + arg); + module_put(driver->ops->owner); + if (ret > 0) + break; + } + mutex_unlock(&vfio.iommu_drivers_lock); + } else + ret = driver->ops->ioctl(container->iommu_data, + VFIO_CHECK_EXTENSION, arg); + } + + up_read(&container->group_lock); + + return ret; +} + +/* hold write lock on container->group_lock */ +static int __vfio_container_attach_groups(struct vfio_container *container, + struct vfio_iommu_driver *driver, + void *data) +{ + struct vfio_group *group; + int ret = -ENODEV; + + list_for_each_entry(group, &container->group_list, container_next) { + ret = driver->ops->attach_group(data, group->iommu_group, + group->type); + if (ret) + goto unwind; + } + + return ret; + +unwind: + list_for_each_entry_continue_reverse(group, &container->group_list, + container_next) { + driver->ops->detach_group(data, group->iommu_group); + } + + return ret; +} + +static long vfio_ioctl_set_iommu(struct vfio_container *container, + unsigned long arg) +{ + struct vfio_iommu_driver *driver; + long ret = -ENODEV; + + down_write(&container->group_lock); + + /* + * The container is designed to be an unprivileged interface while + * the group can be assigned to specific users. Therefore, only by + * adding a group to a container does the user get the privilege of + * enabling the iommu, which may allocate finite resources. There + * is no unset_iommu, but by removing all the groups from a container, + * the container is deprivileged and returns to an unset state. + */ + if (list_empty(&container->group_list) || container->iommu_driver) { + up_write(&container->group_lock); + return -EINVAL; + } + + mutex_lock(&vfio.iommu_drivers_lock); + list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) { + void *data; + + if (!vfio_iommu_driver_allowed(container, driver)) + continue; + if (!try_module_get(driver->ops->owner)) + continue; + + /* + * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION, + * so test which iommu driver reported support for this + * extension and call open on them. We also pass them the + * magic, allowing a single driver to support multiple + * interfaces if they'd like. 
+ */ + if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) { + module_put(driver->ops->owner); + continue; + } + + data = driver->ops->open(arg); + if (IS_ERR(data)) { + ret = PTR_ERR(data); + module_put(driver->ops->owner); + continue; + } + + ret = __vfio_container_attach_groups(container, driver, data); + if (ret) { + driver->ops->release(data); + module_put(driver->ops->owner); + continue; + } + + container->iommu_driver = driver; + container->iommu_data = data; + break; + } + + mutex_unlock(&vfio.iommu_drivers_lock); + up_write(&container->group_lock); + + return ret; +} + +static long vfio_fops_unl_ioctl(struct file *filep, + unsigned int cmd, unsigned long arg) +{ + struct vfio_container *container = filep->private_data; + struct vfio_iommu_driver *driver; + void *data; + long ret = -EINVAL; + + if (!container) + return ret; + + switch (cmd) { + case VFIO_GET_API_VERSION: + ret = VFIO_API_VERSION; + break; + case VFIO_CHECK_EXTENSION: + ret = vfio_container_ioctl_check_extension(container, arg); + break; + case VFIO_SET_IOMMU: + ret = vfio_ioctl_set_iommu(container, arg); + break; + default: + driver = container->iommu_driver; + data = container->iommu_data; + + if (driver) /* passthrough all unrecognized ioctls */ + ret = driver->ops->ioctl(data, cmd, arg); + } + + return ret; +} + +static int vfio_fops_open(struct inode *inode, struct file *filep) +{ + struct vfio_container *container; + + container = kzalloc(sizeof(*container), GFP_KERNEL); + if (!container) + return -ENOMEM; + + INIT_LIST_HEAD(&container->group_list); + init_rwsem(&container->group_lock); + kref_init(&container->kref); + + filep->private_data = container; + + return 0; +} + +static int vfio_fops_release(struct inode *inode, struct file *filep) +{ + struct vfio_container *container = filep->private_data; + struct vfio_iommu_driver *driver = container->iommu_driver; + + if (driver && driver->ops->notify) + driver->ops->notify(container->iommu_data, + VFIO_IOMMU_CONTAINER_CLOSE); + + filep->private_data = NULL; + + vfio_container_put(container); + + return 0; +} + +static const struct file_operations vfio_fops = { + .owner = THIS_MODULE, + .open = vfio_fops_open, + .release = vfio_fops_release, + .unlocked_ioctl = vfio_fops_unl_ioctl, + .compat_ioctl = compat_ptr_ioctl, +}; + +struct vfio_container *vfio_container_from_file(struct file *file) +{ + struct vfio_container *container; + + /* Sanity check, is this really our fd? 
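+	 * The file comes straight from userspace, e.g. (illustrative only,
+	 * the fd names are hypothetical):
+	 *
+	 *	int container = open("/dev/vfio/vfio", O_RDWR);
+	 *	ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container);
+	 *
+	 * so nothing guarantees it is one of ours.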
*/ + if (file->f_op != &vfio_fops) + return NULL; + + container = file->private_data; + WARN_ON(!container); /* fget ensures we don't race vfio_release */ + return container; +} + +static struct miscdevice vfio_dev = { + .minor = VFIO_MINOR, + .name = "vfio", + .fops = &vfio_fops, + .nodename = "vfio/vfio", + .mode = S_IRUGO | S_IWUGO, +}; + +int vfio_container_attach_group(struct vfio_container *container, + struct vfio_group *group) +{ + struct vfio_iommu_driver *driver; + int ret = 0; + + lockdep_assert_held(&group->group_lock); + + if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) + return -EPERM; + + down_write(&container->group_lock); + + /* Real groups and fake groups cannot mix */ + if (!list_empty(&container->group_list) && + container->noiommu != (group->type == VFIO_NO_IOMMU)) { + ret = -EPERM; + goto out_unlock_container; + } + + if (group->type == VFIO_IOMMU) { + ret = iommu_group_claim_dma_owner(group->iommu_group, group); + if (ret) + goto out_unlock_container; + } + + driver = container->iommu_driver; + if (driver) { + ret = driver->ops->attach_group(container->iommu_data, + group->iommu_group, + group->type); + if (ret) { + if (group->type == VFIO_IOMMU) + iommu_group_release_dma_owner( + group->iommu_group); + goto out_unlock_container; + } + } + + group->container = container; + group->container_users = 1; + container->noiommu = (group->type == VFIO_NO_IOMMU); + list_add(&group->container_next, &container->group_list); + + /* Get a reference on the container and mark a user within the group */ + vfio_container_get(container); + +out_unlock_container: + up_write(&container->group_lock); + return ret; +} + +void vfio_group_detach_container(struct vfio_group *group) +{ + struct vfio_container *container = group->container; + struct vfio_iommu_driver *driver; + + lockdep_assert_held(&group->group_lock); + WARN_ON(group->container_users != 1); + + down_write(&container->group_lock); + + driver = container->iommu_driver; + if (driver) + driver->ops->detach_group(container->iommu_data, + group->iommu_group); + + if (group->type == VFIO_IOMMU) + iommu_group_release_dma_owner(group->iommu_group); + + group->container = NULL; + group->container_users = 0; + list_del(&group->container_next); + + /* Detaching the last group deprivileges a container, remove iommu */ + if (driver && list_empty(&container->group_list)) { + driver->ops->release(container->iommu_data); + module_put(driver->ops->owner); + container->iommu_driver = NULL; + container->iommu_data = NULL; + } + + up_write(&container->group_lock); + + vfio_container_put(container); +} + +int vfio_device_assign_container(struct vfio_device *device) +{ + struct vfio_group *group = device->group; + + lockdep_assert_held(&group->group_lock); + + if (!group->container || !group->container->iommu_driver || + WARN_ON(!group->container_users)) + return -EINVAL; + + if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) + return -EPERM; + + get_file(group->opened_file); + group->container_users++; + return 0; +} + +void vfio_device_unassign_container(struct vfio_device *device) +{ + mutex_lock(&device->group->group_lock); + WARN_ON(device->group->container_users <= 1); + device->group->container_users--; + fput(device->group->opened_file); + mutex_unlock(&device->group->group_lock); +} + +/* + * Pin contiguous user pages and return their associated host pages for local + * domain only. + * @device [in] : device + * @iova [in] : starting IOVA of user pages to be pinned. + * @npage [in] : count of pages to be pinned. 
This count should not + * be greater than VFIO_PIN_PAGES_MAX_ENTRIES. + * @prot [in] : protection flags + * @pages[out] : array of host pages + * Return error or number of pages pinned. + * + * A driver may only call this function if the vfio_device was created + * by vfio_register_emulated_iommu_dev(). + */ +int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova, + int npage, int prot, struct page **pages) +{ + struct vfio_container *container; + struct vfio_group *group = device->group; + struct vfio_iommu_driver *driver; + int ret; + + if (!pages || !npage || !vfio_assert_device_open(device)) + return -EINVAL; + + if (npage > VFIO_PIN_PAGES_MAX_ENTRIES) + return -E2BIG; + + /* group->container cannot change while a vfio device is open */ + container = group->container; + driver = container->iommu_driver; + if (likely(driver && driver->ops->pin_pages)) + ret = driver->ops->pin_pages(container->iommu_data, + group->iommu_group, iova, + npage, prot, pages); + else + ret = -ENOTTY; + + return ret; +} +EXPORT_SYMBOL(vfio_pin_pages); + +/* + * Unpin contiguous host pages for local domain only. + * @device [in] : device + * @iova [in] : starting address of user pages to be unpinned. + * @npage [in] : count of pages to be unpinned. This count should not + * be greater than VFIO_PIN_PAGES_MAX_ENTRIES. + */ +void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage) +{ + struct vfio_container *container; + struct vfio_iommu_driver *driver; + + if (WARN_ON(npage <= 0 || npage > VFIO_PIN_PAGES_MAX_ENTRIES)) + return; + + if (WARN_ON(!vfio_assert_device_open(device))) + return; + + /* group->container cannot change while a vfio device is open */ + container = device->group->container; + driver = container->iommu_driver; + + driver->ops->unpin_pages(container->iommu_data, iova, npage); +} +EXPORT_SYMBOL(vfio_unpin_pages); + +/* + * This interface allows the CPUs to perform some sort of virtual DMA on + * behalf of the device. + * + * CPUs read/write from/into a range of IOVAs pointing to user space memory + * into/from a kernel buffer. + * + * As the read/write of user space memory is conducted via the CPUs and is + * not a real device DMA, it is not necessary to pin the user space memory. + * + * @device [in] : VFIO device + * @iova [in] : base IOVA of a user space buffer + * @data [in] : pointer to kernel buffer + * @len [in] : kernel buffer length + * @write : indicate read or write + * Return error code on failure or 0 on success. 
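+ *
+ * A minimal (illustrative) use, reading 4 bytes at @iova into a local
+ * buffer:
+ *
+ *	u32 val;
+ *
+ *	if (vfio_dma_rw(device, iova, &val, sizeof(val), false))
+ *		return -EFAULT;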
+ */ +int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data, + size_t len, bool write) +{ + struct vfio_container *container; + struct vfio_iommu_driver *driver; + int ret = 0; + + if (!data || len <= 0 || !vfio_assert_device_open(device)) + return -EINVAL; + + /* group->container cannot change while a vfio device is open */ + container = device->group->container; + driver = container->iommu_driver; + + if (likely(driver && driver->ops->dma_rw)) + ret = driver->ops->dma_rw(container->iommu_data, + iova, data, len, write); + else + ret = -ENOTTY; + return ret; +} +EXPORT_SYMBOL(vfio_dma_rw); + +int __init vfio_container_init(void) +{ + int ret; + + mutex_init(&vfio.iommu_drivers_lock); + INIT_LIST_HEAD(&vfio.iommu_drivers_list); + + ret = misc_register(&vfio_dev); + if (ret) { + pr_err("vfio: misc device register failed\n"); + return ret; + } + + if (IS_ENABLED(CONFIG_VFIO_NOIOMMU)) { + ret = vfio_register_iommu_driver(&vfio_noiommu_ops); + if (ret) + goto err_misc; + } + return 0; + +err_misc: + misc_deregister(&vfio_dev); + return ret; +} + +void vfio_container_cleanup(void) +{ + if (IS_ENABLED(CONFIG_VFIO_NOIOMMU)) + vfio_unregister_iommu_driver(&vfio_noiommu_ops); + misc_deregister(&vfio_dev); + mutex_destroy(&vfio.iommu_drivers_lock); +} diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c index 3feff729f3ce..b16874e913e4 100644 --- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c @@ -108,9 +108,9 @@ static void vfio_fsl_mc_close_device(struct vfio_device *core_vdev) /* reset the device before cleaning up the interrupts */ ret = vfio_fsl_mc_reset_device(vdev); - if (WARN_ON(ret)) + if (ret) dev_warn(&mc_cont->dev, - "VFIO_FLS_MC: reset device has failed (%d)\n", ret); + "VFIO_FSL_MC: reset device has failed (%d)\n", ret); vfio_fsl_mc_irqs_cleanup(vdev); @@ -418,16 +418,7 @@ static int vfio_fsl_mc_mmap(struct vfio_device *core_vdev, return vfio_fsl_mc_mmap_mmio(vdev->regions[index], vma); } -static const struct vfio_device_ops vfio_fsl_mc_ops = { - .name = "vfio-fsl-mc", - .open_device = vfio_fsl_mc_open_device, - .close_device = vfio_fsl_mc_close_device, - .ioctl = vfio_fsl_mc_ioctl, - .read = vfio_fsl_mc_read, - .write = vfio_fsl_mc_write, - .mmap = vfio_fsl_mc_mmap, -}; - +static const struct vfio_device_ops vfio_fsl_mc_ops; static int vfio_fsl_mc_bus_notifier(struct notifier_block *nb, unsigned long action, void *data) { @@ -518,35 +509,43 @@ static void vfio_fsl_uninit_device(struct vfio_fsl_mc_device *vdev) bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb); } -static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev) +static int vfio_fsl_mc_init_dev(struct vfio_device *core_vdev) { - struct vfio_fsl_mc_device *vdev; - struct device *dev = &mc_dev->dev; + struct vfio_fsl_mc_device *vdev = + container_of(core_vdev, struct vfio_fsl_mc_device, vdev); + struct fsl_mc_device *mc_dev = to_fsl_mc_device(core_vdev->dev); int ret; - vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); - if (!vdev) - return -ENOMEM; - - vfio_init_group_dev(&vdev->vdev, dev, &vfio_fsl_mc_ops); vdev->mc_dev = mc_dev; mutex_init(&vdev->igate); if (is_fsl_mc_bus_dprc(mc_dev)) - ret = vfio_assign_device_set(&vdev->vdev, &mc_dev->dev); + ret = vfio_assign_device_set(core_vdev, &mc_dev->dev); else - ret = vfio_assign_device_set(&vdev->vdev, mc_dev->dev.parent); - if (ret) - goto out_uninit; + ret = vfio_assign_device_set(core_vdev, mc_dev->dev.parent); - ret = vfio_fsl_mc_init_device(vdev); if (ret) - goto out_uninit; + return ret; + + /* 
device_set is released by vfio core if @init fails */
+	return vfio_fsl_mc_init_device(vdev);
+}
+
+static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
+{
+	struct vfio_fsl_mc_device *vdev;
+	struct device *dev = &mc_dev->dev;
+	int ret;
+
+	vdev = vfio_alloc_device(vfio_fsl_mc_device, vdev, dev,
+				 &vfio_fsl_mc_ops);
+	if (IS_ERR(vdev))
+		return PTR_ERR(vdev);
 
 	ret = vfio_register_group_dev(&vdev->vdev);
 	if (ret) {
 		dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
-		goto out_device;
+		goto out_put_vdev;
 	}
 
 	ret = vfio_fsl_mc_scan_container(mc_dev);
@@ -557,30 +556,44 @@ static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
 
 out_group_dev:
 	vfio_unregister_group_dev(&vdev->vdev);
-out_device:
-	vfio_fsl_uninit_device(vdev);
-out_uninit:
-	vfio_uninit_group_dev(&vdev->vdev);
-	kfree(vdev);
+out_put_vdev:
+	vfio_put_device(&vdev->vdev);
 	return ret;
 }
 
+static void vfio_fsl_mc_release_dev(struct vfio_device *core_vdev)
+{
+	struct vfio_fsl_mc_device *vdev =
+		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
+
+	vfio_fsl_uninit_device(vdev);
+	mutex_destroy(&vdev->igate);
+	vfio_free_device(core_vdev);
+}
+
 static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
 {
 	struct device *dev = &mc_dev->dev;
 	struct vfio_fsl_mc_device *vdev = dev_get_drvdata(dev);
 
 	vfio_unregister_group_dev(&vdev->vdev);
-	mutex_destroy(&vdev->igate);
-
 	dprc_remove_devices(mc_dev, NULL, 0);
-	vfio_fsl_uninit_device(vdev);
-
-	vfio_uninit_group_dev(&vdev->vdev);
-	kfree(vdev);
+	vfio_put_device(&vdev->vdev);
 	return 0;
 }
 
+static const struct vfio_device_ops vfio_fsl_mc_ops = {
+	.name = "vfio-fsl-mc",
+	.init = vfio_fsl_mc_init_dev,
+	.release = vfio_fsl_mc_release_dev,
+	.open_device = vfio_fsl_mc_open_device,
+	.close_device = vfio_fsl_mc_close_device,
+	.ioctl = vfio_fsl_mc_ioctl,
+	.read = vfio_fsl_mc_read,
+	.write = vfio_fsl_mc_write,
+	.mmap = vfio_fsl_mc_mmap,
+};
+
 static struct fsl_mc_driver vfio_fsl_mc_driver = {
 	.probe = vfio_fsl_mc_probe,
 	.remove = vfio_fsl_mc_remove,
diff --git a/drivers/vfio/iova_bitmap.c b/drivers/vfio/iova_bitmap.c
new file mode 100644
index 000000000000..6631e8befe1b
--- /dev/null
+++ b/drivers/vfio/iova_bitmap.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022, Oracle and/or its affiliates.
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+#include <linux/iova_bitmap.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+
+#define BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
+
+/*
+ * struct iova_bitmap_map - A bitmap representing an IOVA range
+ *
+ * Main data structure for tracking mapped user pages of bitmap data.
+ *
+ * For example, for something recording dirty IOVAs, it will be provided a
+ * struct iova_bitmap structure, as a general structure for iterating the
+ * total IOVA range. The struct iova_bitmap_map, though, represents the
+ * subset of said IOVA space that is pinned by its parent structure (struct
+ * iova_bitmap).
+ *
+ * The user does not need to know the exact location of the bits in the
+ * bitmap. From the user's perspective the only API available is
+ * iova_bitmap_set(), which records the IOVA *range* in the bitmap by
+ * setting the corresponding bits.
+ *
+ * The bitmap is an array of u64s where each bit represents an IOVA
+ * range of (1 << pgshift). Thus the formula for the bitmap data to be
+ * set is:
+ *
+ *   data[(iova / page_size) / 64] & (1ULL << ((iova / page_size) % 64))
+ */
+struct iova_bitmap_map {
+	/* base IOVA representing bit 0 of the first page */
+	unsigned long iova;
+
+	/* page size order that each bit represents */
+	unsigned long pgshift;
+
+	/* page offset of the first user page pinned */
+	unsigned long pgoff;
+
+	/* number of pages pinned */
+	unsigned long npages;
+
+	/* pinned pages representing the bitmap data */
+	struct page **pages;
+};
+
+/*
+ * struct iova_bitmap - The IOVA bitmap object
+ *
+ * Main data structure for iterating over the bitmap data.
+ *
+ * Abstracts the pinning work and iterates in IOVA ranges.
+ * It uses a windowing scheme and pins the bitmap in relatively
+ * big ranges, as described below.
+ *
+ * The bitmap object uses one base page to store all the pinned page
+ * pointers related to the bitmap. For sizeof(struct page *) == 8 it
+ * stores 512 struct page pointers; if the base page size is 4K, that
+ * means 2M of bitmap data is pinned at a time. If the iova_bitmap page
+ * size is also 4K, then the range window to iterate is 64G.
+ *
+ * For example, iterating on a total IOVA range of 4G..128G, it will walk
+ * through this set of ranges:
+ *
+ *    4G - 68G-1  (64G)
+ *   68G - 128G-1 (64G)
+ *
+ * An example of how to use/iterate over the IOVA bitmap APIs:
+ *
+ *   bitmap = iova_bitmap_alloc(iova, length, page_size, data);
+ *   if (IS_ERR(bitmap))
+ *       return PTR_ERR(bitmap);
+ *
+ *   ret = iova_bitmap_for_each(bitmap, arg, dirty_reporter_fn);
+ *
+ *   iova_bitmap_free(bitmap);
+ *
+ * Each iteration of the @dirty_reporter_fn is called with a unique @iova
+ * and @length argument, indicating the current range available through the
+ * iova_bitmap. The @dirty_reporter_fn uses iova_bitmap_set() to mark dirty
+ * areas (@iova_length) within that provided range, as follows:
+ *
+ *   iova_bitmap_set(bitmap, iova, iova_length);
+ *
+ * The internals of the object use an index @mapped_base_index that indexes
+ * which u64 word of the bitmap is mapped, up to @mapped_total_index.
+ * Those keep being incremented until @mapped_total_index is reached while
+ * mapping at most PAGE_SIZE / sizeof(struct page *) pages at a time.
+ *
+ * The IOVA bitmap usually lives alongside whatever tracks DMA-mapped
+ * ranges, or some other form of IOVA range tracking that correlates
+ * with the user-passed bitmap.
+ */
+struct iova_bitmap {
+	/* IOVA range representing the currently mapped bitmap data */
+	struct iova_bitmap_map mapped;
+
+	/* userspace address of the bitmap */
+	u64 __user *bitmap;
+
+	/* u64 index that @mapped points to */
+	unsigned long mapped_base_index;
+
+	/* how many u64 words we can walk in total */
+	unsigned long mapped_total_index;
+
+	/* base IOVA of the whole bitmap */
+	unsigned long iova;
+
+	/* length of the IOVA range for the whole bitmap */
+	size_t length;
+};
+
+/*
+ * Converts a relative IOVA to a bitmap index.
+ * This function provides the index into the u64 array (bitmap::bitmap)
+ * for a given IOVA offset.
+ * Relative IOVA means relative to the bitmap::mapped base IOVA
+ * (stored in mapped::iova). All computations in this file are done using
+ * relative IOVAs and thus avoid an extra subtraction against mapped::iova.
+ * The user API iova_bitmap_set() always uses absolute IOVAs.
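+ *
+ * For example, with a 4K page size, the relative IOVA 0x201000 lands in
+ * u64 word 0x201000 / (64 * 4K) = index 8 of the bitmap array.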
+ */
+static unsigned long iova_bitmap_offset_to_index(struct iova_bitmap *bitmap,
+						 unsigned long iova)
+{
+	unsigned long pgsize = 1 << bitmap->mapped.pgshift;
+
+	return iova / (BITS_PER_TYPE(*bitmap->bitmap) * pgsize);
+}
+
+/*
+ * Converts a bitmap index to a *relative* IOVA.
+ */
+static unsigned long iova_bitmap_index_to_offset(struct iova_bitmap *bitmap,
+						 unsigned long index)
+{
+	unsigned long pgshift = bitmap->mapped.pgshift;
+
+	return (index * BITS_PER_TYPE(*bitmap->bitmap)) << pgshift;
+}
+
+/*
+ * Returns the base IOVA of the mapped range.
+ */
+static unsigned long iova_bitmap_mapped_iova(struct iova_bitmap *bitmap)
+{
+	unsigned long skip = bitmap->mapped_base_index;
+
+	return bitmap->iova + iova_bitmap_index_to_offset(bitmap, skip);
+}
+
+/*
+ * Pins the bitmap user pages for the current range window.
+ * This is internal to IOVA bitmap and called when advancing the
+ * index (@mapped_base_index) or allocating the bitmap.
+ */
+static int iova_bitmap_get(struct iova_bitmap *bitmap)
+{
+	struct iova_bitmap_map *mapped = &bitmap->mapped;
+	unsigned long npages;
+	u64 __user *addr;
+	long ret;
+
+	/*
+	 * @mapped_base_index is the index of the currently mapped u64 words
+	 * that we have access to. Anything before @mapped_base_index is not
+	 * mapped. The range @mapped_base_index .. @mapped_total_index-1 is
+	 * mapped but capped at a maximum number of pages.
+	 */
+	npages = DIV_ROUND_UP((bitmap->mapped_total_index -
+			       bitmap->mapped_base_index) *
+			       sizeof(*bitmap->bitmap), PAGE_SIZE);
+
+	/*
+	 * We always cap at the max number of 'struct page' that a base page
+	 * can fit. On x86, for example, this means 2M of bitmap data max.
+	 */
+	npages = min(npages, PAGE_SIZE / sizeof(struct page *));
+
+	/*
+	 * The bitmap address to be pinned is calculated via pointer
+	 * arithmetic with the bitmap u64 word index.
+	 */
+	addr = bitmap->bitmap + bitmap->mapped_base_index;
+
+	ret = pin_user_pages_fast((unsigned long)addr, npages,
+				  FOLL_WRITE, mapped->pages);
+	if (ret <= 0)
+		return -EFAULT;
+
+	mapped->npages = (unsigned long)ret;
+	/* Base IOVA where @pages point to, i.e. bit 0 of the first page */
+	mapped->iova = iova_bitmap_mapped_iova(bitmap);
+
+	/*
+	 * Offset within the first pinned page where bit 0 is located.
+	 * This handles the case where the bitmap is not PAGE_SIZE
+	 * aligned.
+	 */
+	mapped->pgoff = offset_in_page(addr);
+	return 0;
+}
+
+/*
+ * Unpins the bitmap user pages and clears @npages. (Un)pinning is
+ * abstracted from the API user and is done when advancing the index
+ * or freeing the bitmap.
+ */
+static void iova_bitmap_put(struct iova_bitmap *bitmap)
+{
+	struct iova_bitmap_map *mapped = &bitmap->mapped;
+
+	if (mapped->npages) {
+		unpin_user_pages(mapped->pages, mapped->npages);
+		mapped->npages = 0;
+	}
+}
+
+/**
+ * iova_bitmap_alloc() - Allocates an IOVA bitmap object
+ * @iova: Start address of the IOVA range
+ * @length: Length of the IOVA range
+ * @page_size: Page size of the IOVA bitmap. It defines the granularity
+ *             that each bit represents
+ * @data: Userspace address of the bitmap
+ *
+ * Allocates an IOVA bitmap object and initializes all its fields,
+ * including pinning the first user pages of @data.
+ *
+ * Return: A pointer to a newly allocated struct iova_bitmap
+ * or ERR_PTR() on error.
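+ *
+ * Note: @page_size is assumed to be a power of two; only its order
+ * (__ffs(page_size)) is kept and used as the per-bit granularity.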
+ */
+struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
+				      unsigned long page_size, u64 __user *data)
+{
+	struct iova_bitmap_map *mapped;
+	struct iova_bitmap *bitmap;
+	int rc;
+
+	bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
+	if (!bitmap)
+		return ERR_PTR(-ENOMEM);
+
+	mapped = &bitmap->mapped;
+	mapped->pgshift = __ffs(page_size);
+	bitmap->bitmap = data;
+	bitmap->mapped_total_index =
+		iova_bitmap_offset_to_index(bitmap, length - 1) + 1;
+	bitmap->iova = iova;
+	bitmap->length = length;
+	mapped->iova = iova;
+	mapped->pages = (struct page **)__get_free_page(GFP_KERNEL);
+	if (!mapped->pages) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	rc = iova_bitmap_get(bitmap);
+	if (rc)
+		goto err;
+	return bitmap;
+
+err:
+	iova_bitmap_free(bitmap);
+	return ERR_PTR(rc);
+}
+
+/**
+ * iova_bitmap_free() - Frees an IOVA bitmap object
+ * @bitmap: IOVA bitmap to free
+ *
+ * It unpins and releases the pages array memory and clears any leftover
+ * state.
+ */
+void iova_bitmap_free(struct iova_bitmap *bitmap)
+{
+	struct iova_bitmap_map *mapped = &bitmap->mapped;
+
+	iova_bitmap_put(bitmap);
+
+	if (mapped->pages) {
+		free_page((unsigned long)mapped->pages);
+		mapped->pages = NULL;
+	}
+
+	kfree(bitmap);
+}
+
+/*
+ * Returns how many bitmap indexes (up to mapped_total_index) remain to
+ * be processed for the currently pinned bitmap pages.
+ */
+static unsigned long iova_bitmap_mapped_remaining(struct iova_bitmap *bitmap)
+{
+	unsigned long remaining;
+
+	remaining = bitmap->mapped_total_index - bitmap->mapped_base_index;
+	remaining = min_t(unsigned long, remaining,
+	     (bitmap->mapped.npages << PAGE_SHIFT) / sizeof(*bitmap->bitmap));
+
+	return remaining;
+}
+
+/*
+ * Returns the length of the mapped IOVA range.
+ */
+static unsigned long iova_bitmap_mapped_length(struct iova_bitmap *bitmap)
+{
+	unsigned long max_iova = bitmap->iova + bitmap->length - 1;
+	unsigned long iova = iova_bitmap_mapped_iova(bitmap);
+	unsigned long remaining;
+
+	/*
+	 * iova_bitmap_mapped_remaining() returns a number of indexes which
+	 * when converted to IOVA gives us a max length that the bitmap
+	 * pinned data can cover. Afterwards, that is capped to
+	 * only cover the IOVA range in @bitmap::iova .. @bitmap::length.
+	 */
+	remaining = iova_bitmap_index_to_offset(bitmap,
+			iova_bitmap_mapped_remaining(bitmap));
+
+	if (iova + remaining - 1 > max_iova)
+		remaining -= ((iova + remaining - 1) - max_iova);
+
+	return remaining;
+}
+
+/*
+ * Returns true if there is no more data to iterate.
+ */
+static bool iova_bitmap_done(struct iova_bitmap *bitmap)
+{
+	return bitmap->mapped_base_index >= bitmap->mapped_total_index;
+}
+
+/*
+ * Advances to the next range, releases the current pinned
+ * pages and pins the next set of bitmap pages.
+ * Returns 0 on success or an errno otherwise.
+ */
+static int iova_bitmap_advance(struct iova_bitmap *bitmap)
+{
+	unsigned long iova = iova_bitmap_mapped_length(bitmap) - 1;
+	unsigned long count = iova_bitmap_offset_to_index(bitmap, iova) + 1;
+
+	bitmap->mapped_base_index += count;
+
+	iova_bitmap_put(bitmap);
+	if (iova_bitmap_done(bitmap))
+		return 0;
+
+	/* When advancing the index we pin the next set of bitmap pages */
+	return iova_bitmap_get(bitmap);
+}
+
+/**
+ * iova_bitmap_for_each() - Iterates over the bitmap
+ * @bitmap: IOVA bitmap to iterate
+ * @opaque: Additional argument to pass to the callback
+ * @fn: Function that gets called for each IOVA range
+ *
+ * Helper function to iterate over bitmap data representing a portion of IOVA
+ * space. It hides the complexity of iterating bitmaps and translating the
+ * mapped bitmap user pages into IOVA ranges to process.
+ *
+ * Return: 0 on success, or an error either from the iteration or from
+ * the callback.
+ */
+int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
+			 iova_bitmap_fn_t fn)
+{
+	int ret = 0;
+
+	for (; !iova_bitmap_done(bitmap) && !ret;
+	     ret = iova_bitmap_advance(bitmap)) {
+		ret = fn(bitmap, iova_bitmap_mapped_iova(bitmap),
+			 iova_bitmap_mapped_length(bitmap), opaque);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+/**
+ * iova_bitmap_set() - Records an IOVA range in bitmap
+ * @bitmap: IOVA bitmap
+ * @iova: IOVA to start
+ * @length: IOVA range length
+ *
+ * Set the bits corresponding to the range [iova .. iova+length-1] in
+ * the user bitmap.
+ */
+void iova_bitmap_set(struct iova_bitmap *bitmap,
+		     unsigned long iova, size_t length)
+{
+	struct iova_bitmap_map *mapped = &bitmap->mapped;
+	unsigned long offset = (iova - mapped->iova) >> mapped->pgshift;
+	unsigned long nbits = max_t(unsigned long, 1, length >> mapped->pgshift);
+	unsigned long page_idx = offset / BITS_PER_PAGE;
+	unsigned long page_offset = mapped->pgoff;
+	void *kaddr;
+
+	offset = offset % BITS_PER_PAGE;
+
+	do {
+		unsigned long size = min(BITS_PER_PAGE - offset, nbits);
+
+		kaddr = kmap_local_page(mapped->pages[page_idx]);
+		bitmap_set(kaddr + page_offset, offset, size);
+		kunmap_local(kaddr);
+		page_offset = offset = 0;
+		nbits -= size;
+		page_idx++;
+	} while (nbits > 0);
+}
+EXPORT_SYMBOL_GPL(iova_bitmap_set);
diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
index b8b9e7911e55..58f91b3bd670 100644
--- a/drivers/vfio/mdev/mdev_core.c
+++ b/drivers/vfio/mdev/mdev_core.c
@@ -8,9 +8,7 @@
  */
 
 #include <linux/module.h>
-#include <linux/device.h>
 #include <linux/slab.h>
-#include <linux/uuid.h>
 #include <linux/sysfs.h>
 #include <linux/mdev.h>
 
@@ -20,71 +18,11 @@
 #define DRIVER_AUTHOR		"NVIDIA Corporation"
 #define DRIVER_DESC		"Mediated device Core Driver"
 
-static LIST_HEAD(parent_list);
-static DEFINE_MUTEX(parent_list_lock);
 static struct class_compat *mdev_bus_compat_class;
 
 static LIST_HEAD(mdev_list);
 static DEFINE_MUTEX(mdev_list_lock);
 
-struct device *mdev_parent_dev(struct mdev_device *mdev)
-{
-	return mdev->type->parent->dev;
-}
-EXPORT_SYMBOL(mdev_parent_dev);
-
-/*
- * Return the index in supported_type_groups that this mdev_device was created
- * from.
- */
-unsigned int mdev_get_type_group_id(struct mdev_device *mdev)
-{
-	return mdev->type->type_group_id;
-}
-EXPORT_SYMBOL(mdev_get_type_group_id);
-
-/*
- * Used in mdev_type_attribute sysfs functions to return the index in the
- * supported_type_groups that the sysfs is called from.
- */
-unsigned int mtype_get_type_group_id(struct mdev_type *mtype)
-{
-	return mtype->type_group_id;
-}
-EXPORT_SYMBOL(mtype_get_type_group_id);
-
-/*
- * Used in mdev_type_attribute sysfs functions to return the parent struct
- * device
- */
-struct device *mtype_get_parent_dev(struct mdev_type *mtype)
-{
-	return mtype->parent->dev;
-}
-EXPORT_SYMBOL(mtype_get_parent_dev);
-
-/* Should be called holding parent_list_lock */
-static struct mdev_parent *__find_parent_device(struct device *dev)
-{
-	struct mdev_parent *parent;
-
-	list_for_each_entry(parent, &parent_list, next) {
-		if (parent->dev == dev)
-			return parent;
-	}
-	return NULL;
-}
-
-void mdev_release_parent(struct kref *kref)
-{
-	struct mdev_parent *parent = container_of(kref, struct mdev_parent,
-						  ref);
-	struct device *dev = parent->dev;
-
-	kfree(parent);
-	put_device(dev);
-}
-
 /* Caller must hold parent unreg_sem read or write lock */
 static void mdev_device_remove_common(struct mdev_device *mdev)
 {
@@ -99,145 +37,96 @@ static void mdev_device_remove_common(struct mdev_device *mdev)
 
 static int mdev_device_remove_cb(struct device *dev, void *data)
 {
-	struct mdev_device *mdev = mdev_from_dev(dev);
-
-	if (mdev)
-		mdev_device_remove_common(mdev);
+	if (dev->bus == &mdev_bus_type)
+		mdev_device_remove_common(to_mdev_device(dev));
 	return 0;
 }
 
 /*
- * mdev_register_device : Register a device
+ * mdev_register_parent: Register a device as parent for mdevs
+ * @parent: parent structure to register
+ * @dev: device structure representing parent device.
+ * @mdev_driver: Device driver to bind to the newly created mdev
+ * @types: Array of supported mdev types
+ * @nr_types: Number of entries in @types
+ *
+ * Registers the @parent structure as a parent for mdev types and thus mdev
+ * devices. The caller needs to hold a reference on @dev that must not be
+ * released until after the call to mdev_unregister_parent().
 *
- * Add device to list of registered parent devices.
 * Returns a negative value on error, otherwise 0.
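+ *
+ * A minimal (illustrative) caller, where my_parent, my_driver and
+ * my_types[] are all hypothetical driver-defined objects:
+ *
+ *	ret = mdev_register_parent(&my_parent, dev, &my_driver,
+ *				   my_types, ARRAY_SIZE(my_types));
+ *	if (ret)
+ *		return ret;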
*/ -int mdev_register_device(struct device *dev, struct mdev_driver *mdev_driver) +int mdev_register_parent(struct mdev_parent *parent, struct device *dev, + struct mdev_driver *mdev_driver, struct mdev_type **types, + unsigned int nr_types) { - int ret; - struct mdev_parent *parent; char *env_string = "MDEV_STATE=registered"; char *envp[] = { env_string, NULL }; + int ret; - /* check for mandatory ops */ - if (!mdev_driver->supported_type_groups) - return -EINVAL; - - dev = get_device(dev); - if (!dev) - return -EINVAL; - - mutex_lock(&parent_list_lock); - - /* Check for duplicate */ - parent = __find_parent_device(dev); - if (parent) { - parent = NULL; - ret = -EEXIST; - goto add_dev_err; - } - - parent = kzalloc(sizeof(*parent), GFP_KERNEL); - if (!parent) { - ret = -ENOMEM; - goto add_dev_err; - } - - kref_init(&parent->ref); + memset(parent, 0, sizeof(*parent)); init_rwsem(&parent->unreg_sem); - parent->dev = dev; parent->mdev_driver = mdev_driver; + parent->types = types; + parent->nr_types = nr_types; + atomic_set(&parent->available_instances, mdev_driver->max_instances); if (!mdev_bus_compat_class) { mdev_bus_compat_class = class_compat_register("mdev_bus"); - if (!mdev_bus_compat_class) { - ret = -ENOMEM; - goto add_dev_err; - } + if (!mdev_bus_compat_class) + return -ENOMEM; } ret = parent_create_sysfs_files(parent); if (ret) - goto add_dev_err; + return ret; ret = class_compat_create_link(mdev_bus_compat_class, dev, NULL); if (ret) dev_warn(dev, "Failed to create compatibility class link\n"); - list_add(&parent->next, &parent_list); - mutex_unlock(&parent_list_lock); - dev_info(dev, "MDEV: Registered\n"); kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); - return 0; - -add_dev_err: - mutex_unlock(&parent_list_lock); - if (parent) - mdev_put_parent(parent); - else - put_device(dev); - return ret; } -EXPORT_SYMBOL(mdev_register_device); +EXPORT_SYMBOL(mdev_register_parent); /* - * mdev_unregister_device : Unregister a parent device - * @dev: device structure representing parent device. - * - * Remove device from list of registered parent devices. Give a chance to free - * existing mediated devices for given device. 
+ * mdev_unregister_parent: Unregister a parent device
+ * @parent: parent structure to unregister
 */
-
-void mdev_unregister_device(struct device *dev)
+void mdev_unregister_parent(struct mdev_parent *parent)
 {
-	struct mdev_parent *parent;
 	char *env_string = "MDEV_STATE=unregistered";
 	char *envp[] = { env_string, NULL };
 
-	mutex_lock(&parent_list_lock);
-	parent = __find_parent_device(dev);
-
-	if (!parent) {
-		mutex_unlock(&parent_list_lock);
-		return;
-	}
-	dev_info(dev, "MDEV: Unregistering\n");
-
-	list_del(&parent->next);
-	mutex_unlock(&parent_list_lock);
+	dev_info(parent->dev, "MDEV: Unregistering\n");
 
 	down_write(&parent->unreg_sem);
-
-	class_compat_remove_link(mdev_bus_compat_class, dev, NULL);
-
-	device_for_each_child(dev, NULL, mdev_device_remove_cb);
-
+	class_compat_remove_link(mdev_bus_compat_class, parent->dev, NULL);
+	device_for_each_child(parent->dev, NULL, mdev_device_remove_cb);
 	parent_remove_sysfs_files(parent);
 	up_write(&parent->unreg_sem);
 
-	mdev_put_parent(parent);
-
-	/* We still have the caller's reference to use for the uevent */
-	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
+	kobject_uevent_env(&parent->dev->kobj, KOBJ_CHANGE, envp);
}
-EXPORT_SYMBOL(mdev_unregister_device);
+EXPORT_SYMBOL(mdev_unregister_parent);
 
 static void mdev_device_release(struct device *dev)
 {
 	struct mdev_device *mdev = to_mdev_device(dev);
-
-	/* Pairs with the get in mdev_device_create() */
-	kobject_put(&mdev->type->kobj);
+	struct mdev_parent *parent = mdev->type->parent;
 
 	mutex_lock(&mdev_list_lock);
 	list_del(&mdev->next);
+	if (!parent->mdev_driver->get_available)
+		atomic_inc(&parent->available_instances);
 	mutex_unlock(&mdev_list_lock);
 
+	/* Pairs with the get in mdev_device_create() */
+	kobject_put(&mdev->type->kobj);
+
 	dev_dbg(&mdev->dev, "MDEV: destroying\n");
 	kfree(mdev);
 }
@@ -259,6 +148,18 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid)
 		}
 	}
 
+	if (!drv->get_available) {
+		/*
+		 * Note that the non-atomic read and dec are fine here
+		 * because all modifications are done under mdev_list_lock.
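+		 * The matching atomic_inc() is in mdev_device_release().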
+ */ + if (!atomic_read(&parent->available_instances)) { + mutex_unlock(&mdev_list_lock); + return -EUSERS; + } + atomic_dec(&parent->available_instances); + } + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); if (!mdev) { mutex_unlock(&mdev_list_lock); diff --git a/drivers/vfio/mdev/mdev_driver.c b/drivers/vfio/mdev/mdev_driver.c index 9c2af59809e2..7825d83a55f8 100644 --- a/drivers/vfio/mdev/mdev_driver.c +++ b/drivers/vfio/mdev/mdev_driver.c @@ -7,7 +7,6 @@ * Kirti Wankhede <[email protected]> */ -#include <linux/device.h> #include <linux/iommu.h> #include <linux/mdev.h> @@ -47,7 +46,6 @@ struct bus_type mdev_bus_type = { .remove = mdev_remove, .match = mdev_match, }; -EXPORT_SYMBOL_GPL(mdev_bus_type); /** * mdev_register_driver - register a new MDEV driver @@ -57,10 +55,11 @@ EXPORT_SYMBOL_GPL(mdev_bus_type); **/ int mdev_register_driver(struct mdev_driver *drv) { + if (!drv->device_api) + return -EINVAL; + /* initialize common driver fields */ drv->driver.bus = &mdev_bus_type; - - /* register with core */ return driver_register(&drv->driver); } EXPORT_SYMBOL(mdev_register_driver); diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h index 7c9fc79f3d83..af457b27f607 100644 --- a/drivers/vfio/mdev/mdev_private.h +++ b/drivers/vfio/mdev/mdev_private.h @@ -13,25 +13,7 @@ int mdev_bus_register(void); void mdev_bus_unregister(void); -struct mdev_parent { - struct device *dev; - struct mdev_driver *mdev_driver; - struct kref ref; - struct list_head next; - struct kset *mdev_types_kset; - struct list_head type_list; - /* Synchronize device creation/removal with parent unregistration */ - struct rw_semaphore unreg_sem; -}; - -struct mdev_type { - struct kobject kobj; - struct kobject *devices_kobj; - struct mdev_parent *parent; - struct list_head next; - unsigned int type_group_id; -}; - +extern struct bus_type mdev_bus_type; extern const struct attribute_group *mdev_device_groups[]; #define to_mdev_type_attr(_attr) \ @@ -48,16 +30,4 @@ void mdev_remove_sysfs_files(struct mdev_device *mdev); int mdev_device_create(struct mdev_type *kobj, const guid_t *uuid); int mdev_device_remove(struct mdev_device *dev); -void mdev_release_parent(struct kref *kref); - -static inline void mdev_get_parent(struct mdev_parent *parent) -{ - kref_get(&parent->ref); -} - -static inline void mdev_put_parent(struct mdev_parent *parent) -{ - kref_put(&parent->ref, mdev_release_parent); -} - #endif /* MDEV_PRIVATE_H */ diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c index 0ccfeb3dda24..abe3359dd477 100644 --- a/drivers/vfio/mdev/mdev_sysfs.c +++ b/drivers/vfio/mdev/mdev_sysfs.c @@ -9,14 +9,24 @@ #include <linux/sysfs.h> #include <linux/ctype.h> -#include <linux/device.h> #include <linux/slab.h> -#include <linux/uuid.h> #include <linux/mdev.h> #include "mdev_private.h" -/* Static functions */ +struct mdev_type_attribute { + struct attribute attr; + ssize_t (*show)(struct mdev_type *mtype, + struct mdev_type_attribute *attr, char *buf); + ssize_t (*store)(struct mdev_type *mtype, + struct mdev_type_attribute *attr, const char *buf, + size_t count); +}; + +#define MDEV_TYPE_ATTR_RO(_name) \ + struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RO(_name) +#define MDEV_TYPE_ATTR_WO(_name) \ + struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_WO(_name) static ssize_t mdev_type_attr_show(struct kobject *kobj, struct attribute *__attr, char *buf) @@ -74,152 +84,156 @@ static ssize_t create_store(struct mdev_type *mtype, return count; } - static 
MDEV_TYPE_ATTR_WO(create);
+static ssize_t device_api_show(struct mdev_type *mtype,
+			       struct mdev_type_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "%s\n", mtype->parent->mdev_driver->device_api);
+}
+static MDEV_TYPE_ATTR_RO(device_api);
+
+static ssize_t name_show(struct mdev_type *mtype,
+			 struct mdev_type_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%s\n",
+		       mtype->pretty_name ? mtype->pretty_name : mtype->sysfs_name);
+}
+
+static MDEV_TYPE_ATTR_RO(name);
+
+static ssize_t available_instances_show(struct mdev_type *mtype,
+					struct mdev_type_attribute *attr,
+					char *buf)
+{
+	struct mdev_driver *drv = mtype->parent->mdev_driver;
+
+	if (drv->get_available)
+		return sysfs_emit(buf, "%u\n", drv->get_available(mtype));
+	return sysfs_emit(buf, "%u\n",
+			  atomic_read(&mtype->parent->available_instances));
+}
+static MDEV_TYPE_ATTR_RO(available_instances);
+
+static ssize_t description_show(struct mdev_type *mtype,
+				struct mdev_type_attribute *attr,
+				char *buf)
+{
+	return mtype->parent->mdev_driver->show_description(mtype, buf);
+}
+static MDEV_TYPE_ATTR_RO(description);
+
+static struct attribute *mdev_types_core_attrs[] = {
+	&mdev_type_attr_create.attr,
+	&mdev_type_attr_device_api.attr,
+	&mdev_type_attr_name.attr,
+	&mdev_type_attr_available_instances.attr,
+	&mdev_type_attr_description.attr,
+	NULL,
+};
+
+static umode_t mdev_types_core_is_visible(struct kobject *kobj,
+					  struct attribute *attr, int n)
+{
+	if (attr == &mdev_type_attr_description.attr &&
+	    !to_mdev_type(kobj)->parent->mdev_driver->show_description)
+		return 0;
+	return attr->mode;
+}
+
+static struct attribute_group mdev_type_core_group = {
+	.attrs = mdev_types_core_attrs,
+	.is_visible = mdev_types_core_is_visible,
+};
+
+static const struct attribute_group *mdev_type_groups[] = {
+	&mdev_type_core_group,
+	NULL,
+};
+
 static void mdev_type_release(struct kobject *kobj)
 {
 	struct mdev_type *type = to_mdev_type(kobj);
 
 	pr_debug("Releasing group %s\n", kobj->name);
-	/* Pairs with the get in add_mdev_supported_type() */
-	mdev_put_parent(type->parent);
-	kfree(type);
+	/* Pairs with the get in mdev_type_add() */
+	put_device(type->parent->dev);
 }
 
 static struct kobj_type mdev_type_ktype = {
-	.sysfs_ops = &mdev_type_sysfs_ops,
-	.release = mdev_type_release,
+	.sysfs_ops = &mdev_type_sysfs_ops,
+	.release = mdev_type_release,
+	.default_groups = mdev_type_groups,
 };
 
-static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
-						 unsigned int type_group_id)
+static int mdev_type_add(struct mdev_parent *parent, struct mdev_type *type)
 {
-	struct mdev_type *type;
-	struct attribute_group *group =
-		parent->mdev_driver->supported_type_groups[type_group_id];
 	int ret;
 
-	if (!group->name) {
-		pr_err("%s: Type name empty!\n", __func__);
-		return ERR_PTR(-EINVAL);
-	}
-
-	type = kzalloc(sizeof(*type), GFP_KERNEL);
-	if (!type)
-		return ERR_PTR(-ENOMEM);
-
 	type->kobj.kset = parent->mdev_types_kset;
 	type->parent = parent;
 	/* Pairs with the put in mdev_type_release() */
-	mdev_get_parent(parent);
-	type->type_group_id = type_group_id;
+	get_device(parent->dev);
 
 	ret = kobject_init_and_add(&type->kobj, &mdev_type_ktype, NULL,
 				   "%s-%s", dev_driver_string(parent->dev),
-				   group->name);
+				   type->sysfs_name);
 	if (ret) {
 		kobject_put(&type->kobj);
-		return ERR_PTR(ret);
+		return ret;
 	}
 
-	ret = sysfs_create_file(&type->kobj, &mdev_type_attr_create.attr);
-	if (ret)
-		goto attr_create_failed;
-
 	type->devices_kobj = kobject_create_and_add("devices", &type->kobj);
 	if (!type->devices_kobj) {
 		ret = -ENOMEM;
 		goto attr_devices_failed;
 	}
 
-	ret = sysfs_create_files(&type->kobj,
-				 (const struct attribute **)group->attrs);
-	if (ret) {
-		ret = -ENOMEM;
-		goto attrs_failed;
-	}
-	return type;
+	return 0;
 
-attrs_failed:
-	kobject_put(type->devices_kobj);
 attr_devices_failed:
-	sysfs_remove_file(&type->kobj, &mdev_type_attr_create.attr);
-attr_create_failed:
 	kobject_del(&type->kobj);
 	kobject_put(&type->kobj);
-	return ERR_PTR(ret);
+	return ret;
 }
 
-static void remove_mdev_supported_type(struct mdev_type *type)
+static void mdev_type_remove(struct mdev_type *type)
 {
-	struct attribute_group *group =
-		type->parent->mdev_driver->supported_type_groups[type->type_group_id];
-
-	sysfs_remove_files(&type->kobj,
-			   (const struct attribute **)group->attrs);
 	kobject_put(type->devices_kobj);
-	sysfs_remove_file(&type->kobj, &mdev_type_attr_create.attr);
 	kobject_del(&type->kobj);
 	kobject_put(&type->kobj);
 }
 
-static int add_mdev_supported_type_groups(struct mdev_parent *parent)
-{
-	int i;
-
-	for (i = 0; parent->mdev_driver->supported_type_groups[i]; i++) {
-		struct mdev_type *type;
-
-		type = add_mdev_supported_type(parent, i);
-		if (IS_ERR(type)) {
-			struct mdev_type *ltype, *tmp;
-
-			list_for_each_entry_safe(ltype, tmp, &parent->type_list,
-						  next) {
-				list_del(&ltype->next);
-				remove_mdev_supported_type(ltype);
-			}
-			return PTR_ERR(type);
-		}
-		list_add(&type->next, &parent->type_list);
-	}
-	return 0;
-}
-
 /* mdev sysfs functions */
 void parent_remove_sysfs_files(struct mdev_parent *parent)
 {
-	struct mdev_type *type, *tmp;
-
-	list_for_each_entry_safe(type, tmp, &parent->type_list, next) {
-		list_del(&type->next);
-		remove_mdev_supported_type(type);
-	}
+	int i;
 
+	for (i = 0; i < parent->nr_types; i++)
+		mdev_type_remove(parent->types[i]);
 	kset_unregister(parent->mdev_types_kset);
 }
 
 int parent_create_sysfs_files(struct mdev_parent *parent)
 {
-	int ret;
+	int ret, i;
 
 	parent->mdev_types_kset = kset_create_and_add("mdev_supported_types",
 						      NULL, &parent->dev->kobj);
-
 	if (!parent->mdev_types_kset)
 		return -ENOMEM;
 
-	INIT_LIST_HEAD(&parent->type_list);
-
-	ret = add_mdev_supported_type_groups(parent);
-	if (ret)
-		goto create_err;
+	for (i = 0; i < parent->nr_types; i++) {
+		ret = mdev_type_add(parent, parent->types[i]);
+		if (ret)
+			goto out_err;
+	}
 	return 0;
 
-create_err:
-	kset_unregister(parent->mdev_types_kset);
-	return ret;
+out_err:
+	while (--i >= 0)
+		mdev_type_remove(parent->types[i]);
+	kset_unregister(parent->mdev_types_kset);
+	return ret;
 }
 
 static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index ea762e28c1cc..39eeca18a0f7 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -16,7 +16,7 @@
 
 #include "hisi_acc_vfio_pci.h"
 
-/* return 0 on VM acc device ready, -ETIMEDOUT hardware timeout */
+/* Return 0 on VM acc device ready, -ETIMEDOUT hardware timeout */
 static int qm_wait_dev_not_ready(struct hisi_qm *qm)
 {
 	u32 val;
@@ -189,7 +189,7 @@ static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
 	struct device *dev = &qm->pdev->dev;
 	int ret;
 
-	/* check VF state */
+	/* Check VF state */
 	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
 		dev_err(&qm->pdev->dev, "QM device is not ready to write\n");
 		return -EBUSY;
@@ -337,16 +337,7 @@ static int vf_qm_cache_wb(struct hisi_qm *qm)
 	return 0;
 }
 
-static struct hisi_acc_vf_core_device *hssi_acc_drvdata(struct pci_dev *pdev)
-{
-	struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);
-
-	return container_of(core_device,
struct hisi_acc_vf_core_device, - core_device); -} - -static void vf_qm_fun_reset(struct hisi_acc_vf_core_device *hisi_acc_vdev, - struct hisi_qm *qm) +static void vf_qm_fun_reset(struct hisi_qm *qm) { int i; @@ -382,7 +373,7 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev, return -EINVAL; } - /* vf qp num check */ + /* VF qp num check */ ret = qm_get_vft(vf_qm, &vf_qm->qp_base); if (ret <= 0) { dev_err(dev, "failed to get vft qp nums\n"); @@ -396,7 +387,7 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev, vf_qm->qp_num = ret; - /* vf isolation state check */ + /* VF isolation state check */ ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &que_iso_state, 1); if (ret) { dev_err(dev, "failed to read QM_QUE_ISO_CFG_V\n"); @@ -405,7 +396,7 @@ static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev, if (vf_data->que_iso_cfg != que_iso_state) { dev_err(dev, "failed to match isolation state\n"); - return ret; + return -EINVAL; } ret = qm_write_regs(vf_qm, QM_VF_STATE, &vf_data->vf_qm_state, 1); @@ -427,10 +418,10 @@ static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev, int ret; vf_data->acc_magic = ACC_DEV_MAGIC; - /* save device id */ + /* Save device id */ vf_data->dev_id = hisi_acc_vdev->vf_dev->device; - /* vf qp num save from PF */ + /* VF qp num save from PF */ ret = pf_qm_get_qp_num(pf_qm, vf_id, &vf_data->qp_base); if (ret <= 0) { dev_err(dev, "failed to get vft qp nums!\n"); @@ -474,19 +465,19 @@ static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev, ret = qm_set_regs(qm, vf_data); if (ret) { - dev_err(dev, "Set VF regs failed\n"); + dev_err(dev, "set VF regs failed\n"); return ret; } ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); if (ret) { - dev_err(dev, "Set sqc failed\n"); + dev_err(dev, "set sqc failed\n"); return ret; } ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); if (ret) { - dev_err(dev, "Set cqc failed\n"); + dev_err(dev, "set cqc failed\n"); return ret; } @@ -528,12 +519,12 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev, return -EINVAL; /* Every reg is 32 bit, the dma address is 64 bit. 
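+	 * The 64-bit address is assembled from two adjacent 32-bit words,
+	 * low word first: dma = ((u64)dw[1] << QM_XQC_ADDR_OFFSET) | dw[0].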
*/ - vf_data->eqe_dma = vf_data->qm_eqc_dw[2]; + vf_data->eqe_dma = vf_data->qm_eqc_dw[1]; vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET; - vf_data->eqe_dma |= vf_data->qm_eqc_dw[1]; - vf_data->aeqe_dma = vf_data->qm_aeqc_dw[2]; + vf_data->eqe_dma |= vf_data->qm_eqc_dw[0]; + vf_data->aeqe_dma = vf_data->qm_aeqc_dw[1]; vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET; - vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[1]; + vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[0]; /* Through SQC_BT/CQC_BT to get sqc and cqc address */ ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma); @@ -552,6 +543,14 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev, return 0; } +static struct hisi_acc_vf_core_device *hisi_acc_drvdata(struct pci_dev *pdev) +{ + struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev); + + return container_of(core_device, struct hisi_acc_vf_core_device, + core_device); +} + /* Check the PF's RAS state and Function INT state */ static int hisi_acc_check_int_state(struct hisi_acc_vf_core_device *hisi_acc_vdev) @@ -662,7 +661,10 @@ static void hisi_acc_vf_start_device(struct hisi_acc_vf_core_device *hisi_acc_vd if (hisi_acc_vdev->vf_qm_state != QM_READY) return; - vf_qm_fun_reset(hisi_acc_vdev, vf_qm); + /* Make sure the device is enabled */ + qm_dev_cmd_init(vf_qm); + + vf_qm_fun_reset(vf_qm); } static int hisi_acc_vf_load_state(struct hisi_acc_vf_core_device *hisi_acc_vdev) @@ -970,7 +972,7 @@ hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev, static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev) { - struct hisi_acc_vf_core_device *hisi_acc_vdev = hssi_acc_drvdata(pdev); + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev); if (hisi_acc_vdev->core_device.vdev.migration_flags != VFIO_MIGRATION_STOP_COPY) @@ -1213,8 +1215,28 @@ static const struct vfio_migration_ops hisi_acc_vfio_pci_migrn_state_ops = { .migration_get_state = hisi_acc_vfio_pci_get_device_state, }; +static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev) +{ + struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev, + struct hisi_acc_vf_core_device, core_device.vdev); + struct pci_dev *pdev = to_pci_dev(core_vdev->dev); + struct hisi_qm *pf_qm = hisi_acc_get_pf_qm(pdev); + + hisi_acc_vdev->vf_id = pci_iov_vf_id(pdev) + 1; + hisi_acc_vdev->pf_qm = pf_qm; + hisi_acc_vdev->vf_dev = pdev; + mutex_init(&hisi_acc_vdev->state_mutex); + + core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY; + core_vdev->mig_ops = &hisi_acc_vfio_pci_migrn_state_ops; + + return vfio_pci_core_init_dev(core_vdev); +} + static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = { .name = "hisi-acc-vfio-pci-migration", + .init = hisi_acc_vfio_pci_migrn_init_dev, + .release = vfio_pci_core_release_dev, .open_device = hisi_acc_vfio_pci_open_device, .close_device = hisi_acc_vfio_pci_close_device, .ioctl = hisi_acc_vfio_pci_ioctl, @@ -1228,6 +1250,8 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = { static const struct vfio_device_ops hisi_acc_vfio_pci_ops = { .name = "hisi-acc-vfio-pci", + .init = vfio_pci_core_init_dev, + .release = vfio_pci_core_release_dev, .open_device = hisi_acc_vfio_pci_open_device, .close_device = vfio_pci_core_close_device, .ioctl = vfio_pci_core_ioctl, @@ -1239,73 +1263,45 @@ static const struct vfio_device_ops hisi_acc_vfio_pci_ops = { .match = vfio_pci_core_match, }; -static int -hisi_acc_vfio_pci_migrn_init(struct hisi_acc_vf_core_device *hisi_acc_vdev, - struct pci_dev *pdev, struct hisi_qm *pf_qm) -{ - int vf_id; 
- - vf_id = pci_iov_vf_id(pdev); - if (vf_id < 0) - return vf_id; - - hisi_acc_vdev->vf_id = vf_id + 1; - hisi_acc_vdev->core_device.vdev.migration_flags = - VFIO_MIGRATION_STOP_COPY; - hisi_acc_vdev->pf_qm = pf_qm; - hisi_acc_vdev->vf_dev = pdev; - mutex_init(&hisi_acc_vdev->state_mutex); - - return 0; -} - static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct hisi_acc_vf_core_device *hisi_acc_vdev; + const struct vfio_device_ops *ops = &hisi_acc_vfio_pci_ops; struct hisi_qm *pf_qm; + int vf_id; int ret; - hisi_acc_vdev = kzalloc(sizeof(*hisi_acc_vdev), GFP_KERNEL); - if (!hisi_acc_vdev) - return -ENOMEM; - pf_qm = hisi_acc_get_pf_qm(pdev); if (pf_qm && pf_qm->ver >= QM_HW_V3) { - ret = hisi_acc_vfio_pci_migrn_init(hisi_acc_vdev, pdev, pf_qm); - if (!ret) { - vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev, - &hisi_acc_vfio_pci_migrn_ops); - hisi_acc_vdev->core_device.vdev.mig_ops = - &hisi_acc_vfio_pci_migrn_state_ops; - } else { + vf_id = pci_iov_vf_id(pdev); + if (vf_id >= 0) + ops = &hisi_acc_vfio_pci_migrn_ops; + else pci_warn(pdev, "migration support failed, continue with generic interface\n"); - vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev, - &hisi_acc_vfio_pci_ops); - } - } else { - vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev, - &hisi_acc_vfio_pci_ops); } + hisi_acc_vdev = vfio_alloc_device(hisi_acc_vf_core_device, + core_device.vdev, &pdev->dev, ops); + if (IS_ERR(hisi_acc_vdev)) + return PTR_ERR(hisi_acc_vdev); + dev_set_drvdata(&pdev->dev, &hisi_acc_vdev->core_device); ret = vfio_pci_core_register_device(&hisi_acc_vdev->core_device); if (ret) - goto out_free; + goto out_put_vdev; return 0; -out_free: - vfio_pci_core_uninit_device(&hisi_acc_vdev->core_device); - kfree(hisi_acc_vdev); +out_put_vdev: + vfio_put_device(&hisi_acc_vdev->core_device.vdev); return ret; } static void hisi_acc_vfio_pci_remove(struct pci_dev *pdev) { - struct hisi_acc_vf_core_device *hisi_acc_vdev = hssi_acc_drvdata(pdev); + struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev); vfio_pci_core_unregister_device(&hisi_acc_vdev->core_device); - vfio_pci_core_uninit_device(&hisi_acc_vdev->core_device); - kfree(hisi_acc_vdev); + vfio_put_device(&hisi_acc_vdev->core_device.vdev); } static const struct pci_device_id hisi_acc_vfio_pci_table[] = { diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h index 5494f4983bbe..67343325b320 100644 --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h @@ -16,7 +16,6 @@ #define SEC_CORE_INT_STATUS 0x301008 #define HPRE_HAC_INT_STATUS 0x301800 #define HZIP_CORE_INT_STATUS 0x3010AC -#define QM_QUE_ISO_CFG 0x301154 #define QM_VFT_CFG_RDY 0x10006c #define QM_VFT_CFG_OP_WR 0x100058 @@ -80,7 +79,7 @@ struct acc_vf_data { /* QM reserved 5 regs */ u32 qm_rsv_regs[5]; u32 padding; - /* qm memory init information */ + /* QM memory init information */ u64 eqe_dma; u64 aeqe_dma; u64 sqc_dma; @@ -99,7 +98,7 @@ struct hisi_acc_vf_migration_file { struct hisi_acc_vf_core_device { struct vfio_pci_core_device core_device; u8 deferred_reset:1; - /* for migration state */ + /* For migration state */ struct mutex state_mutex; enum vfio_device_mig_state mig_state; struct pci_dev *pf_dev; @@ -108,7 +107,7 @@ struct hisi_acc_vf_core_device { struct hisi_qm vf_qm; u32 vf_qm_state; int vf_id; - /* for reset handler */ + /* For reset handler */ spinlock_t reset_lock; struct 
hisi_acc_vf_migration_file *resuming_migf; struct hisi_acc_vf_migration_file *saving_migf; diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c index dd5d7bfe0a49..c604b70437a5 100644 --- a/drivers/vfio/pci/mlx5/cmd.c +++ b/drivers/vfio/pci/mlx5/cmd.c @@ -5,8 +5,12 @@ #include "cmd.h" +enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 }; + static int mlx5vf_cmd_get_vhca_id(struct mlx5_core_dev *mdev, u16 function_id, u16 *vhca_id); +static void +_mlx5vf_free_page_tracker_resources(struct mlx5vf_pci_core_device *mvdev); int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod) { @@ -66,25 +70,35 @@ int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev, return 0; } +static void set_tracker_error(struct mlx5vf_pci_core_device *mvdev) +{ + /* Mark the tracker under an error and wake it up if it's running */ + mvdev->tracker.is_err = true; + complete(&mvdev->tracker_comp); +} + static int mlx5fv_vf_event(struct notifier_block *nb, unsigned long event, void *data) { struct mlx5vf_pci_core_device *mvdev = container_of(nb, struct mlx5vf_pci_core_device, nb); - mutex_lock(&mvdev->state_mutex); switch (event) { case MLX5_PF_NOTIFY_ENABLE_VF: + mutex_lock(&mvdev->state_mutex); mvdev->mdev_detach = false; + mlx5vf_state_mutex_unlock(mvdev); break; case MLX5_PF_NOTIFY_DISABLE_VF: - mlx5vf_disable_fds(mvdev); + mlx5vf_cmd_close_migratable(mvdev); + mutex_lock(&mvdev->state_mutex); mvdev->mdev_detach = true; + mlx5vf_state_mutex_unlock(mvdev); break; default: break; } - mlx5vf_state_mutex_unlock(mvdev); + return 0; } @@ -93,8 +107,11 @@ void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev) if (!mvdev->migrate_cap) return; + /* Must be done outside the lock to let it progress */ + set_tracker_error(mvdev); mutex_lock(&mvdev->state_mutex); mlx5vf_disable_fds(mvdev); + _mlx5vf_free_page_tracker_resources(mvdev); mlx5vf_state_mutex_unlock(mvdev); } @@ -109,7 +126,8 @@ void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev) } void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev, - const struct vfio_migration_ops *mig_ops) + const struct vfio_migration_ops *mig_ops, + const struct vfio_log_ops *log_ops) { struct pci_dev *pdev = mvdev->core_device.pdev; int ret; @@ -151,6 +169,9 @@ void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev, VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P; mvdev->core_device.vdev.mig_ops = mig_ops; + init_completion(&mvdev->tracker_comp); + if (MLX5_CAP_GEN(mvdev->mdev, adv_virtualization)) + mvdev->core_device.vdev.log_ops = log_ops; end: mlx5_vf_put_core_dev(mvdev->mdev); @@ -188,11 +209,13 @@ err_exec: return ret; } -static int _create_state_mkey(struct mlx5_core_dev *mdev, u32 pdn, - struct mlx5_vf_migration_file *migf, u32 *mkey) +static int _create_mkey(struct mlx5_core_dev *mdev, u32 pdn, + struct mlx5_vf_migration_file *migf, + struct mlx5_vhca_recv_buf *recv_buf, + u32 *mkey) { - size_t npages = DIV_ROUND_UP(migf->total_length, PAGE_SIZE); - struct sg_dma_page_iter dma_iter; + size_t npages = migf ? 
DIV_ROUND_UP(migf->total_length, PAGE_SIZE) : + recv_buf->npages; int err = 0, inlen; __be64 *mtt; void *mkc; @@ -209,8 +232,17 @@ static int _create_state_mkey(struct mlx5_core_dev *mdev, u32 pdn, DIV_ROUND_UP(npages, 2)); mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); - for_each_sgtable_dma_page(&migf->table.sgt, &dma_iter, 0) - *mtt++ = cpu_to_be64(sg_page_iter_dma_address(&dma_iter)); + if (migf) { + struct sg_dma_page_iter dma_iter; + + for_each_sgtable_dma_page(&migf->table.sgt, &dma_iter, 0) + *mtt++ = cpu_to_be64(sg_page_iter_dma_address(&dma_iter)); + } else { + int i; + + for (i = 0; i < npages; i++) + *mtt++ = cpu_to_be64(recv_buf->dma_addrs[i]); + } mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT); @@ -223,7 +255,8 @@ static int _create_state_mkey(struct mlx5_core_dev *mdev, u32 pdn, MLX5_SET(mkc, mkc, qpn, 0xffffff); MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT); MLX5_SET(mkc, mkc, translations_octword_size, DIV_ROUND_UP(npages, 2)); - MLX5_SET64(mkc, mkc, len, migf->total_length); + MLX5_SET64(mkc, mkc, len, + migf ? migf->total_length : (npages * PAGE_SIZE)); err = mlx5_core_create_mkey(mdev, mkey, in, inlen); kvfree(in); return err; @@ -297,7 +330,7 @@ int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev, if (err) goto err_dma_map; - err = _create_state_mkey(mdev, pdn, migf, &mkey); + err = _create_mkey(mdev, pdn, migf, NULL, &mkey); if (err) goto err_create_mkey; @@ -369,7 +402,7 @@ int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev, if (err) goto err_reg; - err = _create_state_mkey(mdev, pdn, migf, &mkey); + err = _create_mkey(mdev, pdn, migf, NULL, &mkey); if (err) goto err_mkey; @@ -391,3 +424,939 @@ end: mutex_unlock(&migf->lock); return err; } + +static void combine_ranges(struct rb_root_cached *root, u32 cur_nodes, + u32 req_nodes) +{ + struct interval_tree_node *prev, *curr, *comb_start, *comb_end; + unsigned long min_gap; + unsigned long curr_gap; + + /* Special shortcut when a single range is required */ + if (req_nodes == 1) { + unsigned long last; + + curr = comb_start = interval_tree_iter_first(root, 0, ULONG_MAX); + while (curr) { + last = curr->last; + prev = curr; + curr = interval_tree_iter_next(curr, 0, ULONG_MAX); + if (prev != comb_start) + interval_tree_remove(prev, root); + } + comb_start->last = last; + return; + } + + /* Combine ranges which have the smallest gap */ + while (cur_nodes > req_nodes) { + prev = NULL; + min_gap = ULONG_MAX; + curr = interval_tree_iter_first(root, 0, ULONG_MAX); + while (curr) { + if (prev) { + curr_gap = curr->start - prev->last; + if (curr_gap < min_gap) { + min_gap = curr_gap; + comb_start = prev; + comb_end = curr; + } + } + prev = curr; + curr = interval_tree_iter_next(curr, 0, ULONG_MAX); + } + comb_start->last = comb_end->last; + interval_tree_remove(comb_end, root); + cur_nodes--; + } +} + +static int mlx5vf_create_tracker(struct mlx5_core_dev *mdev, + struct mlx5vf_pci_core_device *mvdev, + struct rb_root_cached *ranges, u32 nnodes) +{ + int max_num_range = + MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_max_num_range); + struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker; + int record_size = MLX5_ST_SZ_BYTES(page_track_range); + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; + struct interval_tree_node *node = NULL; + u64 total_ranges_len = 0; + u32 num_ranges = nnodes; + u8 log_addr_space_size; + void *range_list_ptr; + void *obj_context; + void *cmd_hdr; + int inlen; + void *in; 
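combine_ranges() above reduces the number of tracked ranges to the device's limit by repeatedly merging the two neighbours separated by the smallest gap, so the page tracker covers as little extra address space as possible. A minimal standalone sketch of the same smallest-gap policy, assuming a hypothetical sorted-array representation (struct range, combine_sorted_ranges) in place of the kernel's interval tree:

#include <limits.h>
#include <stddef.h>
#include <string.h>

struct range {
	unsigned long start;
	unsigned long last;
};

/*
 * Merge neighbouring ranges until at most req remain, always collapsing
 * the pair separated by the smallest gap. r[] is sorted by start and
 * holds non-overlapping ranges; returns the new count.
 */
static size_t combine_sorted_ranges(struct range *r, size_t n, size_t req)
{
	while (n > req) {
		unsigned long min_gap = ULONG_MAX;
		size_t best = 1;
		size_t i;

		for (i = 1; i < n; i++) {
			unsigned long gap = r[i].start - r[i - 1].last;

			if (gap < min_gap) {
				min_gap = gap;
				best = i;
			}
		}
		/* Extend the left neighbour over the right one ... */
		r[best - 1].last = r[best].last;
		/* ... and close the hole left in the array. */
		memmove(&r[best], &r[best + 1], (n - best - 1) * sizeof(*r));
		n--;
	}
	return n;
}

The req_nodes == 1 shortcut in the kernel version collapses everything into the first node in a single pass rather than iterating this loop.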
+ int err; + int i; + + if (num_ranges > max_num_range) { + combine_ranges(ranges, nnodes, max_num_range); + num_ranges = max_num_range; + } + + inlen = MLX5_ST_SZ_BYTES(create_page_track_obj_in) + + record_size * num_ranges; + in = kzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + cmd_hdr = MLX5_ADDR_OF(create_page_track_obj_in, in, + general_obj_in_cmd_hdr); + MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, + MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, + MLX5_OBJ_TYPE_PAGE_TRACK); + obj_context = MLX5_ADDR_OF(create_page_track_obj_in, in, obj_context); + MLX5_SET(page_track, obj_context, vhca_id, mvdev->vhca_id); + MLX5_SET(page_track, obj_context, track_type, 1); + MLX5_SET(page_track, obj_context, log_page_size, + ilog2(tracker->host_qp->tracked_page_size)); + MLX5_SET(page_track, obj_context, log_msg_size, + ilog2(tracker->host_qp->max_msg_size)); + MLX5_SET(page_track, obj_context, reporting_qpn, tracker->fw_qp->qpn); + MLX5_SET(page_track, obj_context, num_ranges, num_ranges); + + range_list_ptr = MLX5_ADDR_OF(page_track, obj_context, track_range); + node = interval_tree_iter_first(ranges, 0, ULONG_MAX); + for (i = 0; i < num_ranges; i++) { + void *addr_range_i_base = range_list_ptr + record_size * i; + unsigned long length = node->last - node->start; + + MLX5_SET64(page_track_range, addr_range_i_base, start_address, + node->start); + MLX5_SET64(page_track_range, addr_range_i_base, length, length); + total_ranges_len += length; + node = interval_tree_iter_next(node, 0, ULONG_MAX); + } + + WARN_ON(node); + log_addr_space_size = ilog2(total_ranges_len); + if (log_addr_space_size < + (MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_min_addr_space)) || + log_addr_space_size > + (MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_max_addr_space))) { + err = -EOPNOTSUPP; + goto out; + } + + MLX5_SET(page_track, obj_context, log_addr_space_size, + log_addr_space_size); + err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); + if (err) + goto out; + + tracker->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); +out: + kfree(in); + return err; +} + +static int mlx5vf_cmd_destroy_tracker(struct mlx5_core_dev *mdev, + u32 tracker_id) +{ + u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {}; + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; + + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_PAGE_TRACK); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, tracker_id); + + return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); +} + +static int mlx5vf_cmd_modify_tracker(struct mlx5_core_dev *mdev, + u32 tracker_id, unsigned long iova, + unsigned long length, u32 tracker_state) +{ + u32 in[MLX5_ST_SZ_DW(modify_page_track_obj_in)] = {}; + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; + void *obj_context; + void *cmd_hdr; + + cmd_hdr = MLX5_ADDR_OF(modify_page_track_obj_in, in, general_obj_in_cmd_hdr); + MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_PAGE_TRACK); + MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, tracker_id); + + obj_context = MLX5_ADDR_OF(modify_page_track_obj_in, in, obj_context); + MLX5_SET64(page_track, obj_context, modify_field_select, 0x3); + MLX5_SET64(page_track, obj_context, range_start_address, iova); + MLX5_SET64(page_track, obj_context, length, length); + MLX5_SET(page_track, obj_context, state, 
tracker_state); + + return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); +} + +static int alloc_cq_frag_buf(struct mlx5_core_dev *mdev, + struct mlx5_vhca_cq_buf *buf, int nent, + int cqe_size) +{ + struct mlx5_frag_buf *frag_buf = &buf->frag_buf; + u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0); + u8 log_wq_sz = ilog2(cqe_size); + int err; + + err = mlx5_frag_buf_alloc_node(mdev, nent * cqe_size, frag_buf, + mdev->priv.numa_node); + if (err) + return err; + + mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc); + buf->cqe_size = cqe_size; + buf->nent = nent; + return 0; +} + +static void init_cq_frag_buf(struct mlx5_vhca_cq_buf *buf) +{ + struct mlx5_cqe64 *cqe64; + void *cqe; + int i; + + for (i = 0; i < buf->nent; i++) { + cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i); + cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64; + cqe64->op_own = MLX5_CQE_INVALID << 4; + } +} + +static void mlx5vf_destroy_cq(struct mlx5_core_dev *mdev, + struct mlx5_vhca_cq *cq) +{ + mlx5_core_destroy_cq(mdev, &cq->mcq); + mlx5_frag_buf_free(mdev, &cq->buf.frag_buf); + mlx5_db_free(mdev, &cq->db); +} + +static void mlx5vf_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type) +{ + if (type != MLX5_EVENT_TYPE_CQ_ERROR) + return; + + set_tracker_error(container_of(mcq, struct mlx5vf_pci_core_device, + tracker.cq.mcq)); +} + +static int mlx5vf_event_notifier(struct notifier_block *nb, unsigned long type, + void *data) +{ + struct mlx5_vhca_page_tracker *tracker = + mlx5_nb_cof(nb, struct mlx5_vhca_page_tracker, nb); + struct mlx5vf_pci_core_device *mvdev = container_of( + tracker, struct mlx5vf_pci_core_device, tracker); + struct mlx5_eqe *eqe = data; + u8 event_type = (u8)type; + u8 queue_type; + int qp_num; + + switch (event_type) { + case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: + case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: + case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + queue_type = eqe->data.qp_srq.type; + if (queue_type != MLX5_EVENT_QUEUE_TYPE_QP) + break; + qp_num = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; + if (qp_num != tracker->host_qp->qpn && + qp_num != tracker->fw_qp->qpn) + break; + set_tracker_error(mvdev); + break; + default: + break; + } + + return NOTIFY_OK; +} + +static void mlx5vf_cq_complete(struct mlx5_core_cq *mcq, + struct mlx5_eqe *eqe) +{ + struct mlx5vf_pci_core_device *mvdev = + container_of(mcq, struct mlx5vf_pci_core_device, + tracker.cq.mcq); + + complete(&mvdev->tracker_comp); +} + +static int mlx5vf_create_cq(struct mlx5_core_dev *mdev, + struct mlx5_vhca_page_tracker *tracker, + size_t ncqe) +{ + int cqe_size = cache_line_size() == 128 ? 
128 : 64; + u32 out[MLX5_ST_SZ_DW(create_cq_out)]; + struct mlx5_vhca_cq *cq; + int inlen, err, eqn; + void *cqc, *in; + __be64 *pas; + int vector; + + cq = &tracker->cq; + ncqe = roundup_pow_of_two(ncqe); + err = mlx5_db_alloc_node(mdev, &cq->db, mdev->priv.numa_node); + if (err) + return err; + + cq->ncqe = ncqe; + cq->mcq.set_ci_db = cq->db.db; + cq->mcq.arm_db = cq->db.db + 1; + cq->mcq.cqe_sz = cqe_size; + err = alloc_cq_frag_buf(mdev, &cq->buf, ncqe, cqe_size); + if (err) + goto err_db_free; + + init_cq_frag_buf(&cq->buf); + inlen = MLX5_ST_SZ_BYTES(create_cq_in) + + MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * + cq->buf.frag_buf.npages; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err_buff; + } + + vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev); + err = mlx5_vector2eqn(mdev, vector, &eqn); + if (err) + goto err_vec; + + cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); + MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe)); + MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); + MLX5_SET(cqc, cqc, uar_page, tracker->uar->index); + MLX5_SET(cqc, cqc, log_page_size, cq->buf.frag_buf.page_shift - + MLX5_ADAPTER_PAGE_SHIFT); + MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma); + pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas); + mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas); + cq->mcq.comp = mlx5vf_cq_complete; + cq->mcq.event = mlx5vf_cq_event; + err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out)); + if (err) + goto err_vec; + + mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT, tracker->uar->map, + cq->mcq.cons_index); + kvfree(in); + return 0; + +err_vec: + kvfree(in); +err_buff: + mlx5_frag_buf_free(mdev, &cq->buf.frag_buf); +err_db_free: + mlx5_db_free(mdev, &cq->db); + return err; +} + +static struct mlx5_vhca_qp * +mlx5vf_create_rc_qp(struct mlx5_core_dev *mdev, + struct mlx5_vhca_page_tracker *tracker, u32 max_recv_wr) +{ + u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {}; + struct mlx5_vhca_qp *qp; + u8 log_rq_stride; + u8 log_rq_sz; + void *qpc; + int inlen; + void *in; + int err; + + qp = kzalloc(sizeof(*qp), GFP_KERNEL); + if (!qp) + return ERR_PTR(-ENOMEM); + + qp->rq.wqe_cnt = roundup_pow_of_two(max_recv_wr); + log_rq_stride = ilog2(MLX5_SEND_WQE_DS); + log_rq_sz = ilog2(qp->rq.wqe_cnt); + err = mlx5_db_alloc_node(mdev, &qp->db, mdev->priv.numa_node); + if (err) + goto err_free; + + if (max_recv_wr) { + err = mlx5_frag_buf_alloc_node(mdev, + wq_get_byte_sz(log_rq_sz, log_rq_stride), + &qp->buf, mdev->priv.numa_node); + if (err) + goto err_db_free; + mlx5_init_fbc(qp->buf.frags, log_rq_stride, log_rq_sz, &qp->rq.fbc); + } + + qp->rq.db = &qp->db.db[MLX5_RCV_DBR]; + inlen = MLX5_ST_SZ_BYTES(create_qp_in) + + MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * + qp->buf.npages; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err_in; + } + + qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); + MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC); + MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); + MLX5_SET(qpc, qpc, pd, tracker->pdn); + MLX5_SET(qpc, qpc, uar_page, tracker->uar->index); + MLX5_SET(qpc, qpc, log_page_size, + qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); + MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev)); + if (MLX5_CAP_GEN(mdev, cqe_version) == 1) + MLX5_SET(qpc, qpc, user_index, 0xFFFFFF); + MLX5_SET(qpc, qpc, no_sq, 1); + if (max_recv_wr) { + MLX5_SET(qpc, qpc, cqn_rcv, tracker->cq.mcq.cqn); + MLX5_SET(qpc, qpc, log_rq_stride, log_rq_stride - 4); + MLX5_SET(qpc, qpc, log_rq_size, log_rq_sz); + MLX5_SET(qpc, qpc, 
rq_type, MLX5_NON_ZERO_RQ); + MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); + mlx5_fill_page_frag_array(&qp->buf, + (__be64 *)MLX5_ADDR_OF(create_qp_in, + in, pas)); + } else { + MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ); + } + + MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP); + err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); + kvfree(in); + if (err) + goto err_in; + + qp->qpn = MLX5_GET(create_qp_out, out, qpn); + return qp; + +err_in: + if (max_recv_wr) + mlx5_frag_buf_free(mdev, &qp->buf); +err_db_free: + mlx5_db_free(mdev, &qp->db); +err_free: + kfree(qp); + return ERR_PTR(err); +} + +static void mlx5vf_post_recv(struct mlx5_vhca_qp *qp) +{ + struct mlx5_wqe_data_seg *data; + unsigned int ix; + + WARN_ON(qp->rq.pc - qp->rq.cc >= qp->rq.wqe_cnt); + ix = qp->rq.pc & (qp->rq.wqe_cnt - 1); + data = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ix); + data->byte_count = cpu_to_be32(qp->max_msg_size); + data->lkey = cpu_to_be32(qp->recv_buf.mkey); + data->addr = cpu_to_be64(qp->recv_buf.next_rq_offset); + qp->rq.pc++; + /* Make sure that descriptors are written before doorbell record. */ + dma_wmb(); + *qp->rq.db = cpu_to_be32(qp->rq.pc & 0xffff); +} + +static int mlx5vf_activate_qp(struct mlx5_core_dev *mdev, + struct mlx5_vhca_qp *qp, u32 remote_qpn, + bool host_qp) +{ + u32 init_in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {}; + u32 rtr_in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {}; + u32 rts_in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {}; + void *qpc; + int ret; + + /* Init */ + qpc = MLX5_ADDR_OF(rst2init_qp_in, init_in, qpc); + MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1); + MLX5_SET(qpc, qpc, pm_state, MLX5_QPC_PM_STATE_MIGRATED); + MLX5_SET(qpc, qpc, rre, 1); + MLX5_SET(qpc, qpc, rwe, 1); + MLX5_SET(rst2init_qp_in, init_in, opcode, MLX5_CMD_OP_RST2INIT_QP); + MLX5_SET(rst2init_qp_in, init_in, qpn, qp->qpn); + ret = mlx5_cmd_exec_in(mdev, rst2init_qp, init_in); + if (ret) + return ret; + + if (host_qp) { + struct mlx5_vhca_recv_buf *recv_buf = &qp->recv_buf; + int i; + + for (i = 0; i < qp->rq.wqe_cnt; i++) { + mlx5vf_post_recv(qp); + recv_buf->next_rq_offset += qp->max_msg_size; + } + } + + /* RTR */ + qpc = MLX5_ADDR_OF(init2rtr_qp_in, rtr_in, qpc); + MLX5_SET(init2rtr_qp_in, rtr_in, qpn, qp->qpn); + MLX5_SET(qpc, qpc, mtu, IB_MTU_4096); + MLX5_SET(qpc, qpc, log_msg_max, MLX5_CAP_GEN(mdev, log_max_msg)); + MLX5_SET(qpc, qpc, remote_qpn, remote_qpn); + MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1); + MLX5_SET(qpc, qpc, primary_address_path.fl, 1); + MLX5_SET(qpc, qpc, min_rnr_nak, 1); + MLX5_SET(init2rtr_qp_in, rtr_in, opcode, MLX5_CMD_OP_INIT2RTR_QP); + MLX5_SET(init2rtr_qp_in, rtr_in, qpn, qp->qpn); + ret = mlx5_cmd_exec_in(mdev, init2rtr_qp, rtr_in); + if (ret || host_qp) + return ret; + + /* RTS */ + qpc = MLX5_ADDR_OF(rtr2rts_qp_in, rts_in, qpc); + MLX5_SET(rtr2rts_qp_in, rts_in, qpn, qp->qpn); + MLX5_SET(qpc, qpc, retry_count, 7); + MLX5_SET(qpc, qpc, rnr_retry, 7); /* Infinite retry if RNR NACK */ + MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x8); /* ~1ms */ + MLX5_SET(rtr2rts_qp_in, rts_in, opcode, MLX5_CMD_OP_RTR2RTS_QP); + MLX5_SET(rtr2rts_qp_in, rts_in, qpn, qp->qpn); + + return mlx5_cmd_exec_in(mdev, rtr2rts_qp, rts_in); +} + +static void mlx5vf_destroy_qp(struct mlx5_core_dev *mdev, + struct mlx5_vhca_qp *qp) +{ + u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {}; + + MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); + MLX5_SET(destroy_qp_in, in, qpn, qp->qpn); + mlx5_cmd_exec_in(mdev, destroy_qp, in); + + mlx5_frag_buf_free(mdev, &qp->buf); + 
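mlx5vf_post_recv() above depends on the receive ring depth being a power of two: pc and cc are free-running unsigned counters, masking pc yields the slot index, and unsigned subtraction keeps the fullness check correct across wraparound; the dma_wmb() then orders the WQE write against the doorbell-record update. Reduced to a hypothetical standalone form (struct ring and ring_claim_slot are illustrative names, not kernel APIs):

#include <assert.h>

/* Free-running producer/consumer bookkeeping for a power-of-two ring. */
struct ring {
	unsigned int pc;	/* bumped once per posted WQE */
	unsigned int cc;	/* bumped once per completed WQE */
	unsigned int wqe_cnt;	/* ring depth, power of two */
};

static unsigned int ring_claim_slot(struct ring *rq)
{
	/* pc - cc is the number of outstanding WQEs, even after wrap. */
	assert(rq->pc - rq->cc < rq->wqe_cnt);
	return rq->pc++ & (rq->wqe_cnt - 1);
}

This is why the driver never resets pc or cc: the completion path simply masks the reported counter the same way, as in ix = be16_to_cpu(cqe->wqe_counter) & (qp->rq.wqe_cnt - 1) further below.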
mlx5_db_free(mdev, &qp->db); + kfree(qp); +} + +static void free_recv_pages(struct mlx5_vhca_recv_buf *recv_buf) +{ + int i; + + /* Undo alloc_pages_bulk_array() */ + for (i = 0; i < recv_buf->npages; i++) + __free_page(recv_buf->page_list[i]); + + kvfree(recv_buf->page_list); +} + +static int alloc_recv_pages(struct mlx5_vhca_recv_buf *recv_buf, + unsigned int npages) +{ + unsigned int filled = 0, done = 0; + int i; + + recv_buf->page_list = kvcalloc(npages, sizeof(*recv_buf->page_list), + GFP_KERNEL); + if (!recv_buf->page_list) + return -ENOMEM; + + for (;;) { + filled = alloc_pages_bulk_array(GFP_KERNEL, npages - done, + recv_buf->page_list + done); + if (!filled) + goto err; + + done += filled; + if (done == npages) + break; + } + + recv_buf->npages = npages; + return 0; + +err: + for (i = 0; i < npages; i++) { + if (recv_buf->page_list[i]) + __free_page(recv_buf->page_list[i]); + } + + kvfree(recv_buf->page_list); + return -ENOMEM; +} + +static int register_dma_recv_pages(struct mlx5_core_dev *mdev, + struct mlx5_vhca_recv_buf *recv_buf) +{ + int i, j; + + recv_buf->dma_addrs = kvcalloc(recv_buf->npages, + sizeof(*recv_buf->dma_addrs), + GFP_KERNEL); + if (!recv_buf->dma_addrs) + return -ENOMEM; + + for (i = 0; i < recv_buf->npages; i++) { + recv_buf->dma_addrs[i] = dma_map_page(mdev->device, + recv_buf->page_list[i], + 0, PAGE_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(mdev->device, recv_buf->dma_addrs[i])) + goto error; + } + return 0; + +error: + for (j = 0; j < i; j++) + dma_unmap_single(mdev->device, recv_buf->dma_addrs[j], + PAGE_SIZE, DMA_FROM_DEVICE); + + kvfree(recv_buf->dma_addrs); + return -ENOMEM; +} + +static void unregister_dma_recv_pages(struct mlx5_core_dev *mdev, + struct mlx5_vhca_recv_buf *recv_buf) +{ + int i; + + for (i = 0; i < recv_buf->npages; i++) + dma_unmap_single(mdev->device, recv_buf->dma_addrs[i], + PAGE_SIZE, DMA_FROM_DEVICE); + + kvfree(recv_buf->dma_addrs); +} + +static void mlx5vf_free_qp_recv_resources(struct mlx5_core_dev *mdev, + struct mlx5_vhca_qp *qp) +{ + struct mlx5_vhca_recv_buf *recv_buf = &qp->recv_buf; + + mlx5_core_destroy_mkey(mdev, recv_buf->mkey); + unregister_dma_recv_pages(mdev, recv_buf); + free_recv_pages(&qp->recv_buf); +} + +static int mlx5vf_alloc_qp_recv_resources(struct mlx5_core_dev *mdev, + struct mlx5_vhca_qp *qp, u32 pdn, + u64 rq_size) +{ + unsigned int npages = DIV_ROUND_UP_ULL(rq_size, PAGE_SIZE); + struct mlx5_vhca_recv_buf *recv_buf = &qp->recv_buf; + int err; + + err = alloc_recv_pages(recv_buf, npages); + if (err < 0) + return err; + + err = register_dma_recv_pages(mdev, recv_buf); + if (err) + goto end; + + err = _create_mkey(mdev, pdn, NULL, recv_buf, &recv_buf->mkey); + if (err) + goto err_create_mkey; + + return 0; + +err_create_mkey: + unregister_dma_recv_pages(mdev, recv_buf); +end: + free_recv_pages(recv_buf); + return err; +} + +static void +_mlx5vf_free_page_tracker_resources(struct mlx5vf_pci_core_device *mvdev) +{ + struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker; + struct mlx5_core_dev *mdev = mvdev->mdev; + + lockdep_assert_held(&mvdev->state_mutex); + + if (!mvdev->log_active) + return; + + WARN_ON(mvdev->mdev_detach); + + mlx5_eq_notifier_unregister(mdev, &tracker->nb); + mlx5vf_cmd_destroy_tracker(mdev, tracker->id); + mlx5vf_destroy_qp(mdev, tracker->fw_qp); + mlx5vf_free_qp_recv_resources(mdev, tracker->host_qp); + mlx5vf_destroy_qp(mdev, tracker->host_qp); + mlx5vf_destroy_cq(mdev, &tracker->cq); + mlx5_core_dealloc_pd(mdev, tracker->pdn); + mlx5_put_uars_page(mdev, 
tracker->uar); + mvdev->log_active = false; +} + +int mlx5vf_stop_page_tracker(struct vfio_device *vdev) +{ + struct mlx5vf_pci_core_device *mvdev = container_of( + vdev, struct mlx5vf_pci_core_device, core_device.vdev); + + mutex_lock(&mvdev->state_mutex); + if (!mvdev->log_active) + goto end; + + _mlx5vf_free_page_tracker_resources(mvdev); + mvdev->log_active = false; +end: + mlx5vf_state_mutex_unlock(mvdev); + return 0; +} + +int mlx5vf_start_page_tracker(struct vfio_device *vdev, + struct rb_root_cached *ranges, u32 nnodes, + u64 *page_size) +{ + struct mlx5vf_pci_core_device *mvdev = container_of( + vdev, struct mlx5vf_pci_core_device, core_device.vdev); + struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker; + u8 log_tracked_page = ilog2(*page_size); + struct mlx5_vhca_qp *host_qp; + struct mlx5_vhca_qp *fw_qp; + struct mlx5_core_dev *mdev; + u32 max_msg_size = PAGE_SIZE; + u64 rq_size = SZ_2M; + u32 max_recv_wr; + int err; + + mutex_lock(&mvdev->state_mutex); + if (mvdev->mdev_detach) { + err = -ENOTCONN; + goto end; + } + + if (mvdev->log_active) { + err = -EINVAL; + goto end; + } + + mdev = mvdev->mdev; + memset(tracker, 0, sizeof(*tracker)); + tracker->uar = mlx5_get_uars_page(mdev); + if (IS_ERR(tracker->uar)) { + err = PTR_ERR(tracker->uar); + goto end; + } + + err = mlx5_core_alloc_pd(mdev, &tracker->pdn); + if (err) + goto err_uar; + + max_recv_wr = DIV_ROUND_UP_ULL(rq_size, max_msg_size); + err = mlx5vf_create_cq(mdev, tracker, max_recv_wr); + if (err) + goto err_dealloc_pd; + + host_qp = mlx5vf_create_rc_qp(mdev, tracker, max_recv_wr); + if (IS_ERR(host_qp)) { + err = PTR_ERR(host_qp); + goto err_cq; + } + + host_qp->max_msg_size = max_msg_size; + if (log_tracked_page < MLX5_CAP_ADV_VIRTUALIZATION(mdev, + pg_track_log_min_page_size)) { + log_tracked_page = MLX5_CAP_ADV_VIRTUALIZATION(mdev, + pg_track_log_min_page_size); + } else if (log_tracked_page > MLX5_CAP_ADV_VIRTUALIZATION(mdev, + pg_track_log_max_page_size)) { + log_tracked_page = MLX5_CAP_ADV_VIRTUALIZATION(mdev, + pg_track_log_max_page_size); + } + + host_qp->tracked_page_size = (1ULL << log_tracked_page); + err = mlx5vf_alloc_qp_recv_resources(mdev, host_qp, tracker->pdn, + rq_size); + if (err) + goto err_host_qp; + + fw_qp = mlx5vf_create_rc_qp(mdev, tracker, 0); + if (IS_ERR(fw_qp)) { + err = PTR_ERR(fw_qp); + goto err_recv_resources; + } + + err = mlx5vf_activate_qp(mdev, host_qp, fw_qp->qpn, true); + if (err) + goto err_activate; + + err = mlx5vf_activate_qp(mdev, fw_qp, host_qp->qpn, false); + if (err) + goto err_activate; + + tracker->host_qp = host_qp; + tracker->fw_qp = fw_qp; + err = mlx5vf_create_tracker(mdev, mvdev, ranges, nnodes); + if (err) + goto err_activate; + + MLX5_NB_INIT(&tracker->nb, mlx5vf_event_notifier, NOTIFY_ANY); + mlx5_eq_notifier_register(mdev, &tracker->nb); + *page_size = host_qp->tracked_page_size; + mvdev->log_active = true; + mlx5vf_state_mutex_unlock(mvdev); + return 0; + +err_activate: + mlx5vf_destroy_qp(mdev, fw_qp); +err_recv_resources: + mlx5vf_free_qp_recv_resources(mdev, host_qp); +err_host_qp: + mlx5vf_destroy_qp(mdev, host_qp); +err_cq: + mlx5vf_destroy_cq(mdev, &tracker->cq); +err_dealloc_pd: + mlx5_core_dealloc_pd(mdev, tracker->pdn); +err_uar: + mlx5_put_uars_page(mdev, tracker->uar); +end: + mlx5vf_state_mutex_unlock(mvdev); + return err; +} + +static void +set_report_output(u32 size, int index, struct mlx5_vhca_qp *qp, + struct iova_bitmap *dirty) +{ + u32 entry_size = MLX5_ST_SZ_BYTES(page_track_report_entry); + u32 nent = size / entry_size; + struct page 
*page; + u64 addr; + u64 *buf; + int i; + + if (WARN_ON(index >= qp->recv_buf.npages || + (nent > qp->max_msg_size / entry_size))) + return; + + page = qp->recv_buf.page_list[index]; + buf = kmap_local_page(page); + for (i = 0; i < nent; i++) { + addr = MLX5_GET(page_track_report_entry, buf + i, + dirty_address_low); + addr |= (u64)MLX5_GET(page_track_report_entry, buf + i, + dirty_address_high) << 32; + iova_bitmap_set(dirty, addr, qp->tracked_page_size); + } + kunmap_local(buf); +} + +static void +mlx5vf_rq_cqe(struct mlx5_vhca_qp *qp, struct mlx5_cqe64 *cqe, + struct iova_bitmap *dirty, int *tracker_status) +{ + u32 size; + int ix; + + qp->rq.cc++; + *tracker_status = be32_to_cpu(cqe->immediate) >> 28; + size = be32_to_cpu(cqe->byte_cnt); + ix = be16_to_cpu(cqe->wqe_counter) & (qp->rq.wqe_cnt - 1); + + /* zero length CQE, no data */ + WARN_ON(!size && *tracker_status == MLX5_PAGE_TRACK_STATE_REPORTING); + if (size) + set_report_output(size, ix, qp, dirty); + + qp->recv_buf.next_rq_offset = ix * qp->max_msg_size; + mlx5vf_post_recv(qp); +} + +static void *get_cqe(struct mlx5_vhca_cq *cq, int n) +{ + return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n); +} + +static struct mlx5_cqe64 *get_sw_cqe(struct mlx5_vhca_cq *cq, int n) +{ + void *cqe = get_cqe(cq, n & (cq->ncqe - 1)); + struct mlx5_cqe64 *cqe64; + + cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; + + if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) && + !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ncqe)))) { + return cqe64; + } else { + return NULL; + } +} + +static int +mlx5vf_cq_poll_one(struct mlx5_vhca_cq *cq, struct mlx5_vhca_qp *qp, + struct iova_bitmap *dirty, int *tracker_status) +{ + struct mlx5_cqe64 *cqe; + u8 opcode; + + cqe = get_sw_cqe(cq, cq->mcq.cons_index); + if (!cqe) + return CQ_EMPTY; + + ++cq->mcq.cons_index; + /* + * Make sure we read CQ entry contents after we've checked the + * ownership bit. 
+ */ + rmb(); + opcode = get_cqe_opcode(cqe); + switch (opcode) { + case MLX5_CQE_RESP_SEND_IMM: + mlx5vf_rq_cqe(qp, cqe, dirty, tracker_status); + return CQ_OK; + default: + return CQ_POLL_ERR; + } +} + +int mlx5vf_tracker_read_and_clear(struct vfio_device *vdev, unsigned long iova, + unsigned long length, + struct iova_bitmap *dirty) +{ + struct mlx5vf_pci_core_device *mvdev = container_of( + vdev, struct mlx5vf_pci_core_device, core_device.vdev); + struct mlx5_vhca_page_tracker *tracker = &mvdev->tracker; + struct mlx5_vhca_cq *cq = &tracker->cq; + struct mlx5_core_dev *mdev; + int poll_err, err; + + mutex_lock(&mvdev->state_mutex); + if (!mvdev->log_active) { + err = -EINVAL; + goto end; + } + + if (mvdev->mdev_detach) { + err = -ENOTCONN; + goto end; + } + + mdev = mvdev->mdev; + err = mlx5vf_cmd_modify_tracker(mdev, tracker->id, iova, length, + MLX5_PAGE_TRACK_STATE_REPORTING); + if (err) + goto end; + + tracker->status = MLX5_PAGE_TRACK_STATE_REPORTING; + while (tracker->status == MLX5_PAGE_TRACK_STATE_REPORTING && + !tracker->is_err) { + poll_err = mlx5vf_cq_poll_one(cq, tracker->host_qp, dirty, + &tracker->status); + if (poll_err == CQ_EMPTY) { + mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT, tracker->uar->map, + cq->mcq.cons_index); + poll_err = mlx5vf_cq_poll_one(cq, tracker->host_qp, + dirty, &tracker->status); + if (poll_err == CQ_EMPTY) { + wait_for_completion(&mvdev->tracker_comp); + continue; + } + } + if (poll_err == CQ_POLL_ERR) { + err = -EIO; + goto end; + } + mlx5_cq_set_ci(&cq->mcq); + } + + if (tracker->status == MLX5_PAGE_TRACK_STATE_ERROR) + tracker->is_err = true; + + if (tracker->is_err) + err = -EIO; +end: + mlx5vf_state_mutex_unlock(mvdev); + return err; +} diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h index 8208f4701a90..921d5720a1e5 100644 --- a/drivers/vfio/pci/mlx5/cmd.h +++ b/drivers/vfio/pci/mlx5/cmd.h @@ -9,6 +9,8 @@ #include <linux/kernel.h> #include <linux/vfio_pci_core.h> #include <linux/mlx5/driver.h> +#include <linux/mlx5/cq.h> +#include <linux/mlx5/qp.h> struct mlx5vf_async_data { struct mlx5_async_work cb_work; @@ -39,6 +41,56 @@ struct mlx5_vf_migration_file { struct mlx5vf_async_data async_data; }; +struct mlx5_vhca_cq_buf { + struct mlx5_frag_buf_ctrl fbc; + struct mlx5_frag_buf frag_buf; + int cqe_size; + int nent; +}; + +struct mlx5_vhca_cq { + struct mlx5_vhca_cq_buf buf; + struct mlx5_db db; + struct mlx5_core_cq mcq; + size_t ncqe; +}; + +struct mlx5_vhca_recv_buf { + u32 npages; + struct page **page_list; + dma_addr_t *dma_addrs; + u32 next_rq_offset; + u32 mkey; +}; + +struct mlx5_vhca_qp { + struct mlx5_frag_buf buf; + struct mlx5_db db; + struct mlx5_vhca_recv_buf recv_buf; + u32 tracked_page_size; + u32 max_msg_size; + u32 qpn; + struct { + unsigned int pc; + unsigned int cc; + unsigned int wqe_cnt; + __be32 *db; + struct mlx5_frag_buf_ctrl fbc; + } rq; +}; + +struct mlx5_vhca_page_tracker { + u32 id; + u32 pdn; + u8 is_err:1; + struct mlx5_uars_page *uar; + struct mlx5_vhca_cq cq; + struct mlx5_vhca_qp *host_qp; + struct mlx5_vhca_qp *fw_qp; + struct mlx5_nb nb; + int status; +}; + struct mlx5vf_pci_core_device { struct vfio_pci_core_device core_device; int vf_id; @@ -46,6 +98,8 @@ struct mlx5vf_pci_core_device { u8 migrate_cap:1; u8 deferred_reset:1; u8 mdev_detach:1; + u8 log_active:1; + struct completion tracker_comp; /* protect migration state */ struct mutex state_mutex; enum vfio_device_mig_state mig_state; @@ -53,6 +107,7 @@ struct mlx5vf_pci_core_device { spinlock_t reset_lock; struct 
mlx5_vf_migration_file *resuming_migf; struct mlx5_vf_migration_file *saving_migf; + struct mlx5_vhca_page_tracker tracker; struct workqueue_struct *cb_wq; struct notifier_block nb; struct mlx5_core_dev *mdev; @@ -63,7 +118,8 @@ int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod); int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev, size_t *state_size); void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev, - const struct vfio_migration_ops *mig_ops); + const struct vfio_migration_ops *mig_ops, + const struct vfio_log_ops *log_ops); void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev); void mlx5vf_cmd_close_migratable(struct mlx5vf_pci_core_device *mvdev); int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev, @@ -73,4 +129,9 @@ int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev, void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev); void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev); void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work); +int mlx5vf_start_page_tracker(struct vfio_device *vdev, + struct rb_root_cached *ranges, u32 nnodes, u64 *page_size); +int mlx5vf_stop_page_tracker(struct vfio_device *vdev); +int mlx5vf_tracker_read_and_clear(struct vfio_device *vdev, unsigned long iova, + unsigned long length, struct iova_bitmap *dirty); #endif /* MLX5_VFIO_CMD_H */ diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c index a9b63d15c5d3..fd6ccb8454a2 100644 --- a/drivers/vfio/pci/mlx5/main.c +++ b/drivers/vfio/pci/mlx5/main.c @@ -579,8 +579,41 @@ static const struct vfio_migration_ops mlx5vf_pci_mig_ops = { .migration_get_state = mlx5vf_pci_get_device_state, }; +static const struct vfio_log_ops mlx5vf_pci_log_ops = { + .log_start = mlx5vf_start_page_tracker, + .log_stop = mlx5vf_stop_page_tracker, + .log_read_and_clear = mlx5vf_tracker_read_and_clear, +}; + +static int mlx5vf_pci_init_dev(struct vfio_device *core_vdev) +{ + struct mlx5vf_pci_core_device *mvdev = container_of(core_vdev, + struct mlx5vf_pci_core_device, core_device.vdev); + int ret; + + ret = vfio_pci_core_init_dev(core_vdev); + if (ret) + return ret; + + mlx5vf_cmd_set_migratable(mvdev, &mlx5vf_pci_mig_ops, + &mlx5vf_pci_log_ops); + + return 0; +} + +static void mlx5vf_pci_release_dev(struct vfio_device *core_vdev) +{ + struct mlx5vf_pci_core_device *mvdev = container_of(core_vdev, + struct mlx5vf_pci_core_device, core_device.vdev); + + mlx5vf_cmd_remove_migratable(mvdev); + vfio_pci_core_release_dev(core_vdev); +} + static const struct vfio_device_ops mlx5vf_pci_ops = { .name = "mlx5-vfio-pci", + .init = mlx5vf_pci_init_dev, + .release = mlx5vf_pci_release_dev, .open_device = mlx5vf_pci_open_device, .close_device = mlx5vf_pci_close_device, .ioctl = vfio_pci_core_ioctl, @@ -598,21 +631,19 @@ static int mlx5vf_pci_probe(struct pci_dev *pdev, struct mlx5vf_pci_core_device *mvdev; int ret; - mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL); - if (!mvdev) - return -ENOMEM; - vfio_pci_core_init_device(&mvdev->core_device, pdev, &mlx5vf_pci_ops); - mlx5vf_cmd_set_migratable(mvdev, &mlx5vf_pci_mig_ops); + mvdev = vfio_alloc_device(mlx5vf_pci_core_device, core_device.vdev, + &pdev->dev, &mlx5vf_pci_ops); + if (IS_ERR(mvdev)) + return PTR_ERR(mvdev); + dev_set_drvdata(&pdev->dev, &mvdev->core_device); ret = vfio_pci_core_register_device(&mvdev->core_device); if (ret) - goto out_free; + goto out_put_vdev; return 0; -out_free: - mlx5vf_cmd_remove_migratable(mvdev); - 
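This mlx5 hunk, the hisi_acc hunk above, and the vfio-pci hunk below all apply the same conversion: instead of kzalloc() paired with vfio_pci_core_init_device()/vfio_pci_core_uninit_device(), the driver embeds its vfio_device and lets vfio_alloc_device() construct it through the new .init callback; the final vfio_put_device() drops the reference and runs .release. A probe/remove skeleton following that pattern, with hypothetical my_pci_* names around the real helpers introduced in this series:

#include <linux/pci.h>
#include <linux/vfio_pci_core.h>

static const struct vfio_device_ops my_pci_ops = {
	.name = "my-vfio-pci",
	.init = vfio_pci_core_init_dev,		/* runs inside vfio_alloc_device() */
	.release = vfio_pci_core_release_dev,	/* runs on the last vfio_put_device() */
	/* .open_device, .close_device, .ioctl, .read, .write, .mmap, ... */
};

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vfio_pci_core_device *vdev;
	int ret;

	vdev = vfio_alloc_device(vfio_pci_core_device, vdev, &pdev->dev,
				 &my_pci_ops);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	dev_set_drvdata(&pdev->dev, vdev);
	ret = vfio_pci_core_register_device(vdev);
	if (ret)
		goto out_put_vdev;
	return 0;

out_put_vdev:
	/* Drops the reference taken at allocation; ->release frees vdev. */
	vfio_put_device(&vdev->vdev);
	return ret;
}

static void my_pci_remove(struct pci_dev *pdev)
{
	struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev);

	vfio_pci_core_unregister_device(vdev);
	vfio_put_device(&vdev->vdev);
}

Driver-specific setup and teardown move into the two callbacks, as mlx5 does here by calling mlx5vf_cmd_set_migratable() from .init and mlx5vf_cmd_remove_migratable() from .release.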
vfio_pci_core_uninit_device(&mvdev->core_device); - kfree(mvdev); +out_put_vdev: + vfio_put_device(&mvdev->core_device.vdev); return ret; } @@ -621,9 +652,7 @@ static void mlx5vf_pci_remove(struct pci_dev *pdev) struct mlx5vf_pci_core_device *mvdev = mlx5vf_drvdata(pdev); vfio_pci_core_unregister_device(&mvdev->core_device); - mlx5vf_cmd_remove_migratable(mvdev); - vfio_pci_core_uninit_device(&mvdev->core_device); - kfree(mvdev); + vfio_put_device(&mvdev->core_device.vdev); } static const struct pci_device_id mlx5vf_pci_table[] = { diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 4d1a97415a27..1d4919edfbde 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -25,7 +25,7 @@ #include <linux/types.h> #include <linux/uaccess.h> -#include <linux/vfio_pci_core.h> +#include "vfio_pci_priv.h" #define DRIVER_AUTHOR "Alex Williamson <[email protected]>" #define DRIVER_DESC "VFIO PCI - User Level meta-driver" @@ -127,6 +127,8 @@ static int vfio_pci_open_device(struct vfio_device *core_vdev) static const struct vfio_device_ops vfio_pci_ops = { .name = "vfio-pci", + .init = vfio_pci_core_init_dev, + .release = vfio_pci_core_release_dev, .open_device = vfio_pci_open_device, .close_device = vfio_pci_core_close_device, .ioctl = vfio_pci_core_ioctl, @@ -146,20 +148,19 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (vfio_pci_is_denylisted(pdev)) return -EINVAL; - vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); - if (!vdev) - return -ENOMEM; - vfio_pci_core_init_device(vdev, pdev, &vfio_pci_ops); + vdev = vfio_alloc_device(vfio_pci_core_device, vdev, &pdev->dev, + &vfio_pci_ops); + if (IS_ERR(vdev)) + return PTR_ERR(vdev); dev_set_drvdata(&pdev->dev, vdev); ret = vfio_pci_core_register_device(vdev); if (ret) - goto out_free; + goto out_put_vdev; return 0; -out_free: - vfio_pci_core_uninit_device(vdev); - kfree(vdev); +out_put_vdev: + vfio_put_device(&vdev->vdev); return ret; } @@ -168,8 +169,7 @@ static void vfio_pci_remove(struct pci_dev *pdev) struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev); vfio_pci_core_unregister_device(vdev); - vfio_pci_core_uninit_device(vdev); - kfree(vdev); + vfio_put_device(&vdev->vdev); } static int vfio_pci_sriov_configure(struct pci_dev *pdev, int nr_virtfn) diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 442d3ba4122b..4a350421c5f6 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -26,7 +26,7 @@ #include <linux/vfio.h> #include <linux/slab.h> -#include <linux/vfio_pci_core.h> +#include "vfio_pci_priv.h" /* Fake capability ID for standard config space */ #define PCI_CAP_ID_BASIC 0 @@ -1166,7 +1166,7 @@ static int vfio_msi_config_write(struct vfio_pci_core_device *vdev, int pos, flags = le16_to_cpu(*pflags); /* MSI is enabled via ioctl */ - if (!is_msi(vdev)) + if (vdev->irq_type != VFIO_PCI_MSI_IRQ_INDEX) flags &= ~PCI_MSI_FLAGS_ENABLE; /* Check queue size */ diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index c8d3b0450fb3..badc9d828cac 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -28,7 +28,7 @@ #include <linux/nospec.h> #include <linux/sched/mm.h> -#include <linux/vfio_pci_core.h> +#include "vfio_pci_priv.h" #define DRIVER_AUTHOR "Alex Williamson <[email protected]>" #define DRIVER_DESC "core driver for VFIO based PCI devices" @@ -41,6 +41,23 @@ static bool disable_idle_d3; static 
DEFINE_MUTEX(vfio_pci_sriov_pfs_mutex); static LIST_HEAD(vfio_pci_sriov_pfs); +struct vfio_pci_dummy_resource { + struct resource resource; + int index; + struct list_head res_next; +}; + +struct vfio_pci_vf_token { + struct mutex lock; + uuid_t uuid; + int users; +}; + +struct vfio_pci_mmap_vma { + struct vm_area_struct *vma; + struct list_head vma_next; +}; + static inline bool vfio_vga_disabled(void) { #ifdef CONFIG_VFIO_PCI_VGA @@ -260,16 +277,189 @@ int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, pci_power_t stat return ret; } +static int vfio_pci_runtime_pm_entry(struct vfio_pci_core_device *vdev, + struct eventfd_ctx *efdctx) +{ + /* + * The vdev power related flags are protected with 'memory_lock' + * semaphore. + */ + vfio_pci_zap_and_down_write_memory_lock(vdev); + if (vdev->pm_runtime_engaged) { + up_write(&vdev->memory_lock); + return -EINVAL; + } + + vdev->pm_runtime_engaged = true; + vdev->pm_wake_eventfd_ctx = efdctx; + pm_runtime_put_noidle(&vdev->pdev->dev); + up_write(&vdev->memory_lock); + + return 0; +} + +static int vfio_pci_core_pm_entry(struct vfio_device *device, u32 flags, + void __user *arg, size_t argsz) +{ + struct vfio_pci_core_device *vdev = + container_of(device, struct vfio_pci_core_device, vdev); + int ret; + + ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET, 0); + if (ret != 1) + return ret; + + /* + * Inside vfio_pci_runtime_pm_entry(), only the runtime PM usage count + * will be decremented. The pm_runtime_put() will be invoked again + * while returning from the ioctl and then the device can go into + * runtime suspended state. + */ + return vfio_pci_runtime_pm_entry(vdev, NULL); +} + +static int vfio_pci_core_pm_entry_with_wakeup( + struct vfio_device *device, u32 flags, + struct vfio_device_low_power_entry_with_wakeup __user *arg, + size_t argsz) +{ + struct vfio_pci_core_device *vdev = + container_of(device, struct vfio_pci_core_device, vdev); + struct vfio_device_low_power_entry_with_wakeup entry; + struct eventfd_ctx *efdctx; + int ret; + + ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET, + sizeof(entry)); + if (ret != 1) + return ret; + + if (copy_from_user(&entry, arg, sizeof(entry))) + return -EFAULT; + + if (entry.wakeup_eventfd < 0) + return -EINVAL; + + efdctx = eventfd_ctx_fdget(entry.wakeup_eventfd); + if (IS_ERR(efdctx)) + return PTR_ERR(efdctx); + + ret = vfio_pci_runtime_pm_entry(vdev, efdctx); + if (ret) + eventfd_ctx_put(efdctx); + + return ret; +} + +static void __vfio_pci_runtime_pm_exit(struct vfio_pci_core_device *vdev) +{ + if (vdev->pm_runtime_engaged) { + vdev->pm_runtime_engaged = false; + pm_runtime_get_noresume(&vdev->pdev->dev); + + if (vdev->pm_wake_eventfd_ctx) { + eventfd_ctx_put(vdev->pm_wake_eventfd_ctx); + vdev->pm_wake_eventfd_ctx = NULL; + } + } +} + +static void vfio_pci_runtime_pm_exit(struct vfio_pci_core_device *vdev) +{ + /* + * The vdev power related flags are protected with 'memory_lock' + * semaphore. + */ + down_write(&vdev->memory_lock); + __vfio_pci_runtime_pm_exit(vdev); + up_write(&vdev->memory_lock); +} + +static int vfio_pci_core_pm_exit(struct vfio_device *device, u32 flags, + void __user *arg, size_t argsz) +{ + struct vfio_pci_core_device *vdev = + container_of(device, struct vfio_pci_core_device, vdev); + int ret; + + ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET, 0); + if (ret != 1) + return ret; + + /* + * The device is always in the active state here due to pm wrappers + * around ioctls. 
If the device had entered a low power state and + * pm_wake_eventfd_ctx is valid, vfio_pci_core_runtime_resume() has + * already signaled the eventfd and exited low power mode itself. + * pm_runtime_engaged protects the redundant call here. + */ + vfio_pci_runtime_pm_exit(vdev); + return 0; +} + +#ifdef CONFIG_PM +static int vfio_pci_core_runtime_suspend(struct device *dev) +{ + struct vfio_pci_core_device *vdev = dev_get_drvdata(dev); + + down_write(&vdev->memory_lock); + /* + * The user can move the device into D3hot state before invoking + * power management IOCTL. Move the device into D0 state here and then + * the pci-driver core runtime PM suspend function will move the device + * into the low power state. Also, for the devices which have + * NoSoftRst-, it will help in restoring the original state + * (saved locally in 'vdev->pm_save'). + */ + vfio_pci_set_power_state(vdev, PCI_D0); + up_write(&vdev->memory_lock); + + /* + * If INTx is enabled, then mask INTx before going into the runtime + * suspended state and unmask the same in the runtime resume. + * If INTx has already been masked by the user, then + * vfio_pci_intx_mask() will return false and in that case, INTx + * should not be unmasked in the runtime resume. + */ + vdev->pm_intx_masked = ((vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) && + vfio_pci_intx_mask(vdev)); + + return 0; +} + +static int vfio_pci_core_runtime_resume(struct device *dev) +{ + struct vfio_pci_core_device *vdev = dev_get_drvdata(dev); + + /* + * Resume with a pm_wake_eventfd_ctx signals the eventfd and exit + * low power mode. + */ + down_write(&vdev->memory_lock); + if (vdev->pm_wake_eventfd_ctx) { + eventfd_signal(vdev->pm_wake_eventfd_ctx, 1); + __vfio_pci_runtime_pm_exit(vdev); + } + up_write(&vdev->memory_lock); + + if (vdev->pm_intx_masked) + vfio_pci_intx_unmask(vdev); + + return 0; +} +#endif /* CONFIG_PM */ + /* - * The dev_pm_ops needs to be provided to make pci-driver runtime PM working, - * so use structure without any callbacks. - * * The pci-driver core runtime PM routines always save the device state * before going into suspended state. If the device is going into low power * state with only with runtime PM ops, then no explicit handling is needed * for the devices which have NoSoftRst-. */ -static const struct dev_pm_ops vfio_pci_core_pm_ops = { }; +static const struct dev_pm_ops vfio_pci_core_pm_ops = { + SET_RUNTIME_PM_OPS(vfio_pci_core_runtime_suspend, + vfio_pci_core_runtime_resume, + NULL) +}; int vfio_pci_core_enable(struct vfio_pci_core_device *vdev) { @@ -371,6 +561,18 @@ void vfio_pci_core_disable(struct vfio_pci_core_device *vdev) /* * This function can be invoked while the power state is non-D0. + * This non-D0 power state can be with or without runtime PM. + * vfio_pci_runtime_pm_exit() will internally increment the usage + * count corresponding to pm_runtime_put() called during low power + * feature entry and then pm_runtime_resume() will wake up the device, + * if the device has already gone into the suspended state. Otherwise, + * the vfio_pci_set_power_state() will change the device power state + * to D0. + */ + vfio_pci_runtime_pm_exit(vdev); + pm_runtime_resume(&pdev->dev); + + /* * This function calls __pci_reset_function_locked() which internally * can use pci_pm_reset() for the function reset. pci_pm_reset() will * fail if the power state is non-D0. 
Also, for the devices which @@ -645,10 +847,10 @@ static int msix_mmappable_cap(struct vfio_pci_core_device *vdev, return vfio_info_add_capability(caps, &header, sizeof(header)); } -int vfio_pci_register_dev_region(struct vfio_pci_core_device *vdev, - unsigned int type, unsigned int subtype, - const struct vfio_pci_regops *ops, - size_t size, u32 flags, void *data) +int vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev, + unsigned int type, unsigned int subtype, + const struct vfio_pci_regops *ops, + size_t size, u32 flags, void *data) { struct vfio_pci_region *region; @@ -670,508 +872,532 @@ int vfio_pci_register_dev_region(struct vfio_pci_core_device *vdev, return 0; } -EXPORT_SYMBOL_GPL(vfio_pci_register_dev_region); +EXPORT_SYMBOL_GPL(vfio_pci_core_register_dev_region); -long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd, - unsigned long arg) +static int vfio_pci_ioctl_get_info(struct vfio_pci_core_device *vdev, + struct vfio_device_info __user *arg) { - struct vfio_pci_core_device *vdev = - container_of(core_vdev, struct vfio_pci_core_device, vdev); - unsigned long minsz; - - if (cmd == VFIO_DEVICE_GET_INFO) { - struct vfio_device_info info; - struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; - unsigned long capsz; - int ret; - - minsz = offsetofend(struct vfio_device_info, num_irqs); + unsigned long minsz = offsetofend(struct vfio_device_info, num_irqs); + struct vfio_device_info info; + struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; + unsigned long capsz; + int ret; - /* For backward compatibility, cannot require this */ - capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset); + /* For backward compatibility, cannot require this */ + capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset); - if (copy_from_user(&info, (void __user *)arg, minsz)) - return -EFAULT; + if (copy_from_user(&info, arg, minsz)) + return -EFAULT; - if (info.argsz < minsz) - return -EINVAL; + if (info.argsz < minsz) + return -EINVAL; - if (info.argsz >= capsz) { - minsz = capsz; - info.cap_offset = 0; - } + if (info.argsz >= capsz) { + minsz = capsz; + info.cap_offset = 0; + } - info.flags = VFIO_DEVICE_FLAGS_PCI; + info.flags = VFIO_DEVICE_FLAGS_PCI; - if (vdev->reset_works) - info.flags |= VFIO_DEVICE_FLAGS_RESET; + if (vdev->reset_works) + info.flags |= VFIO_DEVICE_FLAGS_RESET; - info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions; - info.num_irqs = VFIO_PCI_NUM_IRQS; + info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions; + info.num_irqs = VFIO_PCI_NUM_IRQS; - ret = vfio_pci_info_zdev_add_caps(vdev, &caps); - if (ret && ret != -ENODEV) { - pci_warn(vdev->pdev, "Failed to setup zPCI info capabilities\n"); - return ret; - } + ret = vfio_pci_info_zdev_add_caps(vdev, &caps); + if (ret && ret != -ENODEV) { + pci_warn(vdev->pdev, + "Failed to setup zPCI info capabilities\n"); + return ret; + } - if (caps.size) { - info.flags |= VFIO_DEVICE_FLAGS_CAPS; - if (info.argsz < sizeof(info) + caps.size) { - info.argsz = sizeof(info) + caps.size; - } else { - vfio_info_cap_shift(&caps, sizeof(info)); - if (copy_to_user((void __user *)arg + - sizeof(info), caps.buf, - caps.size)) { - kfree(caps.buf); - return -EFAULT; - } - info.cap_offset = sizeof(info); + if (caps.size) { + info.flags |= VFIO_DEVICE_FLAGS_CAPS; + if (info.argsz < sizeof(info) + caps.size) { + info.argsz = sizeof(info) + caps.size; + } else { + vfio_info_cap_shift(&caps, sizeof(info)); + if (copy_to_user(arg + 1, caps.buf, caps.size)) { + kfree(caps.buf); + return 
-EFAULT; } - - kfree(caps.buf); + info.cap_offset = sizeof(*arg); } - return copy_to_user((void __user *)arg, &info, minsz) ? - -EFAULT : 0; + kfree(caps.buf); + } - } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) { - struct pci_dev *pdev = vdev->pdev; - struct vfio_region_info info; - struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; - int i, ret; + return copy_to_user(arg, &info, minsz) ? -EFAULT : 0; +} - minsz = offsetofend(struct vfio_region_info, offset); +static int vfio_pci_ioctl_get_region_info(struct vfio_pci_core_device *vdev, + struct vfio_region_info __user *arg) +{ + unsigned long minsz = offsetofend(struct vfio_region_info, offset); + struct pci_dev *pdev = vdev->pdev; + struct vfio_region_info info; + struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; + int i, ret; - if (copy_from_user(&info, (void __user *)arg, minsz)) - return -EFAULT; + if (copy_from_user(&info, arg, minsz)) + return -EFAULT; - if (info.argsz < minsz) - return -EINVAL; + if (info.argsz < minsz) + return -EINVAL; - switch (info.index) { - case VFIO_PCI_CONFIG_REGION_INDEX: - info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); - info.size = pdev->cfg_size; - info.flags = VFIO_REGION_INFO_FLAG_READ | - VFIO_REGION_INFO_FLAG_WRITE; + switch (info.index) { + case VFIO_PCI_CONFIG_REGION_INDEX: + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = pdev->cfg_size; + info.flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE; + break; + case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX: + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = pci_resource_len(pdev, info.index); + if (!info.size) { + info.flags = 0; break; - case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX: - info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); - info.size = pci_resource_len(pdev, info.index); - if (!info.size) { - info.flags = 0; - break; - } + } - info.flags = VFIO_REGION_INFO_FLAG_READ | - VFIO_REGION_INFO_FLAG_WRITE; - if (vdev->bar_mmap_supported[info.index]) { - info.flags |= VFIO_REGION_INFO_FLAG_MMAP; - if (info.index == vdev->msix_bar) { - ret = msix_mmappable_cap(vdev, &caps); - if (ret) - return ret; - } + info.flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE; + if (vdev->bar_mmap_supported[info.index]) { + info.flags |= VFIO_REGION_INFO_FLAG_MMAP; + if (info.index == vdev->msix_bar) { + ret = msix_mmappable_cap(vdev, &caps); + if (ret) + return ret; } + } - break; - case VFIO_PCI_ROM_REGION_INDEX: - { - void __iomem *io; - size_t size; - u16 cmd; - - info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); - info.flags = 0; + break; + case VFIO_PCI_ROM_REGION_INDEX: { + void __iomem *io; + size_t size; + u16 cmd; + + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.flags = 0; + + /* Report the BAR size, not the ROM size */ + info.size = pci_resource_len(pdev, info.index); + if (!info.size) { + /* Shadow ROMs appear as PCI option ROMs */ + if (pdev->resource[PCI_ROM_RESOURCE].flags & + IORESOURCE_ROM_SHADOW) + info.size = 0x20000; + else + break; + } - /* Report the BAR size, not the ROM size */ - info.size = pci_resource_len(pdev, info.index); - if (!info.size) { - /* Shadow ROMs appear as PCI option ROMs */ - if (pdev->resource[PCI_ROM_RESOURCE].flags & - IORESOURCE_ROM_SHADOW) - info.size = 0x20000; - else - break; - } + /* + * Is it really there? Enable memory decode for implicit access + * in pci_map_rom(). 
+ */ + cmd = vfio_pci_memory_lock_and_enable(vdev); + io = pci_map_rom(pdev, &size); + if (io) { + info.flags = VFIO_REGION_INFO_FLAG_READ; + pci_unmap_rom(pdev, io); + } else { + info.size = 0; + } + vfio_pci_memory_unlock_and_restore(vdev, cmd); - /* - * Is it really there? Enable memory decode for - * implicit access in pci_map_rom(). - */ - cmd = vfio_pci_memory_lock_and_enable(vdev); - io = pci_map_rom(pdev, &size); - if (io) { - info.flags = VFIO_REGION_INFO_FLAG_READ; - pci_unmap_rom(pdev, io); - } else { - info.size = 0; - } - vfio_pci_memory_unlock_and_restore(vdev, cmd); + break; + } + case VFIO_PCI_VGA_REGION_INDEX: + if (!vdev->has_vga) + return -EINVAL; - break; - } - case VFIO_PCI_VGA_REGION_INDEX: - if (!vdev->has_vga) - return -EINVAL; + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = 0xc0000; + info.flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE; - info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); - info.size = 0xc0000; - info.flags = VFIO_REGION_INFO_FLAG_READ | - VFIO_REGION_INFO_FLAG_WRITE; + break; + default: { + struct vfio_region_info_cap_type cap_type = { + .header.id = VFIO_REGION_INFO_CAP_TYPE, + .header.version = 1 + }; - break; - default: - { - struct vfio_region_info_cap_type cap_type = { - .header.id = VFIO_REGION_INFO_CAP_TYPE, - .header.version = 1 }; + if (info.index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) + return -EINVAL; + info.index = array_index_nospec( + info.index, VFIO_PCI_NUM_REGIONS + vdev->num_regions); - if (info.index >= - VFIO_PCI_NUM_REGIONS + vdev->num_regions) - return -EINVAL; - info.index = array_index_nospec(info.index, - VFIO_PCI_NUM_REGIONS + - vdev->num_regions); + i = info.index - VFIO_PCI_NUM_REGIONS; - i = info.index - VFIO_PCI_NUM_REGIONS; + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + info.size = vdev->region[i].size; + info.flags = vdev->region[i].flags; - info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); - info.size = vdev->region[i].size; - info.flags = vdev->region[i].flags; + cap_type.type = vdev->region[i].type; + cap_type.subtype = vdev->region[i].subtype; - cap_type.type = vdev->region[i].type; - cap_type.subtype = vdev->region[i].subtype; + ret = vfio_info_add_capability(&caps, &cap_type.header, + sizeof(cap_type)); + if (ret) + return ret; - ret = vfio_info_add_capability(&caps, &cap_type.header, - sizeof(cap_type)); + if (vdev->region[i].ops->add_capability) { + ret = vdev->region[i].ops->add_capability( + vdev, &vdev->region[i], &caps); if (ret) return ret; - - if (vdev->region[i].ops->add_capability) { - ret = vdev->region[i].ops->add_capability(vdev, - &vdev->region[i], &caps); - if (ret) - return ret; - } - } } + } + } - if (caps.size) { - info.flags |= VFIO_REGION_INFO_FLAG_CAPS; - if (info.argsz < sizeof(info) + caps.size) { - info.argsz = sizeof(info) + caps.size; - info.cap_offset = 0; - } else { - vfio_info_cap_shift(&caps, sizeof(info)); - if (copy_to_user((void __user *)arg + - sizeof(info), caps.buf, - caps.size)) { - kfree(caps.buf); - return -EFAULT; - } - info.cap_offset = sizeof(info); + if (caps.size) { + info.flags |= VFIO_REGION_INFO_FLAG_CAPS; + if (info.argsz < sizeof(info) + caps.size) { + info.argsz = sizeof(info) + caps.size; + info.cap_offset = 0; + } else { + vfio_info_cap_shift(&caps, sizeof(info)); + if (copy_to_user(arg + 1, caps.buf, caps.size)) { + kfree(caps.buf); + return -EFAULT; } - - kfree(caps.buf); + info.cap_offset = sizeof(*arg); } - return copy_to_user((void __user *)arg, &info, minsz) ? 
- -EFAULT : 0; + kfree(caps.buf); + } - } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) { - struct vfio_irq_info info; + return copy_to_user(arg, &info, minsz) ? -EFAULT : 0; +} - minsz = offsetofend(struct vfio_irq_info, count); +static int vfio_pci_ioctl_get_irq_info(struct vfio_pci_core_device *vdev, + struct vfio_irq_info __user *arg) +{ + unsigned long minsz = offsetofend(struct vfio_irq_info, count); + struct vfio_irq_info info; - if (copy_from_user(&info, (void __user *)arg, minsz)) - return -EFAULT; + if (copy_from_user(&info, arg, minsz)) + return -EFAULT; - if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS) - return -EINVAL; + if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS) + return -EINVAL; - switch (info.index) { - case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX: - case VFIO_PCI_REQ_IRQ_INDEX: + switch (info.index) { + case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX: + case VFIO_PCI_REQ_IRQ_INDEX: + break; + case VFIO_PCI_ERR_IRQ_INDEX: + if (pci_is_pcie(vdev->pdev)) break; - case VFIO_PCI_ERR_IRQ_INDEX: - if (pci_is_pcie(vdev->pdev)) - break; - fallthrough; - default: - return -EINVAL; - } - - info.flags = VFIO_IRQ_INFO_EVENTFD; - - info.count = vfio_pci_get_irq_count(vdev, info.index); + fallthrough; + default: + return -EINVAL; + } - if (info.index == VFIO_PCI_INTX_IRQ_INDEX) - info.flags |= (VFIO_IRQ_INFO_MASKABLE | - VFIO_IRQ_INFO_AUTOMASKED); - else - info.flags |= VFIO_IRQ_INFO_NORESIZE; + info.flags = VFIO_IRQ_INFO_EVENTFD; - return copy_to_user((void __user *)arg, &info, minsz) ? - -EFAULT : 0; + info.count = vfio_pci_get_irq_count(vdev, info.index); - } else if (cmd == VFIO_DEVICE_SET_IRQS) { - struct vfio_irq_set hdr; - u8 *data = NULL; - int max, ret = 0; - size_t data_size = 0; + if (info.index == VFIO_PCI_INTX_IRQ_INDEX) + info.flags |= + (VFIO_IRQ_INFO_MASKABLE | VFIO_IRQ_INFO_AUTOMASKED); + else + info.flags |= VFIO_IRQ_INFO_NORESIZE; - minsz = offsetofend(struct vfio_irq_set, count); + return copy_to_user(arg, &info, minsz) ? 
-EFAULT : 0; +} - if (copy_from_user(&hdr, (void __user *)arg, minsz)) - return -EFAULT; +static int vfio_pci_ioctl_set_irqs(struct vfio_pci_core_device *vdev, + struct vfio_irq_set __user *arg) +{ + unsigned long minsz = offsetofend(struct vfio_irq_set, count); + struct vfio_irq_set hdr; + u8 *data = NULL; + int max, ret = 0; + size_t data_size = 0; - max = vfio_pci_get_irq_count(vdev, hdr.index); + if (copy_from_user(&hdr, arg, minsz)) + return -EFAULT; - ret = vfio_set_irqs_validate_and_prepare(&hdr, max, - VFIO_PCI_NUM_IRQS, &data_size); - if (ret) - return ret; + max = vfio_pci_get_irq_count(vdev, hdr.index); - if (data_size) { - data = memdup_user((void __user *)(arg + minsz), - data_size); - if (IS_ERR(data)) - return PTR_ERR(data); - } + ret = vfio_set_irqs_validate_and_prepare(&hdr, max, VFIO_PCI_NUM_IRQS, + &data_size); + if (ret) + return ret; - mutex_lock(&vdev->igate); + if (data_size) { + data = memdup_user(&arg->data, data_size); + if (IS_ERR(data)) + return PTR_ERR(data); + } - ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index, - hdr.start, hdr.count, data); + mutex_lock(&vdev->igate); - mutex_unlock(&vdev->igate); - kfree(data); + ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index, hdr.start, + hdr.count, data); - return ret; + mutex_unlock(&vdev->igate); + kfree(data); - } else if (cmd == VFIO_DEVICE_RESET) { - int ret; + return ret; +} - if (!vdev->reset_works) - return -EINVAL; +static int vfio_pci_ioctl_reset(struct vfio_pci_core_device *vdev, + void __user *arg) +{ + int ret; - vfio_pci_zap_and_down_write_memory_lock(vdev); + if (!vdev->reset_works) + return -EINVAL; - /* - * This function can be invoked while the power state is non-D0. - * If pci_try_reset_function() has been called while the power - * state is non-D0, then pci_try_reset_function() will - * internally set the power state to D0 without vfio driver - * involvement. For the devices which have NoSoftRst-, the - * reset function can cause the PCI config space reset without - * restoring the original state (saved locally in - * 'vdev->pm_save'). - */ - vfio_pci_set_power_state(vdev, PCI_D0); + vfio_pci_zap_and_down_write_memory_lock(vdev); - ret = pci_try_reset_function(vdev->pdev); - up_write(&vdev->memory_lock); + /* + * This function can be invoked while the power state is non-D0. If + * pci_try_reset_function() has been called while the power state is + * non-D0, then pci_try_reset_function() will internally set the power + * state to D0 without vfio driver involvement. For the devices which + * have NoSoftRst-, the reset function can cause the PCI config space + * reset without restoring the original state (saved locally in + * 'vdev->pm_save'). 
+ */ + vfio_pci_set_power_state(vdev, PCI_D0); - return ret; + ret = pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); - } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) { - struct vfio_pci_hot_reset_info hdr; - struct vfio_pci_fill_info fill = { 0 }; - struct vfio_pci_dependent_device *devices = NULL; - bool slot = false; - int ret = 0; + return ret; +} - minsz = offsetofend(struct vfio_pci_hot_reset_info, count); +static int vfio_pci_ioctl_get_pci_hot_reset_info( + struct vfio_pci_core_device *vdev, + struct vfio_pci_hot_reset_info __user *arg) +{ + unsigned long minsz = + offsetofend(struct vfio_pci_hot_reset_info, count); + struct vfio_pci_hot_reset_info hdr; + struct vfio_pci_fill_info fill = { 0 }; + struct vfio_pci_dependent_device *devices = NULL; + bool slot = false; + int ret = 0; - if (copy_from_user(&hdr, (void __user *)arg, minsz)) - return -EFAULT; + if (copy_from_user(&hdr, arg, minsz)) + return -EFAULT; - if (hdr.argsz < minsz) - return -EINVAL; + if (hdr.argsz < minsz) + return -EINVAL; - hdr.flags = 0; + hdr.flags = 0; - /* Can we do a slot or bus reset or neither? */ - if (!pci_probe_reset_slot(vdev->pdev->slot)) - slot = true; - else if (pci_probe_reset_bus(vdev->pdev->bus)) - return -ENODEV; + /* Can we do a slot or bus reset or neither? */ + if (!pci_probe_reset_slot(vdev->pdev->slot)) + slot = true; + else if (pci_probe_reset_bus(vdev->pdev->bus)) + return -ENODEV; - /* How many devices are affected? */ - ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, - vfio_pci_count_devs, - &fill.max, slot); - if (ret) - return ret; + /* How many devices are affected? */ + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs, + &fill.max, slot); + if (ret) + return ret; - WARN_ON(!fill.max); /* Should always be at least one */ + WARN_ON(!fill.max); /* Should always be at least one */ - /* - * If there's enough space, fill it now, otherwise return - * -ENOSPC and the number of devices affected. - */ - if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) { - ret = -ENOSPC; - hdr.count = fill.max; - goto reset_info_exit; - } + /* + * If there's enough space, fill it now, otherwise return -ENOSPC and + * the number of devices affected. + */ + if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) { + ret = -ENOSPC; + hdr.count = fill.max; + goto reset_info_exit; + } - devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL); - if (!devices) - return -ENOMEM; + devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL); + if (!devices) + return -ENOMEM; - fill.devices = devices; + fill.devices = devices; - ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, - vfio_pci_fill_devs, - &fill, slot); + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_fill_devs, + &fill, slot); - /* - * If a device was removed between counting and filling, - * we may come up short of fill.max. If a device was - * added, we'll have a return of -EAGAIN above. - */ - if (!ret) - hdr.count = fill.cur; + /* + * If a device was removed between counting and filling, we may come up + * short of fill.max. If a device was added, we'll have a return of + * -EAGAIN above. 
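On the user side, the -ENOSPC path above is the usual two-call sizing dance for this ioctl: probe once with a bare header to learn the dependent-device count the kernel writes back, then retry with a buffer sized from it. A rough sketch (device_fd is assumed to be an open vfio device fd; error handling trimmed):

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static struct vfio_pci_hot_reset_info *query_hot_reset_info(int device_fd)
{
	struct vfio_pci_hot_reset_info hdr = { .argsz = sizeof(hdr) };
	struct vfio_pci_hot_reset_info *info;
	size_t sz;

	/* expected to fail with ENOSPC, but hdr.count is filled in */
	ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, &hdr);

	sz = sizeof(*info) +
	     hdr.count * sizeof(struct vfio_pci_dependent_device);
	info = calloc(1, sz);
	if (!info)
		return NULL;

	info->argsz = sz;
	if (ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info)) {
		free(info);
		return NULL;
	}
	return info;	/* caller frees */
}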
+ */ + if (!ret) + hdr.count = fill.cur; reset_info_exit: - if (copy_to_user((void __user *)arg, &hdr, minsz)) + if (copy_to_user(arg, &hdr, minsz)) + ret = -EFAULT; + + if (!ret) { + if (copy_to_user(&arg->devices, devices, + hdr.count * sizeof(*devices))) ret = -EFAULT; + } - if (!ret) { - if (copy_to_user((void __user *)(arg + minsz), devices, - hdr.count * sizeof(*devices))) - ret = -EFAULT; - } + kfree(devices); + return ret; +} - kfree(devices); - return ret; +static int vfio_pci_ioctl_pci_hot_reset(struct vfio_pci_core_device *vdev, + struct vfio_pci_hot_reset __user *arg) +{ + unsigned long minsz = offsetofend(struct vfio_pci_hot_reset, count); + struct vfio_pci_hot_reset hdr; + int32_t *group_fds; + struct file **files; + struct vfio_pci_group_info info; + bool slot = false; + int file_idx, count = 0, ret = 0; - } else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) { - struct vfio_pci_hot_reset hdr; - int32_t *group_fds; - struct file **files; - struct vfio_pci_group_info info; - bool slot = false; - int file_idx, count = 0, ret = 0; + if (copy_from_user(&hdr, arg, minsz)) + return -EFAULT; - minsz = offsetofend(struct vfio_pci_hot_reset, count); + if (hdr.argsz < minsz || hdr.flags) + return -EINVAL; - if (copy_from_user(&hdr, (void __user *)arg, minsz)) - return -EFAULT; + /* Can we do a slot or bus reset or neither? */ + if (!pci_probe_reset_slot(vdev->pdev->slot)) + slot = true; + else if (pci_probe_reset_bus(vdev->pdev->bus)) + return -ENODEV; - if (hdr.argsz < minsz || hdr.flags) - return -EINVAL; + /* + * We can't let userspace give us an arbitrarily large buffer to copy, + * so verify how many we think there could be. Note groups can have + * multiple devices so one group per device is the max. + */ + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs, + &count, slot); + if (ret) + return ret; - /* Can we do a slot or bus reset or neither? */ - if (!pci_probe_reset_slot(vdev->pdev->slot)) - slot = true; - else if (pci_probe_reset_bus(vdev->pdev->bus)) - return -ENODEV; + /* Somewhere between 1 and count is OK */ + if (!hdr.count || hdr.count > count) + return -EINVAL; - /* - * We can't let userspace give us an arbitrarily large - * buffer to copy, so verify how many we think there - * could be. Note groups can have multiple devices so - * one group per device is the max. - */ - ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, - vfio_pci_count_devs, - &count, slot); - if (ret) - return ret; + group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL); + files = kcalloc(hdr.count, sizeof(*files), GFP_KERNEL); + if (!group_fds || !files) { + kfree(group_fds); + kfree(files); + return -ENOMEM; + } - /* Somewhere between 1 and count is OK */ - if (!hdr.count || hdr.count > count) - return -EINVAL; + if (copy_from_user(group_fds, arg->group_fds, + hdr.count * sizeof(*group_fds))) { + kfree(group_fds); + kfree(files); + return -EFAULT; + } - group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL); - files = kcalloc(hdr.count, sizeof(*files), GFP_KERNEL); - if (!group_fds || !files) { - kfree(group_fds); - kfree(files); - return -ENOMEM; - } + /* + * For each group_fd, get the group through the vfio external user + * interface and store the group and iommu ID. This ensures the group + * is held across the reset. 
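Note how the handler above refuses to trust hdr.count until it has recomputed its own upper bound, so userspace cannot drive an arbitrarily large allocation. The defensive shape, reduced to a helper with hypothetical names:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static int32_t *demo_copy_fds(int32_t __user *ufds, u32 count, u32 max)
{
	int32_t *fds;

	/* somewhere between 1 and our own computed maximum is OK */
	if (!count || count > max)
		return ERR_PTR(-EINVAL);

	fds = kcalloc(count, sizeof(*fds), GFP_KERNEL);
	if (!fds)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(fds, ufds, count * sizeof(*fds))) {
		kfree(fds);
		return ERR_PTR(-EFAULT);
	}
	return fds;	/* caller kfree()s */
}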
+ */ + for (file_idx = 0; file_idx < hdr.count; file_idx++) { + struct file *file = fget(group_fds[file_idx]); - if (copy_from_user(group_fds, (void __user *)(arg + minsz), - hdr.count * sizeof(*group_fds))) { - kfree(group_fds); - kfree(files); - return -EFAULT; + if (!file) { + ret = -EBADF; + break; } - /* - * For each group_fd, get the group through the vfio external - * user interface and store the group and iommu ID. This - * ensures the group is held across the reset. - */ - for (file_idx = 0; file_idx < hdr.count; file_idx++) { - struct file *file = fget(group_fds[file_idx]); - - if (!file) { - ret = -EBADF; - break; - } - - /* Ensure the FD is a vfio group FD.*/ - if (!vfio_file_iommu_group(file)) { - fput(file); - ret = -EINVAL; - break; - } - - files[file_idx] = file; + /* Ensure the FD is a vfio group FD.*/ + if (!vfio_file_is_group(file)) { + fput(file); + ret = -EINVAL; + break; } - kfree(group_fds); + files[file_idx] = file; + } - /* release reference to groups on error */ - if (ret) - goto hot_reset_release; + kfree(group_fds); + + /* release reference to groups on error */ + if (ret) + goto hot_reset_release; - info.count = hdr.count; - info.files = files; + info.count = hdr.count; + info.files = files; - ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info); + ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info); hot_reset_release: - for (file_idx--; file_idx >= 0; file_idx--) - fput(files[file_idx]); + for (file_idx--; file_idx >= 0; file_idx--) + fput(files[file_idx]); - kfree(files); - return ret; - } else if (cmd == VFIO_DEVICE_IOEVENTFD) { - struct vfio_device_ioeventfd ioeventfd; - int count; + kfree(files); + return ret; +} - minsz = offsetofend(struct vfio_device_ioeventfd, fd); +static int vfio_pci_ioctl_ioeventfd(struct vfio_pci_core_device *vdev, + struct vfio_device_ioeventfd __user *arg) +{ + unsigned long minsz = offsetofend(struct vfio_device_ioeventfd, fd); + struct vfio_device_ioeventfd ioeventfd; + int count; - if (copy_from_user(&ioeventfd, (void __user *)arg, minsz)) - return -EFAULT; + if (copy_from_user(&ioeventfd, arg, minsz)) + return -EFAULT; - if (ioeventfd.argsz < minsz) - return -EINVAL; + if (ioeventfd.argsz < minsz) + return -EINVAL; - if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK) - return -EINVAL; + if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK) + return -EINVAL; - count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK; + count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK; - if (hweight8(count) != 1 || ioeventfd.fd < -1) - return -EINVAL; + if (hweight8(count) != 1 || ioeventfd.fd < -1) + return -EINVAL; - return vfio_pci_ioeventfd(vdev, ioeventfd.offset, - ioeventfd.data, count, ioeventfd.fd); + return vfio_pci_ioeventfd(vdev, ioeventfd.offset, ioeventfd.data, count, + ioeventfd.fd); +} + +long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd, + unsigned long arg) +{ + struct vfio_pci_core_device *vdev = + container_of(core_vdev, struct vfio_pci_core_device, vdev); + void __user *uarg = (void __user *)arg; + + switch (cmd) { + case VFIO_DEVICE_GET_INFO: + return vfio_pci_ioctl_get_info(vdev, uarg); + case VFIO_DEVICE_GET_IRQ_INFO: + return vfio_pci_ioctl_get_irq_info(vdev, uarg); + case VFIO_DEVICE_GET_PCI_HOT_RESET_INFO: + return vfio_pci_ioctl_get_pci_hot_reset_info(vdev, uarg); + case VFIO_DEVICE_GET_REGION_INFO: + return vfio_pci_ioctl_get_region_info(vdev, uarg); + case VFIO_DEVICE_IOEVENTFD: + return vfio_pci_ioctl_ioeventfd(vdev, uarg); + case VFIO_DEVICE_PCI_HOT_RESET: + 
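The group-fd walk above is a standard reference-collection loop: fget() each fd, stop at the first bad one, and unwind only the slots actually filled. Condensed, with demo_file_ok() standing in for the vfio_file_is_group() check:

#include <linux/file.h>	/* fget(), fput() */

static int demo_collect_files(int32_t *fds, struct file **files, int count)
{
	int i, ret = 0;

	for (i = 0; i < count; i++) {
		struct file *file = fget(fds[i]);

		if (!file) {
			ret = -EBADF;
			break;
		}
		if (!demo_file_ok(file)) {	/* hypothetical check */
			fput(file);
			ret = -EINVAL;
			break;
		}
		files[i] = file;
	}

	if (ret) {
		/* drop only the references we actually took */
		while (--i >= 0)
			fput(files[i]);
	}
	return ret;
}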
return vfio_pci_ioctl_pci_hot_reset(vdev, uarg); + case VFIO_DEVICE_RESET: + return vfio_pci_ioctl_reset(vdev, uarg); + case VFIO_DEVICE_SET_IRQS: + return vfio_pci_ioctl_set_irqs(vdev, uarg); + default: + return -ENOTTY; } - return -ENOTTY; } EXPORT_SYMBOL_GPL(vfio_pci_core_ioctl); static int vfio_pci_core_feature_token(struct vfio_device *device, u32 flags, - void __user *arg, size_t argsz) + uuid_t __user *arg, size_t argsz) { struct vfio_pci_core_device *vdev = container_of(device, struct vfio_pci_core_device, vdev); @@ -1202,6 +1428,13 @@ int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags, void __user *arg, size_t argsz) { switch (flags & VFIO_DEVICE_FEATURE_MASK) { + case VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY: + return vfio_pci_core_pm_entry(device, flags, arg, argsz); + case VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP: + return vfio_pci_core_pm_entry_with_wakeup(device, flags, + arg, argsz); + case VFIO_DEVICE_FEATURE_LOW_POWER_EXIT: + return vfio_pci_core_pm_exit(device, flags, arg, argsz); case VFIO_DEVICE_FEATURE_PCI_VF_TOKEN: return vfio_pci_core_feature_token(device, flags, arg, argsz); default: @@ -1214,31 +1447,47 @@ static ssize_t vfio_pci_rw(struct vfio_pci_core_device *vdev, char __user *buf, size_t count, loff_t *ppos, bool iswrite) { unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + int ret; if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) return -EINVAL; + ret = pm_runtime_resume_and_get(&vdev->pdev->dev); + if (ret) { + pci_info_ratelimited(vdev->pdev, "runtime resume failed %d\n", + ret); + return -EIO; + } + switch (index) { case VFIO_PCI_CONFIG_REGION_INDEX: - return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite); + ret = vfio_pci_config_rw(vdev, buf, count, ppos, iswrite); + break; case VFIO_PCI_ROM_REGION_INDEX: if (iswrite) - return -EINVAL; - return vfio_pci_bar_rw(vdev, buf, count, ppos, false); + ret = -EINVAL; + else + ret = vfio_pci_bar_rw(vdev, buf, count, ppos, false); + break; case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX: - return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite); + ret = vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite); + break; case VFIO_PCI_VGA_REGION_INDEX: - return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite); + ret = vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite); + break; + default: index -= VFIO_PCI_NUM_REGIONS; - return vdev->region[index].ops->rw(vdev, buf, + ret = vdev->region[index].ops->rw(vdev, buf, count, ppos, iswrite); + break; } - return -EINVAL; + pm_runtime_put(&vdev->pdev->dev); + return ret; } ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf, @@ -1433,7 +1682,11 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf) mutex_lock(&vdev->vma_lock); down_read(&vdev->memory_lock); - if (!__vfio_pci_memory_enabled(vdev)) { + /* + * Memory region cannot be accessed if the low power feature is engaged + * or memory access is disabled. 
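The rework of vfio_pci_rw() above funnels every exit through a single put so the runtime-PM reference taken at entry is always balanced. The skeleton of that pattern, with demo_do_rw() as a stand-in for the per-region dispatch:

#include <linux/pm_runtime.h>

static ssize_t demo_rw(struct device *dev, char __user *buf,
		       size_t count, loff_t *ppos, bool iswrite)
{
	ssize_t ret;

	/* resume the device (and its parents) before touching hardware */
	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		return -EIO;

	ret = demo_do_rw(buf, count, ppos, iswrite);	/* hypothetical */

	/* single exit: the usage count is dropped on every path */
	pm_runtime_put(dev);
	return ret;
}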
+ */ + if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev)) { ret = VM_FAULT_SIGBUS; goto up_out; } @@ -1825,12 +2078,12 @@ static void vfio_pci_vga_uninit(struct vfio_pci_core_device *vdev) VGA_RSRC_LEGACY_MEM); } -void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev, - struct pci_dev *pdev, - const struct vfio_device_ops *vfio_pci_ops) +int vfio_pci_core_init_dev(struct vfio_device *core_vdev) { - vfio_init_group_dev(&vdev->vdev, &pdev->dev, vfio_pci_ops); - vdev->pdev = pdev; + struct vfio_pci_core_device *vdev = + container_of(core_vdev, struct vfio_pci_core_device, vdev); + + vdev->pdev = to_pci_dev(core_vdev->dev); vdev->irq_type = VFIO_PCI_NUM_IRQS; mutex_init(&vdev->igate); spin_lock_init(&vdev->irqlock); @@ -1841,19 +2094,24 @@ void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev, INIT_LIST_HEAD(&vdev->vma_list); INIT_LIST_HEAD(&vdev->sriov_pfs_item); init_rwsem(&vdev->memory_lock); + + return 0; } -EXPORT_SYMBOL_GPL(vfio_pci_core_init_device); +EXPORT_SYMBOL_GPL(vfio_pci_core_init_dev); -void vfio_pci_core_uninit_device(struct vfio_pci_core_device *vdev) +void vfio_pci_core_release_dev(struct vfio_device *core_vdev) { + struct vfio_pci_core_device *vdev = + container_of(core_vdev, struct vfio_pci_core_device, vdev); + mutex_destroy(&vdev->igate); mutex_destroy(&vdev->ioeventfds_lock); mutex_destroy(&vdev->vma_lock); - vfio_uninit_group_dev(&vdev->vdev); kfree(vdev->region); kfree(vdev->pm_save); + vfio_free_device(core_vdev); } -EXPORT_SYMBOL_GPL(vfio_pci_core_uninit_device); +EXPORT_SYMBOL_GPL(vfio_pci_core_release_dev); int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev) { @@ -1875,6 +2133,11 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev) return -EINVAL; } + if (vdev->vdev.log_ops && !(vdev->vdev.log_ops->log_start && + vdev->vdev.log_ops->log_stop && + vdev->vdev.log_ops->log_read_and_clear)) + return -EINVAL; + /* * Prevent binding to PFs with VFs enabled, the VFs might be in use * by the host or other users. We cannot capture the VFs if they @@ -2148,6 +2411,15 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set, goto err_unlock; } + /* + * Some of the devices in the dev_set can be in the runtime suspended + * state. Increment the usage count for all the devices in the dev_set + * before reset and decrement the same after reset. 
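vfio_pci_dev_set_pm_runtime_get() is called below but defined outside this hunk; its likely shape is the usual get-all-or-unwind walk over the device set, sketched here under that assumption:

static int demo_dev_set_pm_runtime_get(struct vfio_device_set *dev_set)
{
	struct vfio_pci_core_device *cur;
	int ret;

	list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) {
		ret = pm_runtime_resume_and_get(&cur->pdev->dev);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	/* put only the devices we resumed before the failure */
	list_for_each_entry_continue_reverse(cur, &dev_set->device_list,
					     vdev.dev_set_list)
		pm_runtime_put(&cur->pdev->dev);
	return ret;
}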
+ */ + ret = vfio_pci_dev_set_pm_runtime_get(dev_set); + if (ret) + goto err_unlock; + list_for_each_entry(cur_vma, &dev_set->device_list, vdev.dev_set_list) { /* * Test whether all the affected devices are contained by the @@ -2203,6 +2475,9 @@ err_undo: else mutex_unlock(&cur->vma_lock); } + + list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) + pm_runtime_put(&cur->pdev->dev); err_unlock: mutex_unlock(&dev_set->lock); return ret; diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c index 352c725ccf18..5e6ca5926954 100644 --- a/drivers/vfio/pci/vfio_pci_igd.c +++ b/drivers/vfio/pci/vfio_pci_igd.c @@ -15,7 +15,7 @@ #include <linux/uaccess.h> #include <linux/vfio.h> -#include <linux/vfio_pci_core.h> +#include "vfio_pci_priv.h" #define OPREGION_SIGNATURE "IntelGraphicsMem" #define OPREGION_SIZE (8 * 1024) @@ -257,7 +257,7 @@ static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev) } } - ret = vfio_pci_register_dev_region(vdev, + ret = vfio_pci_core_register_dev_region(vdev, PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE, VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &vfio_pci_igd_regops, size, VFIO_REGION_INFO_FLAG_READ, opregionvbt); @@ -402,7 +402,7 @@ static int vfio_pci_igd_cfg_init(struct vfio_pci_core_device *vdev) return -EINVAL; } - ret = vfio_pci_register_dev_region(vdev, + ret = vfio_pci_core_register_dev_region(vdev, PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE, VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG, &vfio_pci_igd_cfg_regops, host_bridge->cfg_size, @@ -422,7 +422,7 @@ static int vfio_pci_igd_cfg_init(struct vfio_pci_core_device *vdev) return -EINVAL; } - ret = vfio_pci_register_dev_region(vdev, + ret = vfio_pci_core_register_dev_region(vdev, PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE, VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG, &vfio_pci_igd_cfg_regops, lpc_bridge->cfg_size, diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index 6069a11fb51a..40c3d7cf163f 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -20,7 +20,33 @@ #include <linux/wait.h> #include <linux/slab.h> -#include <linux/vfio_pci_core.h> +#include "vfio_pci_priv.h" + +struct vfio_pci_irq_ctx { + struct eventfd_ctx *trigger; + struct virqfd *unmask; + struct virqfd *mask; + char *name; + bool masked; + struct irq_bypass_producer producer; +}; + +static bool irq_is(struct vfio_pci_core_device *vdev, int type) +{ + return vdev->irq_type == type; +} + +static bool is_intx(struct vfio_pci_core_device *vdev) +{ + return vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX; +} + +static bool is_irq_none(struct vfio_pci_core_device *vdev) +{ + return !(vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX || + vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX || + vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX); +} /* * INTx @@ -33,10 +59,12 @@ static void vfio_send_intx_eventfd(void *opaque, void *unused) eventfd_signal(vdev->ctx[0].trigger, 1); } -void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev) +/* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. 
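The new bool return lets a caller remember whether it was the one to mask INTx and restore the state only in that case; the low-power entry path is the motivating user. A hypothetical caller:

static void demo_low_power_cycle(struct vfio_pci_core_device *vdev)
{
	/* only unmask on exit if this call actually changed the state */
	bool masked_by_us = vfio_pci_intx_mask(vdev);

	demo_enter_low_power(vdev);	/* hypothetical */
	demo_exit_low_power(vdev);	/* hypothetical */

	if (masked_by_us)
		vfio_pci_intx_unmask(vdev);
}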
*/ +bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev) { struct pci_dev *pdev = vdev->pdev; unsigned long flags; + bool masked_changed = false; spin_lock_irqsave(&vdev->irqlock, flags); @@ -60,9 +88,11 @@ void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev) disable_irq_nosync(pdev->irq); vdev->ctx[0].masked = true; + masked_changed = true; } spin_unlock_irqrestore(&vdev->irqlock, flags); + return masked_changed; } /* diff --git a/drivers/vfio/pci/vfio_pci_priv.h b/drivers/vfio/pci/vfio_pci_priv.h new file mode 100644 index 000000000000..5e4fa69aee16 --- /dev/null +++ b/drivers/vfio/pci/vfio_pci_priv.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef VFIO_PCI_PRIV_H +#define VFIO_PCI_PRIV_H + +#include <linux/vfio_pci_core.h> + +/* Special capability IDs predefined access */ +#define PCI_CAP_ID_INVALID 0xFF /* default raw access */ +#define PCI_CAP_ID_INVALID_VIRT 0xFE /* default virt access */ + +/* Cap maximum number of ioeventfds per device (arbitrary) */ +#define VFIO_PCI_IOEVENTFD_MAX 1000 + +struct vfio_pci_ioeventfd { + struct list_head next; + struct vfio_pci_core_device *vdev; + struct virqfd *virqfd; + void __iomem *addr; + uint64_t data; + loff_t pos; + int bar; + int count; + bool test_mem; +}; + +bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev); +void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev); + +int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags, + unsigned index, unsigned start, unsigned count, + void *data); + +ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite); + +ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite); + +#ifdef CONFIG_VFIO_PCI_VGA +ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite); +#else +static inline ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, + char __user *buf, size_t count, + loff_t *ppos, bool iswrite) +{ + return -EINVAL; +} +#endif + +int vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset, + uint64_t data, int count, int fd); + +int vfio_pci_init_perm_bits(void); +void vfio_pci_uninit_perm_bits(void); + +int vfio_config_init(struct vfio_pci_core_device *vdev); +void vfio_config_free(struct vfio_pci_core_device *vdev); + +int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, + pci_power_t state); + +bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev); +void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev); +u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev); +void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev, + u16 cmd); + +#ifdef CONFIG_VFIO_PCI_IGD +int vfio_pci_igd_init(struct vfio_pci_core_device *vdev); +#else +static inline int vfio_pci_igd_init(struct vfio_pci_core_device *vdev) +{ + return -ENODEV; +} +#endif + +#ifdef CONFIG_VFIO_PCI_ZDEV_KVM +int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev, + struct vfio_info_cap *caps); +int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev); +void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev); +#else +static inline int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev, + struct vfio_info_cap *caps) +{ + return -ENODEV; +} + +static inline int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev) +{ + return 0; +} + +static 
inline void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev) +{} +#endif + +static inline bool vfio_pci_is_vga(struct pci_dev *pdev) +{ + return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA; +} + +#endif diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index 82ac1569deb0..e352a033b4ae 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -17,7 +17,7 @@ #include <linux/vfio.h> #include <linux/vgaarb.h> -#include <linux/vfio_pci_core.h> +#include "vfio_pci_priv.h" #ifdef __LITTLE_ENDIAN #define vfio_ioread64 ioread64 @@ -412,8 +412,8 @@ static void vfio_pci_ioeventfd_thread(void *opaque, void *unused) vfio_pci_ioeventfd_do_write(ioeventfd, ioeventfd->test_mem); } -long vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset, - uint64_t data, int count, int fd) +int vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset, + uint64_t data, int count, int fd) { struct pci_dev *pdev = vdev->pdev; loff_t pos = offset & VFIO_PCI_OFFSET_MASK; diff --git a/drivers/vfio/pci/vfio_pci_zdev.c b/drivers/vfio/pci/vfio_pci_zdev.c index 0cbdcd14f1c8..0990fdb146b7 100644 --- a/drivers/vfio/pci/vfio_pci_zdev.c +++ b/drivers/vfio/pci/vfio_pci_zdev.c @@ -15,7 +15,7 @@ #include <asm/pci_clp.h> #include <asm/pci_io.h> -#include <linux/vfio_pci_core.h> +#include "vfio_pci_priv.h" /* * Add the Base PCI Function information to the device info region. diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c index 1aaa4f721bd2..eaea63e5294c 100644 --- a/drivers/vfio/platform/vfio_amba.c +++ b/drivers/vfio/platform/vfio_amba.c @@ -7,6 +7,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/vfio.h> +#include <linux/pm_runtime.h> #include <linux/amba/bus.h> #include "vfio_platform_private.h" @@ -40,20 +41,16 @@ static int get_amba_irq(struct vfio_platform_device *vdev, int i) return ret ? 
ret : -ENXIO; } -static int vfio_amba_probe(struct amba_device *adev, const struct amba_id *id) +static int vfio_amba_init_dev(struct vfio_device *core_vdev) { - struct vfio_platform_device *vdev; + struct vfio_platform_device *vdev = + container_of(core_vdev, struct vfio_platform_device, vdev); + struct amba_device *adev = to_amba_device(core_vdev->dev); int ret; - vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); - if (!vdev) - return -ENOMEM; - vdev->name = kasprintf(GFP_KERNEL, "vfio-amba-%08x", adev->periphid); - if (!vdev->name) { - kfree(vdev); + if (!vdev->name) return -ENOMEM; - } vdev->opaque = (void *) adev; vdev->flags = VFIO_DEVICE_FLAGS_AMBA; @@ -61,26 +58,67 @@ static int vfio_amba_probe(struct amba_device *adev, const struct amba_id *id) vdev->get_irq = get_amba_irq; vdev->reset_required = false; - ret = vfio_platform_probe_common(vdev, &adev->dev); - if (ret) { + ret = vfio_platform_init_common(vdev); + if (ret) kfree(vdev->name); - kfree(vdev); - return ret; - } + return ret; +} + +static const struct vfio_device_ops vfio_amba_ops; +static int vfio_amba_probe(struct amba_device *adev, const struct amba_id *id) +{ + struct vfio_platform_device *vdev; + int ret; + + vdev = vfio_alloc_device(vfio_platform_device, vdev, &adev->dev, + &vfio_amba_ops); + if (IS_ERR(vdev)) + return PTR_ERR(vdev); + ret = vfio_register_group_dev(&vdev->vdev); + if (ret) + goto out_put_vdev; + + pm_runtime_enable(&adev->dev); dev_set_drvdata(&adev->dev, vdev); return 0; + +out_put_vdev: + vfio_put_device(&vdev->vdev); + return ret; +} + +static void vfio_amba_release_dev(struct vfio_device *core_vdev) +{ + struct vfio_platform_device *vdev = + container_of(core_vdev, struct vfio_platform_device, vdev); + + vfio_platform_release_common(vdev); + kfree(vdev->name); + vfio_free_device(core_vdev); } static void vfio_amba_remove(struct amba_device *adev) { struct vfio_platform_device *vdev = dev_get_drvdata(&adev->dev); - vfio_platform_remove_common(vdev); - kfree(vdev->name); - kfree(vdev); + vfio_unregister_group_dev(&vdev->vdev); + pm_runtime_disable(vdev->device); + vfio_put_device(&vdev->vdev); } +static const struct vfio_device_ops vfio_amba_ops = { + .name = "vfio-amba", + .init = vfio_amba_init_dev, + .release = vfio_amba_release_dev, + .open_device = vfio_platform_open_device, + .close_device = vfio_platform_close_device, + .ioctl = vfio_platform_ioctl, + .read = vfio_platform_read, + .write = vfio_platform_write, + .mmap = vfio_platform_mmap, +}; + static const struct amba_id pl330_ids[] = { { 0, 0 }, }; diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c index 04f40c5acfd6..82cedcebfd90 100644 --- a/drivers/vfio/platform/vfio_platform.c +++ b/drivers/vfio/platform/vfio_platform.c @@ -7,6 +7,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/vfio.h> +#include <linux/pm_runtime.h> #include <linux/platform_device.h> #include "vfio_platform_private.h" @@ -36,14 +37,11 @@ static int get_platform_irq(struct vfio_platform_device *vdev, int i) return platform_get_irq_optional(pdev, i); } -static int vfio_platform_probe(struct platform_device *pdev) +static int vfio_platform_init_dev(struct vfio_device *core_vdev) { - struct vfio_platform_device *vdev; - int ret; - - vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); - if (!vdev) - return -ENOMEM; + struct vfio_platform_device *vdev = + container_of(core_vdev, struct vfio_platform_device, vdev); + struct platform_device *pdev = to_platform_device(core_vdev->dev); vdev->opaque = (void *) pdev; vdev->name = 
pdev->name; @@ -52,24 +50,64 @@ static int vfio_platform_probe(struct platform_device *pdev) vdev->get_irq = get_platform_irq; vdev->reset_required = reset_required; - ret = vfio_platform_probe_common(vdev, &pdev->dev); - if (ret) { - kfree(vdev); - return ret; - } + return vfio_platform_init_common(vdev); +} + +static const struct vfio_device_ops vfio_platform_ops; +static int vfio_platform_probe(struct platform_device *pdev) +{ + struct vfio_platform_device *vdev; + int ret; + + vdev = vfio_alloc_device(vfio_platform_device, vdev, &pdev->dev, + &vfio_platform_ops); + if (IS_ERR(vdev)) + return PTR_ERR(vdev); + + ret = vfio_register_group_dev(&vdev->vdev); + if (ret) + goto out_put_vdev; + + pm_runtime_enable(&pdev->dev); dev_set_drvdata(&pdev->dev, vdev); return 0; + +out_put_vdev: + vfio_put_device(&vdev->vdev); + return ret; +} + +static void vfio_platform_release_dev(struct vfio_device *core_vdev) +{ + struct vfio_platform_device *vdev = + container_of(core_vdev, struct vfio_platform_device, vdev); + + vfio_platform_release_common(vdev); + vfio_free_device(core_vdev); } static int vfio_platform_remove(struct platform_device *pdev) { struct vfio_platform_device *vdev = dev_get_drvdata(&pdev->dev); - vfio_platform_remove_common(vdev); - kfree(vdev); + vfio_unregister_group_dev(&vdev->vdev); + pm_runtime_disable(vdev->device); + vfio_put_device(&vdev->vdev); return 0; } +static const struct vfio_device_ops vfio_platform_ops = { + .name = "vfio-platform", + .init = vfio_platform_init_dev, + .release = vfio_platform_release_dev, + .open_device = vfio_platform_open_device, + .close_device = vfio_platform_close_device, + .ioctl = vfio_platform_ioctl, + .read = vfio_platform_read, + .write = vfio_platform_write, + .mmap = vfio_platform_mmap, +}; + static struct platform_driver vfio_platform_driver = { .probe = vfio_platform_probe, .remove = vfio_platform_remove, diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c index 256f55b84e70..55dc4f43c31e 100644 --- a/drivers/vfio/platform/vfio_platform_common.c +++ b/drivers/vfio/platform/vfio_platform_common.c @@ -218,7 +218,7 @@ static int vfio_platform_call_reset(struct vfio_platform_device *vdev, return -EINVAL; } -static void vfio_platform_close_device(struct vfio_device *core_vdev) +void vfio_platform_close_device(struct vfio_device *core_vdev) { struct vfio_platform_device *vdev = container_of(core_vdev, struct vfio_platform_device, vdev); @@ -236,8 +236,9 @@ static void vfio_platform_close_device(struct vfio_device *core_vdev) vfio_platform_regions_cleanup(vdev); vfio_platform_irq_cleanup(vdev); } +EXPORT_SYMBOL_GPL(vfio_platform_close_device); -static int vfio_platform_open_device(struct vfio_device *core_vdev) +int vfio_platform_open_device(struct vfio_device *core_vdev) { struct vfio_platform_device *vdev = container_of(core_vdev, struct vfio_platform_device, vdev); @@ -273,9 +274,10 @@ err_irq: vfio_platform_regions_cleanup(vdev); return ret; } +EXPORT_SYMBOL_GPL(vfio_platform_open_device); -static long vfio_platform_ioctl(struct vfio_device *core_vdev, - unsigned int cmd, unsigned long arg) +long vfio_platform_ioctl(struct vfio_device *core_vdev, + unsigned int cmd, unsigned long arg) { struct vfio_platform_device *vdev = container_of(core_vdev, struct vfio_platform_device, vdev); @@ -382,6 +384,7 @@ static long vfio_platform_ioctl(struct vfio_device *core_vdev, return -ENOTTY; } +EXPORT_SYMBOL_GPL(vfio_platform_ioctl); static ssize_t vfio_platform_read_mmio(struct vfio_platform_region 
*reg, char __user *buf, size_t count, @@ -438,8 +441,8 @@ err: return -EFAULT; } -static ssize_t vfio_platform_read(struct vfio_device *core_vdev, - char __user *buf, size_t count, loff_t *ppos) +ssize_t vfio_platform_read(struct vfio_device *core_vdev, + char __user *buf, size_t count, loff_t *ppos) { struct vfio_platform_device *vdev = container_of(core_vdev, struct vfio_platform_device, vdev); @@ -460,6 +463,7 @@ static ssize_t vfio_platform_read(struct vfio_device *core_vdev, return -EINVAL; } +EXPORT_SYMBOL_GPL(vfio_platform_read); static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg, const char __user *buf, size_t count, @@ -515,8 +519,8 @@ err: return -EFAULT; } -static ssize_t vfio_platform_write(struct vfio_device *core_vdev, const char __user *buf, - size_t count, loff_t *ppos) +ssize_t vfio_platform_write(struct vfio_device *core_vdev, const char __user *buf, + size_t count, loff_t *ppos) { struct vfio_platform_device *vdev = container_of(core_vdev, struct vfio_platform_device, vdev); @@ -537,6 +541,7 @@ static ssize_t vfio_platform_write(struct vfio_device *core_vdev, const char __u return -EINVAL; } +EXPORT_SYMBOL_GPL(vfio_platform_write); static int vfio_platform_mmap_mmio(struct vfio_platform_region region, struct vm_area_struct *vma) @@ -558,7 +563,7 @@ static int vfio_platform_mmap_mmio(struct vfio_platform_region region, req_len, vma->vm_page_prot); } -static int vfio_platform_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma) +int vfio_platform_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma) { struct vfio_platform_device *vdev = container_of(core_vdev, struct vfio_platform_device, vdev); @@ -598,16 +603,7 @@ static int vfio_platform_mmap(struct vfio_device *core_vdev, struct vm_area_stru return -EINVAL; } - -static const struct vfio_device_ops vfio_platform_ops = { - .name = "vfio-platform", - .open_device = vfio_platform_open_device, - .close_device = vfio_platform_close_device, - .ioctl = vfio_platform_ioctl, - .read = vfio_platform_read, - .write = vfio_platform_write, - .mmap = vfio_platform_mmap, -}; +EXPORT_SYMBOL_GPL(vfio_platform_mmap); static int vfio_platform_of_probe(struct vfio_platform_device *vdev, struct device *dev) @@ -639,55 +635,34 @@ static int vfio_platform_of_probe(struct vfio_platform_device *vdev, * If the firmware is ACPI type, then acpi_disabled is 0. All other checks are * valid checks. We cannot claim that this system is DT. 
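Both bus conversions above land on the same lifecycle: vfio_alloc_device() embeds the vfio_device and runs the new init callback, registration follows, and teardown is unregister plus a final vfio_put_device(). A skeleton for a hypothetical bus driver (demo_ops itself is sketched after the vfio_main.c hunks below):

static const struct vfio_device_ops demo_ops;

struct demo_vfio_device {
	struct vfio_device vdev;	/* must be embedded */
	/* driver-private state follows */
};

static int demo_probe(struct device *dev)
{
	struct demo_vfio_device *ddev;
	int ret;

	/* allocates the containing struct and calls demo_ops.init */
	ddev = vfio_alloc_device(demo_vfio_device, vdev, dev, &demo_ops);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	ret = vfio_register_group_dev(&ddev->vdev);
	if (ret) {
		/* last put ends up in demo_ops.release */
		vfio_put_device(&ddev->vdev);
		return ret;
	}
	dev_set_drvdata(dev, ddev);
	return 0;
}

static void demo_remove(struct device *dev)
{
	struct demo_vfio_device *ddev = dev_get_drvdata(dev);

	vfio_unregister_group_dev(&ddev->vdev);
	vfio_put_device(&ddev->vdev);
}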
*/ -int vfio_platform_probe_common(struct vfio_platform_device *vdev, - struct device *dev) +int vfio_platform_init_common(struct vfio_platform_device *vdev) { int ret; - - vfio_init_group_dev(&vdev->vdev, dev, &vfio_platform_ops); + struct device *dev = vdev->vdev.dev; ret = vfio_platform_acpi_probe(vdev, dev); if (ret) ret = vfio_platform_of_probe(vdev, dev); if (ret) - goto out_uninit; + return ret; vdev->device = dev; + mutex_init(&vdev->igate); ret = vfio_platform_get_reset(vdev); - if (ret && vdev->reset_required) { + if (ret && vdev->reset_required) dev_err(dev, "No reset function found for device %s\n", vdev->name); - goto out_uninit; - } - - ret = vfio_register_group_dev(&vdev->vdev); - if (ret) - goto put_reset; - - mutex_init(&vdev->igate); - - pm_runtime_enable(dev); - return 0; - -put_reset: - vfio_platform_put_reset(vdev); -out_uninit: - vfio_uninit_group_dev(&vdev->vdev); return ret; } -EXPORT_SYMBOL_GPL(vfio_platform_probe_common); +EXPORT_SYMBOL_GPL(vfio_platform_init_common); -void vfio_platform_remove_common(struct vfio_platform_device *vdev) +void vfio_platform_release_common(struct vfio_platform_device *vdev) { - vfio_unregister_group_dev(&vdev->vdev); - - pm_runtime_disable(vdev->device); vfio_platform_put_reset(vdev); - vfio_uninit_group_dev(&vdev->vdev); } -EXPORT_SYMBOL_GPL(vfio_platform_remove_common); +EXPORT_SYMBOL_GPL(vfio_platform_release_common); void __vfio_platform_register_reset(struct vfio_platform_reset_node *node) { diff --git a/drivers/vfio/platform/vfio_platform_private.h b/drivers/vfio/platform/vfio_platform_private.h index 691b43f4b2b2..8d8fab516849 100644 --- a/drivers/vfio/platform/vfio_platform_private.h +++ b/drivers/vfio/platform/vfio_platform_private.h @@ -78,9 +78,21 @@ struct vfio_platform_reset_node { vfio_platform_reset_fn_t of_reset; }; -int vfio_platform_probe_common(struct vfio_platform_device *vdev, - struct device *dev); -void vfio_platform_remove_common(struct vfio_platform_device *vdev); +int vfio_platform_init_common(struct vfio_platform_device *vdev); +void vfio_platform_release_common(struct vfio_platform_device *vdev); + +int vfio_platform_open_device(struct vfio_device *core_vdev); +void vfio_platform_close_device(struct vfio_device *core_vdev); +long vfio_platform_ioctl(struct vfio_device *core_vdev, + unsigned int cmd, unsigned long arg); +ssize_t vfio_platform_read(struct vfio_device *core_vdev, + char __user *buf, size_t count, + loff_t *ppos); +ssize_t vfio_platform_write(struct vfio_device *core_vdev, + const char __user *buf, + size_t count, loff_t *ppos); +int vfio_platform_mmap(struct vfio_device *core_vdev, + struct vm_area_struct *vma); int vfio_platform_irq_init(struct vfio_platform_device *vdev); void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev); diff --git a/drivers/vfio/vfio.h b/drivers/vfio/vfio.h index 503bea6c843d..bcad54bbab08 100644 --- a/drivers/vfio/vfio.h +++ b/drivers/vfio/vfio.h @@ -3,6 +3,16 @@ * Copyright (C) 2012 Red Hat, Inc. All rights reserved. 
* Author: Alex Williamson <[email protected]> */ +#ifndef __VFIO_VFIO_H__ +#define __VFIO_VFIO_H__ + +#include <linux/device.h> +#include <linux/cdev.h> +#include <linux/module.h> + +struct iommu_group; +struct vfio_device; +struct vfio_container; enum vfio_group_type { /* @@ -28,6 +38,30 @@ enum vfio_group_type { VFIO_NO_IOMMU, }; +struct vfio_group { + struct device dev; + struct cdev cdev; + /* + * When drivers is non-zero a driver is attached to the struct device + * that provided the iommu_group and thus the iommu_group is a valid + * pointer. When drivers is 0 the driver is being detached. Once users + * reaches 0 then the iommu_group is invalid. + */ + refcount_t drivers; + unsigned int container_users; + struct iommu_group *iommu_group; + struct vfio_container *container; + struct list_head device_list; + struct mutex device_lock; + struct list_head vfio_next; + struct list_head container_next; + enum vfio_group_type type; + struct mutex group_lock; + struct kvm *kvm; + struct file *opened_file; + struct blocking_notifier_head notifier; +}; + /* events for the backend driver notify callback */ enum vfio_iommu_notify_type { VFIO_IOMMU_CONTAINER_CLOSE = 0, @@ -67,5 +101,33 @@ struct vfio_iommu_driver_ops { enum vfio_iommu_notify_type event); }; +struct vfio_iommu_driver { + const struct vfio_iommu_driver_ops *ops; + struct list_head vfio_next; +}; + int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops); void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops); + +bool vfio_assert_device_open(struct vfio_device *device); + +struct vfio_container *vfio_container_from_file(struct file *filep); +int vfio_device_assign_container(struct vfio_device *device); +void vfio_device_unassign_container(struct vfio_device *device); +int vfio_container_attach_group(struct vfio_container *container, + struct vfio_group *group); +void vfio_group_detach_container(struct vfio_group *group); +void vfio_device_container_register(struct vfio_device *device); +void vfio_device_container_unregister(struct vfio_device *device); +long vfio_container_ioctl_check_extension(struct vfio_container *container, + unsigned long arg); +int __init vfio_container_init(void); +void vfio_container_cleanup(void); + +#ifdef CONFIG_VFIO_NOIOMMU +extern bool vfio_noiommu __read_mostly; +#else +enum { vfio_noiommu = false }; +#endif + +#endif diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c index 7cb56c382c97..2d168793d4e1 100644 --- a/drivers/vfio/vfio_main.c +++ b/drivers/vfio/vfio_main.c @@ -32,6 +32,9 @@ #include <linux/vfio.h> #include <linux/wait.h> #include <linux/sched/signal.h> +#include <linux/pm_runtime.h> +#include <linux/interval_tree.h> +#include <linux/iova_bitmap.h> #include "vfio.h" #define DRIVER_VERSION "0.3" @@ -40,54 +43,14 @@ static struct vfio { struct class *class; - struct list_head iommu_drivers_list; - struct mutex iommu_drivers_lock; struct list_head group_list; struct mutex group_lock; /* locks group_list */ struct ida group_ida; dev_t group_devt; + struct class *device_class; + struct ida device_ida; } vfio; -struct vfio_iommu_driver { - const struct vfio_iommu_driver_ops *ops; - struct list_head vfio_next; -}; - -struct vfio_container { - struct kref kref; - struct list_head group_list; - struct rw_semaphore group_lock; - struct vfio_iommu_driver *iommu_driver; - void *iommu_data; - bool noiommu; -}; - -struct vfio_group { - struct device dev; - struct cdev cdev; - refcount_t users; - unsigned int container_users; - struct iommu_group 
*iommu_group; - struct vfio_container *container; - struct list_head device_list; - struct mutex device_lock; - struct list_head vfio_next; - struct list_head container_next; - enum vfio_group_type type; - unsigned int dev_counter; - struct rw_semaphore group_rwsem; - struct kvm *kvm; - struct file *opened_file; - struct blocking_notifier_head notifier; -}; - -#ifdef CONFIG_VFIO_NOIOMMU -static bool noiommu __read_mostly; -module_param_named(enable_unsafe_noiommu_mode, - noiommu, bool, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)"); -#endif - static DEFINE_XARRAY(vfio_device_set_xa); static const struct file_operations vfio_group_fops; @@ -162,146 +125,6 @@ static void vfio_release_device_set(struct vfio_device *device) xa_unlock(&vfio_device_set_xa); } -#ifdef CONFIG_VFIO_NOIOMMU -static void *vfio_noiommu_open(unsigned long arg) -{ - if (arg != VFIO_NOIOMMU_IOMMU) - return ERR_PTR(-EINVAL); - if (!capable(CAP_SYS_RAWIO)) - return ERR_PTR(-EPERM); - - return NULL; -} - -static void vfio_noiommu_release(void *iommu_data) -{ -} - -static long vfio_noiommu_ioctl(void *iommu_data, - unsigned int cmd, unsigned long arg) -{ - if (cmd == VFIO_CHECK_EXTENSION) - return noiommu && (arg == VFIO_NOIOMMU_IOMMU) ? 1 : 0; - - return -ENOTTY; -} - -static int vfio_noiommu_attach_group(void *iommu_data, - struct iommu_group *iommu_group, enum vfio_group_type type) -{ - return 0; -} - -static void vfio_noiommu_detach_group(void *iommu_data, - struct iommu_group *iommu_group) -{ -} - -static const struct vfio_iommu_driver_ops vfio_noiommu_ops = { - .name = "vfio-noiommu", - .owner = THIS_MODULE, - .open = vfio_noiommu_open, - .release = vfio_noiommu_release, - .ioctl = vfio_noiommu_ioctl, - .attach_group = vfio_noiommu_attach_group, - .detach_group = vfio_noiommu_detach_group, -}; - -/* - * Only noiommu containers can use vfio-noiommu and noiommu containers can only - * use vfio-noiommu. 
- */ -static inline bool vfio_iommu_driver_allowed(struct vfio_container *container, - const struct vfio_iommu_driver *driver) -{ - return container->noiommu == (driver->ops == &vfio_noiommu_ops); -} -#else -static inline bool vfio_iommu_driver_allowed(struct vfio_container *container, - const struct vfio_iommu_driver *driver) -{ - return true; -} -#endif /* CONFIG_VFIO_NOIOMMU */ - -/* - * IOMMU driver registration - */ -int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops) -{ - struct vfio_iommu_driver *driver, *tmp; - - if (WARN_ON(!ops->register_device != !ops->unregister_device)) - return -EINVAL; - - driver = kzalloc(sizeof(*driver), GFP_KERNEL); - if (!driver) - return -ENOMEM; - - driver->ops = ops; - - mutex_lock(&vfio.iommu_drivers_lock); - - /* Check for duplicates */ - list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) { - if (tmp->ops == ops) { - mutex_unlock(&vfio.iommu_drivers_lock); - kfree(driver); - return -EINVAL; - } - } - - list_add(&driver->vfio_next, &vfio.iommu_drivers_list); - - mutex_unlock(&vfio.iommu_drivers_lock); - - return 0; -} -EXPORT_SYMBOL_GPL(vfio_register_iommu_driver); - -void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops) -{ - struct vfio_iommu_driver *driver; - - mutex_lock(&vfio.iommu_drivers_lock); - list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) { - if (driver->ops == ops) { - list_del(&driver->vfio_next); - mutex_unlock(&vfio.iommu_drivers_lock); - kfree(driver); - return; - } - } - mutex_unlock(&vfio.iommu_drivers_lock); -} -EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver); - -static void vfio_group_get(struct vfio_group *group); - -/* - * Container objects - containers are created when /dev/vfio/vfio is - * opened, but their lifecycle extends until the last user is done, so - * it's freed via kref. Must support container/group/device being - * closed in any order. - */ -static void vfio_container_get(struct vfio_container *container) -{ - kref_get(&container->kref); -} - -static void vfio_container_release(struct kref *kref) -{ - struct vfio_container *container; - container = container_of(kref, struct vfio_container, kref); - - kfree(container); -} - -static void vfio_container_put(struct vfio_container *container) -{ - kref_put(&container->kref, vfio_container_release); -} - /* * Group objects - create, release, get, put, search */ @@ -310,9 +133,13 @@ __vfio_group_get_from_iommu(struct iommu_group *iommu_group) { struct vfio_group *group; + /* + * group->iommu_group from the vfio.group_list cannot be NULL + * under the vfio.group_lock. 
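The users refcount becomes drivers, but teardown keeps the classic refcount_dec_and_mutex_lock() idiom seen in the hunk below: the thread that drops the final reference also wins the lock needed to unlink the object, so no concurrent lookup can resurrect it. A generic sketch:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/slab.h>

static DEFINE_MUTEX(demo_lock);

struct demo_obj {
	refcount_t refs;
	struct list_head node;	/* on a demo_lock-protected list */
};

static void demo_put(struct demo_obj *obj)
{
	/* true only for the final put, and then demo_lock is held */
	if (!refcount_dec_and_mutex_lock(&obj->refs, &demo_lock))
		return;

	list_del(&obj->node);	/* lookups can no longer find us */
	mutex_unlock(&demo_lock);

	kfree(obj);
}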
+ */ list_for_each_entry(group, &vfio.group_list, vfio_next) { if (group->iommu_group == iommu_group) { - vfio_group_get(group); + refcount_inc(&group->drivers); return group; } } @@ -335,7 +162,8 @@ static void vfio_group_release(struct device *dev) struct vfio_group *group = container_of(dev, struct vfio_group, dev); mutex_destroy(&group->device_lock); - iommu_group_put(group->iommu_group); + mutex_destroy(&group->group_lock); + WARN_ON(group->iommu_group); ida_free(&vfio.group_ida, MINOR(group->dev.devt)); kfree(group); } @@ -363,8 +191,8 @@ static struct vfio_group *vfio_group_alloc(struct iommu_group *iommu_group, cdev_init(&group->cdev, &vfio_group_fops); group->cdev.owner = THIS_MODULE; - refcount_set(&group->users, 1); - init_rwsem(&group->group_rwsem); + refcount_set(&group->drivers, 1); + mutex_init(&group->group_lock); INIT_LIST_HEAD(&group->device_list); mutex_init(&group->device_lock); group->iommu_group = iommu_group; @@ -420,44 +248,64 @@ err_put: return ret; } -static void vfio_group_put(struct vfio_group *group) +static void vfio_device_remove_group(struct vfio_device *device) { - if (!refcount_dec_and_mutex_lock(&group->users, &vfio.group_lock)) + struct vfio_group *group = device->group; + struct iommu_group *iommu_group; + + if (group->type == VFIO_NO_IOMMU || group->type == VFIO_EMULATED_IOMMU) + iommu_group_remove_device(device->dev); + + /* Pairs with vfio_create_group() / vfio_group_get_from_iommu() */ + if (!refcount_dec_and_mutex_lock(&group->drivers, &vfio.group_lock)) return; + list_del(&group->vfio_next); /* + * We could concurrently probe another driver in the group that might + * race vfio_device_remove_group() with vfio_get_group(), so we have to + * ensure that the sysfs is all cleaned up under lock otherwise the + * cdev_device_add() will fail due to the name already existing. + */ + cdev_device_del(&group->cdev, &group->dev); + + mutex_lock(&group->group_lock); + /* * These data structures all have paired operations that can only be - * undone when the caller holds a live reference on the group. Since all - * pairs must be undone these WARN_ON's indicate some caller did not + * undone when the caller holds a live reference on the device. Since + * all pairs must be undone these WARN_ON's indicate some caller did not * properly hold the group reference. */ WARN_ON(!list_empty(&group->device_list)); - WARN_ON(group->container || group->container_users); WARN_ON(group->notifier.head); - list_del(&group->vfio_next); - cdev_device_del(&group->cdev, &group->dev); + /* + * Revoke all users of group->iommu_group. At this point we know there + * are no devices active because we are unplugging the last one. Setting + * iommu_group to NULL blocks all new users.
+ */ + if (group->container) + vfio_group_detach_container(group); + iommu_group = group->iommu_group; + group->iommu_group = NULL; + mutex_unlock(&group->group_lock); mutex_unlock(&vfio.group_lock); + iommu_group_put(iommu_group); put_device(&group->dev); } -static void vfio_group_get(struct vfio_group *group) -{ - refcount_inc(&group->users); -} - /* * Device objects - create, release, get, put, search */ /* Device reference always implies a group reference */ -static void vfio_device_put(struct vfio_device *device) +static void vfio_device_put_registration(struct vfio_device *device) { if (refcount_dec_and_test(&device->refcount)) complete(&device->comp); } -static bool vfio_device_try_get(struct vfio_device *device) +static bool vfio_device_try_get_registration(struct vfio_device *device) { return refcount_inc_not_zero(&device->refcount); } @@ -469,7 +317,8 @@ static struct vfio_device *vfio_group_get_device(struct vfio_group *group, mutex_lock(&group->device_lock); list_for_each_entry(device, &group->device_list, group_next) { - if (device->dev == dev && vfio_device_try_get(device)) { + if (device->dev == dev && + vfio_device_try_get_registration(device)) { mutex_unlock(&group->device_lock); return device; } @@ -481,20 +330,110 @@ static struct vfio_device *vfio_group_get_device(struct vfio_group *group, /* * VFIO driver API */ -void vfio_init_group_dev(struct vfio_device *device, struct device *dev, - const struct vfio_device_ops *ops) +/* Release helper called by vfio_put_device() */ +static void vfio_device_release(struct device *dev) +{ + struct vfio_device *device = + container_of(dev, struct vfio_device, device); + + vfio_release_device_set(device); + ida_free(&vfio.device_ida, device->index); + + /* + * kvfree() cannot be done here due to a life cycle mess in + * vfio-ccw. Before the ccw part is fixed all drivers are + * required to support @release and call vfio_free_device() + * from there. + */ + device->ops->release(device); +} + +/* + * Allocate and initialize vfio_device so it can be registered to vfio + * core. + * + * Drivers should use the wrapper vfio_alloc_device() for allocation. + * @size is the size of the structure to be allocated, including any + * private data used by the driver. + * + * Driver may provide an @init callback to cover device private data. + * + * Use vfio_put_device() to release the structure after success return. + */ +struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev, + const struct vfio_device_ops *ops) { + struct vfio_device *device; + int ret; + + if (WARN_ON(size < sizeof(struct vfio_device))) + return ERR_PTR(-EINVAL); + + device = kvzalloc(size, GFP_KERNEL); + if (!device) + return ERR_PTR(-ENOMEM); + + ret = vfio_init_device(device, dev, ops); + if (ret) + goto out_free; + return device; + +out_free: + kvfree(device); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(_vfio_alloc_device); + +/* + * Initialize a vfio_device so it can be registered to vfio core. + * + * Only vfio-ccw driver should call this interface. 
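With allocation in the core, driver-private setup and teardown move into the init/release callbacks; a driver with nothing extra to undo can point release straight at vfio_free_device(). Completing the hypothetical demo_ops referenced in the earlier probe sketch:

static int demo_init_dev(struct vfio_device *core_vdev)
{
	struct demo_vfio_device *ddev =
		container_of(core_vdev, struct demo_vfio_device, vdev);

	/* a non-zero return here fails vfio_alloc_device() */
	return demo_setup_state(ddev);	/* hypothetical */
}

static void demo_release_dev(struct vfio_device *core_vdev)
{
	/* no private teardown needed, so just free the structure */
	vfio_free_device(core_vdev);
}

static const struct vfio_device_ops demo_ops = {
	.name = "demo",
	.init = demo_init_dev,
	.release = demo_release_dev,
};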
+ */ +int vfio_init_device(struct vfio_device *device, struct device *dev, + const struct vfio_device_ops *ops) +{ + int ret; + + ret = ida_alloc_max(&vfio.device_ida, MINORMASK, GFP_KERNEL); + if (ret < 0) { + dev_dbg(dev, "Error allocating index\n"); + return ret; + } + + device->index = ret; init_completion(&device->comp); device->dev = dev; device->ops = ops; + + if (ops->init) { + ret = ops->init(device); + if (ret) + goto out_uninit; + } + + device_initialize(&device->device); + device->device.release = vfio_device_release; + device->device.class = vfio.device_class; + device->device.parent = device->dev; + return 0; + +out_uninit: + vfio_release_device_set(device); + ida_free(&vfio.device_ida, device->index); + return ret; } -EXPORT_SYMBOL_GPL(vfio_init_group_dev); +EXPORT_SYMBOL_GPL(vfio_init_device); -void vfio_uninit_group_dev(struct vfio_device *device) +/* + * The helper called by driver @release callback to free the device + * structure. Drivers which don't have private data to clean can + * simply use this helper as their @release. + */ +void vfio_free_device(struct vfio_device *device) { - vfio_release_device_set(device); + kvfree(device); } -EXPORT_SYMBOL_GPL(vfio_uninit_group_dev); +EXPORT_SYMBOL_GPL(vfio_free_device); static struct vfio_group *vfio_noiommu_group_alloc(struct device *dev, enum vfio_group_type type) @@ -535,8 +474,7 @@ static struct vfio_group *vfio_group_find_or_alloc(struct device *dev) struct vfio_group *group; iommu_group = iommu_group_get(dev); -#ifdef CONFIG_VFIO_NOIOMMU - if (!iommu_group && noiommu) { + if (!iommu_group && vfio_noiommu) { /* * With noiommu enabled, create an IOMMU group for devices that * don't already have one, implying no IOMMU hardware/driver @@ -550,7 +488,7 @@ static struct vfio_group *vfio_group_find_or_alloc(struct device *dev) } return group; } -#endif + if (!iommu_group) return ERR_PTR(-EINVAL); @@ -577,7 +515,12 @@ static int __vfio_register_dev(struct vfio_device *device, struct vfio_group *group) { struct vfio_device *existing_device; + int ret; + /* + * In all cases group is the output of one of the group allocation + * functions and we have group->drivers incremented for us. + */ if (IS_ERR(group)) return PTR_ERR(group); @@ -590,28 +533,39 @@ static int __vfio_register_dev(struct vfio_device *device, existing_device = vfio_group_get_device(group, device->dev); if (existing_device) { + /* + * group->iommu_group is non-NULL because we hold the drivers + * refcount.
+ */ dev_WARN(device->dev, "Device already exists on group %d\n", iommu_group_id(group->iommu_group)); - vfio_device_put(existing_device); - if (group->type == VFIO_NO_IOMMU || - group->type == VFIO_EMULATED_IOMMU) - iommu_group_remove_device(device->dev); - vfio_group_put(group); - return -EBUSY; + vfio_device_put_registration(existing_device); + ret = -EBUSY; + goto err_out; } /* Our reference on group is moved to the device */ device->group = group; + ret = dev_set_name(&device->device, "vfio%d", device->index); + if (ret) + goto err_out; + + ret = device_add(&device->device); + if (ret) + goto err_out; + /* Refcounting can't start until the driver calls register */ refcount_set(&device->refcount, 1); mutex_lock(&group->device_lock); list_add(&device->group_next, &group->device_list); - group->dev_counter++; mutex_unlock(&group->device_lock); return 0; +err_out: + vfio_device_remove_group(device); + return ret; } int vfio_register_group_dev(struct vfio_device *device) @@ -651,7 +605,7 @@ static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group, ret = !strcmp(dev_name(it->dev), buf); } - if (ret && vfio_device_try_get(it)) { + if (ret && vfio_device_try_get_registration(it)) { device = it; break; } @@ -671,7 +625,7 @@ void vfio_unregister_group_dev(struct vfio_device *device) bool interrupted = false; long rc; - vfio_device_put(device); + vfio_device_put_registration(device); rc = try_wait_for_completion(&device->comp); while (rc <= 0) { if (device->ops->request) @@ -696,356 +650,78 @@ void vfio_unregister_group_dev(struct vfio_device *device) mutex_lock(&group->device_lock); list_del(&device->group_next); - group->dev_counter--; mutex_unlock(&group->device_lock); - if (group->type == VFIO_NO_IOMMU || group->type == VFIO_EMULATED_IOMMU) - iommu_group_remove_device(device->dev); + /* Balances device_add in register path */ + device_del(&device->device); - /* Matches the get in vfio_register_group_dev() */ - vfio_group_put(group); + vfio_device_remove_group(device); } EXPORT_SYMBOL_GPL(vfio_unregister_group_dev); /* - * VFIO base fd, /dev/vfio/vfio - */ -static long vfio_ioctl_check_extension(struct vfio_container *container, - unsigned long arg) -{ - struct vfio_iommu_driver *driver; - long ret = 0; - - down_read(&container->group_lock); - - driver = container->iommu_driver; - - switch (arg) { - /* No base extensions yet */ - default: - /* - * If no driver is set, poll all registered drivers for - * extensions and return the first positive result. If - * a driver is already set, further queries will be passed - * only to that driver. 
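Even as this container code moves out of vfio_main.c, the UAPI it serves is unchanged: userspace opens /dev/vfio/vfio and probes backends with VFIO_CHECK_EXTENSION before committing to one. The long-standing sequence, for reference:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int open_type1_container(void)
{
	int fd = open("/dev/vfio/vfio", O_RDWR);

	if (fd < 0)
		return -1;

	/* sanity-check the ABI, then ask whether Type1 v2 is available */
	if (ioctl(fd, VFIO_GET_API_VERSION) != VFIO_API_VERSION ||
	    ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU) <= 0) {
		close(fd);
		return -1;
	}
	return fd;
}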
- */ - if (!driver) { - mutex_lock(&vfio.iommu_drivers_lock); - list_for_each_entry(driver, &vfio.iommu_drivers_list, - vfio_next) { - - if (!list_empty(&container->group_list) && - !vfio_iommu_driver_allowed(container, - driver)) - continue; - if (!try_module_get(driver->ops->owner)) - continue; - - ret = driver->ops->ioctl(NULL, - VFIO_CHECK_EXTENSION, - arg); - module_put(driver->ops->owner); - if (ret > 0) - break; - } - mutex_unlock(&vfio.iommu_drivers_lock); - } else - ret = driver->ops->ioctl(container->iommu_data, - VFIO_CHECK_EXTENSION, arg); - } - - up_read(&container->group_lock); - - return ret; -} - -/* hold write lock on container->group_lock */ -static int __vfio_container_attach_groups(struct vfio_container *container, - struct vfio_iommu_driver *driver, - void *data) -{ - struct vfio_group *group; - int ret = -ENODEV; - - list_for_each_entry(group, &container->group_list, container_next) { - ret = driver->ops->attach_group(data, group->iommu_group, - group->type); - if (ret) - goto unwind; - } - - return ret; - -unwind: - list_for_each_entry_continue_reverse(group, &container->group_list, - container_next) { - driver->ops->detach_group(data, group->iommu_group); - } - - return ret; -} - -static long vfio_ioctl_set_iommu(struct vfio_container *container, - unsigned long arg) -{ - struct vfio_iommu_driver *driver; - long ret = -ENODEV; - - down_write(&container->group_lock); - - /* - * The container is designed to be an unprivileged interface while - * the group can be assigned to specific users. Therefore, only by - * adding a group to a container does the user get the privilege of - * enabling the iommu, which may allocate finite resources. There - * is no unset_iommu, but by removing all the groups from a container, - * the container is deprivileged and returns to an unset state. - */ - if (list_empty(&container->group_list) || container->iommu_driver) { - up_write(&container->group_lock); - return -EINVAL; - } - - mutex_lock(&vfio.iommu_drivers_lock); - list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) { - void *data; - - if (!vfio_iommu_driver_allowed(container, driver)) - continue; - if (!try_module_get(driver->ops->owner)) - continue; - - /* - * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION, - * so test which iommu driver reported support for this - * extension and call open on them. We also pass them the - * magic, allowing a single driver to support multiple - * interfaces if they'd like. 
- */ - if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) { - module_put(driver->ops->owner); - continue; - } - - data = driver->ops->open(arg); - if (IS_ERR(data)) { - ret = PTR_ERR(data); - module_put(driver->ops->owner); - continue; - } - - ret = __vfio_container_attach_groups(container, driver, data); - if (ret) { - driver->ops->release(data); - module_put(driver->ops->owner); - continue; - } - - container->iommu_driver = driver; - container->iommu_data = data; - break; - } - - mutex_unlock(&vfio.iommu_drivers_lock); - up_write(&container->group_lock); - - return ret; -} - -static long vfio_fops_unl_ioctl(struct file *filep, - unsigned int cmd, unsigned long arg) -{ - struct vfio_container *container = filep->private_data; - struct vfio_iommu_driver *driver; - void *data; - long ret = -EINVAL; - - if (!container) - return ret; - - switch (cmd) { - case VFIO_GET_API_VERSION: - ret = VFIO_API_VERSION; - break; - case VFIO_CHECK_EXTENSION: - ret = vfio_ioctl_check_extension(container, arg); - break; - case VFIO_SET_IOMMU: - ret = vfio_ioctl_set_iommu(container, arg); - break; - default: - driver = container->iommu_driver; - data = container->iommu_data; - - if (driver) /* passthrough all unrecognized ioctls */ - ret = driver->ops->ioctl(data, cmd, arg); - } - - return ret; -} - -static int vfio_fops_open(struct inode *inode, struct file *filep) -{ - struct vfio_container *container; - - container = kzalloc(sizeof(*container), GFP_KERNEL); - if (!container) - return -ENOMEM; - - INIT_LIST_HEAD(&container->group_list); - init_rwsem(&container->group_lock); - kref_init(&container->kref); - - filep->private_data = container; - - return 0; -} - -static int vfio_fops_release(struct inode *inode, struct file *filep) -{ - struct vfio_container *container = filep->private_data; - struct vfio_iommu_driver *driver = container->iommu_driver; - - if (driver && driver->ops->notify) - driver->ops->notify(container->iommu_data, - VFIO_IOMMU_CONTAINER_CLOSE); - - filep->private_data = NULL; - - vfio_container_put(container); - - return 0; -} - -static const struct file_operations vfio_fops = { - .owner = THIS_MODULE, - .open = vfio_fops_open, - .release = vfio_fops_release, - .unlocked_ioctl = vfio_fops_unl_ioctl, - .compat_ioctl = compat_ptr_ioctl, -}; - -/* * VFIO Group fd, /dev/vfio/$GROUP */ -static void __vfio_group_unset_container(struct vfio_group *group) -{ - struct vfio_container *container = group->container; - struct vfio_iommu_driver *driver; - - lockdep_assert_held_write(&group->group_rwsem); - - down_write(&container->group_lock); - - driver = container->iommu_driver; - if (driver) - driver->ops->detach_group(container->iommu_data, - group->iommu_group); - - if (group->type == VFIO_IOMMU) - iommu_group_release_dma_owner(group->iommu_group); - - group->container = NULL; - group->container_users = 0; - list_del(&group->container_next); - - /* Detaching the last group deprivileges a container, remove iommu */ - if (driver && list_empty(&container->group_list)) { - driver->ops->release(container->iommu_data); - module_put(driver->ops->owner); - container->iommu_driver = NULL; - container->iommu_data = NULL; - } - - up_write(&container->group_lock); - - vfio_container_put(container); -} - /* * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or * if there was no container to unset. Since the ioctl is called on * the group, we know that still exists, therefore the only valid * transition here is 1->0. 
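 *
 * A minimal userspace sketch of the lifecycle this guards (editor's
 * illustration; fd setup and error handling omitted):
 *
 *   ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *   ...VFIO_SET_IOMMU, VFIO_IOMMU_MAP_DMA, device fds...
 *   ioctl(group, VFIO_GROUP_UNSET_CONTAINER);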
*/ -static int vfio_group_unset_container(struct vfio_group *group) +static int vfio_group_ioctl_unset_container(struct vfio_group *group) { - lockdep_assert_held_write(&group->group_rwsem); + int ret = 0; - if (!group->container) - return -EINVAL; - if (group->container_users != 1) - return -EBUSY; - __vfio_group_unset_container(group); - return 0; + mutex_lock(&group->group_lock); + if (!group->container) { + ret = -EINVAL; + goto out_unlock; + } + if (group->container_users != 1) { + ret = -EBUSY; + goto out_unlock; + } + vfio_group_detach_container(group); + +out_unlock: + mutex_unlock(&group->group_lock); + return ret; } -static int vfio_group_set_container(struct vfio_group *group, int container_fd) +static int vfio_group_ioctl_set_container(struct vfio_group *group, + int __user *arg) { - struct fd f; struct vfio_container *container; - struct vfio_iommu_driver *driver; - int ret = 0; - - lockdep_assert_held_write(&group->group_rwsem); - - if (group->container || WARN_ON(group->container_users)) - return -EINVAL; + struct fd f; + int ret; + int fd; - if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) - return -EPERM; + if (get_user(fd, arg)) + return -EFAULT; - f = fdget(container_fd); + f = fdget(fd); if (!f.file) return -EBADF; - /* Sanity check, is this really our fd? */ - if (f.file->f_op != &vfio_fops) { - fdput(f); - return -EINVAL; + mutex_lock(&group->group_lock); + if (group->container || WARN_ON(group->container_users)) { + ret = -EINVAL; + goto out_unlock; } - - container = f.file->private_data; - WARN_ON(!container); /* fget ensures we don't race vfio_release */ - - down_write(&container->group_lock); - - /* Real groups and fake groups cannot mix */ - if (!list_empty(&container->group_list) && - container->noiommu != (group->type == VFIO_NO_IOMMU)) { - ret = -EPERM; - goto unlock_out; - } - - if (group->type == VFIO_IOMMU) { - ret = iommu_group_claim_dma_owner(group->iommu_group, f.file); - if (ret) - goto unlock_out; + if (!group->iommu_group) { + ret = -ENODEV; + goto out_unlock; } - driver = container->iommu_driver; - if (driver) { - ret = driver->ops->attach_group(container->iommu_data, - group->iommu_group, - group->type); - if (ret) { - if (group->type == VFIO_IOMMU) - iommu_group_release_dma_owner( - group->iommu_group); - goto unlock_out; - } + container = vfio_container_from_file(f.file); + ret = -EINVAL; + if (container) { + ret = vfio_container_attach_group(container, group); + goto out_unlock; } - group->container = container; - group->container_users = 1; - container->noiommu = (group->type == VFIO_NO_IOMMU); - list_add(&group->container_next, &container->group_list); - - /* Get a reference on the container and mark a user within the group */ - vfio_container_get(container); - -unlock_out: - up_write(&container->group_lock); +out_unlock: + mutex_unlock(&group->group_lock); fdput(f); return ret; } @@ -1053,47 +729,19 @@ unlock_out: static const struct file_operations vfio_device_fops; /* true if the vfio_device has open_device() called but not close_device() */ -static bool vfio_assert_device_open(struct vfio_device *device) +bool vfio_assert_device_open(struct vfio_device *device) { return !WARN_ON_ONCE(!READ_ONCE(device->open_count)); } -static int vfio_device_assign_container(struct vfio_device *device) -{ - struct vfio_group *group = device->group; - - lockdep_assert_held_write(&group->group_rwsem); - - if (!group->container || !group->container->iommu_driver || - WARN_ON(!group->container_users)) - return -EINVAL; - - if (group->type == 
VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) - return -EPERM; - - get_file(group->opened_file); - group->container_users++; - return 0; -} - -static void vfio_device_unassign_container(struct vfio_device *device) -{ - down_write(&device->group->group_rwsem); - WARN_ON(device->group->container_users <= 1); - device->group->container_users--; - fput(device->group->opened_file); - up_write(&device->group->group_rwsem); -} - static struct file *vfio_device_open(struct vfio_device *device) { - struct vfio_iommu_driver *iommu_driver; struct file *filep; int ret; - down_write(&device->group->group_rwsem); + mutex_lock(&device->group->group_lock); ret = vfio_device_assign_container(device); - up_write(&device->group->group_rwsem); + mutex_unlock(&device->group->group_lock); if (ret) return ERR_PTR(ret); @@ -1110,7 +758,7 @@ static struct file *vfio_device_open(struct vfio_device *device) * lock. If the device driver will use it, it must obtain a * reference and release it during close_device. */ - down_read(&device->group->group_rwsem); + mutex_lock(&device->group->group_lock); device->kvm = device->group->kvm; if (device->ops->open_device) { @@ -1118,13 +766,8 @@ static struct file *vfio_device_open(struct vfio_device *device) if (ret) goto err_undo_count; } - - iommu_driver = device->group->container->iommu_driver; - if (iommu_driver && iommu_driver->ops->register_device) - iommu_driver->ops->register_device( - device->group->container->iommu_data, device); - - up_read(&device->group->group_rwsem); + vfio_device_container_register(device); + mutex_unlock(&device->group->group_lock); } mutex_unlock(&device->dev_set->lock); @@ -1157,17 +800,14 @@ static struct file *vfio_device_open(struct vfio_device *device) err_close_device: mutex_lock(&device->dev_set->lock); - down_read(&device->group->group_rwsem); + mutex_lock(&device->group->group_lock); if (device->open_count == 1 && device->ops->close_device) { device->ops->close_device(device); - iommu_driver = device->group->container->iommu_driver; - if (iommu_driver && iommu_driver->ops->unregister_device) - iommu_driver->ops->unregister_device( - device->group->container->iommu_data, device); + vfio_device_container_unregister(device); } err_undo_count: - up_read(&device->group->group_rwsem); + mutex_unlock(&device->group->group_lock); device->open_count--; if (device->open_count == 0 && device->kvm) device->kvm = NULL; @@ -1178,14 +818,21 @@ err_unassign_container: return ERR_PTR(ret); } -static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) +static int vfio_group_ioctl_get_device_fd(struct vfio_group *group, + char __user *arg) { struct vfio_device *device; struct file *filep; + char *buf; int fdno; int ret; + buf = strndup_user(arg, PAGE_SIZE); + if (IS_ERR(buf)) + return PTR_ERR(buf); + device = vfio_device_get_from_name(group, buf); + kfree(buf); if (IS_ERR(device)) return PTR_ERR(device); @@ -1207,81 +854,60 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) err_put_fdno: put_unused_fd(fdno); err_put_device: - vfio_device_put(device); + vfio_device_put_registration(device); return ret; } -static long vfio_group_fops_unl_ioctl(struct file *filep, - unsigned int cmd, unsigned long arg) +static int vfio_group_ioctl_get_status(struct vfio_group *group, + struct vfio_group_status __user *arg) { - struct vfio_group *group = filep->private_data; - long ret = -ENOTTY; + unsigned long minsz = offsetofend(struct vfio_group_status, flags); + struct vfio_group_status status; - switch (cmd) { - case 
VFIO_GROUP_GET_STATUS:
-	{
-		struct vfio_group_status status;
-		unsigned long minsz;
+	if (copy_from_user(&status, arg, minsz))
+		return -EFAULT;
 
-		minsz = offsetofend(struct vfio_group_status, flags);
+	if (status.argsz < minsz)
+		return -EINVAL;
 
-		if (copy_from_user(&status, (void __user *)arg, minsz))
-			return -EFAULT;
+	status.flags = 0;
 
-		if (status.argsz < minsz)
-			return -EINVAL;
+	mutex_lock(&group->group_lock);
+	if (!group->iommu_group) {
+		mutex_unlock(&group->group_lock);
+		return -ENODEV;
+	}
 
-		status.flags = 0;
+	if (group->container)
+		status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET |
+				VFIO_GROUP_FLAGS_VIABLE;
+	else if (!iommu_group_dma_owner_claimed(group->iommu_group))
+		status.flags |= VFIO_GROUP_FLAGS_VIABLE;
+	mutex_unlock(&group->group_lock);
 
-		down_read(&group->group_rwsem);
-		if (group->container)
-			status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET |
-					VFIO_GROUP_FLAGS_VIABLE;
-		else if (!iommu_group_dma_owner_claimed(group->iommu_group))
-			status.flags |= VFIO_GROUP_FLAGS_VIABLE;
-		up_read(&group->group_rwsem);
+	if (copy_to_user(arg, &status, minsz))
+		return -EFAULT;
+	return 0;
+}
 
-		if (copy_to_user((void __user *)arg, &status, minsz))
-			return -EFAULT;
+static long vfio_group_fops_unl_ioctl(struct file *filep,
+				      unsigned int cmd, unsigned long arg)
+{
+	struct vfio_group *group = filep->private_data;
+	void __user *uarg = (void __user *)arg;
 
-		ret = 0;
-		break;
-	}
+	switch (cmd) {
+	case VFIO_GROUP_GET_DEVICE_FD:
+		return vfio_group_ioctl_get_device_fd(group, uarg);
+	case VFIO_GROUP_GET_STATUS:
+		return vfio_group_ioctl_get_status(group, uarg);
 	case VFIO_GROUP_SET_CONTAINER:
-	{
-		int fd;
-
-		if (get_user(fd, (int __user *)arg))
-			return -EFAULT;
-
-		if (fd < 0)
-			return -EINVAL;
-
-		down_write(&group->group_rwsem);
-		ret = vfio_group_set_container(group, fd);
-		up_write(&group->group_rwsem);
-		break;
-	}
+		return vfio_group_ioctl_set_container(group, uarg);
 	case VFIO_GROUP_UNSET_CONTAINER:
-		down_write(&group->group_rwsem);
-		ret = vfio_group_unset_container(group);
-		up_write(&group->group_rwsem);
-		break;
-	case VFIO_GROUP_GET_DEVICE_FD:
-	{
-		char *buf;
-
-		buf = strndup_user((const char __user *)arg, PAGE_SIZE);
-		if (IS_ERR(buf))
-			return PTR_ERR(buf);
-
-		ret = vfio_group_get_device_fd(group, buf);
-		kfree(buf);
-		break;
-	}
+		return vfio_group_ioctl_unset_container(group);
+	default:
+		return -ENOTTY;
 	}
-
-	return ret;
 }
 
 static int vfio_group_fops_open(struct inode *inode, struct file *filep)
@@ -1290,17 +916,20 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep)
 		container_of(inode->i_cdev, struct vfio_group, cdev);
 	int ret;
 
-	down_write(&group->group_rwsem);
+	mutex_lock(&group->group_lock);
 
-	/* users can be zero if this races with vfio_group_put() */
-	if (!refcount_inc_not_zero(&group->users)) {
+	/*
+	 * drivers can be zero if this races with vfio_device_remove_group();
+	 * it will be stable at 0 under the group_lock
+	 */
+	if (refcount_read(&group->drivers) == 0) {
 		ret = -ENODEV;
-		goto err_unlock;
+		goto out_unlock;
 	}
 
 	if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) {
 		ret = -EPERM;
-		goto err_put;
+		goto out_unlock;
 	}
 
 	/*
@@ -1308,17 +937,13 @@
 	 */
 	if (group->opened_file) {
 		ret = -EBUSY;
-		goto err_put;
+		goto out_unlock;
 	}
 	group->opened_file = filep;
 	filep->private_data = group;
-
-	up_write(&group->group_rwsem);
-	return 0;
-err_put:
-	vfio_group_put(group);
-err_unlock:
-	up_write(&group->group_rwsem);
+	ret = 0;
+out_unlock:
+	
mutex_unlock(&group->group_lock); return ret; } @@ -1328,21 +953,16 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep) filep->private_data = NULL; - down_write(&group->group_rwsem); + mutex_lock(&group->group_lock); /* * Device FDs hold a group file reference, therefore the group release * is only called when there are no open devices. */ WARN_ON(group->notifier.head); - if (group->container) { - WARN_ON(group->container_users != 1); - __vfio_group_unset_container(group); - } + if (group->container) + vfio_group_detach_container(group); group->opened_file = NULL; - up_write(&group->group_rwsem); - - vfio_group_put(group); - + mutex_unlock(&group->group_lock); return 0; } @@ -1355,24 +975,53 @@ static const struct file_operations vfio_group_fops = { }; /* + * Wrapper around pm_runtime_resume_and_get(). + * Return error code on failure or 0 on success. + */ +static inline int vfio_device_pm_runtime_get(struct vfio_device *device) +{ + struct device *dev = device->dev; + + if (dev->driver && dev->driver->pm) { + int ret; + + ret = pm_runtime_resume_and_get(dev); + if (ret) { + dev_info_ratelimited(dev, + "vfio: runtime resume failed %d\n", ret); + return -EIO; + } + } + + return 0; +} + +/* + * Wrapper around pm_runtime_put(). + */ +static inline void vfio_device_pm_runtime_put(struct vfio_device *device) +{ + struct device *dev = device->dev; + + if (dev->driver && dev->driver->pm) + pm_runtime_put(dev); +} + +/* * VFIO Device fd */ static int vfio_device_fops_release(struct inode *inode, struct file *filep) { struct vfio_device *device = filep->private_data; - struct vfio_iommu_driver *iommu_driver; mutex_lock(&device->dev_set->lock); vfio_assert_device_open(device); - down_read(&device->group->group_rwsem); + mutex_lock(&device->group->group_lock); if (device->open_count == 1 && device->ops->close_device) device->ops->close_device(device); - iommu_driver = device->group->container->iommu_driver; - if (iommu_driver && iommu_driver->ops->unregister_device) - iommu_driver->ops->unregister_device( - device->group->container->iommu_data, device); - up_read(&device->group->group_rwsem); + vfio_device_container_unregister(device); + mutex_unlock(&device->group->group_lock); device->open_count--; if (device->open_count == 0) device->kvm = NULL; @@ -1382,7 +1031,7 @@ static int vfio_device_fops_release(struct inode *inode, struct file *filep) vfio_device_unassign_container(device); - vfio_device_put(device); + vfio_device_put_registration(device); return 0; } @@ -1628,6 +1277,167 @@ static int vfio_ioctl_device_feature_migration(struct vfio_device *device, return 0; } +/* Ranges should fit into a single kernel page */ +#define LOG_MAX_RANGES \ + (PAGE_SIZE / sizeof(struct vfio_device_feature_dma_logging_range)) + +static int +vfio_ioctl_device_feature_logging_start(struct vfio_device *device, + u32 flags, void __user *arg, + size_t argsz) +{ + size_t minsz = + offsetofend(struct vfio_device_feature_dma_logging_control, + ranges); + struct vfio_device_feature_dma_logging_range __user *ranges; + struct vfio_device_feature_dma_logging_control control; + struct vfio_device_feature_dma_logging_range range; + struct rb_root_cached root = RB_ROOT_CACHED; + struct interval_tree_node *nodes; + u64 iova_end; + u32 nnodes; + int i, ret; + + if (!device->log_ops) + return -ENOTTY; + + ret = vfio_check_feature(flags, argsz, + VFIO_DEVICE_FEATURE_SET, + sizeof(control)); + if (ret != 1) + return ret; + + if (copy_from_user(&control, arg, minsz)) + return -EFAULT; + + nnodes = 
control.num_ranges; + if (!nnodes) + return -EINVAL; + + if (nnodes > LOG_MAX_RANGES) + return -E2BIG; + + ranges = u64_to_user_ptr(control.ranges); + nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node), + GFP_KERNEL); + if (!nodes) + return -ENOMEM; + + for (i = 0; i < nnodes; i++) { + if (copy_from_user(&range, &ranges[i], sizeof(range))) { + ret = -EFAULT; + goto end; + } + if (!IS_ALIGNED(range.iova, control.page_size) || + !IS_ALIGNED(range.length, control.page_size)) { + ret = -EINVAL; + goto end; + } + + if (check_add_overflow(range.iova, range.length, &iova_end) || + iova_end > ULONG_MAX) { + ret = -EOVERFLOW; + goto end; + } + + nodes[i].start = range.iova; + nodes[i].last = range.iova + range.length - 1; + if (interval_tree_iter_first(&root, nodes[i].start, + nodes[i].last)) { + /* Range overlapping */ + ret = -EINVAL; + goto end; + } + interval_tree_insert(nodes + i, &root); + } + + ret = device->log_ops->log_start(device, &root, nnodes, + &control.page_size); + if (ret) + goto end; + + if (copy_to_user(arg, &control, sizeof(control))) { + ret = -EFAULT; + device->log_ops->log_stop(device); + } + +end: + kfree(nodes); + return ret; +} + +static int +vfio_ioctl_device_feature_logging_stop(struct vfio_device *device, + u32 flags, void __user *arg, + size_t argsz) +{ + int ret; + + if (!device->log_ops) + return -ENOTTY; + + ret = vfio_check_feature(flags, argsz, + VFIO_DEVICE_FEATURE_SET, 0); + if (ret != 1) + return ret; + + return device->log_ops->log_stop(device); +} + +static int vfio_device_log_read_and_clear(struct iova_bitmap *iter, + unsigned long iova, size_t length, + void *opaque) +{ + struct vfio_device *device = opaque; + + return device->log_ops->log_read_and_clear(device, iova, length, iter); +} + +static int +vfio_ioctl_device_feature_logging_report(struct vfio_device *device, + u32 flags, void __user *arg, + size_t argsz) +{ + size_t minsz = + offsetofend(struct vfio_device_feature_dma_logging_report, + bitmap); + struct vfio_device_feature_dma_logging_report report; + struct iova_bitmap *iter; + u64 iova_end; + int ret; + + if (!device->log_ops) + return -ENOTTY; + + ret = vfio_check_feature(flags, argsz, + VFIO_DEVICE_FEATURE_GET, + sizeof(report)); + if (ret != 1) + return ret; + + if (copy_from_user(&report, arg, minsz)) + return -EFAULT; + + if (report.page_size < SZ_4K || !is_power_of_2(report.page_size)) + return -EINVAL; + + if (check_add_overflow(report.iova, report.length, &iova_end) || + iova_end > ULONG_MAX) + return -EOVERFLOW; + + iter = iova_bitmap_alloc(report.iova, report.length, + report.page_size, + u64_to_user_ptr(report.bitmap)); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + ret = iova_bitmap_for_each(iter, device, + vfio_device_log_read_and_clear); + + iova_bitmap_free(iter); + return ret; +} + static int vfio_ioctl_device_feature(struct vfio_device *device, struct vfio_device_feature __user *arg) { @@ -1661,6 +1471,18 @@ static int vfio_ioctl_device_feature(struct vfio_device *device, return vfio_ioctl_device_feature_mig_device_state( device, feature.flags, arg->data, feature.argsz - minsz); + case VFIO_DEVICE_FEATURE_DMA_LOGGING_START: + return vfio_ioctl_device_feature_logging_start( + device, feature.flags, arg->data, + feature.argsz - minsz); + case VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP: + return vfio_ioctl_device_feature_logging_stop( + device, feature.flags, arg->data, + feature.argsz - minsz); + case VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT: + return vfio_ioctl_device_feature_logging_report( + device, feature.flags, 
arg->data,
+			feature.argsz - minsz);
 	default:
 		if (unlikely(!device->ops->device_feature))
 			return -EINVAL;
@@ -1674,15 +1496,27 @@ static long vfio_device_fops_unl_ioctl(struct file *filep,
 				       unsigned int cmd, unsigned long arg)
 {
 	struct vfio_device *device = filep->private_data;
+	int ret;
+
+	ret = vfio_device_pm_runtime_get(device);
+	if (ret)
+		return ret;
 
 	switch (cmd) {
 	case VFIO_DEVICE_FEATURE:
-		return vfio_ioctl_device_feature(device, (void __user *)arg);
+		ret = vfio_ioctl_device_feature(device, (void __user *)arg);
+		break;
+
 	default:
 		if (unlikely(!device->ops->ioctl))
-			return -EINVAL;
-		return device->ops->ioctl(device, cmd, arg);
+			ret = -EINVAL;
+		else
+			ret = device->ops->ioctl(device, cmd, arg);
+		break;
 	}
+
+	vfio_device_pm_runtime_put(device);
+	return ret;
 }
 
 static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
@@ -1732,19 +1566,42 @@ static const struct file_operations vfio_device_fops = {
  * vfio_file_iommu_group - Return the struct iommu_group for the vfio group file
  * @file: VFIO group file
  *
- * The returned iommu_group is valid as long as a ref is held on the file.
+ * The returned iommu_group is valid as long as a ref is held on the file. This
+ * returns a reference on the group. This function is deprecated; only the SPAPR
+ * path in kvm should call it.
  */
 struct iommu_group *vfio_file_iommu_group(struct file *file)
 {
 	struct vfio_group *group = file->private_data;
+	struct iommu_group *iommu_group = NULL;
 
-	if (file->f_op != &vfio_group_fops)
+	if (!IS_ENABLED(CONFIG_SPAPR_TCE_IOMMU))
 		return NULL;
-	return group->iommu_group;
+
+	if (!vfio_file_is_group(file))
+		return NULL;
+
+	mutex_lock(&group->group_lock);
+	if (group->iommu_group) {
+		iommu_group = group->iommu_group;
+		iommu_group_ref_get(iommu_group);
+	}
+	mutex_unlock(&group->group_lock);
+	return iommu_group;
 }
 EXPORT_SYMBOL_GPL(vfio_file_iommu_group);
 
 /**
+ * vfio_file_is_group - True if the file is usable with VFIO APIs
+ * @file: VFIO group file
+ */
+bool vfio_file_is_group(struct file *file)
+{
+	return file->f_op == &vfio_group_fops;
+}
+EXPORT_SYMBOL_GPL(vfio_file_is_group);
+
+/**
  * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file
  * is always CPU cache coherent
  * @file: VFIO group file
@@ -1758,13 +1615,13 @@ bool vfio_file_enforced_coherent(struct file *file)
 {
 	struct vfio_group *group = file->private_data;
 	bool ret;
 
-	if (file->f_op != &vfio_group_fops)
+	if (!vfio_file_is_group(file))
 		return true;
 
-	down_read(&group->group_rwsem);
+	mutex_lock(&group->group_lock);
 	if (group->container) {
-		ret = vfio_ioctl_check_extension(group->container,
-						 VFIO_DMA_CC_IOMMU);
+		ret = vfio_container_ioctl_check_extension(group->container,
+							   VFIO_DMA_CC_IOMMU);
 	} else {
 		/*
 		 * Since the coherency state is determined only once a container
@@ -1773,7 +1630,7 @@ bool vfio_file_enforced_coherent(struct file *file)
 		 */
 		ret = true;
 	}
-	up_read(&group->group_rwsem);
+	mutex_unlock(&group->group_lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);
@@ -1790,12 +1647,12 @@ void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
 {
 	struct vfio_group *group = file->private_data;
 
-	if (file->f_op != &vfio_group_fops)
+	if (!vfio_file_is_group(file))
 		return;
 
-	down_write(&group->group_rwsem);
+	mutex_lock(&group->group_lock);
 	group->kvm = kvm;
-	up_write(&group->group_rwsem);
+	mutex_unlock(&group->group_lock);
 }
 EXPORT_SYMBOL_GPL(vfio_file_set_kvm);
 
@@ -1810,7 +1667,7 @@ bool vfio_file_has_dev(struct file *file, struct vfio_device *device)
 {
 	struct vfio_group 
*group = file->private_data; - if (file->f_op != &vfio_group_fops) + if (!vfio_file_is_group(file)) return false; return group == device->group; @@ -1937,114 +1794,6 @@ int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs, EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare); /* - * Pin contiguous user pages and return their associated host pages for local - * domain only. - * @device [in] : device - * @iova [in] : starting IOVA of user pages to be pinned. - * @npage [in] : count of pages to be pinned. This count should not - * be greater than VFIO_PIN_PAGES_MAX_ENTRIES. - * @prot [in] : protection flags - * @pages[out] : array of host pages - * Return error or number of pages pinned. - */ -int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova, - int npage, int prot, struct page **pages) -{ - struct vfio_container *container; - struct vfio_group *group = device->group; - struct vfio_iommu_driver *driver; - int ret; - - if (!pages || !npage || !vfio_assert_device_open(device)) - return -EINVAL; - - if (npage > VFIO_PIN_PAGES_MAX_ENTRIES) - return -E2BIG; - - if (group->dev_counter > 1) - return -EINVAL; - - /* group->container cannot change while a vfio device is open */ - container = group->container; - driver = container->iommu_driver; - if (likely(driver && driver->ops->pin_pages)) - ret = driver->ops->pin_pages(container->iommu_data, - group->iommu_group, iova, - npage, prot, pages); - else - ret = -ENOTTY; - - return ret; -} -EXPORT_SYMBOL(vfio_pin_pages); - -/* - * Unpin contiguous host pages for local domain only. - * @device [in] : device - * @iova [in] : starting address of user pages to be unpinned. - * @npage [in] : count of pages to be unpinned. This count should not - * be greater than VFIO_PIN_PAGES_MAX_ENTRIES. - */ -void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage) -{ - struct vfio_container *container; - struct vfio_iommu_driver *driver; - - if (WARN_ON(npage <= 0 || npage > VFIO_PIN_PAGES_MAX_ENTRIES)) - return; - - if (WARN_ON(!vfio_assert_device_open(device))) - return; - - /* group->container cannot change while a vfio device is open */ - container = device->group->container; - driver = container->iommu_driver; - - driver->ops->unpin_pages(container->iommu_data, iova, npage); -} -EXPORT_SYMBOL(vfio_unpin_pages); - -/* - * This interface allows the CPUs to perform some sort of virtual DMA on - * behalf of the device. - * - * CPUs read/write from/into a range of IOVAs pointing to user space memory - * into/from a kernel buffer. - * - * As the read/write of user space memory is conducted via the CPUs and is - * not a real device DMA, it is not necessary to pin the user space memory. - * - * @device [in] : VFIO device - * @iova [in] : base IOVA of a user space buffer - * @data [in] : pointer to kernel buffer - * @len [in] : kernel buffer length - * @write : indicate read or write - * Return error code on failure or 0 on success. 
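 *
 * A hedged kernel-side sketch (editor's illustration, not part of the
 * patch; "vdev" and "iova" stand for an open vfio_device and a
 * previously mapped IOVA):
 *
 *   u32 val;
 *   int err = vfio_dma_rw(vdev, iova, &val, sizeof(val), false);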
- */ -int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data, - size_t len, bool write) -{ - struct vfio_container *container; - struct vfio_iommu_driver *driver; - int ret = 0; - - if (!data || len <= 0 || !vfio_assert_device_open(device)) - return -EINVAL; - - /* group->container cannot change while a vfio device is open */ - container = device->group->container; - driver = container->iommu_driver; - - if (likely(driver && driver->ops->dma_rw)) - ret = driver->ops->dma_rw(container->iommu_data, - iova, data, len, write); - else - ret = -ENOTTY; - return ret; -} -EXPORT_SYMBOL(vfio_dma_rw); - -/* * Module/class support */ static char *vfio_devnode(struct device *dev, umode_t *mode) @@ -2052,59 +1801,50 @@ static char *vfio_devnode(struct device *dev, umode_t *mode) return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev)); } -static struct miscdevice vfio_dev = { - .minor = VFIO_MINOR, - .name = "vfio", - .fops = &vfio_fops, - .nodename = "vfio/vfio", - .mode = S_IRUGO | S_IWUGO, -}; - static int __init vfio_init(void) { int ret; ida_init(&vfio.group_ida); + ida_init(&vfio.device_ida); mutex_init(&vfio.group_lock); - mutex_init(&vfio.iommu_drivers_lock); INIT_LIST_HEAD(&vfio.group_list); - INIT_LIST_HEAD(&vfio.iommu_drivers_list); - ret = misc_register(&vfio_dev); - if (ret) { - pr_err("vfio: misc device register failed\n"); + ret = vfio_container_init(); + if (ret) return ret; - } /* /dev/vfio/$GROUP */ vfio.class = class_create(THIS_MODULE, "vfio"); if (IS_ERR(vfio.class)) { ret = PTR_ERR(vfio.class); - goto err_class; + goto err_group_class; } vfio.class->devnode = vfio_devnode; + /* /sys/class/vfio-dev/vfioX */ + vfio.device_class = class_create(THIS_MODULE, "vfio-dev"); + if (IS_ERR(vfio.device_class)) { + ret = PTR_ERR(vfio.device_class); + goto err_dev_class; + } + ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK + 1, "vfio"); if (ret) goto err_alloc_chrdev; -#ifdef CONFIG_VFIO_NOIOMMU - ret = vfio_register_iommu_driver(&vfio_noiommu_ops); -#endif - if (ret) - goto err_driver_register; - pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); return 0; -err_driver_register: - unregister_chrdev_region(vfio.group_devt, MINORMASK + 1); err_alloc_chrdev: + class_destroy(vfio.device_class); + vfio.device_class = NULL; +err_dev_class: class_destroy(vfio.class); vfio.class = NULL; -err_class: - misc_deregister(&vfio_dev); +err_group_class: + vfio_container_cleanup(); return ret; } @@ -2112,14 +1852,14 @@ static void __exit vfio_cleanup(void) { WARN_ON(!list_empty(&vfio.group_list)); -#ifdef CONFIG_VFIO_NOIOMMU - vfio_unregister_iommu_driver(&vfio_noiommu_ops); -#endif + ida_destroy(&vfio.device_ida); ida_destroy(&vfio.group_ida); unregister_chrdev_region(vfio.group_devt, MINORMASK + 1); + class_destroy(vfio.device_class); + vfio.device_class = NULL; class_destroy(vfio.class); + vfio_container_cleanup(); vfio.class = NULL; - misc_deregister(&vfio_dev); xa_destroy(&vfio_device_set_xa); } diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 4df77eeb4d16..a6c86f916dbd 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -409,8 +409,8 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs, err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc); if (!err) return 0; - /* Is there an interrupt pin? If not give up. */ - if (!(to_vp_device(vdev)->pci_dev->pin)) + /* Is there an interrupt? If not give up. 
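+	 * (checking ->irq rather than ->pin also covers setups where an
+	 * interrupt is assigned although no INTx pin is reported, which is
+	 * presumably the point of this change)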
*/ + if (!(to_vp_device(vdev)->pci_dev->irq)) return err; /* Finally fall back to regular interrupts. */ return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx); diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 76c3500b21c7..b64bc49c7f30 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -1089,6 +1089,17 @@ config EBC_C384_WDT WinSystems EBC-C384 motherboard. The timeout may be configured via the timeout module parameter. +config EXAR_WDT + tristate "Exar Watchdog Timer" + depends on X86 + select WATCHDOG_CORE + help + Enables watchdog timer support for the watchdog timer present + in some Exar/MaxLinear UART chips like the XR28V38x. + + To compile this driver as a module, choose M here: the + module will be called exar_wdt. + config F71808E_WDT tristate "Fintek F718xx, F818xx Super I/O Watchdog" depends on X86 @@ -1315,7 +1326,7 @@ config IT87_WDT config HP_WATCHDOG tristate "HP ProLiant iLO2+ Hardware Watchdog Timer" select WATCHDOG_CORE - depends on X86 && PCI + depends on (ARM64 || X86) && PCI help A software monitoring watchdog and NMI handling driver. This driver will detect lockups and provide a stack trace. This is a driver that @@ -1325,7 +1336,7 @@ config HP_WATCHDOG config HPWDT_NMI_DECODING bool "NMI support for the HP ProLiant iLO2+ Hardware Watchdog Timer" - depends on HP_WATCHDOG + depends on X86 && HP_WATCHDOG default y help Enables the NMI handler for the watchdog pretimeout NMI and the iLO diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index cdeb119e6e61..d41e5f830ae7 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -105,6 +105,7 @@ obj-$(CONFIG_ADVANTECH_WDT) += advantechwdt.o obj-$(CONFIG_ALIM1535_WDT) += alim1535_wdt.o obj-$(CONFIG_ALIM7101_WDT) += alim7101_wdt.o obj-$(CONFIG_EBC_C384_WDT) += ebc-c384_wdt.o +obj-$(CONFIG_EXAR_WDT) += exar_wdt.o obj-$(CONFIG_F71808E_WDT) += f71808e_wdt.o obj-$(CONFIG_SP5100_TCO) += sp5100_tco.o obj-$(CONFIG_GEODE_WDT) += geodewdt.o diff --git a/drivers/watchdog/armada_37xx_wdt.c b/drivers/watchdog/armada_37xx_wdt.c index 854b1cc723cb..ac9fed1ef681 100644 --- a/drivers/watchdog/armada_37xx_wdt.c +++ b/drivers/watchdog/armada_37xx_wdt.c @@ -179,6 +179,8 @@ static int armada_37xx_wdt_set_timeout(struct watchdog_device *wdt, dev->timeout = (u64)dev->clk_rate * timeout; do_div(dev->timeout, CNTR_CTRL_PRESCALE_MIN); + set_counter_value(dev, CNTR_ID_WDOG, dev->timeout); + return 0; } diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c index bd06622813eb..0cff2adfbfc9 100644 --- a/drivers/watchdog/aspeed_wdt.c +++ b/drivers/watchdog/aspeed_wdt.c @@ -332,18 +332,18 @@ static int aspeed_wdt_probe(struct platform_device *pdev) u32 reg = readl(wdt->base + WDT_RESET_WIDTH); reg &= config->ext_pulse_width_mask; - if (of_property_read_bool(np, "aspeed,ext-push-pull")) - reg |= WDT_PUSH_PULL_MAGIC; + if (of_property_read_bool(np, "aspeed,ext-active-high")) + reg |= WDT_ACTIVE_HIGH_MAGIC; else - reg |= WDT_OPEN_DRAIN_MAGIC; + reg |= WDT_ACTIVE_LOW_MAGIC; writel(reg, wdt->base + WDT_RESET_WIDTH); reg &= config->ext_pulse_width_mask; - if (of_property_read_bool(np, "aspeed,ext-active-high")) - reg |= WDT_ACTIVE_HIGH_MAGIC; + if (of_property_read_bool(np, "aspeed,ext-push-pull")) + reg |= WDT_PUSH_PULL_MAGIC; else - reg |= WDT_ACTIVE_LOW_MAGIC; + reg |= WDT_OPEN_DRAIN_MAGIC; writel(reg, wdt->base + WDT_RESET_WIDTH); } diff --git a/drivers/watchdog/bd9576_wdt.c b/drivers/watchdog/bd9576_wdt.c index 0b6999f3b6e8..4a20e07fbb69 100644 --- 
a/drivers/watchdog/bd9576_wdt.c +++ b/drivers/watchdog/bd9576_wdt.c @@ -9,8 +9,8 @@ #include <linux/gpio/consumer.h> #include <linux/mfd/rohm-bd957x.h> #include <linux/module.h> -#include <linux/of.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <linux/regmap.h> #include <linux/watchdog.h> @@ -202,10 +202,10 @@ static int bd957x_set_wdt_mode(struct bd9576_wdt_priv *priv, int hw_margin, static int bd9576_wdt_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *np = dev->parent->of_node; struct bd9576_wdt_priv *priv; u32 hw_margin[2]; u32 hw_margin_max = BD957X_WDT_DEFAULT_MARGIN, hw_margin_min = 0; + int count; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -221,40 +221,51 @@ static int bd9576_wdt_probe(struct platform_device *pdev) return -ENODEV; } - priv->gpiod_en = devm_gpiod_get_from_of_node(dev, dev->parent->of_node, - "rohm,watchdog-enable-gpios", - 0, GPIOD_OUT_LOW, - "watchdog-enable"); + priv->gpiod_en = devm_fwnode_gpiod_get(dev, dev_fwnode(dev->parent), + "rohm,watchdog-enable", + GPIOD_OUT_LOW, + "watchdog-enable"); if (IS_ERR(priv->gpiod_en)) return dev_err_probe(dev, PTR_ERR(priv->gpiod_en), "getting watchdog-enable GPIO failed\n"); - priv->gpiod_ping = devm_gpiod_get_from_of_node(dev, dev->parent->of_node, - "rohm,watchdog-ping-gpios", - 0, GPIOD_OUT_LOW, - "watchdog-ping"); + priv->gpiod_ping = devm_fwnode_gpiod_get(dev, dev_fwnode(dev->parent), + "rohm,watchdog-ping", + GPIOD_OUT_LOW, + "watchdog-ping"); if (IS_ERR(priv->gpiod_ping)) return dev_err_probe(dev, PTR_ERR(priv->gpiod_ping), "getting watchdog-ping GPIO failed\n"); - ret = of_property_read_variable_u32_array(np, "rohm,hw-timeout-ms", - &hw_margin[0], 1, 2); - if (ret < 0 && ret != -EINVAL) - return ret; + count = device_property_count_u32(dev->parent, "rohm,hw-timeout-ms"); + if (count < 0 && count != -EINVAL) + return count; + + if (count > 0) { + if (count > ARRAY_SIZE(hw_margin)) + return -EINVAL; - if (ret == 1) - hw_margin_max = hw_margin[0]; + ret = device_property_read_u32_array(dev->parent, + "rohm,hw-timeout-ms", + hw_margin, count); + if (ret < 0) + return ret; - if (ret == 2) { - hw_margin_max = hw_margin[1]; - hw_margin_min = hw_margin[0]; + if (count == 1) + hw_margin_max = hw_margin[0]; + + if (count == 2) { + hw_margin_max = hw_margin[1]; + hw_margin_min = hw_margin[0]; + } } ret = bd957x_set_wdt_mode(priv, hw_margin_max, hw_margin_min); if (ret) return ret; - priv->always_running = of_property_read_bool(np, "always-running"); + priv->always_running = device_property_read_bool(dev->parent, + "always-running"); watchdog_set_drvdata(&priv->wdd, priv); diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c index ce682942662c..e26609ad4c17 100644 --- a/drivers/watchdog/eurotechwdt.c +++ b/drivers/watchdog/eurotechwdt.c @@ -192,7 +192,7 @@ static void eurwdt_ping(void) * @ppos: pointer to the position to write. No seeks allowed * * A write to a watchdog device is defined as a keepalive signal. Any - * write of data will do, as we we don't define content meaning. + * write of data will do, as we don't define content meaning. 
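+ * (e.g. a plain "echo 1 > /dev/watchdog" from a shell counts as a
+ * keepalive)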
 */
 
 static ssize_t eurwdt_write(struct file *file, const char __user *buf,
diff --git a/drivers/watchdog/exar_wdt.c b/drivers/watchdog/exar_wdt.c
new file mode 100644
index 000000000000..35058d8b21bc
--- /dev/null
+++ b/drivers/watchdog/exar_wdt.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * exar_wdt.c - Driver for the watchdog present in some
+ *              Exar/MaxLinear UART chips like the XR28V38x.
+ *
+ * (c) Copyright 2022 D. Müller <[email protected]>.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/watchdog.h>
+
+#define DRV_NAME	"exar_wdt"
+
+static const unsigned short sio_config_ports[] = { 0x2e, 0x4e };
+static const unsigned char sio_enter_keys[] = { 0x67, 0x77, 0x87, 0xA0 };
+#define EXAR_EXIT_KEY	0xAA
+
+#define EXAR_LDN	0x07
+#define EXAR_DID	0x20
+#define EXAR_VID	0x23
+#define EXAR_WDT	0x26
+#define EXAR_ACT	0x30
+#define EXAR_RTBASE	0x60
+
+#define EXAR_WDT_LDEV	0x08
+
+#define EXAR_VEN_ID	0x13A8
+#define EXAR_DEV_382	0x0382
+#define EXAR_DEV_384	0x0384
+
+/* WDT runtime registers */
+#define WDT_CTRL	0x00
+#define WDT_VAL		0x01
+
+#define WDT_UNITS_10MS	0x0	/* the 10 millisecond unit of the HW is not used */
+#define WDT_UNITS_SEC	0x2
+#define WDT_UNITS_MIN	0x4
+
+/* default WDT control for WDTOUT signal active / rearm by read */
+#define EXAR_WDT_DEF_CONF	0
+
+struct wdt_pdev_node {
+	struct list_head list;
+	struct platform_device *pdev;
+	const char name[16];
+};
+
+struct wdt_priv {
+	/* the lock for WDT io operations */
+	spinlock_t io_lock;
+	struct resource wdt_res;
+	struct watchdog_device wdt_dev;
+	unsigned short did;
+	unsigned short config_port;
+	unsigned char enter_key;
+	unsigned char unit;
+	unsigned char timeout;
+};
+
+#define WATCHDOG_TIMEOUT	60
+
+static int timeout = WATCHDOG_TIMEOUT;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout,
+		 "Watchdog timeout in seconds. 
1<=timeout<=15300, default="
+		 __MODULE_STRING(WATCHDOG_TIMEOUT) ".");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+		 "Watchdog cannot be stopped once started (default="
+		 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int exar_sio_enter(const unsigned short config_port,
+			  const unsigned char key)
+{
+	if (!request_muxed_region(config_port, 2, DRV_NAME))
+		return -EBUSY;
+
+	/* write the ENTER-KEY twice */
+	outb(key, config_port);
+	outb(key, config_port);
+
+	return 0;
+}
+
+static void exar_sio_exit(const unsigned short config_port)
+{
+	outb(EXAR_EXIT_KEY, config_port);
+	release_region(config_port, 2);
+}
+
+static unsigned char exar_sio_read(const unsigned short config_port,
+				   const unsigned char reg)
+{
+	outb(reg, config_port);
+	return inb(config_port + 1);
+}
+
+static void exar_sio_write(const unsigned short config_port,
+			   const unsigned char reg, const unsigned char val)
+{
+	outb(reg, config_port);
+	outb(val, config_port + 1);
+}
+
+static unsigned short exar_sio_read16(const unsigned short config_port,
+				      const unsigned char reg)
+{
+	unsigned char msb, lsb;
+
+	msb = exar_sio_read(config_port, reg);
+	lsb = exar_sio_read(config_port, reg + 1);
+
+	return (msb << 8) | lsb;
+}
+
+static void exar_sio_select_wdt(const unsigned short config_port)
+{
+	exar_sio_write(config_port, EXAR_LDN, EXAR_WDT_LDEV);
+}
+
+static void exar_wdt_arm(const struct wdt_priv *priv)
+{
+	unsigned short rt_base = priv->wdt_res.start;
+
+	/* write timeout value twice to arm watchdog */
+	outb(priv->timeout, rt_base + WDT_VAL);
+	outb(priv->timeout, rt_base + WDT_VAL);
+}
+
+static void exar_wdt_disarm(const struct wdt_priv *priv)
+{
+	unsigned short rt_base = priv->wdt_res.start;
+
+	/*
+	 * use two accesses with different values to make sure
+	 * that a combination of a previous single access and
+	 * the ones below with the same value are not falsely
+	 * interpreted as "arm watchdog"
+	 */
+	outb(0xFF, rt_base + WDT_VAL);
+	outb(0, rt_base + WDT_VAL);
+}
+
+static int exar_wdt_start(struct watchdog_device *wdog)
+{
+	struct wdt_priv *priv = watchdog_get_drvdata(wdog);
+	unsigned short rt_base = priv->wdt_res.start;
+
+	spin_lock(&priv->io_lock);
+
+	exar_wdt_disarm(priv);
+	outb(priv->unit, rt_base + WDT_CTRL);
+	exar_wdt_arm(priv);
+
+	spin_unlock(&priv->io_lock);
+	return 0;
+}
+
+static int exar_wdt_stop(struct watchdog_device *wdog)
+{
+	struct wdt_priv *priv = watchdog_get_drvdata(wdog);
+
+	spin_lock(&priv->io_lock);
+
+	exar_wdt_disarm(priv);
+
+	spin_unlock(&priv->io_lock);
+	return 0;
+}
+
+static int exar_wdt_keepalive(struct watchdog_device *wdog)
+{
+	struct wdt_priv *priv = watchdog_get_drvdata(wdog);
+	unsigned short rt_base = priv->wdt_res.start;
+
+	spin_lock(&priv->io_lock);
+
+	/* reading the WDT_VAL reg will feed the watchdog */
+	inb(rt_base + WDT_VAL);
+
+	spin_unlock(&priv->io_lock);
+	return 0;
+}
+
+static int exar_wdt_set_timeout(struct watchdog_device *wdog, unsigned int t)
+{
+	struct wdt_priv *priv = watchdog_get_drvdata(wdog);
+	bool unit_min = false;
+
+	/*
+	 * if new timeout is bigger than 255 seconds, change the
+	 * unit to minutes and round the timeout up to the next whole minute
+	 */
+	if (t > 255) {
+		unit_min = true;
+		t = DIV_ROUND_UP(t, 60);
+	}
+
+	/* save for later use in exar_wdt_start() */
+	priv->unit = unit_min ? WDT_UNITS_MIN : WDT_UNITS_SEC;
+	priv->timeout = t;
+
+	wdog->timeout = unit_min ? 
t * 60 : t; + + if (watchdog_hw_running(wdog)) + exar_wdt_start(wdog); + + return 0; +} + +static const struct watchdog_info exar_wdt_info = { + .options = WDIOF_KEEPALIVEPING | + WDIOF_SETTIMEOUT | + WDIOF_MAGICCLOSE, + .identity = "Exar/MaxLinear XR28V38x Watchdog", +}; + +static const struct watchdog_ops exar_wdt_ops = { + .owner = THIS_MODULE, + .start = exar_wdt_start, + .stop = exar_wdt_stop, + .ping = exar_wdt_keepalive, + .set_timeout = exar_wdt_set_timeout, +}; + +static int exar_wdt_config(struct watchdog_device *wdog, + const unsigned char conf) +{ + struct wdt_priv *priv = watchdog_get_drvdata(wdog); + int ret; + + ret = exar_sio_enter(priv->config_port, priv->enter_key); + if (ret) + return ret; + + exar_sio_select_wdt(priv->config_port); + exar_sio_write(priv->config_port, EXAR_WDT, conf); + + exar_sio_exit(priv->config_port); + + return 0; +} + +static int __init exar_wdt_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct wdt_priv *priv = dev->platform_data; + struct watchdog_device *wdt_dev = &priv->wdt_dev; + struct resource *res; + int ret; + + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!res) + return -ENXIO; + + spin_lock_init(&priv->io_lock); + + wdt_dev->info = &exar_wdt_info; + wdt_dev->ops = &exar_wdt_ops; + wdt_dev->min_timeout = 1; + wdt_dev->max_timeout = 255 * 60; + + watchdog_init_timeout(wdt_dev, timeout, NULL); + watchdog_set_nowayout(wdt_dev, nowayout); + watchdog_stop_on_reboot(wdt_dev); + watchdog_stop_on_unregister(wdt_dev); + watchdog_set_drvdata(wdt_dev, priv); + + ret = exar_wdt_config(wdt_dev, EXAR_WDT_DEF_CONF); + if (ret) + return ret; + + exar_wdt_set_timeout(wdt_dev, timeout); + /* Make sure that the watchdog is not running */ + exar_wdt_stop(wdt_dev); + + ret = devm_watchdog_register_device(dev, wdt_dev); + if (ret) + return ret; + + dev_info(dev, "XR28V%X WDT initialized. timeout=%d sec (nowayout=%d)\n", + priv->did, timeout, nowayout); + + return 0; +} + +static unsigned short __init exar_detect(const unsigned short config_port, + const unsigned char key, + unsigned short *rt_base) +{ + int ret; + unsigned short base = 0; + unsigned short vid, did; + + ret = exar_sio_enter(config_port, key); + if (ret) + return 0; + + vid = exar_sio_read16(config_port, EXAR_VID); + did = exar_sio_read16(config_port, EXAR_DID); + + /* check for the vendor and device IDs we currently know about */ + if (vid == EXAR_VEN_ID && + (did == EXAR_DEV_382 || + did == EXAR_DEV_384)) { + exar_sio_select_wdt(config_port); + /* is device active? 
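+		 * (EXAR_ACT reads 0x01 when the WDT logical device is
+		 * enabled)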
*/ + if (exar_sio_read(config_port, EXAR_ACT) == 0x01) + base = exar_sio_read16(config_port, EXAR_RTBASE); + } + + exar_sio_exit(config_port); + + if (base) { + pr_debug("Found a XR28V%X WDT (conf: 0x%x / rt: 0x%04x)\n", + did, config_port, base); + *rt_base = base; + return did; + } + + return 0; +} + +static struct platform_driver exar_wdt_driver = { + .driver = { + .name = DRV_NAME, + }, +}; + +static LIST_HEAD(pdev_list); + +static int __init exar_wdt_register(struct wdt_priv *priv, const int idx) +{ + struct wdt_pdev_node *n; + + n = kzalloc(sizeof(*n), GFP_KERNEL); + if (!n) + return -ENOMEM; + + INIT_LIST_HEAD(&n->list); + + scnprintf((char *)n->name, sizeof(n->name), DRV_NAME ".%d", idx); + priv->wdt_res.name = n->name; + + n->pdev = platform_device_register_resndata(NULL, DRV_NAME, idx, + &priv->wdt_res, 1, + priv, sizeof(*priv)); + if (IS_ERR(n->pdev)) { + kfree(n); + return PTR_ERR(n->pdev); + } + + list_add_tail(&n->list, &pdev_list); + + return 0; +} + +static void exar_wdt_unregister(void) +{ + struct wdt_pdev_node *n, *t; + + list_for_each_entry_safe(n, t, &pdev_list, list) { + platform_device_unregister(n->pdev); + list_del(&n->list); + kfree(n); + } +} + +static int __init exar_wdt_init(void) +{ + int ret, i, j, idx = 0; + + /* search for active Exar watchdogs on all possible locations */ + for (i = 0; i < ARRAY_SIZE(sio_config_ports); i++) { + for (j = 0; j < ARRAY_SIZE(sio_enter_keys); j++) { + unsigned short did, rt_base = 0; + + did = exar_detect(sio_config_ports[i], + sio_enter_keys[j], + &rt_base); + + if (did) { + struct wdt_priv priv = { + .wdt_res = DEFINE_RES_IO(rt_base, 2), + .did = did, + .config_port = sio_config_ports[i], + .enter_key = sio_enter_keys[j], + }; + + ret = exar_wdt_register(&priv, idx); + if (!ret) + idx++; + } + } + } + + if (!idx) + return -ENODEV; + + ret = platform_driver_probe(&exar_wdt_driver, exar_wdt_probe); + if (ret) + exar_wdt_unregister(); + + return ret; +} + +static void __exit exar_wdt_exit(void) +{ + exar_wdt_unregister(); + platform_driver_unregister(&exar_wdt_driver); +} + +module_init(exar_wdt_init); +module_exit(exar_wdt_exit); + +MODULE_AUTHOR("David Müller <[email protected]>"); +MODULE_DESCRIPTION("Exar/MaxLinear Watchdog Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/ftwdt010_wdt.c b/drivers/watchdog/ftwdt010_wdt.c index 21dcc7765688..442c5bf63ff4 100644 --- a/drivers/watchdog/ftwdt010_wdt.c +++ b/drivers/watchdog/ftwdt010_wdt.c @@ -47,21 +47,28 @@ struct ftwdt010_wdt *to_ftwdt010_wdt(struct watchdog_device *wdd) return container_of(wdd, struct ftwdt010_wdt, wdd); } -static int ftwdt010_wdt_start(struct watchdog_device *wdd) +static void ftwdt010_enable(struct ftwdt010_wdt *gwdt, + unsigned int timeout, + bool need_irq) { - struct ftwdt010_wdt *gwdt = to_ftwdt010_wdt(wdd); u32 enable; - writel(wdd->timeout * WDT_CLOCK, gwdt->base + FTWDT010_WDLOAD); + writel(timeout * WDT_CLOCK, gwdt->base + FTWDT010_WDLOAD); writel(WDRESTART_MAGIC, gwdt->base + FTWDT010_WDRESTART); /* set clock before enabling */ enable = WDCR_CLOCK_5MHZ | WDCR_SYS_RST; writel(enable, gwdt->base + FTWDT010_WDCR); - if (gwdt->has_irq) + if (need_irq) enable |= WDCR_WDINTR; enable |= WDCR_ENABLE; writel(enable, gwdt->base + FTWDT010_WDCR); +} +static int ftwdt010_wdt_start(struct watchdog_device *wdd) +{ + struct ftwdt010_wdt *gwdt = to_ftwdt010_wdt(wdd); + + ftwdt010_enable(gwdt, wdd->timeout, gwdt->has_irq); return 0; } @@ -93,6 +100,13 @@ static int ftwdt010_wdt_set_timeout(struct watchdog_device *wdd, return 0; } +static int 
ftwdt010_wdt_restart(struct watchdog_device *wdd, + unsigned long action, void *data) +{ + ftwdt010_enable(to_ftwdt010_wdt(wdd), 0, false); + return 0; +} + static irqreturn_t ftwdt010_wdt_interrupt(int irq, void *data) { struct ftwdt010_wdt *gwdt = data; @@ -107,6 +121,7 @@ static const struct watchdog_ops ftwdt010_wdt_ops = { .stop = ftwdt010_wdt_stop, .ping = ftwdt010_wdt_ping, .set_timeout = ftwdt010_wdt_set_timeout, + .restart = ftwdt010_wdt_restart, .owner = THIS_MODULE, }; @@ -156,7 +171,7 @@ static int ftwdt010_wdt_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (irq) { + if (irq > 0) { ret = devm_request_irq(dev, irq, ftwdt010_wdt_interrupt, 0, "watchdog bark", gwdt); if (ret) diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index a5006a58e0db..f79f932bca14 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c @@ -20,7 +20,9 @@ #include <linux/pci_ids.h> #include <linux/types.h> #include <linux/watchdog.h> +#ifdef CONFIG_HPWDT_NMI_DECODING #include <asm/nmi.h> +#endif #include <linux/crash_dump.h> #define HPWDT_VERSION "2.0.4" diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c index 922b60374295..2897902090b3 100644 --- a/drivers/watchdog/imx7ulp_wdt.c +++ b/drivers/watchdog/imx7ulp_wdt.c @@ -9,12 +9,15 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/reboot.h> #include <linux/watchdog.h> #define WDOG_CS 0x0 +#define WDOG_CS_FLG BIT(14) #define WDOG_CS_CMD32EN BIT(13) +#define WDOG_CS_PRES BIT(12) #define WDOG_CS_ULK BIT(11) #define WDOG_CS_RCS BIT(10) #define LPO_CLK 0x1 @@ -39,60 +42,105 @@ #define DEFAULT_TIMEOUT 60 #define MAX_TIMEOUT 128 #define WDOG_CLOCK_RATE 1000 -#define WDOG_WAIT_TIMEOUT 20 +#define WDOG_ULK_WAIT_TIMEOUT 1000 +#define WDOG_RCS_WAIT_TIMEOUT 10000 +#define WDOG_RCS_POST_WAIT 3000 + +#define RETRY_MAX 5 static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0000); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); +struct imx_wdt_hw_feature { + bool prescaler_enable; + u32 wdog_clock_rate; +}; + struct imx7ulp_wdt_device { struct watchdog_device wdd; void __iomem *base; struct clk *clk; + bool post_rcs_wait; + const struct imx_wdt_hw_feature *hw; }; -static int imx7ulp_wdt_wait(void __iomem *base, u32 mask) +static int imx7ulp_wdt_wait_ulk(void __iomem *base) { u32 val = readl(base + WDOG_CS); - if (!(val & mask) && readl_poll_timeout_atomic(base + WDOG_CS, val, - val & mask, 0, - WDOG_WAIT_TIMEOUT)) + if (!(val & WDOG_CS_ULK) && + readl_poll_timeout_atomic(base + WDOG_CS, val, + val & WDOG_CS_ULK, 0, + WDOG_ULK_WAIT_TIMEOUT)) return -ETIMEDOUT; return 0; } -static int imx7ulp_wdt_enable(struct watchdog_device *wdog, bool enable) +static int imx7ulp_wdt_wait_rcs(struct imx7ulp_wdt_device *wdt) { - struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog); + int ret = 0; + u32 val = readl(wdt->base + WDOG_CS); + u64 timeout = (val & WDOG_CS_PRES) ? + WDOG_RCS_WAIT_TIMEOUT * 256 : WDOG_RCS_WAIT_TIMEOUT; + unsigned long wait_min = (val & WDOG_CS_PRES) ? 
+ WDOG_RCS_POST_WAIT * 256 : WDOG_RCS_POST_WAIT; + + if (!(val & WDOG_CS_RCS) && + readl_poll_timeout(wdt->base + WDOG_CS, val, val & WDOG_CS_RCS, 100, + timeout)) + ret = -ETIMEDOUT; + + /* Wait 2.5 clocks after RCS done */ + if (wdt->post_rcs_wait) + usleep_range(wait_min, wait_min + 2000); + + return ret; +} +static int _imx7ulp_wdt_enable(struct imx7ulp_wdt_device *wdt, bool enable) +{ u32 val = readl(wdt->base + WDOG_CS); int ret; local_irq_disable(); writel(UNLOCK, wdt->base + WDOG_CNT); - ret = imx7ulp_wdt_wait(wdt->base, WDOG_CS_ULK); + ret = imx7ulp_wdt_wait_ulk(wdt->base); if (ret) goto enable_out; if (enable) writel(val | WDOG_CS_EN, wdt->base + WDOG_CS); else writel(val & ~WDOG_CS_EN, wdt->base + WDOG_CS); - imx7ulp_wdt_wait(wdt->base, WDOG_CS_RCS); -enable_out: local_irq_enable(); + ret = imx7ulp_wdt_wait_rcs(wdt); return ret; + +enable_out: + local_irq_enable(); + return ret; } -static bool imx7ulp_wdt_is_enabled(void __iomem *base) +static int imx7ulp_wdt_enable(struct watchdog_device *wdog, bool enable) { - u32 val = readl(base + WDOG_CS); + struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog); + int ret; + u32 val; + u32 loop = RETRY_MAX; + + do { + ret = _imx7ulp_wdt_enable(wdt, enable); + val = readl(wdt->base + WDOG_CS); + } while (--loop > 0 && ((!!(val & WDOG_CS_EN)) != enable || ret)); - return val & WDOG_CS_EN; + if (loop == 0) + return -EBUSY; + + return ret; } static int imx7ulp_wdt_ping(struct watchdog_device *wdog) @@ -114,26 +162,44 @@ static int imx7ulp_wdt_stop(struct watchdog_device *wdog) return imx7ulp_wdt_enable(wdog, false); } -static int imx7ulp_wdt_set_timeout(struct watchdog_device *wdog, - unsigned int timeout) +static int _imx7ulp_wdt_set_timeout(struct imx7ulp_wdt_device *wdt, + unsigned int toval) { - struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog); - u32 val = WDOG_CLOCK_RATE * timeout; int ret; local_irq_disable(); writel(UNLOCK, wdt->base + WDOG_CNT); - ret = imx7ulp_wdt_wait(wdt->base, WDOG_CS_ULK); + ret = imx7ulp_wdt_wait_ulk(wdt->base); if (ret) goto timeout_out; - writel(val, wdt->base + WDOG_TOVAL); - imx7ulp_wdt_wait(wdt->base, WDOG_CS_RCS); - - wdog->timeout = timeout; + writel(toval, wdt->base + WDOG_TOVAL); + local_irq_enable(); + ret = imx7ulp_wdt_wait_rcs(wdt); + return ret; timeout_out: local_irq_enable(); + return ret; +} +static int imx7ulp_wdt_set_timeout(struct watchdog_device *wdog, + unsigned int timeout) +{ + struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog); + u32 toval = wdt->hw->wdog_clock_rate * timeout; + u32 val; + int ret; + u32 loop = RETRY_MAX; + + do { + ret = _imx7ulp_wdt_set_timeout(wdt, toval); + val = readl(wdt->base + WDOG_TOVAL); + } while (--loop > 0 && (val != toval || ret)); + + if (loop == 0) + return -EBUSY; + + wdog->timeout = timeout; return ret; } @@ -173,29 +239,62 @@ static const struct watchdog_info imx7ulp_wdt_info = { WDIOF_MAGICCLOSE, }; -static int imx7ulp_wdt_init(void __iomem *base, unsigned int timeout) +static int _imx7ulp_wdt_init(struct imx7ulp_wdt_device *wdt, unsigned int timeout, unsigned int cs) { u32 val; int ret; local_irq_disable(); - /* unlock the wdog for reconfiguration */ - writel_relaxed(UNLOCK_SEQ0, base + WDOG_CNT); - writel_relaxed(UNLOCK_SEQ1, base + WDOG_CNT); - ret = imx7ulp_wdt_wait(base, WDOG_CS_ULK); + + val = readl(wdt->base + WDOG_CS); + if (val & WDOG_CS_CMD32EN) { + writel(UNLOCK, wdt->base + WDOG_CNT); + } else { + mb(); + /* unlock the wdog for reconfiguration */ + writel_relaxed(UNLOCK_SEQ0, wdt->base + WDOG_CNT); + 
writel_relaxed(UNLOCK_SEQ1, wdt->base + WDOG_CNT); + mb(); + } + + ret = imx7ulp_wdt_wait_ulk(wdt->base); if (ret) goto init_out; /* set an initial timeout value in TOVAL */ - writel(timeout, base + WDOG_TOVAL); - /* enable 32bit command sequence and reconfigure */ - val = WDOG_CS_CMD32EN | WDOG_CS_CLK | WDOG_CS_UPDATE | - WDOG_CS_WAIT | WDOG_CS_STOP; - writel(val, base + WDOG_CS); - imx7ulp_wdt_wait(base, WDOG_CS_RCS); + writel(timeout, wdt->base + WDOG_TOVAL); + writel(cs, wdt->base + WDOG_CS); + local_irq_enable(); + ret = imx7ulp_wdt_wait_rcs(wdt); + + return ret; init_out: local_irq_enable(); + return ret; +} + +static int imx7ulp_wdt_init(struct imx7ulp_wdt_device *wdt, unsigned int timeout) +{ + /* enable 32bit command sequence and reconfigure */ + u32 val = WDOG_CS_CMD32EN | WDOG_CS_CLK | WDOG_CS_UPDATE | + WDOG_CS_WAIT | WDOG_CS_STOP; + u32 cs, toval; + int ret; + u32 loop = RETRY_MAX; + + if (wdt->hw->prescaler_enable) + val |= WDOG_CS_PRES; + + do { + ret = _imx7ulp_wdt_init(wdt, timeout, val); + toval = readl(wdt->base + WDOG_TOVAL); + cs = readl(wdt->base + WDOG_CS); + cs &= ~(WDOG_CS_FLG | WDOG_CS_ULK | WDOG_CS_RCS); + } while (--loop > 0 && (cs != val || toval != timeout || ret)); + + if (loop == 0) + return -EBUSY; return ret; } @@ -228,6 +327,15 @@ static int imx7ulp_wdt_probe(struct platform_device *pdev) return PTR_ERR(imx7ulp_wdt->clk); } + imx7ulp_wdt->post_rcs_wait = true; + if (of_device_is_compatible(dev->of_node, + "fsl,imx8ulp-wdt")) { + dev_info(dev, "imx8ulp wdt probe\n"); + imx7ulp_wdt->post_rcs_wait = false; + } else { + dev_info(dev, "imx7ulp wdt probe\n"); + } + ret = clk_prepare_enable(imx7ulp_wdt->clk); if (ret) return ret; @@ -248,14 +356,16 @@ static int imx7ulp_wdt_probe(struct platform_device *pdev) watchdog_stop_on_reboot(wdog); watchdog_stop_on_unregister(wdog); watchdog_set_drvdata(wdog, imx7ulp_wdt); - ret = imx7ulp_wdt_init(imx7ulp_wdt->base, wdog->timeout * WDOG_CLOCK_RATE); + + imx7ulp_wdt->hw = of_device_get_match_data(dev); + ret = imx7ulp_wdt_init(imx7ulp_wdt, wdog->timeout * imx7ulp_wdt->hw->wdog_clock_rate); if (ret) return ret; return devm_watchdog_register_device(dev, wdog); } -static int __maybe_unused imx7ulp_wdt_suspend(struct device *dev) +static int __maybe_unused imx7ulp_wdt_suspend_noirq(struct device *dev) { struct imx7ulp_wdt_device *imx7ulp_wdt = dev_get_drvdata(dev); @@ -267,30 +377,44 @@ static int __maybe_unused imx7ulp_wdt_suspend(struct device *dev) return 0; } -static int __maybe_unused imx7ulp_wdt_resume(struct device *dev) +static int __maybe_unused imx7ulp_wdt_resume_noirq(struct device *dev) { struct imx7ulp_wdt_device *imx7ulp_wdt = dev_get_drvdata(dev); - u32 timeout = imx7ulp_wdt->wdd.timeout * WDOG_CLOCK_RATE; + u32 timeout = imx7ulp_wdt->wdd.timeout * imx7ulp_wdt->hw->wdog_clock_rate; int ret; ret = clk_prepare_enable(imx7ulp_wdt->clk); if (ret) return ret; - if (imx7ulp_wdt_is_enabled(imx7ulp_wdt->base)) - imx7ulp_wdt_init(imx7ulp_wdt->base, timeout); - - if (watchdog_active(&imx7ulp_wdt->wdd)) + if (watchdog_active(&imx7ulp_wdt->wdd)) { + imx7ulp_wdt_init(imx7ulp_wdt, timeout); imx7ulp_wdt_start(&imx7ulp_wdt->wdd); + imx7ulp_wdt_ping(&imx7ulp_wdt->wdd); + } return 0; } -static SIMPLE_DEV_PM_OPS(imx7ulp_wdt_pm_ops, imx7ulp_wdt_suspend, - imx7ulp_wdt_resume); +static const struct dev_pm_ops imx7ulp_wdt_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx7ulp_wdt_suspend_noirq, + imx7ulp_wdt_resume_noirq) +}; + +static const struct imx_wdt_hw_feature imx7ulp_wdt_hw = { + .prescaler_enable = false, + .wdog_clock_rate = 
1000, +}; + +static const struct imx_wdt_hw_feature imx93_wdt_hw = { + .prescaler_enable = true, + .wdog_clock_rate = 125, +}; static const struct of_device_id imx7ulp_wdt_dt_ids[] = { - { .compatible = "fsl,imx7ulp-wdt", }, + { .compatible = "fsl,imx8ulp-wdt", .data = &imx7ulp_wdt_hw, }, + { .compatible = "fsl,imx7ulp-wdt", .data = &imx7ulp_wdt_hw, }, + { .compatible = "fsl,imx93-wdt", .data = &imx93_wdt_hw, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, imx7ulp_wdt_dt_ids); diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c index d3c9e2f6e63b..981a2f7c3bec 100644 --- a/drivers/watchdog/meson_gxbb_wdt.c +++ b/drivers/watchdog/meson_gxbb_wdt.c @@ -156,6 +156,7 @@ static int meson_gxbb_wdt_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct meson_gxbb_wdt *data; int ret; + u32 ctrl_reg; data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) @@ -189,13 +190,26 @@ static int meson_gxbb_wdt_probe(struct platform_device *pdev) watchdog_set_nowayout(&data->wdt_dev, nowayout); watchdog_set_drvdata(&data->wdt_dev, data); + ctrl_reg = readl(data->reg_base + GXBB_WDT_CTRL_REG) & + GXBB_WDT_CTRL_EN; + + if (ctrl_reg) { + /* Watchdog is running - keep it running but extend timeout + * to the maximum while setting the timebase + */ + set_bit(WDOG_HW_RUNNING, &data->wdt_dev.status); + meson_gxbb_wdt_set_timeout(&data->wdt_dev, + GXBB_WDT_TCNT_SETUP_MASK / 1000); + } + /* Setup with 1ms timebase */ - writel(((clk_get_rate(data->clk) / 1000) & GXBB_WDT_CTRL_DIV_MASK) | - GXBB_WDT_CTRL_EE_RESET | - GXBB_WDT_CTRL_CLK_EN | - GXBB_WDT_CTRL_CLKDIV_EN, - data->reg_base + GXBB_WDT_CTRL_REG); + ctrl_reg |= ((clk_get_rate(data->clk) / 1000) & + GXBB_WDT_CTRL_DIV_MASK) | + GXBB_WDT_CTRL_EE_RESET | + GXBB_WDT_CTRL_CLK_EN | + GXBB_WDT_CTRL_CLKDIV_EN; + writel(ctrl_reg, data->reg_base + GXBB_WDT_CTRL_REG); meson_gxbb_wdt_set_timeout(&data->wdt_dev, data->wdt_dev.timeout); return devm_watchdog_register_device(dev, &data->wdt_dev); diff --git a/drivers/watchdog/npcm_wdt.c b/drivers/watchdog/npcm_wdt.c index 28a24caa2627..a5dd1c230137 100644 --- a/drivers/watchdog/npcm_wdt.c +++ b/drivers/watchdog/npcm_wdt.c @@ -3,6 +3,7 @@ // Copyright (c) 2018 IBM Corp. #include <linux/bitops.h> +#include <linux/clk.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/kernel.h> @@ -43,6 +44,7 @@ struct npcm_wdt { struct watchdog_device wdd; void __iomem *reg; + struct clk *clk; }; static inline struct npcm_wdt *to_npcm_wdt(struct watchdog_device *wdd) @@ -66,6 +68,9 @@ static int npcm_wdt_start(struct watchdog_device *wdd) struct npcm_wdt *wdt = to_npcm_wdt(wdd); u32 val; + if (wdt->clk) + clk_prepare_enable(wdt->clk); + if (wdd->timeout < 2) val = 0x800; else if (wdd->timeout < 3) @@ -100,6 +105,9 @@ static int npcm_wdt_stop(struct watchdog_device *wdd) writel(0, wdt->reg); + if (wdt->clk) + clk_disable_unprepare(wdt->clk); + return 0; } @@ -147,6 +155,10 @@ static int npcm_wdt_restart(struct watchdog_device *wdd, { struct npcm_wdt *wdt = to_npcm_wdt(wdd); + /* For reset, we start the WDT clock and leave it running. 
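The npcm_wdt hunks around this point make the controller clock optional, and the idiom is worth spelling out: devm_clk_get_optional() returns NULL rather than an error when the device tree wires up no clock, and the clk API treats a NULL clock as a no-op. A minimal sketch of that probe pattern, with illustrative names, assuming only the standard clk API:

    #include <linux/clk.h>
    #include <linux/err.h>

    static int wdt_get_optional_clock(struct device *dev, struct clk **out)
    {
            struct clk *clk = devm_clk_get_optional(dev, NULL);

            if (IS_ERR(clk))
                    return PTR_ERR(clk);    /* real failure, e.g. -EPROBE_DEFER */

            *out = clk;                     /* may be NULL: no clock specified */
            return 0;
    }

Since clk_prepare_enable(NULL) simply returns 0, the explicit "if (wdt->clk)" guards in the driver are belt-and-braces rather than strictly required.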
*/ + if (wdt->clk) + clk_prepare_enable(wdt->clk); + writel(NPCM_WTR | NPCM_WTRE | NPCM_WTE, wdt->reg); udelay(1000); @@ -191,6 +203,10 @@ static int npcm_wdt_probe(struct platform_device *pdev) if (IS_ERR(wdt->reg)) return PTR_ERR(wdt->reg); + wdt->clk = devm_clk_get_optional(&pdev->dev, NULL); + if (IS_ERR(wdt->clk)) + return PTR_ERR(wdt->clk); + irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c index 053ef3bde12d..6e9253761fc1 100644 --- a/drivers/watchdog/rti_wdt.c +++ b/drivers/watchdog/rti_wdt.c @@ -225,9 +225,8 @@ static int rti_wdt_probe(struct platform_device *pdev) wdt->freq = wdt->freq * 9 / 10; pm_runtime_enable(dev); - ret = pm_runtime_get_sync(dev); + ret = pm_runtime_resume_and_get(dev); if (ret < 0) { - pm_runtime_put_noidle(dev); pm_runtime_disable(&pdev->dev); return dev_err_probe(dev, ret, "runtime pm failed\n"); } diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c index 6eea0ee4af49..974a4194a8fd 100644 --- a/drivers/watchdog/rzg2l_wdt.c +++ b/drivers/watchdog/rzg2l_wdt.c @@ -10,7 +10,7 @@ #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/of.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> @@ -40,6 +40,11 @@ module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); +enum rz_wdt_type { + WDT_RZG2L, + WDT_RZV2M, +}; + struct rzg2l_wdt_priv { void __iomem *base; struct watchdog_device wdev; @@ -48,6 +53,7 @@ struct rzg2l_wdt_priv { unsigned long delay; struct clk *pclk; struct clk *osc_clk; + enum rz_wdt_type devtype; }; static void rzg2l_wdt_wait_delay(struct rzg2l_wdt_priv *priv) @@ -142,11 +148,29 @@ static int rzg2l_wdt_restart(struct watchdog_device *wdev, clk_prepare_enable(priv->pclk); clk_prepare_enable(priv->osc_clk); - /* Generate Reset (WDTRSTB) Signal on parity error */ - rzg2l_wdt_write(priv, 0, PECR); + if (priv->devtype == WDT_RZG2L) { + /* Generate Reset (WDTRSTB) Signal on parity error */ + rzg2l_wdt_write(priv, 0, PECR); + + /* Force parity error */ + rzg2l_wdt_write(priv, PEEN_FORCE, PEEN); + } else { + /* RZ/V2M doesn't have parity error registers */ + + wdev->timeout = 0; + + /* Initialize time out */ + rzg2l_wdt_init_timeout(wdev); - /* Force parity error */ - rzg2l_wdt_write(priv, PEEN_FORCE, PEEN); + /* Initialize watchdog counter register */ + rzg2l_wdt_write(priv, 0, WDTTIM); + + /* Enable watchdog timer*/ + rzg2l_wdt_write(priv, WDTCNT_WDTEN, WDTCNT); + + /* Wait 2 consecutive overflow cycles for reset */ + mdelay(DIV_ROUND_UP(2 * 0xFFFFF * 1000, priv->osc_clk_rate)); + } return 0; } @@ -227,6 +251,8 @@ static int rzg2l_wdt_probe(struct platform_device *pdev) if (ret) return dev_err_probe(dev, ret, "failed to deassert"); + priv->devtype = (uintptr_t)of_device_get_match_data(dev); + pm_runtime_enable(&pdev->dev); priv->wdev.info = &rzg2l_wdt_ident; @@ -255,7 +281,8 @@ static int rzg2l_wdt_probe(struct platform_device *pdev) } static const struct of_device_id rzg2l_wdt_ids[] = { - { .compatible = "renesas,rzg2l-wdt", }, + { .compatible = "renesas,rzg2l-wdt", .data = (void *)WDT_RZG2L }, + { .compatible = "renesas,rzv2m-wdt", .data = (void *)WDT_RZV2M }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, rzg2l_wdt_ids); diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index 95919392927f..d3fc8ed886ff 100644 --- 
a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c @@ -60,9 +60,13 @@ #define EXYNOS850_CLUSTER0_NONCPU_INT_EN 0x1244 #define EXYNOS850_CLUSTER1_NONCPU_OUT 0x1620 #define EXYNOS850_CLUSTER1_NONCPU_INT_EN 0x1644 +#define EXYNOSAUTOV9_CLUSTER1_NONCPU_OUT 0x1520 +#define EXYNOSAUTOV9_CLUSTER1_NONCPU_INT_EN 0x1544 #define EXYNOS850_CLUSTER0_WDTRESET_BIT 24 #define EXYNOS850_CLUSTER1_WDTRESET_BIT 23 +#define EXYNOSAUTOV9_CLUSTER0_WDTRESET_BIT 25 +#define EXYNOSAUTOV9_CLUSTER1_WDTRESET_BIT 24 /** * DOC: Quirk flags for different Samsung watchdog IP-cores @@ -236,6 +240,30 @@ static const struct s3c2410_wdt_variant drv_data_exynos850_cl1 = { QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN, }; +static const struct s3c2410_wdt_variant drv_data_exynosautov9_cl0 = { + .mask_reset_reg = EXYNOS850_CLUSTER0_NONCPU_INT_EN, + .mask_bit = 2, + .mask_reset_inv = true, + .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET, + .rst_stat_bit = EXYNOSAUTOV9_CLUSTER0_WDTRESET_BIT, + .cnt_en_reg = EXYNOS850_CLUSTER0_NONCPU_OUT, + .cnt_en_bit = 7, + .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET | + QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN, +}; + +static const struct s3c2410_wdt_variant drv_data_exynosautov9_cl1 = { + .mask_reset_reg = EXYNOSAUTOV9_CLUSTER1_NONCPU_INT_EN, + .mask_bit = 2, + .mask_reset_inv = true, + .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET, + .rst_stat_bit = EXYNOSAUTOV9_CLUSTER1_WDTRESET_BIT, + .cnt_en_reg = EXYNOSAUTOV9_CLUSTER1_NONCPU_OUT, + .cnt_en_bit = 7, + .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET | + QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN, +}; + static const struct of_device_id s3c2410_wdt_match[] = { { .compatible = "samsung,s3c2410-wdt", .data = &drv_data_s3c2410 }, @@ -249,6 +277,8 @@ static const struct of_device_id s3c2410_wdt_match[] = { .data = &drv_data_exynos7 }, { .compatible = "samsung,exynos850-wdt", .data = &drv_data_exynos850_cl0 }, + { .compatible = "samsung,exynosautov9-wdt", + .data = &drv_data_exynosautov9_cl0 }, {}, }; MODULE_DEVICE_TABLE(of, s3c2410_wdt_match); @@ -630,8 +660,9 @@ s3c2410_get_wdt_drv_data(struct platform_device *pdev) } #ifdef CONFIG_OF - /* Choose Exynos850 driver data w.r.t. cluster index */ - if (variant == &drv_data_exynos850_cl0) { + /* Choose Exynos850/ExynosAutov9 driver data w.r.t. cluster index */ + if (variant == &drv_data_exynos850_cl0 || + variant == &drv_data_exynosautov9_cl0) { u32 index; int err; @@ -644,9 +675,11 @@ s3c2410_get_wdt_drv_data(struct platform_device *pdev) switch (index) { case 0: - return &drv_data_exynos850_cl0; + return variant; case 1: - return &drv_data_exynos850_cl1; + return (variant == &drv_data_exynos850_cl0) ? 
+ &drv_data_exynos850_cl1 : + &drv_data_exynosautov9_cl1; default: dev_err(dev, "wrong cluster index: %u\n", index); return NULL; diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c index 2d0a06a158a8..82ac5d19f519 100644 --- a/drivers/watchdog/sa1100_wdt.c +++ b/drivers/watchdog/sa1100_wdt.c @@ -238,7 +238,7 @@ static int sa1100dog_remove(struct platform_device *pdev) return 0; } -struct platform_driver sa1100dog_driver = { +static struct platform_driver sa1100dog_driver = { .driver.name = "sa1100_wdt", .probe = sa1100dog_probe, .remove = sa1100dog_remove, diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c index ae54dd33e233..fb426b7d81da 100644 --- a/drivers/watchdog/sp5100_tco.c +++ b/drivers/watchdog/sp5100_tco.c @@ -65,6 +65,12 @@ static struct pci_dev *sp5100_tco_pci; /* module parameters */ +#define WATCHDOG_ACTION 0 +static bool action = WATCHDOG_ACTION; +module_param(action, bool, 0); +MODULE_PARM_DESC(action, "Action taken when watchdog expires, 0 to reset, 1 to poweroff (default=" + __MODULE_STRING(WATCHDOG_ACTION) ")"); + #define WATCHDOG_HEARTBEAT 60 /* 60 sec default heartbeat. */ static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */ module_param(heartbeat, int, 0); @@ -297,8 +303,11 @@ static int sp5100_tco_timer_init(struct sp5100_tco *tco) if (val & SP5100_WDT_FIRED) wdd->bootstatus = WDIOF_CARDRESET; - /* Set watchdog action to reset the system */ - val &= ~SP5100_WDT_ACTION_RESET; + /* Set watchdog action */ + if (action) + val |= SP5100_WDT_ACTION_RESET; + else + val &= ~SP5100_WDT_ACTION_RESET; writel(val, SP5100_WDT_CONTROL(tco->tcobase)); /* Set a reasonable heartbeat before we stop the timer */ diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c index 355e428c0b99..36b4a660928d 100644 --- a/drivers/watchdog/twl4030_wdt.c +++ b/drivers/watchdog/twl4030_wdt.c @@ -9,6 +9,7 @@ #include <linux/types.h> #include <linux/slab.h> #include <linux/kernel.h> +#include <linux/mod_devicetable.h> #include <linux/watchdog.h> #include <linux/platform_device.h> #include <linux/mfd/twl.h> diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c index 56a4a4030ca9..bc33b63c5a5d 100644 --- a/drivers/watchdog/w83627hf_wdt.c +++ b/drivers/watchdog/w83627hf_wdt.c @@ -113,6 +113,10 @@ MODULE_PARM_DESC(early_disable, "Disable watchdog at boot time (default=0)"); #define W836X7HF_WDT_CSR 0xf7 #define NCT6102D_WDT_CSR 0xf2 +#define WDT_CSR_STATUS 0x10 +#define WDT_CSR_KBD 0x40 +#define WDT_CSR_MOUSE 0x80 + static void superio_outb(int reg, int val) { outb(reg, WDT_EFER); @@ -244,8 +248,12 @@ static int w83627hf_init(struct watchdog_device *wdog, enum chips chip) t = superio_inb(cr_wdt_control) & ~0x0C; superio_outb(cr_wdt_control, t); - /* reset trigger, disable keyboard & mouse turning off watchdog */ - t = superio_inb(cr_wdt_csr) & ~0xD0; + t = superio_inb(cr_wdt_csr); + if (t & WDT_CSR_STATUS) + wdog->bootstatus |= WDIOF_CARDRESET; + + /* reset status, disable keyboard & mouse turning off watchdog */ + t &= ~(WDT_CSR_STATUS | WDT_CSR_KBD | WDT_CSR_MOUSE); superio_outb(cr_wdt_csr, t); superio_exit(); diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c index fd64ae77780a..31bf21ceaf48 100644 --- a/drivers/watchdog/w83977f_wdt.c +++ b/drivers/watchdog/w83977f_wdt.c @@ -321,7 +321,7 @@ static int wdt_release(struct inode *inode, struct file *file) * @ppos: pointer to the position to write. No seeks allowed * * A write to a watchdog device is defined as a keepalive signal. 
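Back in the sp5100_tco hunk, the new "action" module parameter deserves a note: despite the bit's name, setting SP5100_WDT_ACTION_RESET requests power-off on expiry, while clearing it keeps the traditional system reset, which is why the parameter description reads "0 to reset, 1 to poweroff". A minimal sketch of how the control word is derived (the macro is the driver's own; the helper name is illustrative):

    #include <linux/module.h>

    static bool action;     /* false = reset (default), true = power off */
    module_param(action, bool, 0);

    static u32 tco_apply_action(u32 ctrl)
    {
            if (action)
                    ctrl |= SP5100_WDT_ACTION_RESET;   /* power off on expiry */
            else
                    ctrl &= ~SP5100_WDT_ACTION_RESET;  /* reset on expiry */
            return ctrl;
    }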
Any - * write of data will do, as we we don't define content meaning. + * write of data will do, as we don't define content meaning. */ static ssize_t wdt_write(struct file *file, const char __user *buf, diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index 54903f3c851e..744b2ab75288 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -1015,7 +1015,11 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) wd_data->dev.groups = wdd->groups; wd_data->dev.release = watchdog_core_data_release; dev_set_drvdata(&wd_data->dev, wdd); - dev_set_name(&wd_data->dev, "watchdog%d", wdd->id); + err = dev_set_name(&wd_data->dev, "watchdog%d", wdd->id); + if (err) { + put_device(&wd_data->dev); + return err; + } kthread_init_work(&wd_data->work, watchdog_ping_work); hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c index aeadaa07c891..ce7a4a9e4b03 100644 --- a/drivers/watchdog/wdat_wdt.c +++ b/drivers/watchdog/wdat_wdt.c @@ -342,9 +342,8 @@ static int wdat_wdt_probe(struct platform_device *pdev) return -EINVAL; wdat->period = tbl->timer_period; - wdat->wdd.min_hw_heartbeat_ms = wdat->period * tbl->min_count; - wdat->wdd.max_hw_heartbeat_ms = wdat->period * tbl->max_count; - wdat->wdd.min_timeout = 1; + wdat->wdd.min_timeout = DIV_ROUND_UP(wdat->period * tbl->min_count, 1000); + wdat->wdd.max_timeout = wdat->period * tbl->max_count / 1000; wdat->stopped_in_sleep = tbl->flags & ACPI_WDAT_STOPPED; wdat->wdd.info = &wdat_wdt_info; wdat->wdd.ops = &wdat_wdt_ops; diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index a65bd92121a5..d5d7c402b651 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -56,7 +56,7 @@ config XEN_MEMORY_HOTPLUG_LIMIT depends on XEN_HAVE_PVMMU depends on MEMORY_HOTPLUG help - Maxmium amount of memory (in GiB) that a PV guest can be + Maximum amount of memory (in GiB) that a PV guest can be expanded to when using memory hotplug. 
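The wdat_wdt hunk above replaces hand-set heartbeat limits with values derived from the ACPI table: the period is milliseconds per timer tick, so period times count gives a bound in milliseconds that must be expressed in the watchdog core's seconds. Rounding the minimum up and truncating the maximum keeps both bounds inside what the hardware can honour; a worked sketch:

    #include <linux/math.h>

    static void wdat_limits(unsigned int period_ms,
                            unsigned int min_count, unsigned int max_count,
                            unsigned int *min_s, unsigned int *max_s)
    {
            /* e.g. period_ms = 500, min_count = 1: 500 ms rounds up to 1 s */
            *min_s = DIV_ROUND_UP(period_ms * min_count, 1000);
            /* e.g. period_ms = 500, max_count = 511: 255500 ms truncates to 255 s */
            *max_s = period_ms * max_count / 1000;
    }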
A PV guest can have more memory than this limit if is diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h index 40ef379c28ab..9c286b2a1900 100644 --- a/drivers/xen/gntdev-common.h +++ b/drivers/xen/gntdev-common.h @@ -44,9 +44,10 @@ struct gntdev_unmap_notify { }; struct gntdev_grant_map { + atomic_t in_use; struct mmu_interval_notifier notifier; + bool notifier_init; struct list_head next; - struct vm_area_struct *vma; int index; int count; int flags; diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 84b143eef395..4d9a3050de6a 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -286,6 +286,9 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map) */ } + if (use_ptemod && map->notifier_init) + mmu_interval_notifier_remove(&map->notifier); + if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) { notify_remote_via_evtchn(map->notify.event); evtchn_put(map->notify.event); @@ -298,7 +301,7 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map) static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data) { struct gntdev_grant_map *map = data; - unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT; + unsigned int pgnr = (addr - map->pages_vm_start) >> PAGE_SHIFT; int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte | (1 << _GNTMAP_guest_avail0); u64 pte_maddr; @@ -367,8 +370,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map) for (i = 0; i < map->count; i++) { if (map->map_ops[i].status == GNTST_okay) { map->unmap_ops[i].handle = map->map_ops[i].handle; - if (!use_ptemod) - alloced++; + alloced++; } else if (!err) err = -EINVAL; @@ -377,8 +379,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map) if (use_ptemod) { if (map->kmap_ops[i].status == GNTST_okay) { - if (map->map_ops[i].status == GNTST_okay) - alloced++; + alloced++; map->kunmap_ops[i].handle = map->kmap_ops[i].handle; } else if (!err) err = -EINVAL; @@ -394,8 +395,14 @@ static void __unmap_grant_pages_done(int result, unsigned int i; struct gntdev_grant_map *map = data->data; unsigned int offset = data->unmap_ops - map->unmap_ops; + int successful_unmaps = 0; + int live_grants; for (i = 0; i < data->count; i++) { + if (map->unmap_ops[offset + i].status == GNTST_okay && + map->unmap_ops[offset + i].handle != INVALID_GRANT_HANDLE) + successful_unmaps++; + WARN_ON(map->unmap_ops[offset + i].status != GNTST_okay && map->unmap_ops[offset + i].handle != INVALID_GRANT_HANDLE); pr_debug("unmap handle=%d st=%d\n", @@ -403,6 +410,10 @@ static void __unmap_grant_pages_done(int result, map->unmap_ops[offset+i].status); map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE; if (use_ptemod) { + if (map->kunmap_ops[offset + i].status == GNTST_okay && + map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE) + successful_unmaps++; + WARN_ON(map->kunmap_ops[offset + i].status != GNTST_okay && map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE); pr_debug("kunmap handle=%u st=%d\n", @@ -411,11 +422,15 @@ static void __unmap_grant_pages_done(int result, map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE; } } + /* * Decrease the live-grant counter. This must happen after the loop to * prevent premature reuse of the grants by gnttab_mmap(). 
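The live-grant decrement just below tightens the accounting in two ways: only unmap operations that actually reported GNTST_okay are subtracted, and atomic_sub_return() is used so an underflow is detected the moment it happens instead of corrupting the counter silently. A minimal sketch of the shape, with illustrative names:

    #include <linux/atomic.h>
    #include <linux/bug.h>

    /* e.g. release_live(&map->live_grants, successful_unmaps) */
    static void release_live(atomic_t *live, int completed)
    {
            int remaining = atomic_sub_return(completed, live);

            /* going negative would mean a grant was released twice */
            WARN_ON(remaining < 0);
    }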
*/ - atomic_sub(data->count, &map->live_grants); + live_grants = atomic_sub_return(successful_unmaps, &map->live_grants); + if (WARN_ON(live_grants < 0)) + pr_err("%s: live_grants became negative (%d) after unmapping %d pages!\n", + __func__, live_grants, successful_unmaps); /* Release reference taken by __unmap_grant_pages */ gntdev_put_map(NULL, map); @@ -496,11 +511,7 @@ static void gntdev_vma_close(struct vm_area_struct *vma) struct gntdev_priv *priv = file->private_data; pr_debug("gntdev_vma_close %p\n", vma); - if (use_ptemod) { - WARN_ON(map->vma != vma); - mmu_interval_notifier_remove(&map->notifier); - map->vma = NULL; - } + vma->vm_private_data = NULL; gntdev_put_map(priv, map); } @@ -528,29 +539,30 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn, struct gntdev_grant_map *map = container_of(mn, struct gntdev_grant_map, notifier); unsigned long mstart, mend; + unsigned long map_start, map_end; if (!mmu_notifier_range_blockable(range)) return false; + map_start = map->pages_vm_start; + map_end = map->pages_vm_start + (map->count << PAGE_SHIFT); + /* * If the VMA is split or otherwise changed the notifier is not * updated, but we don't want to process VA's outside the modified * VMA. FIXME: It would be much more understandable to just prevent * modifying the VMA in the first place. */ - if (map->vma->vm_start >= range->end || - map->vma->vm_end <= range->start) + if (map_start >= range->end || map_end <= range->start) return true; - mstart = max(range->start, map->vma->vm_start); - mend = min(range->end, map->vma->vm_end); + mstart = max(range->start, map_start); + mend = min(range->end, map_end); pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n", - map->index, map->count, - map->vma->vm_start, map->vma->vm_end, - range->start, range->end, mstart, mend); - unmap_grant_pages(map, - (mstart - map->vma->vm_start) >> PAGE_SHIFT, - (mend - mstart) >> PAGE_SHIFT); + map->index, map->count, map_start, map_end, + range->start, range->end, mstart, mend); + unmap_grant_pages(map, (mstart - map_start) >> PAGE_SHIFT, + (mend - mstart) >> PAGE_SHIFT); return true; } @@ -1030,18 +1042,15 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) return -EINVAL; pr_debug("map %d+%d at %lx (pgoff %lx)\n", - index, count, vma->vm_start, vma->vm_pgoff); + index, count, vma->vm_start, vma->vm_pgoff); mutex_lock(&priv->lock); map = gntdev_find_map_index(priv, index, count); if (!map) goto unlock_out; - if (use_ptemod && map->vma) + if (!atomic_add_unless(&map->in_use, 1, 1)) goto unlock_out; - if (atomic_read(&map->live_grants)) { - err = -EAGAIN; - goto unlock_out; - } + refcount_inc(&map->users); vma->vm_ops = &gntdev_vmops; @@ -1062,15 +1071,16 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) map->flags |= GNTMAP_readonly; } + map->pages_vm_start = vma->vm_start; + if (use_ptemod) { - map->vma = vma; err = mmu_interval_notifier_insert_locked( &map->notifier, vma->vm_mm, vma->vm_start, vma->vm_end - vma->vm_start, &gntdev_mmu_ops); - if (err) { - map->vma = NULL; + if (err) goto out_unlock_put; - } + + map->notifier_init = true; } mutex_unlock(&priv->lock); @@ -1087,7 +1097,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) */ mmu_interval_read_begin(&map->notifier); - map->pages_vm_start = vma->vm_start; err = apply_to_page_range(vma->vm_mm, vma->vm_start, vma->vm_end - vma->vm_start, find_grant_ptes, map); @@ -1116,13 +1125,8 @@ unlock_out: out_unlock_put: mutex_unlock(&priv->lock); out_put_map: - if 
(use_ptemod) { + if (use_ptemod) unmap_grant_pages(map, 0, map->count); - if (map->vma) { - mmu_interval_notifier_remove(&map->notifier); - map->vma = NULL; - } - } gntdev_put_map(priv, map); return err; } diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c index 8973fc1e9ccc..860f37c93af4 100644 --- a/drivers/xen/grant-dma-ops.c +++ b/drivers/xen/grant-dma-ops.c @@ -25,7 +25,7 @@ struct xen_grant_dma_data { bool broken; }; -static DEFINE_XARRAY(xen_grant_dma_devices); +static DEFINE_XARRAY_FLAGS(xen_grant_dma_devices, XA_FLAGS_LOCK_IRQ); #define XEN_GRANT_DMA_ADDR_OFF (1ULL << 63) @@ -42,14 +42,29 @@ static inline grant_ref_t dma_to_grant(dma_addr_t dma) static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev) { struct xen_grant_dma_data *data; + unsigned long flags; - xa_lock(&xen_grant_dma_devices); + xa_lock_irqsave(&xen_grant_dma_devices, flags); data = xa_load(&xen_grant_dma_devices, (unsigned long)dev); - xa_unlock(&xen_grant_dma_devices); + xa_unlock_irqrestore(&xen_grant_dma_devices, flags); return data; } +static int store_xen_grant_dma_data(struct device *dev, + struct xen_grant_dma_data *data) +{ + unsigned long flags; + int ret; + + xa_lock_irqsave(&xen_grant_dma_devices, flags); + ret = xa_err(__xa_store(&xen_grant_dma_devices, (unsigned long)dev, data, + GFP_ATOMIC)); + xa_unlock_irqrestore(&xen_grant_dma_devices, flags); + + return ret; +} + /* * DMA ops for Xen frontends (e.g. virtio). * @@ -153,7 +168,7 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page, unsigned long attrs) { struct xen_grant_dma_data *data; - unsigned int i, n_pages = PFN_UP(size); + unsigned int i, n_pages = PFN_UP(offset + size); grant_ref_t grant; dma_addr_t dma_handle; @@ -185,7 +200,8 @@ static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, unsigned long attrs) { struct xen_grant_dma_data *data; - unsigned int i, n_pages = PFN_UP(size); + unsigned long offset = dma_handle & (PAGE_SIZE - 1); + unsigned int i, n_pages = PFN_UP(offset + size); grant_ref_t grant; if (WARN_ON(dir == DMA_NONE)) @@ -273,72 +289,91 @@ static const struct dma_map_ops xen_grant_dma_ops = { .dma_supported = xen_grant_dma_supported, }; -bool xen_is_grant_dma_device(struct device *dev) +static bool xen_is_dt_grant_dma_device(struct device *dev) { struct device_node *iommu_np; bool has_iommu; - /* XXX Handle only DT devices for now */ - if (!dev->of_node) - return false; - iommu_np = of_parse_phandle(dev->of_node, "iommus", 0); - has_iommu = iommu_np && of_device_is_compatible(iommu_np, "xen,grant-dma"); + has_iommu = iommu_np && + of_device_is_compatible(iommu_np, "xen,grant-dma"); of_node_put(iommu_np); return has_iommu; } +bool xen_is_grant_dma_device(struct device *dev) +{ + /* XXX Handle only DT devices for now */ + if (dev->of_node) + return xen_is_dt_grant_dma_device(dev); + + return false; +} + bool xen_virtio_mem_acc(struct virtio_device *dev) { - if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) + if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain()) return true; return xen_is_grant_dma_device(dev->dev.parent); } -void xen_grant_setup_dma_ops(struct device *dev) +static int xen_dt_grant_init_backend_domid(struct device *dev, + struct xen_grant_dma_data *data) { - struct xen_grant_dma_data *data; struct of_phandle_args iommu_spec; - data = find_xen_grant_dma_data(dev); - if (data) { - dev_err(dev, "Xen grant DMA data is already created\n"); - return; - } - - /* XXX ACPI device unsupported for now */ - if 
(!dev->of_node) - goto err; - if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells", 0, &iommu_spec)) { dev_err(dev, "Cannot parse iommus property\n"); - goto err; + return -ESRCH; } if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") || iommu_spec.args_count != 1) { dev_err(dev, "Incompatible IOMMU node\n"); of_node_put(iommu_spec.np); - goto err; + return -ESRCH; } of_node_put(iommu_spec.np); + /* + * The endpoint ID here means the ID of the domain where the + * corresponding backend is running + */ + data->backend_domid = iommu_spec.args[0]; + + return 0; +} + +void xen_grant_setup_dma_ops(struct device *dev) +{ + struct xen_grant_dma_data *data; + + data = find_xen_grant_dma_data(dev); + if (data) { + dev_err(dev, "Xen grant DMA data is already created\n"); + return; + } + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) goto err; - /* - * The endpoint ID here means the ID of the domain where the corresponding - * backend is running - */ - data->backend_domid = iommu_spec.args[0]; + if (dev->of_node) { + if (xen_dt_grant_init_backend_domid(dev, data)) + goto err; + } else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) { + dev_info(dev, "Using dom0 as backend\n"); + data->backend_domid = 0; + } else { + /* XXX ACPI device unsupported for now */ + goto err; + } - if (xa_err(xa_store(&xen_grant_dma_devices, (unsigned long)dev, data, - GFP_KERNEL))) { + if (store_xen_grant_dma_data(dev, data)) { dev_err(dev, "Cannot store Xen grant DMA data\n"); goto err; } @@ -348,9 +383,20 @@ void xen_grant_setup_dma_ops(struct device *dev) return; err: + devm_kfree(dev, data); dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n"); } +bool xen_virtio_restricted_mem_acc(struct virtio_device *dev) +{ + bool ret = xen_virtio_mem_acc(dev); + + if (ret) + xen_grant_setup_dma_ops(dev->dev.parent); + + return ret; +} + MODULE_DESCRIPTION("Xen grant DMA-mapping layer"); MODULE_AUTHOR("Juergen Gross <[email protected]>"); MODULE_LICENSE("GPL"); diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c index bde63ef677b8..d171091eec12 100644 --- a/drivers/xen/xen-pciback/xenbus.c +++ b/drivers/xen/xen-pciback/xenbus.c @@ -31,7 +31,7 @@ MODULE_PARM_DESC(passthrough, " frontend (for example, a device at 06:01.b will still appear at\n"\ " 06:01.b to the frontend). This is similar to how Xen 2.0.x\n"\ " exposed PCI devices to its driver domains. 
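One detail of the grant-DMA fixes above is easy to miss: when a mapping does not start on a page boundary, the number of pages it touches depends on the intra-page offset, not just the size. With 4 KiB pages, 200 bytes starting at offset 4000 span two pages even though PFN_UP(200) is 1, which is exactly the off-by-one the n_pages computation corrects. A sketch:

    #include <linux/mm.h>
    #include <linux/pfn.h>

    static unsigned int pages_spanned(dma_addr_t handle, size_t size)
    {
            unsigned long offset = handle & (PAGE_SIZE - 1);

            /* pages_spanned(0xfa0, 200) == 2 on 4 KiB pages */
            return PFN_UP(offset + size);
    }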
This may be required\n"\ - " for drivers which depend on finding their hardward in certain\n"\ + " for drivers which depend on finding their hardware in certain\n"\ " bus/slot locations."); static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev) @@ -951,16 +951,13 @@ static bool __get_reqs_available(struct kioctx *ctx) local_irq_save(flags); kcpu = this_cpu_ptr(ctx->cpu); if (!kcpu->reqs_available) { - int old, avail = atomic_read(&ctx->reqs_available); + int avail = atomic_read(&ctx->reqs_available); do { if (avail < ctx->req_batch) goto out; - - old = avail; - avail = atomic_cmpxchg(&ctx->reqs_available, - avail, avail - ctx->req_batch); - } while (avail != old); + } while (!atomic_try_cmpxchg(&ctx->reqs_available, + &avail, avail - ctx->req_batch)); kcpu->reqs_available += ctx->req_batch; } diff --git a/fs/buffer.c b/fs/buffer.c index b927f6981ad1..d9c6d1fbb6dd 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1453,19 +1453,15 @@ EXPORT_SYMBOL(set_bh_page); static void discard_buffer(struct buffer_head * bh) { - unsigned long b_state, b_state_old; + unsigned long b_state; lock_buffer(bh); clear_buffer_dirty(bh); bh->b_bdev = NULL; - b_state = bh->b_state; - for (;;) { - b_state_old = cmpxchg(&bh->b_state, b_state, - (b_state & ~BUFFER_FLAGS_DISCARD)); - if (b_state_old == b_state) - break; - b_state = b_state_old; - } + b_state = READ_ONCE(bh->b_state); + do { + } while (!try_cmpxchg(&bh->b_state, &b_state, + b_state & ~BUFFER_FLAGS_DISCARD)); unlock_buffer(bh); } diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 53cfe026b3ea..fb023f9fafcb 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -754,6 +754,7 @@ void ceph_add_cap(struct inode *inode, cap->issue_seq = seq; cap->mseq = mseq; cap->cap_gen = gen; + wake_up_all(&ci->i_cap_wq); } /* @@ -2285,7 +2286,7 @@ retry: struct ceph_mds_request *req; int i; - sessions = kzalloc(max_sessions * sizeof(s), GFP_KERNEL); + sessions = kcalloc(max_sessions, sizeof(s), GFP_KERNEL); if (!sessions) { err = -ENOMEM; goto out; @@ -2759,13 +2760,17 @@ again: * on transition from wanted -> needed caps. This is needed * for WRBUFFER|WR -> WR to avoid a new WR sync write from * going before a prior buffered writeback happens. + * + * For RDCACHE|RD -> RD, there is not need to wait and we can + * just exclude the revoking caps and force to sync read. 
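The fs/aio.c and fs/buffer.c hunks in this stretch (and the eventpoll one further on) are all the same mechanical conversion: an open-coded cmpxchg() retry loop becomes try_cmpxchg(), which on failure stores the freshly observed value back through its expected-value pointer, so the manual re-read and old/new comparison disappear. A minimal sketch of the resulting shape, assuming a plain atomic counter:

    #include <linux/atomic.h>

    static void take_batch(atomic_t *avail, int batch)
    {
            int old = atomic_read(avail);

            /*
             * Legacy loop:  cur = atomic_cmpxchg(avail, old, old - batch);
             *               if (cur == old) break; old = cur;
             * try_cmpxchg() folds the re-read into the failure path:
             */
            do {
            } while (!atomic_try_cmpxchg(avail, &old, old - batch));
    }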
*/ int not = want & ~(have & need); int revoking = implemented & ~have; + int exclude = revoking & not; dout("get_cap_refs %p have %s but not %s (revoking %s)\n", inode, ceph_cap_string(have), ceph_cap_string(not), ceph_cap_string(revoking)); - if ((revoking & not) == 0) { + if (!exclude || !(exclude & CEPH_CAP_FILE_BUFFER)) { if (!snap_rwsem_locked && !ci->i_head_snapc && (need & CEPH_CAP_FILE_WR)) { @@ -2787,7 +2792,7 @@ again: snap_rwsem_locked = true; } if ((have & want) == want) - *got = need | want; + *got = need | (want & ~exclude); else *got = need; ceph_take_cap_refs(ci, *got, true); @@ -3550,6 +3555,9 @@ static void handle_cap_grant(struct inode *inode, check_caps = 1; /* check auth cap only */ else check_caps = 2; /* check all caps */ + /* If there is new caps, try to wake up the waiters */ + if (~cap->issued & newcaps) + wake = true; cap->issued = newcaps; cap->implemented |= newcaps; } else if (cap->issued == newcaps) { diff --git a/fs/ceph/export.c b/fs/ceph/export.c index e0fa66ac8b9f..f780e4e0d062 100644 --- a/fs/ceph/export.c +++ b/fs/ceph/export.c @@ -181,6 +181,7 @@ struct inode *ceph_lookup_inode(struct super_block *sb, u64 ino) static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino) { struct inode *inode = __lookup_inode(sb, ino); + struct ceph_inode_info *ci = ceph_inode(inode); int err; if (IS_ERR(inode)) @@ -192,7 +193,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino) return ERR_PTR(err); } /* -ESTALE if inode as been unlinked and no file is open */ - if ((inode->i_nlink == 0) && (atomic_read(&inode->i_count) == 1)) { + if ((inode->i_nlink == 0) && !__ceph_is_file_opened(ci)) { iput(inode); return ERR_PTR(-ESTALE); } diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 42351d7a0dd6..9ebb7cee7978 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -2192,6 +2192,7 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr) inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied, &prealloc_cf); inode->i_ctime = attr->ia_ctime; + inode_inc_iversion_raw(inode); } release &= issued; @@ -2356,6 +2357,7 @@ int ceph_do_getvxattr(struct inode *inode, const char *name, void *value, goto out; } + req->r_feature_needed = CEPHFS_FEATURE_OP_GETVXATTR; req->r_path2 = kstrdup(name, GFP_NOFS); if (!req->r_path2) { err = -ENOMEM; @@ -2447,6 +2449,7 @@ int ceph_getattr(struct user_namespace *mnt_userns, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int flags) { struct inode *inode = d_inode(path->dentry); + struct super_block *sb = inode->i_sb; struct ceph_inode_info *ci = ceph_inode(inode); u32 valid_mask = STATX_BASIC_STATS; int err = 0; @@ -2476,16 +2479,34 @@ int ceph_getattr(struct user_namespace *mnt_userns, const struct path *path, } if (ceph_snap(inode) == CEPH_NOSNAP) - stat->dev = inode->i_sb->s_dev; + stat->dev = sb->s_dev; else stat->dev = ci->i_snapid_map ? 
ci->i_snapid_map->dev : 0; if (S_ISDIR(inode->i_mode)) { - if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), - RBYTES)) + if (ceph_test_mount_opt(ceph_sb_to_client(sb), RBYTES)) { stat->size = ci->i_rbytes; - else + } else if (ceph_snap(inode) == CEPH_SNAPDIR) { + struct ceph_inode_info *pci; + struct ceph_snap_realm *realm; + struct inode *parent; + + parent = ceph_lookup_inode(sb, ceph_ino(inode)); + if (!parent) + return PTR_ERR(parent); + + pci = ceph_inode(parent); + spin_lock(&pci->i_ceph_lock); + realm = pci->i_snap_realm; + if (realm) + stat->size = realm->num_snaps; + else + stat->size = 0; + spin_unlock(&pci->i_ceph_lock); + iput(parent); + } else { stat->size = ci->i_files + ci->i_subdirs; + } stat->blocks = 0; stat->blksize = 65536; /* diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 80f8b9ec1a31..26a0a8b9975e 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2318,6 +2318,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) INIT_LIST_HEAD(&req->r_unsafe_dir_item); INIT_LIST_HEAD(&req->r_unsafe_target_item); req->r_fmode = -1; + req->r_feature_needed = -1; kref_init(&req->r_kref); RB_CLEAR_NODE(&req->r_node); INIT_LIST_HEAD(&req->r_wait); @@ -2916,6 +2917,16 @@ static void __do_request(struct ceph_mds_client *mdsc, dout("do_request mds%d session %p state %s\n", mds, session, ceph_session_state_name(session->s_state)); + + /* + * The old ceph will crash the MDSs when see unknown OPs + */ + if (req->r_feature_needed > 0 && + !test_bit(req->r_feature_needed, &session->s_features)) { + err = -EOPNOTSUPP; + goto out_session; + } + if (session->s_state != CEPH_MDS_SESSION_OPEN && session->s_state != CEPH_MDS_SESSION_HUNG) { /* diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 256e3eada6c1..0598faa50e2e 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -31,8 +31,9 @@ enum ceph_feature_type { CEPHFS_FEATURE_METRIC_COLLECT, CEPHFS_FEATURE_ALTERNATE_NAME, CEPHFS_FEATURE_NOTIFY_SESSION_STATE, + CEPHFS_FEATURE_OP_GETVXATTR, - CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_NOTIFY_SESSION_STATE, + CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_OP_GETVXATTR, }; #define CEPHFS_FEATURES_CLIENT_SUPPORTED { \ @@ -44,6 +45,7 @@ enum ceph_feature_type { CEPHFS_FEATURE_DELEG_INO, \ CEPHFS_FEATURE_METRIC_COLLECT, \ CEPHFS_FEATURE_NOTIFY_SESSION_STATE, \ + CEPHFS_FEATURE_OP_GETVXATTR, \ } /* @@ -336,6 +338,8 @@ struct ceph_mds_request { long long r_dir_ordered_cnt; int r_readdir_cache_idx; + int r_feature_needed; + struct ceph_cap_reservation r_caps_reservation; }; diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 8b56b94e2f56..52954d4637b5 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1065,7 +1065,7 @@ static inline bool list_add_tail_lockless(struct list_head *new, * added to the list from another CPU: the winner observes * new->next == new. 
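The epoll hunk just below is one more instance of the try_cmpxchg() conversion sketched earlier. The long run of hfs and hfsplus diffs that follows it is a different campaign: kmap()/kunmap() become kmap_local_page()/kunmap_local(), and one-shot page copies or clears move to the memcpy_from_page()/memcpy_to_page()/memzero_page() helpers, which map, copy, flush and unmap in a single call. A minimal sketch of the discipline, on a hypothetical page:

    #include <linux/highmem.h>
    #include <linux/mm.h>

    static void touch_page(struct page *p, unsigned int off,
                           const void *src, size_t len)
    {
            /* one-shot copy: no explicit mapping needed at all */
            memcpy_to_page(p, off, src, len);

            /* in-place work: local mappings nest and must be released
             * in reverse order, via the returned address */
            {
                    u8 *va = kmap_local_page(p);

                    va[off] ^= 1;           /* illustrative in-place edit */
                    kunmap_local(va);
            }
            set_page_dirty(p);
    }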
*/ - if (cmpxchg(&new->next, new, head) != new) + if (!try_cmpxchg(&new->next, &new, head)) return false; /* diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c index c83fd0e8404d..2015e42e752a 100644 --- a/fs/hfs/bnode.c +++ b/fs/hfs/bnode.c @@ -21,7 +21,6 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) int pagenum; int bytes_read; int bytes_to_read; - void *vaddr; off += node->page_offset; pagenum = off >> PAGE_SHIFT; @@ -33,9 +32,7 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) page = node->page[pagenum]; bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off); - vaddr = kmap_atomic(page); - memcpy(buf + bytes_read, vaddr + off, bytes_to_read); - kunmap_atomic(vaddr); + memcpy_from_page(buf + bytes_read, page, off, bytes_to_read); pagenum++; off = 0; /* page offset only applies to the first page */ @@ -80,8 +77,7 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len) off += node->page_offset; page = node->page[0]; - memcpy(kmap(page) + off, buf, len); - kunmap(page); + memcpy_to_page(page, off, buf, len); set_page_dirty(page); } @@ -105,8 +101,7 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len) off += node->page_offset; page = node->page[0]; - memset(kmap(page) + off, 0, len); - kunmap(page); + memzero_page(page, off, len); set_page_dirty(page); } @@ -123,9 +118,7 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst, src_page = src_node->page[0]; dst_page = dst_node->page[0]; - memcpy(kmap(dst_page) + dst, kmap(src_page) + src, len); - kunmap(src_page); - kunmap(dst_page); + memcpy_page(dst_page, dst, src_page, src, len); set_page_dirty(dst_page); } @@ -140,9 +133,9 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) src += node->page_offset; dst += node->page_offset; page = node->page[0]; - ptr = kmap(page); + ptr = kmap_local_page(page); memmove(ptr + dst, ptr + src, len); - kunmap(page); + kunmap_local(ptr); set_page_dirty(page); } @@ -346,13 +339,14 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num) if (!test_bit(HFS_BNODE_NEW, &node->flags)) return node; - desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset); + desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) + + node->page_offset); node->prev = be32_to_cpu(desc->prev); node->next = be32_to_cpu(desc->next); node->num_recs = be16_to_cpu(desc->num_recs); node->type = desc->type; node->height = desc->height; - kunmap(node->page[0]); + kunmap_local(desc); switch (node->type) { case HFS_NODE_HEADER: @@ -436,14 +430,12 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num) } pagep = node->page; - memset(kmap(*pagep) + node->page_offset, 0, - min((int)PAGE_SIZE, (int)tree->node_size)); + memzero_page(*pagep, node->page_offset, + min((int)PAGE_SIZE, (int)tree->node_size)); set_page_dirty(*pagep); - kunmap(*pagep); for (i = 1; i < tree->pages_per_bnode; i++) { - memset(kmap(*++pagep), 0, PAGE_SIZE); + memzero_page(*++pagep, 0, PAGE_SIZE); set_page_dirty(*pagep); - kunmap(*pagep); } clear_bit(HFS_BNODE_NEW, &node->flags); wake_up(&node->lock_wq); diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c index 19017d296173..2fa4b1f8cc7f 100644 --- a/fs/hfs/btree.c +++ b/fs/hfs/btree.c @@ -80,7 +80,8 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke goto free_inode; /* Load the header */ - head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc)); + head = (struct hfs_btree_header_rec *)(kmap_local_page(page) 
+ + sizeof(struct hfs_bnode_desc)); tree->root = be32_to_cpu(head->root); tree->leaf_count = be32_to_cpu(head->leaf_count); tree->leaf_head = be32_to_cpu(head->leaf_head); @@ -119,11 +120,12 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke tree->node_size_shift = ffs(size) - 1; tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT; - kunmap(page); + kunmap_local(head); put_page(page); return tree; fail_page: + kunmap_local(head); put_page(page); free_inode: tree->inode->i_mapping->a_ops = &hfs_aops; @@ -169,7 +171,8 @@ void hfs_btree_write(struct hfs_btree *tree) return; /* Load the header */ page = node->page[0]; - head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc)); + head = (struct hfs_btree_header_rec *)(kmap_local_page(page) + + sizeof(struct hfs_bnode_desc)); head->root = cpu_to_be32(tree->root); head->leaf_count = cpu_to_be32(tree->leaf_count); @@ -180,7 +183,7 @@ void hfs_btree_write(struct hfs_btree *tree) head->attributes = cpu_to_be32(tree->attributes); head->depth = cpu_to_be16(tree->depth); - kunmap(page); + kunmap_local(head); set_page_dirty(page); hfs_bnode_put(node); } @@ -268,7 +271,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) off += node->page_offset; pagep = node->page + (off >> PAGE_SHIFT); - data = kmap(*pagep); + data = kmap_local_page(*pagep); off &= ~PAGE_MASK; idx = 0; @@ -281,7 +284,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) idx += i; data[off] |= m; set_page_dirty(*pagep); - kunmap(*pagep); + kunmap_local(data); tree->free_nodes--; mark_inode_dirty(tree->inode); hfs_bnode_put(node); @@ -290,14 +293,14 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) } } if (++off >= PAGE_SIZE) { - kunmap(*pagep); - data = kmap(*++pagep); + kunmap_local(data); + data = kmap_local_page(*++pagep); off = 0; } idx += 8; len--; } - kunmap(*pagep); + kunmap_local(data); nidx = node->next; if (!nidx) { printk(KERN_DEBUG "create new bmap node...\n"); @@ -313,7 +316,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) off = off16; off += node->page_offset; pagep = node->page + (off >> PAGE_SHIFT); - data = kmap(*pagep); + data = kmap_local_page(*pagep); off &= ~PAGE_MASK; } } @@ -360,20 +363,20 @@ void hfs_bmap_free(struct hfs_bnode *node) } off += node->page_offset + nidx / 8; page = node->page[off >> PAGE_SHIFT]; - data = kmap(page); + data = kmap_local_page(page); off &= ~PAGE_MASK; m = 1 << (~nidx & 7); byte = data[off]; if (!(byte & m)) { pr_crit("trying to free free bnode %u(%d)\n", node->this, node->type); - kunmap(page); + kunmap_local(data); hfs_bnode_put(node); return; } data[off] = byte & ~m; set_page_dirty(page); - kunmap(page); + kunmap_local(data); hfs_bnode_put(node); tree->free_nodes++; mark_inode_dirty(tree->inode); diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c index cebce0cfe340..bd8dcea85588 100644 --- a/fs/hfsplus/bitmap.c +++ b/fs/hfsplus/bitmap.c @@ -39,7 +39,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, start = size; goto out; } - pptr = kmap(page); + pptr = kmap_local_page(page); curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32; i = offset % 32; offset &= ~(PAGE_CACHE_BITS - 1); @@ -74,7 +74,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, } curr++; } - kunmap(page); + kunmap_local(pptr); offset += PAGE_CACHE_BITS; if (offset >= size) break; @@ -84,7 +84,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, start = size; goto out; } - curr = pptr = kmap(page); + curr = 
pptr = kmap_local_page(page); if ((size ^ offset) / PAGE_CACHE_BITS) end = pptr + PAGE_CACHE_BITS / 32; else @@ -127,7 +127,7 @@ found: len -= 32; } set_page_dirty(page); - kunmap(page); + kunmap_local(pptr); offset += PAGE_CACHE_BITS; page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); @@ -135,7 +135,7 @@ found: start = size; goto out; } - pptr = kmap(page); + pptr = kmap_local_page(page); curr = pptr; end = pptr + PAGE_CACHE_BITS / 32; } @@ -151,7 +151,7 @@ last: done: *curr = cpu_to_be32(n); set_page_dirty(page); - kunmap(page); + kunmap_local(pptr); *max = offset + (curr - pptr) * 32 + i - start; sbi->free_blocks -= *max; hfsplus_mark_mdb_dirty(sb); @@ -185,7 +185,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count) page = read_mapping_page(mapping, pnr, NULL); if (IS_ERR(page)) goto kaboom; - pptr = kmap(page); + pptr = kmap_local_page(page); curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32; end = pptr + PAGE_CACHE_BITS / 32; len = count; @@ -215,11 +215,11 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count) if (!count) break; set_page_dirty(page); - kunmap(page); + kunmap_local(pptr); page = read_mapping_page(mapping, ++pnr, NULL); if (IS_ERR(page)) goto kaboom; - pptr = kmap(page); + pptr = kmap_local_page(page); curr = pptr; end = pptr + PAGE_CACHE_BITS / 32; } @@ -231,7 +231,7 @@ done: } out: set_page_dirty(page); - kunmap(page); + kunmap_local(pptr); sbi->free_blocks += len; hfsplus_mark_mdb_dirty(sb); mutex_unlock(&sbi->alloc_mutex); diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c index a5ab00e54220..87974d5e6791 100644 --- a/fs/hfsplus/bnode.c +++ b/fs/hfsplus/bnode.c @@ -29,14 +29,12 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) off &= ~PAGE_MASK; l = min_t(int, len, PAGE_SIZE - off); - memcpy(buf, kmap(*pagep) + off, l); - kunmap(*pagep); + memcpy_from_page(buf, *pagep, off, l); while ((len -= l) != 0) { buf += l; l = min_t(int, len, PAGE_SIZE); - memcpy(buf, kmap(*++pagep), l); - kunmap(*pagep); + memcpy_from_page(buf, *++pagep, 0, l); } } @@ -82,16 +80,14 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len) off &= ~PAGE_MASK; l = min_t(int, len, PAGE_SIZE - off); - memcpy(kmap(*pagep) + off, buf, l); + memcpy_to_page(*pagep, off, buf, l); set_page_dirty(*pagep); - kunmap(*pagep); while ((len -= l) != 0) { buf += l; l = min_t(int, len, PAGE_SIZE); - memcpy(kmap(*++pagep), buf, l); + memcpy_to_page(*++pagep, 0, buf, l); set_page_dirty(*pagep); - kunmap(*pagep); } } @@ -112,15 +108,13 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len) off &= ~PAGE_MASK; l = min_t(int, len, PAGE_SIZE - off); - memset(kmap(*pagep) + off, 0, l); + memzero_page(*pagep, off, l); set_page_dirty(*pagep); - kunmap(*pagep); while ((len -= l) != 0) { l = min_t(int, len, PAGE_SIZE); - memset(kmap(*++pagep), 0, l); + memzero_page(*++pagep, 0, l); set_page_dirty(*pagep); - kunmap(*pagep); } } @@ -142,24 +136,20 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst, if (src == dst) { l = min_t(int, len, PAGE_SIZE - src); - memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l); - kunmap(*src_page); + memcpy_page(*dst_page, src, *src_page, src, l); set_page_dirty(*dst_page); - kunmap(*dst_page); while ((len -= l) != 0) { l = min_t(int, len, PAGE_SIZE); - memcpy(kmap(*++dst_page), kmap(*++src_page), l); - kunmap(*src_page); + memcpy_page(*++dst_page, 0, *++src_page, 0, l); set_page_dirty(*dst_page); - kunmap(*dst_page); } } else { void *src_ptr, *dst_ptr; do { - src_ptr 
= kmap(*src_page) + src; - dst_ptr = kmap(*dst_page) + dst; + dst_ptr = kmap_local_page(*dst_page) + dst; + src_ptr = kmap_local_page(*src_page) + src; if (PAGE_SIZE - src < PAGE_SIZE - dst) { l = PAGE_SIZE - src; src = 0; @@ -171,9 +161,9 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst, } l = min(len, l); memcpy(dst_ptr, src_ptr, l); - kunmap(*src_page); + kunmap_local(src_ptr); set_page_dirty(*dst_page); - kunmap(*dst_page); + kunmap_local(dst_ptr); if (!dst) dst_page++; else @@ -185,6 +175,7 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst, void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) { struct page **src_page, **dst_page; + void *src_ptr, *dst_ptr; int l; hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len); @@ -202,27 +193,28 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) if (src == dst) { while (src < len) { - memmove(kmap(*dst_page), kmap(*src_page), src); - kunmap(*src_page); + dst_ptr = kmap_local_page(*dst_page); + src_ptr = kmap_local_page(*src_page); + memmove(dst_ptr, src_ptr, src); + kunmap_local(src_ptr); set_page_dirty(*dst_page); - kunmap(*dst_page); + kunmap_local(dst_ptr); len -= src; src = PAGE_SIZE; src_page--; dst_page--; } src -= len; - memmove(kmap(*dst_page) + src, - kmap(*src_page) + src, len); - kunmap(*src_page); + dst_ptr = kmap_local_page(*dst_page); + src_ptr = kmap_local_page(*src_page); + memmove(dst_ptr + src, src_ptr + src, len); + kunmap_local(src_ptr); set_page_dirty(*dst_page); - kunmap(*dst_page); + kunmap_local(dst_ptr); } else { - void *src_ptr, *dst_ptr; - do { - src_ptr = kmap(*src_page) + src; - dst_ptr = kmap(*dst_page) + dst; + dst_ptr = kmap_local_page(*dst_page) + dst; + src_ptr = kmap_local_page(*src_page) + src; if (src < dst) { l = src; src = PAGE_SIZE; @@ -234,9 +226,9 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) } l = min(len, l); memmove(dst_ptr - l, src_ptr - l, l); - kunmap(*src_page); + kunmap_local(src_ptr); set_page_dirty(*dst_page); - kunmap(*dst_page); + kunmap_local(dst_ptr); if (dst == PAGE_SIZE) dst_page--; else @@ -251,26 +243,27 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) if (src == dst) { l = min_t(int, len, PAGE_SIZE - src); - memmove(kmap(*dst_page) + src, - kmap(*src_page) + src, l); - kunmap(*src_page); + + dst_ptr = kmap_local_page(*dst_page) + src; + src_ptr = kmap_local_page(*src_page) + src; + memmove(dst_ptr, src_ptr, l); + kunmap_local(src_ptr); set_page_dirty(*dst_page); - kunmap(*dst_page); + kunmap_local(dst_ptr); while ((len -= l) != 0) { l = min_t(int, len, PAGE_SIZE); - memmove(kmap(*++dst_page), - kmap(*++src_page), l); - kunmap(*src_page); + dst_ptr = kmap_local_page(*++dst_page); + src_ptr = kmap_local_page(*++src_page); + memmove(dst_ptr, src_ptr, l); + kunmap_local(src_ptr); set_page_dirty(*dst_page); - kunmap(*dst_page); + kunmap_local(dst_ptr); } } else { - void *src_ptr, *dst_ptr; - do { - src_ptr = kmap(*src_page) + src; - dst_ptr = kmap(*dst_page) + dst; + dst_ptr = kmap_local_page(*dst_page) + dst; + src_ptr = kmap_local_page(*src_page) + src; if (PAGE_SIZE - src < PAGE_SIZE - dst) { l = PAGE_SIZE - src; @@ -283,9 +276,9 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) } l = min(len, l); memmove(dst_ptr, src_ptr, l); - kunmap(*src_page); + kunmap_local(src_ptr); set_page_dirty(*dst_page); - kunmap(*dst_page); + kunmap_local(dst_ptr); if (!dst) dst_page++; else @@ -498,14 +491,14 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, 
u32 num) if (!test_bit(HFS_BNODE_NEW, &node->flags)) return node; - desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + - node->page_offset); + desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) + + node->page_offset); node->prev = be32_to_cpu(desc->prev); node->next = be32_to_cpu(desc->next); node->num_recs = be16_to_cpu(desc->num_recs); node->type = desc->type; node->height = desc->height; - kunmap(node->page[0]); + kunmap_local(desc); switch (node->type) { case HFS_NODE_HEADER: @@ -589,14 +582,12 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num) } pagep = node->page; - memset(kmap(*pagep) + node->page_offset, 0, - min_t(int, PAGE_SIZE, tree->node_size)); + memzero_page(*pagep, node->page_offset, + min_t(int, PAGE_SIZE, tree->node_size)); set_page_dirty(*pagep); - kunmap(*pagep); for (i = 1; i < tree->pages_per_bnode; i++) { - memset(kmap(*++pagep), 0, PAGE_SIZE); + memzero_page(*++pagep, 0, PAGE_SIZE); set_page_dirty(*pagep); - kunmap(*pagep); } clear_bit(HFS_BNODE_NEW, &node->flags); wake_up(&node->lock_wq); diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c index 66774f4cb4fd..9e1732a2b92a 100644 --- a/fs/hfsplus/btree.c +++ b/fs/hfsplus/btree.c @@ -163,7 +163,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id) goto free_inode; /* Load the header */ - head = (struct hfs_btree_header_rec *)(kmap(page) + + head = (struct hfs_btree_header_rec *)(kmap_local_page(page) + sizeof(struct hfs_bnode_desc)); tree->root = be32_to_cpu(head->root); tree->leaf_count = be32_to_cpu(head->leaf_count); @@ -240,11 +240,12 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id) (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT; - kunmap(page); + kunmap_local(head); put_page(page); return tree; fail_page: + kunmap_local(head); put_page(page); free_inode: tree->inode->i_mapping->a_ops = &hfsplus_aops; @@ -291,7 +292,7 @@ int hfs_btree_write(struct hfs_btree *tree) return -EIO; /* Load the header */ page = node->page[0]; - head = (struct hfs_btree_header_rec *)(kmap(page) + + head = (struct hfs_btree_header_rec *)(kmap_local_page(page) + sizeof(struct hfs_bnode_desc)); head->root = cpu_to_be32(tree->root); @@ -303,7 +304,7 @@ int hfs_btree_write(struct hfs_btree *tree) head->attributes = cpu_to_be32(tree->attributes); head->depth = cpu_to_be16(tree->depth); - kunmap(page); + kunmap_local(head); set_page_dirty(page); hfs_bnode_put(node); return 0; @@ -394,7 +395,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) off += node->page_offset; pagep = node->page + (off >> PAGE_SHIFT); - data = kmap(*pagep); + data = kmap_local_page(*pagep); off &= ~PAGE_MASK; idx = 0; @@ -407,7 +408,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) idx += i; data[off] |= m; set_page_dirty(*pagep); - kunmap(*pagep); + kunmap_local(data); tree->free_nodes--; mark_inode_dirty(tree->inode); hfs_bnode_put(node); @@ -417,14 +418,14 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) } } if (++off >= PAGE_SIZE) { - kunmap(*pagep); - data = kmap(*++pagep); + kunmap_local(data); + data = kmap_local_page(*++pagep); off = 0; } idx += 8; len--; } - kunmap(*pagep); + kunmap_local(data); nidx = node->next; if (!nidx) { hfs_dbg(BNODE_MOD, "create new bmap node\n"); @@ -440,7 +441,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) off = off16; off += node->page_offset; pagep = node->page + (off >> PAGE_SHIFT); - data = kmap(*pagep); + data = kmap_local_page(*pagep); off &= ~PAGE_MASK; } } @@ -490,7 +491,7 @@ void hfs_bmap_free(struct 
hfs_bnode *node) } off += node->page_offset + nidx / 8; page = node->page[off >> PAGE_SHIFT]; - data = kmap(page); + data = kmap_local_page(page); off &= ~PAGE_MASK; m = 1 << (~nidx & 7); byte = data[off]; @@ -498,13 +499,13 @@ void hfs_bmap_free(struct hfs_bnode *node) pr_crit("trying to free free bnode " "%u(%d)\n", node->this, node->type); - kunmap(page); + kunmap_local(data); hfs_bnode_put(node); return; } data[off] = byte & ~m; set_page_dirty(page); - kunmap(page); + kunmap_local(data); hfs_bnode_put(node); tree->free_nodes++; mark_inode_dirty(tree->inode); diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c index 59b03d74ecbe..c4da3f634b92 100644 --- a/fs/isofs/compress.c +++ b/fs/isofs/compress.c @@ -67,8 +67,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start, for ( i = 0 ; i < pcount ; i++ ) { if (!pages[i]) continue; - memset(page_address(pages[i]), 0, PAGE_SIZE); - flush_dcache_page(pages[i]); + memzero_page(pages[i], 0, PAGE_SIZE); SetPageUptodate(pages[i]); } return ((loff_t)pcount) << PAGE_SHIFT; @@ -120,7 +119,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start, zerr != Z_STREAM_END) { if (!stream.avail_out) { if (pages[curpage]) { - stream.next_out = page_address(pages[curpage]) + stream.next_out = kmap_local_page(pages[curpage]) + poffset; stream.avail_out = PAGE_SIZE - poffset; poffset = 0; @@ -176,6 +175,10 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start, flush_dcache_page(pages[curpage]); SetPageUptodate(pages[curpage]); } + if (stream.next_out != (unsigned char *)zisofs_sink_page) { + kunmap_local(stream.next_out); + stream.next_out = NULL; + } curpage++; } if (!stream.avail_in) @@ -183,6 +186,8 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start, } inflate_out: zlib_inflateEnd(&stream); + if (stream.next_out && stream.next_out != (unsigned char *)zisofs_sink_page) + kunmap_local(stream.next_out); z_eio: mutex_unlock(&zisofs_zlib_lock); @@ -283,9 +288,7 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount, } if (poffset && *pages) { - memset(page_address(*pages) + poffset, 0, - PAGE_SIZE - poffset); - flush_dcache_page(*pages); + memzero_page(*pages, poffset, PAGE_SIZE - poffset); SetPageUptodate(*pages); } return 0; @@ -343,10 +346,8 @@ static int zisofs_read_folio(struct file *file, struct folio *folio) for (i = 0; i < pcount; i++, index++) { if (i != full_page) pages[i] = grab_cache_page_nowait(mapping, index); - if (pages[i]) { + if (pages[i]) ClearPageError(pages[i]); - kmap(pages[i]); - } } err = zisofs_fill_pages(inode, full_page, pcount, pages); @@ -357,7 +358,6 @@ static int zisofs_read_folio(struct file *file, struct folio *folio) flush_dcache_page(pages[i]); if (i == full_page && err) SetPageError(pages[i]); - kunmap(pages[i]); unlock_page(pages[i]); if (i != full_page) put_page(pages[i]); diff --git a/fs/libfs.c b/fs/libfs.c index 31b0ddf01c31..682d56345a1c 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -15,6 +15,7 @@ #include <linux/mutex.h> #include <linux/namei.h> #include <linux/exportfs.h> +#include <linux/iversion.h> #include <linux/writeback.h> #include <linux/buffer_head.h> /* sync_mapping_buffers */ #include <linux/fs_context.h> @@ -1520,3 +1521,48 @@ void generic_set_encrypted_ci_d_ops(struct dentry *dentry) #endif } EXPORT_SYMBOL(generic_set_encrypted_ci_d_ops); + +/** + * inode_maybe_inc_iversion - increments i_version + * @inode: inode with the i_version that should be updated + * @force: 
increment the counter even if it's not necessary? + * + * Every time the inode is modified, the i_version field must be seen to have + * changed by any observer. + * + * If "force" is set or the QUERIED flag is set, then ensure that we increment + * the value, and clear the queried flag. + * + * In the common case where neither is set, then we can return "false" without + * updating i_version. + * + * If this function returns false, and no other metadata has changed, then we + * can avoid logging the metadata. + */ +bool inode_maybe_inc_iversion(struct inode *inode, bool force) +{ + u64 cur, new; + + /* + * The i_version field is not strictly ordered with any other inode + * information, but the legacy inode_inc_iversion code used a spinlock + * to serialize increments. + * + * Here, we add full memory barriers to ensure that any de-facto + * ordering with other info is preserved. + * + * This barrier pairs with the barrier in inode_query_iversion() + */ + smp_mb(); + cur = inode_peek_iversion_raw(inode); + do { + /* If flag is clear then we needn't do anything */ + if (!force && !(cur & I_VERSION_QUERIED)) + return false; + + /* Since lowest bit is flag, add 2 to avoid it */ + new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT; + } while (!atomic64_try_cmpxchg(&inode->i_version, &cur, new)); + return true; +} +EXPORT_SYMBOL(inode_maybe_inc_iversion); diff --git a/fs/nfs/file.c b/fs/nfs/file.c index e0b205b63986..d8ec889a4b3f 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -656,9 +656,9 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) goto out; } if (mntflags & NFS_MOUNT_WRITE_WAIT) { - result = filemap_fdatawait_range(file->f_mapping, - iocb->ki_pos - written, - iocb->ki_pos - 1); + filemap_fdatawait_range(file->f_mapping, + iocb->ki_pos - written, + iocb->ki_pos - 1); } result = generic_write_sync(iocb, written); if (result < 0) diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 7d285561e59f..1ec79ccf89ad 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -30,14 +30,20 @@ #define FF_LAYOUT_POLL_RETRY_MAX (15*HZ) #define FF_LAYOUTRETURN_MAXERR 20 +enum nfs4_ff_op_type { + NFS4_FF_OP_LAYOUTSTATS, + NFS4_FF_OP_LAYOUTRETURN, +}; + static unsigned short io_maxretrans; static const struct pnfs_commit_ops ff_layout_commit_ops; static void ff_layout_read_record_layoutstats_done(struct rpc_task *task, struct nfs_pgio_header *hdr); -static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo, +static int +ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo, struct nfs42_layoutstat_devinfo *devinfo, - int dev_limit); + int dev_limit, enum nfs4_ff_op_type type); static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr, const struct nfs42_layoutstat_devinfo *devinfo, struct nfs4_ff_layout_mirror *mirror); @@ -1373,6 +1379,11 @@ static int ff_layout_read_prepare_common(struct rpc_task *task, return -EIO; } + if (!pnfs_is_valid_lseg(hdr->lseg)) { + rpc_exit(task, -EAGAIN); + return -EAGAIN; + } + ff_layout_read_record_layoutstats_start(task, hdr); return 0; } @@ -1553,6 +1564,11 @@ static int ff_layout_write_prepare_common(struct rpc_task *task, return -EIO; } + if (!pnfs_is_valid_lseg(hdr->lseg)) { + rpc_exit(task, -EAGAIN); + return -EAGAIN; + } + ff_layout_write_record_layoutstats_start(task, hdr); return 0; } @@ -1645,15 +1661,23 @@ static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task, set_bit(NFS_LSEG_LAYOUTRETURN, 
&cdata->lseg->pls_flags); } -static void ff_layout_commit_prepare_common(struct rpc_task *task, - struct nfs_commit_data *cdata) +static int ff_layout_commit_prepare_common(struct rpc_task *task, + struct nfs_commit_data *cdata) { + if (!pnfs_is_valid_lseg(cdata->lseg)) { + rpc_exit(task, -EAGAIN); + return -EAGAIN; + } + ff_layout_commit_record_layoutstats_start(task, cdata); + return 0; } static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data) { - ff_layout_commit_prepare_common(task, data); + if (ff_layout_commit_prepare_common(task, data)) + return; + rpc_call_start(task); } @@ -1949,6 +1973,65 @@ ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages, ff_layout_initiate_commit); } +static bool ff_layout_match_rw(const struct rpc_task *task, + const struct nfs_pgio_header *hdr, + const struct pnfs_layout_segment *lseg) +{ + return hdr->lseg == lseg; +} + +static bool ff_layout_match_commit(const struct rpc_task *task, + const struct nfs_commit_data *cdata, + const struct pnfs_layout_segment *lseg) +{ + return cdata->lseg == lseg; +} + +static bool ff_layout_match_io(const struct rpc_task *task, const void *data) +{ + const struct rpc_call_ops *ops = task->tk_ops; + + if (ops == &ff_layout_read_call_ops_v3 || + ops == &ff_layout_read_call_ops_v4 || + ops == &ff_layout_write_call_ops_v3 || + ops == &ff_layout_write_call_ops_v4) + return ff_layout_match_rw(task, task->tk_calldata, data); + if (ops == &ff_layout_commit_call_ops_v3 || + ops == &ff_layout_commit_call_ops_v4) + return ff_layout_match_commit(task, task->tk_calldata, data); + return false; +} + +static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg) +{ + struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg); + struct nfs4_ff_layout_mirror *mirror; + struct nfs4_ff_layout_ds *mirror_ds; + struct nfs4_pnfs_ds *ds; + struct nfs_client *ds_clp; + struct rpc_clnt *clnt; + u32 idx; + + for (idx = 0; idx < flseg->mirror_array_cnt; idx++) { + mirror = flseg->mirror_array[idx]; + mirror_ds = mirror->mirror_ds; + if (!mirror_ds) + continue; + ds = mirror->mirror_ds->ds; + if (!ds) + continue; + ds_clp = ds->ds_clp; + if (!ds_clp) + continue; + clnt = ds_clp->cl_rpcclient; + if (!clnt) + continue; + if (!rpc_cancel_tasks(clnt, -EAGAIN, ff_layout_match_io, lseg)) + continue; + rpc_clnt_disconnect(clnt); + } +} + static struct pnfs_ds_commit_info * ff_layout_get_ds_info(struct inode *inode) { @@ -2161,8 +2244,9 @@ ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args) FF_LAYOUTRETURN_MAXERR); spin_lock(&args->inode->i_lock); - ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr, - &ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo)); + ff_args->num_dev = ff_layout_mirror_prepare_stats( + &ff_layout->generic_hdr, &ff_args->devinfo[0], + ARRAY_SIZE(ff_args->devinfo), NFS4_FF_OP_LAYOUTRETURN); spin_unlock(&args->inode->i_lock); args->ld_private->ops = &layoutreturn_ops; @@ -2396,7 +2480,7 @@ static const struct nfs4_xdr_opaque_ops layoutstat_ops = { static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo, struct nfs42_layoutstat_devinfo *devinfo, - int dev_limit) + int dev_limit, enum nfs4_ff_op_type type) { struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo); struct nfs4_ff_layout_mirror *mirror; @@ -2408,7 +2492,9 @@ ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo, break; if (IS_ERR_OR_NULL(mirror->mirror_ds)) continue; - if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags)) + if 
(!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, + &mirror->flags) && + type != NFS4_FF_OP_LAYOUTRETURN) continue; /* mirror refcount put in cleanup_layoutstats */ if (!refcount_inc_not_zero(&mirror->ref)) @@ -2448,7 +2534,9 @@ ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args) spin_lock(&args->inode->i_lock); ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout); args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr, - &args->devinfo[0], dev_count); + &args->devinfo[0], + dev_count, + NFS4_FF_OP_LAYOUTSTATS); spin_unlock(&args->inode->i_lock); if (!args->num_dev) { kfree(args->devinfo); @@ -2501,6 +2589,7 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = { .prepare_layoutreturn = ff_layout_prepare_layoutreturn, .sync = pnfs_nfs_generic_sync, .prepare_layoutstats = ff_layout_prepare_layoutstats, + .cancel_io = ff_layout_cancel_io, }; static int __init nfs4flexfilelayout_init(void) diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 8f3773dc38dd..6b2cfa59a1a2 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -313,7 +313,7 @@ struct nfs_find_desc { static int nfs_find_actor(struct inode *inode, void *opaque) { - struct nfs_find_desc *desc = (struct nfs_find_desc *)opaque; + struct nfs_find_desc *desc = opaque; struct nfs_fh *fh = desc->fh; struct nfs_fattr *fattr = desc->fattr; @@ -331,7 +331,7 @@ nfs_find_actor(struct inode *inode, void *opaque) static int nfs_init_locked(struct inode *inode, void *opaque) { - struct nfs_find_desc *desc = (struct nfs_find_desc *)opaque; + struct nfs_find_desc *desc = opaque; struct nfs_fattr *fattr = desc->fattr; set_nfs_fileid(inode, fattr->fileid); @@ -2267,7 +2267,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi) static void init_once(void *foo) { - struct nfs_inode *nfsi = (struct nfs_inode *) foo; + struct nfs_inode *nfsi = foo; inode_init_once(&nfsi->vfs_inode); INIT_LIST_HEAD(&nfsi->open_files); diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 898dd95bc7a7..d914d609b85b 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -435,7 +435,6 @@ extern void nfs_zap_acl_cache(struct inode *inode); extern void nfs_set_cache_invalid(struct inode *inode, unsigned long flags); extern bool nfs_check_cache_invalid(struct inode *, unsigned long); extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode); -extern int nfs_wait_atomic_killable(atomic_t *p, unsigned int mode); /* super.c */ extern const struct super_operations nfs_sops; @@ -503,7 +502,6 @@ extern void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, const struct nfs_pgio_completion_ops *compl_ops); extern void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio); extern void nfs_commit_free(struct nfs_commit_data *p); -extern void nfs_write_prepare(struct rpc_task *task, void *calldata); extern void nfs_commit_prepare(struct rpc_task *task, void *calldata); extern int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data, diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index d37e4a5401b1..13424f0d793b 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -1175,6 +1175,7 @@ static int _nfs42_proc_removexattr(struct inode *inode, const char *name) ret = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); + trace_nfs4_removexattr(inode, name, ret); if (!ret) nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0); @@ -1214,6 +1215,7 @@ static int _nfs42_proc_setxattr(struct inode *inode, const char *name, ret = nfs4_call_sync(server->client, 
server, &msg, &arg.seq_args, &res.seq_res, 1); + trace_nfs4_setxattr(inode, name, ret); for (; np > 0; np--) put_page(pages[np - 1]); @@ -1246,6 +1248,7 @@ static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name, ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0); + trace_nfs4_getxattr(inode, name, ret); if (ret < 0) return ret; @@ -1317,6 +1320,7 @@ static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf, ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0); + trace_nfs4_listxattr(inode, ret); if (ret >= 0) { ret = res.copied; diff --git a/fs/nfs/nfs42xattr.c b/fs/nfs/nfs42xattr.c index a9bf09fdf2c3..76ae11834206 100644 --- a/fs/nfs/nfs42xattr.c +++ b/fs/nfs/nfs42xattr.c @@ -981,7 +981,7 @@ nfs4_xattr_entry_count(struct shrinker *shrink, struct shrink_control *sc) static void nfs4_xattr_cache_init_once(void *p) { - struct nfs4_xattr_cache *cache = (struct nfs4_xattr_cache *)p; + struct nfs4_xattr_cache *cache = p; spin_lock_init(&cache->listxattr_lock); atomic_long_set(&cache->nent, 0); diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c index b56f05113d36..fe1aeb0f048f 100644 --- a/fs/nfs/nfs42xdr.c +++ b/fs/nfs/nfs42xdr.c @@ -569,6 +569,14 @@ static int decode_listxattrs(struct xdr_stream *xdr, */ if (status == -ETOOSMALL) status = -ERANGE; + /* + * Special case: for LISTXATTRS, NFS4ERR_NOXATTR + * should be translated to success with zero-length reply. + */ + if (status == -ENODATA) { + res->eof = true; + status = 0; + } goto out; } diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 79df6e83881b..400a71e75238 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -459,7 +459,6 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *); /* nfs4renewd.c */ extern void nfs4_schedule_state_renewal(struct nfs_client *); -extern void nfs4_renewd_prepare_shutdown(struct nfs_server *); extern void nfs4_kill_renewd(struct nfs_client *); extern void nfs4_renew_state(struct work_struct *); extern void nfs4_set_lease_period(struct nfs_client *clp, unsigned long lease); diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 3c5678aec006..7a5162afa5c0 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -254,7 +254,7 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init) goto error; ip_addr = (const char *)buf; } - strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr)); + strscpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr)); err = nfs_idmap_new(clp); if (err < 0) { diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c index ec6afd3c4bca..e3fdd2f45b01 100644 --- a/fs/nfs/nfs4idmap.c +++ b/fs/nfs/nfs4idmap.c @@ -583,7 +583,7 @@ static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux) struct request_key_auth *rka = get_request_key_auth(authkey); struct rpc_pipe_msg *msg; struct idmap_msg *im; - struct idmap *idmap = (struct idmap *)aux; + struct idmap *idmap = aux; struct key *key = rka->target_key; int ret = -ENOKEY; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 4553803538e5..e2efcd26336c 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -6608,7 +6608,7 @@ static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) struct nfs4_delegreturndata *d_data; struct pnfs_layout_hdr *lo; - d_data = (struct nfs4_delegreturndata *)data; + d_data = data; if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) { nfs4_sequence_done(task, &d_data->res.seq_res); @@ -8900,7 +8900,7 @@ int 
nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred) void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *data) { - struct nfs4_add_xprt_data *adata = (struct nfs4_add_xprt_data *)data; + struct nfs4_add_xprt_data *adata = data; struct rpc_task *task; int status; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 7e185f7eb260..c3503fb26fa2 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -497,8 +497,7 @@ nfs4_alloc_state_owner(struct nfs_server *server, sp = kzalloc(sizeof(*sp), gfp_flags); if (!sp) return NULL; - sp->so_seqid.owner_id = ida_simple_get(&server->openowner_id, 0, 0, - gfp_flags); + sp->so_seqid.owner_id = ida_alloc(&server->openowner_id, gfp_flags); if (sp->so_seqid.owner_id < 0) { kfree(sp); return NULL; @@ -534,7 +533,7 @@ static void nfs4_free_state_owner(struct nfs4_state_owner *sp) { nfs4_destroy_seqid_counter(&sp->so_seqid); put_cred(sp->so_cred); - ida_simple_remove(&sp->so_server->openowner_id, sp->so_seqid.owner_id); + ida_free(&sp->so_server->openowner_id, sp->so_seqid.owner_id); kfree(sp); } @@ -877,8 +876,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f refcount_set(&lsp->ls_count, 1); lsp->ls_state = state; lsp->ls_owner = fl_owner; - lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id, - 0, 0, GFP_KERNEL_ACCOUNT); + lsp->ls_seqid.owner_id = ida_alloc(&server->lockowner_id, GFP_KERNEL_ACCOUNT); if (lsp->ls_seqid.owner_id < 0) goto out_free; INIT_LIST_HEAD(&lsp->ls_locks); @@ -890,7 +888,7 @@ out_free: void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) { - ida_simple_remove(&server->lockowner_id, lsp->ls_seqid.owner_id); + ida_free(&server->lockowner_id, lsp->ls_seqid.owner_id); nfs4_destroy_seqid_counter(&lsp->ls_seqid); kfree(lsp); } diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h index 6ee6ad3674a2..2cff5901c689 100644 --- a/fs/nfs/nfs4trace.h +++ b/fs/nfs/nfs4trace.h @@ -2097,6 +2097,7 @@ TRACE_EVENT(ff_layout_commit_error, ) ); +#ifdef CONFIG_NFS_V4_2 TRACE_DEFINE_ENUM(NFS4_CONTENT_DATA); TRACE_DEFINE_ENUM(NFS4_CONTENT_HOLE); @@ -2105,7 +2106,6 @@ TRACE_DEFINE_ENUM(NFS4_CONTENT_HOLE); { NFS4_CONTENT_DATA, "DATA" }, \ { NFS4_CONTENT_HOLE, "HOLE" }) -#ifdef CONFIG_NFS_V4_2 TRACE_EVENT(nfs4_llseek, TP_PROTO( const struct inode *inode, @@ -2496,6 +2496,54 @@ TRACE_EVENT(nfs4_offload_cancel, __entry->stateid_seq, __entry->stateid_hash ) ); + +DECLARE_EVENT_CLASS(nfs4_xattr_event, + TP_PROTO( + const struct inode *inode, + const char *name, + int error + ), + + TP_ARGS(inode, name, error), + + TP_STRUCT__entry( + __field(unsigned long, error) + __field(dev_t, dev) + __field(u32, fhandle) + __field(u64, fileid) + __string(name, name) + ), + + TP_fast_assign( + __entry->error = error < 0 ? 
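[Note] The nfs4state.c hunks above are part of the tree-wide retirement of the ida_simple_*() wrappers. ida_simple_get(ida, 0, 0, gfp) and ida_alloc(ida, gfp) behave identically (both hand back the smallest free ID starting from 0), so the substitution is one-for-one; a caller that needs a bounded range would reach for ida_alloc_range() or ida_alloc_max() instead. The modern pattern, as a self-contained sketch:

    #include <linux/idr.h>

    static DEFINE_IDA(demo_owner_ida);

    static int demo_owner_id_get(void)
    {
            /* Smallest free ID >= 0, or -ENOMEM / -ENOSPC on failure. */
            return ida_alloc(&demo_owner_ida, GFP_KERNEL);
    }

    static void demo_owner_id_put(int id)
    {
            ida_free(&demo_owner_ida, id);
    }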
-error : 0; + __entry->dev = inode->i_sb->s_dev; + __entry->fileid = NFS_FILEID(inode); + __entry->fhandle = nfs_fhandle_hash(NFS_FH(inode)); + __assign_str(name, name); + ), + + TP_printk( + "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x " + "name=%s", + -__entry->error, show_nfs4_status(__entry->error), + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long long)__entry->fileid, + __entry->fhandle, __get_str(name) + ) +); +#define DEFINE_NFS4_XATTR_EVENT(name) \ + DEFINE_EVENT(nfs4_xattr_event, name, \ + TP_PROTO( \ + const struct inode *inode, \ + const char *name, \ + int error \ + ), \ + TP_ARGS(inode, name, error)) +DEFINE_NFS4_XATTR_EVENT(nfs4_getxattr); +DEFINE_NFS4_XATTR_EVENT(nfs4_setxattr); +DEFINE_NFS4_XATTR_EVENT(nfs4_removexattr); + +DEFINE_NFS4_INODE_EVENT(nfs4_listxattr); #endif /* CONFIG_NFS_V4_2 */ #endif /* CONFIG_NFS_V4_1 */ diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c index fa148308822c..620329b7e6ae 100644 --- a/fs/nfs/nfsroot.c +++ b/fs/nfs/nfsroot.c @@ -139,7 +139,7 @@ static int __init nfs_root_setup(char *line) ROOT_DEV = Root_NFS; if (line[0] == '/' || line[0] == ',' || (line[0] >= '0' && line[0] <= '9')) { - strlcpy(nfs_root_parms, line, sizeof(nfs_root_parms)); + strscpy(nfs_root_parms, line, sizeof(nfs_root_parms)); } else { size_t n = strlen(line) + sizeof(NFS_ROOT) - 1; if (n >= sizeof(nfs_root_parms)) diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 1f2801bfecd1..a5db5158c634 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -710,6 +710,7 @@ pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo, u32 seq) { struct pnfs_layout_segment *lseg, *next; + struct nfs_server *server = NFS_SERVER(lo->plh_inode); int remaining = 0; dprintk("%s:Begin lo %p\n", __func__, lo); @@ -722,8 +723,10 @@ pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo, "offset %llu length %llu\n", __func__, lseg, lseg->pls_range.iomode, lseg->pls_seq, lseg->pls_range.offset, lseg->pls_range.length); - if (!mark_lseg_invalid(lseg, tmp_list)) - remaining++; + if (mark_lseg_invalid(lseg, tmp_list)) + continue; + remaining++; + pnfs_lseg_cancel_io(server, lseg); } dprintk("%s:Return %i\n", __func__, remaining); return remaining; @@ -2485,6 +2488,7 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo, u32 seq) { struct pnfs_layout_segment *lseg, *next; + struct nfs_server *server = NFS_SERVER(lo->plh_inode); int remaining = 0; dprintk("%s:Begin lo %p\n", __func__, lo); @@ -2507,6 +2511,7 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo, continue; remaining++; set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags); + pnfs_lseg_cancel_io(server, lseg); } if (remaining) { diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index f331f067691b..e3e6a41f19de 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -169,6 +169,8 @@ struct pnfs_layoutdriver_type { void (*cleanup_layoutcommit) (struct nfs4_layoutcommit_data *data); int (*prepare_layoutcommit) (struct nfs4_layoutcommit_args *args); int (*prepare_layoutstats) (struct nfs42_layoutstat_args *args); + + void (*cancel_io)(struct pnfs_layout_segment *lseg); }; struct pnfs_commit_ops { @@ -685,6 +687,13 @@ pnfs_lseg_request_intersecting(struct pnfs_layout_segment *lseg, struct nfs_page req_offset(req), req_last); } +static inline void pnfs_lseg_cancel_io(struct nfs_server *server, + struct pnfs_layout_segment *lseg) +{ + if (server->pnfs_curr_ld->cancel_io) + server->pnfs_curr_ld->cancel_io(lseg); +} + extern unsigned int layoutstats_timer; #ifdef NFS_DEBUG diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 
657c242a18ff..987c88ddeaf0 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -374,12 +374,12 @@ pnfs_bucket_search_commit_reqs(struct pnfs_commit_bucket *buckets, return NULL; } -/* pnfs_generic_search_commit_reqs - Search lists in @cinfo for the head reqest +/* pnfs_generic_search_commit_reqs - Search lists in @cinfo for the head request * for @page * @cinfo - commit info for current inode * @page - page to search for matching head request * - * Returns a the head request if one is found, otherwise returns NULL. + * Return: the head request if one is found, otherwise %NULL. */ struct nfs_page * pnfs_generic_search_commit_reqs(struct nfs_commit_info *cinfo, struct page *page) diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 9f4d9432d38a..b9d15c3df3cc 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -1668,8 +1668,7 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key) maxkey = nilfs_btree_node_get_key(node, nchildren - 1); nextmaxkey = (nchildren > 1) ? nilfs_btree_node_get_key(node, nchildren - 2) : 0; - if (bh != NULL) - brelse(bh); + brelse(bh); return (maxkey == key) && (nextmaxkey < NILFS_BMAP_LARGE_LOW); } @@ -1717,8 +1716,7 @@ static int nilfs_btree_gather_data(struct nilfs_bmap *btree, ptrs[i] = le64_to_cpu(dptrs[i]); } - if (bh != NULL) - brelse(bh); + brelse(bh); return nitems; } diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 67f63cfeade5..232dd7b6cca1 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -328,6 +328,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode) struct inode *inode; struct nilfs_inode_info *ii; struct nilfs_root *root; + struct buffer_head *bh; int err = -ENOMEM; ino_t ino; @@ -343,11 +344,25 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode) ii->i_state = BIT(NILFS_I_NEW); ii->i_root = root; - err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh); + err = nilfs_ifile_create_inode(root->ifile, &ino, &bh); if (unlikely(err)) goto failed_ifile_create_inode; /* reference count of i_bh inherits from nilfs_mdt_read_block() */ + if (unlikely(ino < NILFS_USER_INO)) { + nilfs_warn(sb, + "inode bitmap is inconsistent for reserved inodes"); + do { + brelse(bh); + err = nilfs_ifile_create_inode(root->ifile, &ino, &bh); + if (unlikely(err)) + goto failed_ifile_create_inode; + } while (ino < NILFS_USER_INO); + + nilfs_info(sb, "repaired inode bitmap for reserved inodes"); + } + ii->i_bh = bh; + atomic64_inc(&root->inodes_count); inode_init_owner(&init_user_ns, inode, dir, mode); inode->i_ino = ino; @@ -440,6 +455,8 @@ int nilfs_read_inode_common(struct inode *inode, inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec); inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); + if (nilfs_is_metadata_file_inode(inode) && !S_ISREG(inode->i_mode)) + return -EIO; /* this inode is for metadata and corrupted */ if (inode->i_nlink == 0) return -ESTALE; /* this inode is deleted */ diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 0afe0832c754..b4cebad21b48 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -875,9 +875,11 @@ static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci) nilfs_mdt_mark_dirty(nilfs->ns_cpfile); nilfs_cpfile_put_checkpoint( nilfs->ns_cpfile, nilfs->ns_cno, bh_cp); - } else - WARN_ON(err == -EINVAL || err == -ENOENT); - + } else if (err == -EINVAL || err == -ENOENT) { + nilfs_error(sci->sc_super, + "checkpoint creation failed due to metadata 
corruption."); + err = -EIO; + } return err; } @@ -891,7 +893,11 @@ static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci) err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0, &raw_cp, &bh_cp); if (unlikely(err)) { - WARN_ON(err == -EINVAL || err == -ENOENT); + if (err == -EINVAL || err == -ENOENT) { + nilfs_error(sci->sc_super, + "checkpoint finalization failed due to metadata corruption."); + err = -EIO; + } goto failed_ibh; } raw_cp->cp_snapshot_list.ssl_next = 0; @@ -2235,7 +2241,6 @@ int nilfs_construct_segment(struct super_block *sb) struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_sc_info *sci = nilfs->ns_writer; struct nilfs_transaction_info *ti; - int err; if (!sci) return -EROFS; @@ -2243,8 +2248,7 @@ int nilfs_construct_segment(struct super_block *sb) /* A call inside transactions causes a deadlock. */ BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC); - err = nilfs_segctor_sync(sci); - return err; + return nilfs_segctor_sync(sci); } /** @@ -2786,10 +2790,9 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root) inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL); err = nilfs_segctor_start_thread(nilfs->ns_writer); - if (err) { - kfree(nilfs->ns_writer); - nilfs->ns_writer = NULL; - } + if (unlikely(err)) + nilfs_detach_log_writer(sb); + return err; } diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c index 52615e6090e1..a3865bc4a0c6 100644 --- a/fs/ntfs/attrib.c +++ b/fs/ntfs/attrib.c @@ -594,17 +594,37 @@ static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name, for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) { u8 *mrec_end = (u8 *)ctx->mrec + le32_to_cpu(ctx->mrec->bytes_allocated); - u8 *name_end = (u8 *)a + le16_to_cpu(a->name_offset) + - a->name_length * sizeof(ntfschar); - if ((u8*)a < (u8*)ctx->mrec || (u8*)a > mrec_end || - name_end > mrec_end) + u8 *name_end; + + /* check whether ATTR_RECORD wrap */ + if ((u8 *)a < (u8 *)ctx->mrec) + break; + + /* check whether Attribute Record Header is within bounds */ + if ((u8 *)a > mrec_end || + (u8 *)a + sizeof(ATTR_RECORD) > mrec_end) + break; + + /* check whether ATTR_RECORD's name is within bounds */ + name_end = (u8 *)a + le16_to_cpu(a->name_offset) + + a->name_length * sizeof(ntfschar); + if (name_end > mrec_end) break; + ctx->attr = a; if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) || a->type == AT_END)) return -ENOENT; if (unlikely(!a->length)) break; + + /* check whether ATTR_RECORD's length wrap */ + if ((u8 *)a + le32_to_cpu(a->length) < (u8 *)a) + break; + /* check whether ATTR_RECORD's length is within bounds */ + if ((u8 *)a + le32_to_cpu(a->length) > mrec_end) + break; + if (a->type != type) continue; /* diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index db0f1995aedd..08c659332e26 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c @@ -1829,6 +1829,13 @@ int ntfs_read_inode_mount(struct inode *vi) goto err_out; } + /* Sanity check offset to the first attribute */ + if (le16_to_cpu(m->attrs_offset) >= le32_to_cpu(m->bytes_allocated)) { + ntfs_error(sb, "Incorrect mft offset to the first attribute %u in superblock.", + le16_to_cpu(m->attrs_offset)); + goto err_out; + } + /* Need this to sanity check attribute list references to $MFT. 
*/ vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number); diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 638d875eccc7..7aebdbf5cc0a 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -527,7 +527,7 @@ struct ocfs2_extent_block * value -1 (0xFFFF) is OCFS2_INVALID_SLOT. This marks a slot empty. */ struct ocfs2_slot_map { -/*00*/ __le16 sm_slots[0]; +/*00*/ DECLARE_FLEX_ARRAY(__le16, sm_slots); /* * Actual on-disk size is one block. OCFS2_MAX_SLOTS is 255, * 255 * sizeof(__le16) == 512B, within the 512B block minimum blocksize. @@ -548,7 +548,7 @@ struct ocfs2_extended_slot { * i_size. */ struct ocfs2_slot_map_extended { -/*00*/ struct ocfs2_extended_slot se_slots[0]; +/*00*/ DECLARE_FLEX_ARRAY(struct ocfs2_extended_slot, se_slots); /* * Actual size is i_size of the slot_map system file. It should * match s_max_slots * sizeof(struct ocfs2_extended_slot) @@ -727,7 +727,7 @@ struct ocfs2_dinode { struct ocfs2_extent_list i_list; struct ocfs2_truncate_log i_dealloc; struct ocfs2_inline_data i_data; - __u8 i_symlink[0]; + DECLARE_FLEX_ARRAY(__u8, i_symlink); } id2; /* Actual on-disk size is one block */ }; @@ -892,7 +892,7 @@ struct ocfs2_group_desc /*30*/ struct ocfs2_block_check bg_check; /* Error checking */ __le64 bg_reserved2; /*40*/ union { - __u8 bg_bitmap[0]; + DECLARE_FLEX_ARRAY(__u8, bg_bitmap); struct { /* * Block groups may be discontiguous when diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 1358981e80a3..623db358b1ef 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -2614,7 +2614,7 @@ static inline unsigned int ocfs2_cow_align_length(struct super_block *sb, } /* - * Calculate out the start and number of virtual clusters we need to to CoW. + * Calculate out the start and number of virtual clusters we need to CoW. * * cpos is vitual start cluster position we want to do CoW in a * file and write_len is the cluster length. diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c index dd77b7aaabf5..317126261523 100644 --- a/fs/ocfs2/stackglue.c +++ b/fs/ocfs2/stackglue.c @@ -334,10 +334,10 @@ int ocfs2_cluster_connect(const char *stack_name, goto out; } - strlcpy(new_conn->cc_name, group, GROUP_NAME_MAX + 1); + strscpy(new_conn->cc_name, group, GROUP_NAME_MAX + 1); new_conn->cc_namelen = grouplen; if (cluster_name_len) - strlcpy(new_conn->cc_cluster_name, cluster_name, + strscpy(new_conn->cc_cluster_name, cluster_name, CLUSTER_NAME_MAX + 1); new_conn->cc_cluster_name_len = cluster_name_len; new_conn->cc_recovery_handler = recovery_handler; diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h index 5805a03d100b..9c74eace3adc 100644 --- a/fs/ocfs2/suballoc.h +++ b/fs/ocfs2/suballoc.h @@ -106,7 +106,7 @@ int ocfs2_claim_clusters(handle_t *handle, u32 *cluster_start, u32 *num_clusters); /* - * Use this variant of ocfs2_claim_clusters to specify a maxiumum + * Use this variant of ocfs2_claim_clusters to specify a maximum * number of clusters smaller than the allocation reserved. 
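[Note] The ocfs2 on-disk structures above trade their zero-length arrays for DECLARE_FLEX_ARRAY(). A standard C99 flexible member ("__u8 data[];") is rejected by the compiler when it is the only member of a struct or sits inside a union, which is exactly the situation for ocfs2_slot_map and the id2/bg_bitmap unions; the macro wraps the member in an anonymous struct so it compiles while staying visible to fortified bounds checking. A minimal sketch:

    #include <linux/stddef.h>
    #include <linux/types.h>

    struct demo_block {
            __le32 magic;
            union {
                    struct { __le16 inline_len; } hdr;
                    /* "__u8 data[];" alone would not compile in a union: */
                    DECLARE_FLEX_ARRAY(__u8, data);
            } u;
    };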
*/ int __ocfs2_claim_clusters(handle_t *handle, diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 26b4c2bfee49..42c993e53924 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -2219,7 +2219,7 @@ static int ocfs2_initialize_super(struct super_block *sb, goto out_journal; } - strlcpy(osb->vol_label, di->id2.i_super.s_label, + strscpy(osb->vol_label, di->id2.i_super.s_label, OCFS2_MAX_VOL_LABEL_LEN); osb->root_blkno = le64_to_cpu(di->id2.i_super.s_root_blkno); osb->system_dir_blkno = le64_to_cpu(di->id2.i_super.s_system_dir_blkno); diff --git a/fs/orangefs/dir.c b/fs/orangefs/dir.c index e2c2699d8016..9cacce5d55c1 100644 --- a/fs/orangefs/dir.c +++ b/fs/orangefs/dir.c @@ -398,7 +398,7 @@ static int orangefs_dir_release(struct inode *inode, struct file *file) const struct file_operations orangefs_dir_operations = { .llseek = orangefs_dir_llseek, .read = generic_read_dir, - .iterate = orangefs_dir_iterate, + .iterate_shared = orangefs_dir_iterate, .open = orangefs_dir_open, .release = orangefs_dir_release }; diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig index c930001056f9..32b1116ae137 100644 --- a/fs/proc/Kconfig +++ b/fs/proc/Kconfig @@ -92,6 +92,7 @@ config PROC_PAGE_MONITOR config PROC_CHILDREN bool "Include /proc/<pid>/task/<tid>/children file" + depends on PROC_FS default n help Provides a fast way to retrieve first level children pids of a task. See diff --git a/fs/proc/devices.c b/fs/proc/devices.c index 837971e74109..fe7bfcb7d049 100644 --- a/fs/proc/devices.c +++ b/fs/proc/devices.c @@ -4,6 +4,7 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/blkdev.h> +#include "internal.h" static int devinfo_show(struct seq_file *f, void *v) { @@ -54,7 +55,10 @@ static const struct seq_operations devinfo_ops = { static int __init proc_devices_init(void) { - proc_create_seq("devices", 0, NULL, &devinfo_ops); + struct proc_dir_entry *pde; + + pde = proc_create_seq("devices", 0, NULL, &devinfo_ops); + pde_make_permanent(pde); return 0; } fs_initcall(proc_devices_init); diff --git a/fs/proc/internal.h b/fs/proc/internal.h index f03000764ce5..b701d0207edf 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -79,6 +79,11 @@ static inline bool pde_is_permanent(const struct proc_dir_entry *pde) return pde->flags & PROC_ENTRY_PERMANENT; } +static inline void pde_make_permanent(struct proc_dir_entry *pde) +{ + pde->flags |= PROC_ENTRY_PERMANENT; +} + extern struct kmem_cache *proc_dir_entry_cache; void pde_free(struct proc_dir_entry *pde); diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c index f32878d9a39f..817981e57223 100644 --- a/fs/proc/loadavg.c +++ b/fs/proc/loadavg.c @@ -9,6 +9,7 @@ #include <linux/seq_file.h> #include <linux/seqlock.h> #include <linux/time.h> +#include "internal.h" static int loadavg_proc_show(struct seq_file *m, void *v) { @@ -27,7 +28,10 @@ static int loadavg_proc_show(struct seq_file *m, void *v) static int __init proc_loadavg_init(void) { - proc_create_single("loadavg", 0, NULL, loadavg_proc_show); + struct proc_dir_entry *pde; + + pde = proc_create_single("loadavg", 0, NULL, loadavg_proc_show); + pde_make_permanent(pde); return 0; } fs_initcall(proc_loadavg_init); diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 208efd4fa52c..5101131e6047 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -164,7 +164,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v) static int __init proc_meminfo_init(void) { - proc_create_single("meminfo", 0, NULL, meminfo_proc_show); + struct proc_dir_entry *pde; + + pde = 
proc_create_single("meminfo", 0, NULL, meminfo_proc_show); + pde_make_permanent(pde); return 0; } fs_initcall(proc_meminfo_init); diff --git a/fs/proc/page.c b/fs/proc/page.c index a2873a617ae8..f2273b164535 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -91,6 +91,7 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf, } static const struct proc_ops kpagecount_proc_ops = { + .proc_flags = PROC_ENTRY_PERMANENT, .proc_lseek = mem_lseek, .proc_read = kpagecount_read, }; @@ -268,6 +269,7 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf, } static const struct proc_ops kpageflags_proc_ops = { + .proc_flags = PROC_ENTRY_PERMANENT, .proc_lseek = mem_lseek, .proc_read = kpageflags_read, }; @@ -322,6 +324,7 @@ static ssize_t kpagecgroup_read(struct file *file, char __user *buf, } static const struct proc_ops kpagecgroup_proc_ops = { + .proc_flags = PROC_ENTRY_PERMANENT, .proc_lseek = mem_lseek, .proc_read = kpagecgroup_read, }; diff --git a/fs/proc/softirqs.c b/fs/proc/softirqs.c index 12901dcf57e2..f4616083faef 100644 --- a/fs/proc/softirqs.c +++ b/fs/proc/softirqs.c @@ -3,6 +3,7 @@ #include <linux/kernel_stat.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> +#include "internal.h" /* * /proc/softirqs ... display the number of softirqs @@ -27,7 +28,10 @@ static int show_softirqs(struct seq_file *p, void *v) static int __init proc_softirqs_init(void) { - proc_create_single("softirqs", 0, NULL, show_softirqs); + struct proc_dir_entry *pde; + + pde = proc_create_single("softirqs", 0, NULL, show_softirqs); + pde_make_permanent(pde); return 0; } fs_initcall(proc_softirqs_init); diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c index deb99bc9b7e6..b5343d209381 100644 --- a/fs/proc/uptime.c +++ b/fs/proc/uptime.c @@ -7,6 +7,7 @@ #include <linux/time.h> #include <linux/time_namespace.h> #include <linux/kernel_stat.h> +#include "internal.h" static int uptime_proc_show(struct seq_file *m, void *v) { @@ -39,7 +40,10 @@ static int uptime_proc_show(struct seq_file *m, void *v) static int __init proc_uptime_init(void) { - proc_create_single("uptime", 0, NULL, uptime_proc_show); + struct proc_dir_entry *pde; + + pde = proc_create_single("uptime", 0, NULL, uptime_proc_show); + pde_make_permanent(pde); return 0; } fs_initcall(proc_uptime_init); diff --git a/fs/proc/version.c b/fs/proc/version.c index b449f186577f..02e3c3cd4a9a 100644 --- a/fs/proc/version.c +++ b/fs/proc/version.c @@ -5,6 +5,7 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/utsname.h> +#include "internal.h" static int version_proc_show(struct seq_file *m, void *v) { @@ -17,7 +18,10 @@ static int version_proc_show(struct seq_file *m, void *v) static int __init proc_version_init(void) { - proc_create_single("version", 0, NULL, version_proc_show); + struct proc_dir_entry *pde; + + pde = proc_create_single("version", 0, NULL, version_proc_show); + pde_make_permanent(pde); return 0; } fs_initcall(proc_version_init); diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c index b9895afca9d1..85b2fa3b211c 100644 --- a/fs/qnx6/inode.c +++ b/fs/qnx6/inode.c @@ -470,10 +470,8 @@ out2: out1: iput(sbi->inodes); out: - if (bh1) - brelse(bh1); - if (bh2) - brelse(bh2); + brelse(bh1); + brelse(bh2); outnobh: kfree(qs); s->s_fs_info = NULL; diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c index 4a7cb16e9345..3dba8acf4e83 100644 --- a/fs/reiserfs/procfs.c +++ b/fs/reiserfs/procfs.c @@ -411,7 +411,7 @@ int reiserfs_proc_info_init(struct super_block *sb) char *s; /* Some block devices use /'s 
*/ - strlcpy(b, sb->s_id, BDEVNAME_SIZE); + strscpy(b, sb->s_id, BDEVNAME_SIZE); s = strchr(b, '/'); if (s) *s = '!'; @@ -441,7 +441,7 @@ int reiserfs_proc_info_done(struct super_block *sb) char *s; /* Some block devices use /'s */ - strlcpy(b, sb->s_id, BDEVNAME_SIZE); + strscpy(b, sb->s_id, BDEVNAME_SIZE); s = strchr(b, '/'); if (s) *s = '!'; diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h index df30f11b4a46..699650f81970 100644 --- a/include/asm-generic/unaligned.h +++ b/include/asm-generic/unaligned.h @@ -126,7 +126,7 @@ static inline void put_unaligned_le24(const u32 val, void *p) __put_unaligned_le24(val, p); } -static inline void __put_unaligned_be48(const u64 val, __u8 *p) +static inline void __put_unaligned_be48(const u64 val, u8 *p) { *p++ = val >> 40; *p++ = val >> 32; diff --git a/include/kunit/assert.h b/include/kunit/assert.h index 4b52e12c2ae8..ace3de8d1ee7 100644 --- a/include/kunit/assert.h +++ b/include/kunit/assert.h @@ -42,16 +42,15 @@ struct kunit_loc { /** * struct kunit_assert - Data for printing a failed assertion or expectation. - * @format: a function which formats the data in this kunit_assert to a string. * * Represents a failed expectation/assertion. Contains all the data necessary to * format a string to a user reporting the failure. */ -struct kunit_assert { - void (*format)(const struct kunit_assert *assert, - const struct va_format *message, - struct string_stream *stream); -}; +struct kunit_assert {}; + +typedef void (*assert_format_t)(const struct kunit_assert *assert, + const struct va_format *message, + struct string_stream *stream); void kunit_assert_prologue(const struct kunit_loc *loc, enum kunit_assert_type type, @@ -72,16 +71,6 @@ void kunit_fail_assert_format(const struct kunit_assert *assert, struct string_stream *stream); /** - * KUNIT_INIT_FAIL_ASSERT_STRUCT - Initializer for &struct kunit_fail_assert. - * - * Initializes a &struct kunit_fail_assert. Intended to be used in - * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros. - */ -#define KUNIT_INIT_FAIL_ASSERT_STRUCT { \ - .assert = { .format = kunit_fail_assert_format }, \ -} - -/** * struct kunit_unary_assert - Represents a KUNIT_{EXPECT|ASSERT}_{TRUE|FALSE} * @assert: The parent of this type. * @condition: A string representation of a conditional expression. @@ -110,7 +99,6 @@ void kunit_unary_assert_format(const struct kunit_assert *assert, * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros. */ #define KUNIT_INIT_UNARY_ASSERT_STRUCT(cond, expect_true) { \ - .assert = { .format = kunit_unary_assert_format }, \ .condition = cond, \ .expected_true = expect_true \ } @@ -145,7 +133,6 @@ void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert, * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros. */ #define KUNIT_INIT_PTR_NOT_ERR_STRUCT(txt, val) { \ - .assert = { .format = kunit_ptr_not_err_assert_format }, \ .text = txt, \ .value = val \ } @@ -190,7 +177,6 @@ void kunit_binary_assert_format(const struct kunit_assert *assert, * KUNIT_INIT_BINARY_ASSERT_STRUCT() - Initializes a binary assert like * kunit_binary_assert, kunit_binary_ptr_assert, etc. * - * @format_func: a function which formats the assert to a string. * @text_: Pointer to a kunit_binary_assert_text. * @left_val: The actual evaluated value of the expression in the left slot. * @right_val: The actual evaluated value of the expression in the right slot. @@ -200,11 +186,9 @@ void kunit_binary_assert_format(const struct kunit_assert *assert, * fields but with different types for left_val/right_val. 
* This is ultimately used by binary assertion macros like KUNIT_EXPECT_EQ, etc. */ -#define KUNIT_INIT_BINARY_ASSERT_STRUCT(format_func, \ - text_, \ +#define KUNIT_INIT_BINARY_ASSERT_STRUCT(text_, \ left_val, \ right_val) { \ - .assert = { .format = format_func }, \ .text = text_, \ .left_value = left_val, \ .right_value = right_val \ diff --git a/include/kunit/resource.h b/include/kunit/resource.h index 09c2b34d1c61..cf6fb8f2ac1b 100644 --- a/include/kunit/resource.h +++ b/include/kunit/resource.h @@ -301,22 +301,6 @@ typedef bool (*kunit_resource_match_t)(struct kunit *test, void *match_data); /** - * kunit_resource_instance_match() - Match a resource with the same instance. - * @test: Test case to which the resource belongs. - * @res: The resource. - * @match_data: The resource pointer to match against. - * - * An instance of kunit_resource_match_t that matches a resource whose - * allocation matches @match_data. - */ -static inline bool kunit_resource_instance_match(struct kunit *test, - struct kunit_resource *res, - void *match_data) -{ - return res->data == match_data; -} - -/** * kunit_resource_name_match() - Match a resource with the same name. * @test: Test case to which the resource belongs. * @res: The resource. diff --git a/include/kunit/test.h b/include/kunit/test.h index 20cc4770cb3f..b1ab6b32216d 100644 --- a/include/kunit/test.h +++ b/include/kunit/test.h @@ -473,30 +473,30 @@ void kunit_do_failed_assertion(struct kunit *test, const struct kunit_loc *loc, enum kunit_assert_type type, const struct kunit_assert *assert, + assert_format_t assert_format, const char *fmt, ...); -#define KUNIT_ASSERTION(test, assert_type, pass, assert_class, INITIALIZER, fmt, ...) do { \ - if (unlikely(!(pass))) { \ - static const struct kunit_loc __loc = KUNIT_CURRENT_LOC; \ - struct assert_class __assertion = INITIALIZER; \ - kunit_do_failed_assertion(test, \ - &__loc, \ - assert_type, \ - &__assertion.assert, \ - fmt, \ - ##__VA_ARGS__); \ - } \ +#define _KUNIT_FAILED(test, assert_type, assert_class, assert_format, INITIALIZER, fmt, ...) do { \ + static const struct kunit_loc __loc = KUNIT_CURRENT_LOC; \ + const struct assert_class __assertion = INITIALIZER; \ + kunit_do_failed_assertion(test, \ + &__loc, \ + assert_type, \ + &__assertion.assert, \ + assert_format, \ + fmt, \ + ##__VA_ARGS__); \ } while (0) #define KUNIT_FAIL_ASSERTION(test, assert_type, fmt, ...) \ - KUNIT_ASSERTION(test, \ - assert_type, \ - false, \ - kunit_fail_assert, \ - KUNIT_INIT_FAIL_ASSERT_STRUCT, \ - fmt, \ - ##__VA_ARGS__) + _KUNIT_FAILED(test, \ + assert_type, \ + kunit_fail_assert, \ + kunit_fail_assert_format, \ + {}, \ + fmt, \ + ##__VA_ARGS__) /** * KUNIT_FAIL() - Always causes a test to fail when evaluated. @@ -521,14 +521,19 @@ void kunit_do_failed_assertion(struct kunit *test, expected_true, \ fmt, \ ...) \ - KUNIT_ASSERTION(test, \ - assert_type, \ - !!(condition) == !!expected_true, \ - kunit_unary_assert, \ - KUNIT_INIT_UNARY_ASSERT_STRUCT(#condition, \ - expected_true), \ - fmt, \ - ##__VA_ARGS__) +do { \ + if (likely(!!(condition) == !!expected_true)) \ + break; \ + \ + _KUNIT_FAILED(test, \ + assert_type, \ + kunit_unary_assert, \ + kunit_unary_assert_format, \ + KUNIT_INIT_UNARY_ASSERT_STRUCT(#condition, \ + expected_true), \ + fmt, \ + ##__VA_ARGS__); \ +} while (0) #define KUNIT_TRUE_MSG_ASSERTION(test, assert_type, condition, fmt, ...) 
\ KUNIT_UNARY_ASSERTION(test, \ @@ -578,16 +583,18 @@ do { \ .right_text = #right, \ }; \ \ - KUNIT_ASSERTION(test, \ - assert_type, \ - __left op __right, \ - assert_class, \ - KUNIT_INIT_BINARY_ASSERT_STRUCT(format_func, \ - &__text, \ - __left, \ - __right), \ - fmt, \ - ##__VA_ARGS__); \ + if (likely(__left op __right)) \ + break; \ + \ + _KUNIT_FAILED(test, \ + assert_type, \ + assert_class, \ + format_func, \ + KUNIT_INIT_BINARY_ASSERT_STRUCT(&__text, \ + __left, \ + __right), \ + fmt, \ + ##__VA_ARGS__); \ } while (0) #define KUNIT_BINARY_INT_ASSERTION(test, \ @@ -636,16 +643,19 @@ do { \ .right_text = #right, \ }; \ \ - KUNIT_ASSERTION(test, \ - assert_type, \ - strcmp(__left, __right) op 0, \ - kunit_binary_str_assert, \ - KUNIT_INIT_BINARY_ASSERT_STRUCT(kunit_binary_str_assert_format,\ - &__text, \ - __left, \ - __right), \ - fmt, \ - ##__VA_ARGS__); \ + if (likely(strcmp(__left, __right) op 0)) \ + break; \ + \ + \ + _KUNIT_FAILED(test, \ + assert_type, \ + kunit_binary_str_assert, \ + kunit_binary_str_assert_format, \ + KUNIT_INIT_BINARY_ASSERT_STRUCT(&__text, \ + __left, \ + __right), \ + fmt, \ + ##__VA_ARGS__); \ } while (0) #define KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \ @@ -656,14 +666,16 @@ do { \ do { \ const typeof(ptr) __ptr = (ptr); \ \ - KUNIT_ASSERTION(test, \ - assert_type, \ - !IS_ERR_OR_NULL(__ptr), \ - kunit_ptr_not_err_assert, \ - KUNIT_INIT_PTR_NOT_ERR_STRUCT(#ptr, \ - __ptr), \ - fmt, \ - ##__VA_ARGS__); \ + if (!IS_ERR_OR_NULL(__ptr)) \ + break; \ + \ + _KUNIT_FAILED(test, \ + assert_type, \ + kunit_ptr_not_err_assert, \ + kunit_ptr_not_err_assert_format, \ + KUNIT_INIT_PTR_NOT_ERR_STRUCT(#ptr, __ptr), \ + fmt, \ + ##__VA_ARGS__); \ } while (0) /** diff --git a/include/linux/bitops.h b/include/linux/bitops.h index d7dd83fafeba..2ba557e067fe 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -347,10 +347,10 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr, const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \ typeof(*(ptr)) old__, new__; \ \ + old__ = READ_ONCE(*(ptr)); \ do { \ - old__ = READ_ONCE(*(ptr)); \ new__ = (old__ & ~mask__) | bits__; \ - } while (cmpxchg(ptr, old__, new__) != old__); \ + } while (!try_cmpxchg(ptr, &old__, new__)); \ \ old__; \ }) @@ -362,11 +362,12 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr, const typeof(*(ptr)) clear__ = (clear), test__ = (test);\ typeof(*(ptr)) old__, new__; \ \ + old__ = READ_ONCE(*(ptr)); \ do { \ - old__ = READ_ONCE(*(ptr)); \ + if (old__ & test__) \ + break; \ new__ = old__ & ~clear__; \ - } while (!(old__ & test__) && \ - cmpxchg(ptr, old__, new__) != old__); \ + } while (!try_cmpxchg(ptr, &old__, new__)); \ \ !(old__ & test__); \ }) diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index e7f2fb2fc207..99c1726be6ee 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -207,7 +207,6 @@ struct ceph_msg_data_cursor { struct ceph_msg_data *data; /* current data item */ size_t resid; /* bytes not yet consumed */ - bool last_piece; /* current is last piece */ bool need_crc; /* crc update needed */ union { #ifdef CONFIG_BLOCK @@ -498,8 +497,7 @@ void ceph_con_discard_requeued(struct ceph_connection *con, u64 reconnect_seq); void ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor, struct ceph_msg *msg, size_t length); struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor, - size_t *page_offset, size_t *length, - bool *last_piece); + 
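[Note] The bitops.h hunk just above is one instance of a conversion that recurs later in this diff (iversion.h): open-coded cmpxchg() retry loops become try_cmpxchg(). The new primitive writes the current value back into its 'old' argument when the exchange fails, so the explicit re-read disappears, and on x86 the compiler can reuse the value the CMPXCHG instruction already returns. The transformation in isolation, with compute() standing in for whatever derives the new value:

    /* Before: compare the return value and re-read on failure. */
    old = READ_ONCE(*ptr);
    for (;;) {
            new = compute(old);
            cur = cmpxchg(ptr, old, new);
            if (cur == old)
                    break;
            old = cur;
    }

    /* After: try_cmpxchg() refreshes 'old' in place on failure. */
    old = READ_ONCE(*ptr);
    do {
            new = compute(old);
    } while (!try_cmpxchg(ptr, &old, new));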
size_t *page_offset, size_t *length); void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, size_t bytes); u32 ceph_crc32c_page(u32 crc, struct page *page, unsigned int page_offset, diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index 84a466b176cf..d95ab85f96ba 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -253,7 +253,6 @@ static __always_inline void arch_exit_to_user_mode(void) { } /** * arch_do_signal_or_restart - Architecture specific signal delivery function * @regs: Pointer to currents pt_regs - * @has_signal: actual signal to handle * * Invoked from exit_to_user_mode_loop(). */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 95fda85aa195..8b4f93e84868 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -214,8 +214,8 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, struct page *follow_huge_pd(struct vm_area_struct *vma, unsigned long address, hugepd_t hpd, int flags, int pdshift); -struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, - pmd_t *pmd, int flags); +struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, + int flags); struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, pud_t *pud, int flags); struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address, @@ -327,8 +327,8 @@ static inline struct page *follow_huge_pd(struct vm_area_struct *vma, return NULL; } -static inline struct page *follow_huge_pmd(struct mm_struct *mm, - unsigned long address, pmd_t *pmd, int flags) +static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, + unsigned long address, int flags) { return NULL; } diff --git a/include/linux/init.h b/include/linux/init.h index a0a90cd73ebe..077d7f93b402 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -134,7 +134,7 @@ static inline initcall_t initcall_from_entry(initcall_entry_t *entry) extern initcall_entry_t __con_initcall_start[], __con_initcall_end[]; -/* Used for contructor calls. */ +/* Used for constructor calls. */ typedef void (*ctor_fn_t)(void); struct file_system_type; diff --git a/include/linux/iova_bitmap.h b/include/linux/iova_bitmap.h new file mode 100644 index 000000000000..c006cf0a25f3 --- /dev/null +++ b/include/linux/iova_bitmap.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022, Oracle and/or its affiliates. + * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. 
All rights reserved + */ +#ifndef _IOVA_BITMAP_H_ +#define _IOVA_BITMAP_H_ + +#include <linux/types.h> + +struct iova_bitmap; + +typedef int (*iova_bitmap_fn_t)(struct iova_bitmap *bitmap, + unsigned long iova, size_t length, + void *opaque); + +struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length, + unsigned long page_size, + u64 __user *data); +void iova_bitmap_free(struct iova_bitmap *bitmap); +int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque, + iova_bitmap_fn_t fn); +void iova_bitmap_set(struct iova_bitmap *bitmap, + unsigned long iova, size_t length); + +#endif diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index e3e8c8662b49..e8240cf2611a 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -11,6 +11,7 @@ #include <linux/refcount.h> #include <linux/rhashtable-types.h> #include <linux/sysctl.h> +#include <linux/percpu_counter.h> struct user_namespace; @@ -36,8 +37,8 @@ struct ipc_namespace { unsigned int msg_ctlmax; unsigned int msg_ctlmnb; unsigned int msg_ctlmni; - atomic_t msg_bytes; - atomic_t msg_hdrs; + struct percpu_counter percpu_msg_bytes; + struct percpu_counter percpu_msg_hdrs; size_t shm_ctlmax; size_t shm_ctlall; diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h index 3a091d0710ae..d5e6024cb2a8 100644 --- a/include/linux/irqchip.h +++ b/include/linux/irqchip.h @@ -44,7 +44,8 @@ static const struct of_device_id drv_name##_irqchip_match_table[] = { #define IRQCHIP_MATCH(compat, fn) { .compatible = compat, \ .data = typecheck_irq_init_cb(fn), }, -#define IRQCHIP_PLATFORM_DRIVER_END(drv_name) \ + +#define IRQCHIP_PLATFORM_DRIVER_END(drv_name, ...) \ {}, \ }; \ MODULE_DEVICE_TABLE(of, drv_name##_irqchip_match_table); \ @@ -56,6 +57,7 @@ static struct platform_driver drv_name##_driver = { \ .owner = THIS_MODULE, \ .of_match_table = drv_name##_irqchip_match_table, \ .suppress_bind_attrs = true, \ + __VA_ARGS__ \ }, \ }; \ builtin_platform_driver(drv_name##_driver) diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 1cd4e36890fb..844a8e30e6de 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -169,6 +169,7 @@ int generic_handle_irq_safe(unsigned int irq); * conversion failed. */ int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq); +int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq); int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq); #endif diff --git a/include/linux/iversion.h b/include/linux/iversion.h index 3bfebde5a1a6..e27bd4f55d84 100644 --- a/include/linux/iversion.h +++ b/include/linux/iversion.h @@ -123,17 +123,12 @@ inode_peek_iversion_raw(const struct inode *inode) static inline void inode_set_max_iversion_raw(struct inode *inode, u64 val) { - u64 cur, old; + u64 cur = inode_peek_iversion_raw(inode); - cur = inode_peek_iversion_raw(inode); - for (;;) { + do { if (cur > val) break; - old = atomic64_cmpxchg(&inode->i_version, cur, val); - if (likely(old == cur)) - break; - cur = old; - } + } while (!atomic64_try_cmpxchg(&inode->i_version, &cur, val)); } /** @@ -177,56 +172,7 @@ inode_set_iversion_queried(struct inode *inode, u64 val) I_VERSION_QUERIED); } -/** - * inode_maybe_inc_iversion - increments i_version - * @inode: inode with the i_version that should be updated - * @force: increment the counter even if it's not necessary? - * - * Every time the inode is modified, the i_version field must be seen to have - * changed by any observer. 
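[Note] include/linux/iova_bitmap.h is new in this diff. Going only by the declarations above, a caller (the intended consumer appears to be VFIO device migration) allocates a bitmap backed by a user-supplied buffer, and iova_bitmap_for_each() walks the IOVA space in chunks, invoking a callback that marks dirty ranges with iova_bitmap_set(). A hypothetical caller, sketched purely from these signatures; the device helper and the ERR_PTR error convention are assumptions:

    /* Assumed device-side helper; not part of the API above. */
    static int demo_fill_dirty(struct iova_bitmap *bitmap, unsigned long iova,
                               size_t length, void *opaque)
    {
            struct demo_dev *dev = opaque;

            if (demo_dev_range_written(dev, iova, length))
                    iova_bitmap_set(bitmap, iova, length);
            return 0;
    }

    /* bitmap = iova_bitmap_alloc(iova, length, PAGE_SIZE, user_data);
     * ret = iova_bitmap_for_each(bitmap, dev, demo_fill_dirty);
     * iova_bitmap_free(bitmap);
     */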
- * - * If "force" is set or the QUERIED flag is set, then ensure that we increment - * the value, and clear the queried flag. - * - * In the common case where neither is set, then we can return "false" without - * updating i_version. - * - * If this function returns false, and no other metadata has changed, then we - * can avoid logging the metadata. - */ -static inline bool -inode_maybe_inc_iversion(struct inode *inode, bool force) -{ - u64 cur, old, new; - - /* - * The i_version field is not strictly ordered with any other inode - * information, but the legacy inode_inc_iversion code used a spinlock - * to serialize increments. - * - * Here, we add full memory barriers to ensure that any de-facto - * ordering with other info is preserved. - * - * This barrier pairs with the barrier in inode_query_iversion() - */ - smp_mb(); - cur = inode_peek_iversion_raw(inode); - for (;;) { - /* If flag is clear then we needn't do anything */ - if (!force && !(cur & I_VERSION_QUERIED)) - return false; - - /* Since lowest bit is flag, add 2 to avoid it */ - new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT; - - old = atomic64_cmpxchg(&inode->i_version, cur, new); - if (likely(old == cur)) - break; - cur = old; - } - return true; -} - +bool inode_maybe_inc_iversion(struct inode *inode, bool force); /** * inode_inc_iversion - forcibly increment i_version @@ -304,10 +250,10 @@ inode_peek_iversion(const struct inode *inode) static inline u64 inode_query_iversion(struct inode *inode) { - u64 cur, old, new; + u64 cur, new; cur = inode_peek_iversion_raw(inode); - for (;;) { + do { /* If flag is already set, then no need to swap */ if (cur & I_VERSION_QUERIED) { /* @@ -320,11 +266,7 @@ inode_query_iversion(struct inode *inode) } new = cur | I_VERSION_QUERIED; - old = atomic64_cmpxchg(&inode->i_version, cur, new); - if (likely(old == cur)) - break; - cur = old; - } + } while (!atomic64_try_cmpxchg(&inode->i_version, &cur, new)); return cur >> I_VERSION_QUERIED_SHIFT; } diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 13e6c4b58f07..41a686996aaa 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -427,7 +427,7 @@ extern int kexec_load_disabled; extern bool kexec_in_progress; int crash_shrink_memory(unsigned long new_size); -size_t crash_get_memory_size(void); +ssize_t crash_get_memory_size(void); #ifndef arch_kexec_protect_crashkres /* diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 47ad3b104d9e..139d05b26f82 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -10,6 +10,9 @@ #ifndef MDEV_H #define MDEV_H +#include <linux/device.h> +#include <linux/uuid.h> + struct mdev_type; struct mdev_device { @@ -20,67 +23,67 @@ struct mdev_device { bool active; }; -static inline struct mdev_device *to_mdev_device(struct device *dev) -{ - return container_of(dev, struct mdev_device, dev); -} +struct mdev_type { + /* set by the driver before calling mdev_register parent: */ + const char *sysfs_name; + const char *pretty_name; -unsigned int mdev_get_type_group_id(struct mdev_device *mdev); -unsigned int mtype_get_type_group_id(struct mdev_type *mtype); -struct device *mtype_get_parent_dev(struct mdev_type *mtype); + /* set by the core, can be used drivers */ + struct mdev_parent *parent; -/* interface for exporting mdev supported type attributes */ -struct mdev_type_attribute { - struct attribute attr; - ssize_t (*show)(struct mdev_type *mtype, - struct mdev_type_attribute *attr, char *buf); - ssize_t (*store)(struct mdev_type *mtype, - struct mdev_type_attribute 
*attr, const char *buf, - size_t count); + /* internal only */ + struct kobject kobj; + struct kobject *devices_kobj; }; -#define MDEV_TYPE_ATTR(_name, _mode, _show, _store) \ -struct mdev_type_attribute mdev_type_attr_##_name = \ - __ATTR(_name, _mode, _show, _store) -#define MDEV_TYPE_ATTR_RW(_name) \ - struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RW(_name) -#define MDEV_TYPE_ATTR_RO(_name) \ - struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RO(_name) -#define MDEV_TYPE_ATTR_WO(_name) \ - struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_WO(_name) +/* embedded into the struct device that the mdev devices hang off */ +struct mdev_parent { + struct device *dev; + struct mdev_driver *mdev_driver; + struct kset *mdev_types_kset; + /* Synchronize device creation/removal with parent unregistration */ + struct rw_semaphore unreg_sem; + struct mdev_type **types; + unsigned int nr_types; + atomic_t available_instances; +}; + +static inline struct mdev_device *to_mdev_device(struct device *dev) +{ + return container_of(dev, struct mdev_device, dev); +} /** * struct mdev_driver - Mediated device driver + * @device_api: string to return for the device_api sysfs + * @max_instances: maximum number of instances supported (optional) * @probe: called when new device created * @remove: called when device removed - * @supported_type_groups: Attributes to define supported types. It is mandatory - * to provide supported types. + * @get_available: Return the max number of instances that can be created + * @show_description: Print a description of the mtype * @driver: device driver structure - * **/ struct mdev_driver { + const char *device_api; + unsigned int max_instances; int (*probe)(struct mdev_device *dev); void (*remove)(struct mdev_device *dev); - struct attribute_group **supported_type_groups; + unsigned int (*get_available)(struct mdev_type *mtype); + ssize_t (*show_description)(struct mdev_type *mtype, char *buf); struct device_driver driver; }; -extern struct bus_type mdev_bus_type; - -int mdev_register_device(struct device *dev, struct mdev_driver *mdev_driver); -void mdev_unregister_device(struct device *dev); +int mdev_register_parent(struct mdev_parent *parent, struct device *dev, + struct mdev_driver *mdev_driver, struct mdev_type **types, + unsigned int nr_types); +void mdev_unregister_parent(struct mdev_parent *parent); int mdev_register_driver(struct mdev_driver *drv); void mdev_unregister_driver(struct mdev_driver *drv); -struct device *mdev_parent_dev(struct mdev_device *mdev); static inline struct device *mdev_dev(struct mdev_device *mdev) { return &mdev->dev; } -static inline struct mdev_device *mdev_from_dev(struct device *dev) -{ - return dev->bus == &mdev_bus_type ? 
to_mdev_device(dev) : NULL; -} #endif /* MDEV_H */ diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 8a30de08e913..c726ea781255 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -293,6 +293,7 @@ struct mmc_card { #define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */ #define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */ #define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */ +#define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */ bool reenable_cmdq; /* Re-enable Command Queue */ diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index 83fccd0c9bba..d6d3eae2f145 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h @@ -37,9 +37,8 @@ extern unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data); extern int of_irq_to_resource(struct device_node *dev, int index, struct resource *r); -extern void of_irq_init(const struct of_device_id *matches); - #ifdef CONFIG_OF_IRQ +extern void of_irq_init(const struct of_device_id *matches); extern int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_args *out_irq); extern int of_irq_count(struct device_node *dev); @@ -57,6 +56,9 @@ extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev, extern void of_msi_configure(struct device *dev, struct device_node *np); u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in); #else +static inline void of_irq_init(const struct of_device_id *matches) +{ +} static inline int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_args *out_irq) { diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 01861eebed79..8ed5fba6d156 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -15,6 +15,9 @@ #include <linux/types.h> #include <linux/gfp.h> +/* percpu_counter batch for local add or sub */ +#define PERCPU_COUNTER_LOCAL_BATCH INT_MAX + #ifdef CONFIG_SMP struct percpu_counter { @@ -56,6 +59,22 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) percpu_counter_add_batch(fbc, amount, percpu_counter_batch); } +/* + * With percpu_counter_add_local() and percpu_counter_sub_local(), counts + * are accumulated in local per cpu counter and not in fbc->count until + * local count overflows PERCPU_COUNTER_LOCAL_BATCH. This makes counter + * write efficient. + * But percpu_counter_sum(), instead of percpu_counter_read(), needs to be + * used to add up the counts from each CPU to account for all the local + * counts. So percpu_counter_add_local() and percpu_counter_sub_local() + * should be used when a counter is updated frequently and read rarely. 
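The intended split, as the comment above says, is that write-heavy paths use the _local variants while occasional readers pay for a full percpu_counter_sum(); that is exactly how ipc/msg.c is converted later in this patch. A minimal sketch with illustrative names (the counter is assumed to have been set up with percpu_counter_init()):

    #include <linux/percpu_counter.h>

    static struct percpu_counter nbytes;    /* illustrative */

    static void charge(size_t sz)           /* hot, write-mostly path */
    {
            percpu_counter_add_local(&nbytes, sz);
    }

    static void uncharge(size_t sz)
    {
            percpu_counter_sub_local(&nbytes, sz);
    }

    static s64 total(void)                  /* rare read path */
    {
            /* percpu_counter_read() would miss the per-CPU batches */
            return percpu_counter_sum(&nbytes);
    }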
+ */ +static inline void +percpu_counter_add_local(struct percpu_counter *fbc, s64 amount) +{ + percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH); +} + static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) { s64 ret = __percpu_counter_sum(fbc); @@ -138,6 +157,13 @@ percpu_counter_add(struct percpu_counter *fbc, s64 amount) preempt_enable(); } +/* non-SMP percpu_counter_add_local is the same with percpu_counter_add */ +static inline void +percpu_counter_add_local(struct percpu_counter *fbc, s64 amount) +{ + percpu_counter_add(fbc, amount); +} + static inline void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch) { @@ -193,4 +219,10 @@ static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount) percpu_counter_add(fbc, -amount); } +static inline void +percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount) +{ + percpu_counter_add_local(fbc, -amount); +} + #endif /* _LINUX_PERCPU_COUNTER_H */ diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 81cab4b01edc..d6c48163c6de 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -127,6 +127,9 @@ static inline void put_task_struct_many(struct task_struct *t, int nr) void put_task_struct_rcu_user(struct task_struct *task); +/* Free all architecture-specific resources held by a thread. */ +void release_thread(struct task_struct *dead_task); + #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT extern int arch_task_struct_size __read_mostly; #else diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 75eea5ebb179..770ef2cb5775 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -246,6 +246,7 @@ void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *, struct rpc_xprt *); bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, const struct sockaddr *sap); void rpc_clnt_xprt_set_online(struct rpc_clnt *clnt, struct rpc_xprt *xprt); +void rpc_clnt_disconnect(struct rpc_clnt *clnt); void rpc_cleanup_clids(void); static inline int rpc_reply_expected(struct rpc_task *task) diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index baeca2f564dc..b8ca3ecaf8d7 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -209,11 +209,17 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *); struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req); void rpc_put_task(struct rpc_task *); void rpc_put_task_async(struct rpc_task *); +bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status); +void rpc_task_try_cancel(struct rpc_task *task, int error); void rpc_signal_task(struct rpc_task *); void rpc_exit_task(struct rpc_task *); void rpc_exit(struct rpc_task *, int); void rpc_release_calldata(const struct rpc_call_ops *, void *); void rpc_killall_tasks(struct rpc_clnt *); +unsigned long rpc_cancel_tasks(struct rpc_clnt *clnt, int error, + bool (*fnmatch)(const struct rpc_task *, + const void *), + const void *data); void rpc_execute(struct rpc_task *); void rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *); void rpc_init_wait_queue(struct rpc_wait_queue *, const char *); diff --git a/include/linux/vfio.h b/include/linux/vfio.h index e05ddc6fe6a5..e7cebeb875dd 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -14,6 +14,7 @@ #include <linux/workqueue.h> #include <linux/poll.h> #include <uapi/linux/vfio.h> +#include <linux/iova_bitmap.h> struct kvm; @@ -33,10 +34,11 @@ struct vfio_device { struct device *dev; 
const struct vfio_device_ops *ops; /* - * mig_ops is a static property of the vfio_device which must be set - * prior to registering the vfio_device. + * mig_ops/log_ops are static properties of the vfio_device which must + * be set prior to registering the vfio_device. */ const struct vfio_migration_ops *mig_ops; + const struct vfio_log_ops *log_ops; struct vfio_group *group; struct vfio_device_set *dev_set; struct list_head dev_set_list; @@ -45,7 +47,9 @@ struct vfio_device { struct kvm *kvm; /* Members below here are private, not for driver use */ - refcount_t refcount; + unsigned int index; + struct device device; /* device.kref covers object life cycle */ + refcount_t refcount; /* user count on registered device */ unsigned int open_count; struct completion comp; struct list_head group_next; @@ -55,6 +59,8 @@ struct vfio_device { /** * struct vfio_device_ops - VFIO bus driver device callbacks * + * @init: initialize private fields in device structure + * @release: Reclaim private fields in device structure * @open_device: Called when the first file descriptor is opened for this device * @close_device: Opposite of open_device * @read: Perform read(2) on device file descriptor @@ -72,6 +78,8 @@ struct vfio_device { */ struct vfio_device_ops { char *name; + int (*init)(struct vfio_device *vdev); + void (*release)(struct vfio_device *vdev); int (*open_device)(struct vfio_device *vdev); void (*close_device)(struct vfio_device *vdev); ssize_t (*read)(struct vfio_device *vdev, char __user *buf, @@ -109,6 +117,28 @@ struct vfio_migration_ops { }; /** + * @log_start: Optional callback to ask the device to start DMA logging. + * @log_stop: Optional callback to ask the device to stop DMA logging. + * @log_read_and_clear: Optional callback to ask the device to read + * and clear the dirty DMAs in some given range. + * + * The vfio core implementation of the DEVICE_FEATURE_DMA_LOGGING_ set + * of features does not track logging state relative to the device, + * therefore the device implementation of vfio_log_ops must handle + * arbitrary user requests. This includes rejecting subsequent calls + * to log_start without an intervening log_stop, as well as graceful + * handling of log_stop and log_read_and_clear from invalid states.
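A rough shape of the driver-side contract, not taken from any in-tree driver: the struct my_dev wrapper, its "logging" flag and the my_hw_test_and_clear_dirty() helper are hypothetical, but the state checks and the use of iova_bitmap_set() (from the iova_bitmap interface introduced at the top of this section) follow the rules spelled out above:

    struct my_dev {
            struct vfio_device vdev;
            bool logging;
    };

    static int my_log_start(struct vfio_device *device,
                            struct rb_root_cached *ranges, u32 nnodes,
                            u64 *page_size)
    {
            struct my_dev *my = container_of(device, struct my_dev, vdev);

            if (my->logging)        /* reject start-after-start */
                    return -EEXIST;
            /* program hardware tracking for 'ranges' here and report the
             * granularity actually achieved back through *page_size
             */
            *page_size = PAGE_SIZE;
            my->logging = true;
            return 0;
    }

    static int my_log_stop(struct vfio_device *device)
    {
            struct my_dev *my = container_of(device, struct my_dev, vdev);

            my->logging = false;    /* stopping while stopped is harmless */
            return 0;
    }

    static int my_log_read_and_clear(struct vfio_device *device,
                                     unsigned long iova, unsigned long length,
                                     struct iova_bitmap *dirty)
    {
            struct my_dev *my = container_of(device, struct my_dev, vdev);
            unsigned long addr;

            if (!my->logging)       /* read from an invalid state */
                    return -EINVAL;
            for (addr = iova; addr < iova + length; addr += PAGE_SIZE)
                    if (my_hw_test_and_clear_dirty(my, addr)) /* hypothetical */
                            iova_bitmap_set(dirty, addr, PAGE_SIZE);
            return 0;
    }

    static const struct vfio_log_ops my_log_ops = {
            .log_start          = my_log_start,
            .log_stop           = my_log_stop,
            .log_read_and_clear = my_log_read_and_clear,
    };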
+ */ +struct vfio_log_ops { + int (*log_start)(struct vfio_device *device, + struct rb_root_cached *ranges, u32 nnodes, u64 *page_size); + int (*log_stop)(struct vfio_device *device); + int (*log_read_and_clear)(struct vfio_device *device, + unsigned long iova, unsigned long length, + struct iova_bitmap *dirty); +}; + +/** * vfio_check_feature - Validate user input for the VFIO_DEVICE_FEATURE ioctl * @flags: Arg from the device_feature op * @argsz: Arg from the device_feature op @@ -137,9 +167,23 @@ static inline int vfio_check_feature(u32 flags, size_t argsz, u32 supported_ops, return 1; } -void vfio_init_group_dev(struct vfio_device *device, struct device *dev, - const struct vfio_device_ops *ops); -void vfio_uninit_group_dev(struct vfio_device *device); +struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev, + const struct vfio_device_ops *ops); +#define vfio_alloc_device(dev_struct, member, dev, ops) \ + container_of(_vfio_alloc_device(sizeof(struct dev_struct) + \ + BUILD_BUG_ON_ZERO(offsetof( \ + struct dev_struct, member)), \ + dev, ops), \ + struct dev_struct, member) + +int vfio_init_device(struct vfio_device *device, struct device *dev, + const struct vfio_device_ops *ops); +void vfio_free_device(struct vfio_device *device); +static inline void vfio_put_device(struct vfio_device *device) +{ + put_device(&device->device); +} + int vfio_register_group_dev(struct vfio_device *device); int vfio_register_emulated_iommu_dev(struct vfio_device *device); void vfio_unregister_group_dev(struct vfio_device *device); @@ -155,6 +199,7 @@ int vfio_mig_get_next_state(struct vfio_device *device, * External user API */ struct iommu_group *vfio_file_iommu_group(struct file *file); +bool vfio_file_is_group(struct file *file); bool vfio_file_enforced_coherent(struct file *file); void vfio_file_set_kvm(struct file *file, struct kvm *kvm); bool vfio_file_has_dev(struct file *file, struct vfio_device *device); diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h index 5579ece4347b..367fd79226a3 100644 --- a/include/linux/vfio_pci_core.h +++ b/include/linux/vfio_pci_core.h @@ -20,39 +20,10 @@ #define VFIO_PCI_CORE_H #define VFIO_PCI_OFFSET_SHIFT 40 - #define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT) #define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT) #define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1) -/* Special capability IDs predefined access */ -#define PCI_CAP_ID_INVALID 0xFF /* default raw access */ -#define PCI_CAP_ID_INVALID_VIRT 0xFE /* default virt access */ - -/* Cap maximum number of ioeventfds per device (arbitrary) */ -#define VFIO_PCI_IOEVENTFD_MAX 1000 - -struct vfio_pci_ioeventfd { - struct list_head next; - struct vfio_pci_core_device *vdev; - struct virqfd *virqfd; - void __iomem *addr; - uint64_t data; - loff_t pos; - int bar; - int count; - bool test_mem; -}; - -struct vfio_pci_irq_ctx { - struct eventfd_ctx *trigger; - struct virqfd *unmask; - struct virqfd *mask; - char *name; - bool masked; - struct irq_bypass_producer producer; -}; - struct vfio_pci_core_device; struct vfio_pci_region; @@ -78,23 +49,6 @@ struct vfio_pci_region { u32 flags; }; -struct vfio_pci_dummy_resource { - struct resource resource; - int index; - struct list_head res_next; -}; - -struct vfio_pci_vf_token { - struct mutex lock; - uuid_t uuid; - int users; -}; - -struct vfio_pci_mmap_vma { - struct vm_area_struct *vma; - struct list_head vma_next; -}; - struct vfio_pci_core_device { struct vfio_device 
vdev; struct pci_dev *pdev; @@ -124,11 +78,14 @@ struct vfio_pci_core_device { bool needs_reset; bool nointx; bool needs_pm_restore; + bool pm_intx_masked; + bool pm_runtime_engaged; struct pci_saved_state *pci_saved_state; struct pci_saved_state *pm_save; int ioeventfds_nr; struct eventfd_ctx *err_trigger; struct eventfd_ctx *req_trigger; + struct eventfd_ctx *pm_wake_eventfd_ctx; struct list_head dummy_resources_list; struct mutex ioeventfds_lock; struct list_head ioeventfds_list; @@ -141,100 +98,17 @@ struct vfio_pci_core_device { struct rw_semaphore memory_lock; }; -#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) -#define is_msi(vdev) (vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX) -#define is_msix(vdev) (vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX) -#define is_irq_none(vdev) (!(is_intx(vdev) || is_msi(vdev) || is_msix(vdev))) -#define irq_is(vdev, type) (vdev->irq_type == type) - -void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev); -void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev); - -int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, - uint32_t flags, unsigned index, - unsigned start, unsigned count, void *data); - -ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev, - char __user *buf, size_t count, - loff_t *ppos, bool iswrite); - -ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf, - size_t count, loff_t *ppos, bool iswrite); - -#ifdef CONFIG_VFIO_PCI_VGA -ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf, - size_t count, loff_t *ppos, bool iswrite); -#else -static inline ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, - char __user *buf, size_t count, - loff_t *ppos, bool iswrite) -{ - return -EINVAL; -} -#endif - -long vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset, - uint64_t data, int count, int fd); - -int vfio_pci_init_perm_bits(void); -void vfio_pci_uninit_perm_bits(void); - -int vfio_config_init(struct vfio_pci_core_device *vdev); -void vfio_config_free(struct vfio_pci_core_device *vdev); - -int vfio_pci_register_dev_region(struct vfio_pci_core_device *vdev, - unsigned int type, unsigned int subtype, - const struct vfio_pci_regops *ops, - size_t size, u32 flags, void *data); - -int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, - pci_power_t state); - -bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev); -void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev); -u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev); -void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev, - u16 cmd); - -#ifdef CONFIG_VFIO_PCI_IGD -int vfio_pci_igd_init(struct vfio_pci_core_device *vdev); -#else -static inline int vfio_pci_igd_init(struct vfio_pci_core_device *vdev) -{ - return -ENODEV; -} -#endif - -#ifdef CONFIG_VFIO_PCI_ZDEV_KVM -int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev, - struct vfio_info_cap *caps); -int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev); -void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev); -#else -static inline int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev, - struct vfio_info_cap *caps) -{ - return -ENODEV; -} - -static inline int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev) -{ - return 0; -} - -static inline void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev) -{} -#endif - /* Will be exported for vfio pci drivers usage */ +int 
vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev, + unsigned int type, unsigned int subtype, + const struct vfio_pci_regops *ops, + size_t size, u32 flags, void *data); void vfio_pci_core_set_params(bool nointxmask, bool is_disable_vga, bool is_disable_idle_d3); void vfio_pci_core_close_device(struct vfio_device *core_vdev); -void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev, - struct pci_dev *pdev, - const struct vfio_device_ops *vfio_pci_ops); +int vfio_pci_core_init_dev(struct vfio_device *core_vdev); +void vfio_pci_core_release_dev(struct vfio_device *core_vdev); int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev); -void vfio_pci_core_uninit_device(struct vfio_pci_core_device *vdev); void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev); extern const struct pci_error_handlers vfio_pci_core_err_handlers; int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev, @@ -256,9 +130,4 @@ void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev); pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev, pci_channel_state_t state); -static inline bool vfio_pci_is_vga(struct pci_dev *pdev) -{ - return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA; -} - #endif /* VFIO_PCI_CORE_H */ diff --git a/include/linux/wireless.h b/include/linux/wireless.h index 2d1b54556eff..e6e34d74dda0 100644 --- a/include/linux/wireless.h +++ b/include/linux/wireless.h @@ -26,7 +26,15 @@ struct compat_iw_point { struct __compat_iw_event { __u16 len; /* Real length of this stuff */ __u16 cmd; /* Wireless IOCTL */ - compat_caddr_t pointer; + + union { + compat_caddr_t pointer; + + /* we need ptr_bytes to make memcpy() run-time destination + * buffer bounds checking happy, nothing special + */ + DECLARE_FLEX_ARRAY(__u8, ptr_bytes); + }; }; #define IW_EV_COMPAT_LCP_LEN offsetof(struct __compat_iw_event, pointer) #define IW_EV_COMPAT_POINT_OFF offsetof(struct compat_iw_point, length) diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h index a8994f307fc3..03b64bf876a4 100644 --- a/include/net/ieee802154_netdev.h +++ b/include/net/ieee802154_netdev.h @@ -185,21 +185,27 @@ static inline int ieee802154_sockaddr_check_size(struct sockaddr_ieee802154 *daddr, int len) { struct ieee802154_addr_sa *sa; + int ret = 0; sa = &daddr->addr; if (len < IEEE802154_MIN_NAMELEN) return -EINVAL; switch (sa->addr_type) { + case IEEE802154_ADDR_NONE: + break; case IEEE802154_ADDR_SHORT: if (len < IEEE802154_NAMELEN_SHORT) - return -EINVAL; + ret = -EINVAL; break; case IEEE802154_ADDR_LONG: if (len < IEEE802154_NAMELEN_LONG) - return -EINVAL; + ret = -EINVAL; + break; + default: + ret = -EINVAL; break; } - return 0; + return ret; } static inline void ieee802154_addr_from_sa(struct ieee802154_addr *a, diff --git a/include/net/ipv6.h b/include/net/ipv6.h index d664ba5812d8..37943ba3a73c 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -1182,6 +1182,8 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info); void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu); +void inet6_cleanup_sock(struct sock *sk); +void inet6_sock_destruct(struct sock *sk); int inet6_release(struct socket *sock); int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len); int inet6_getname(struct socket *sock, struct sockaddr *uaddr, diff --git a/include/net/udp.h b/include/net/udp.h index 
5ee88ddf79c3..fee053bcd17c 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -247,7 +247,7 @@ static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if, } /* net/ipv4/udp.c */ -void udp_destruct_sock(struct sock *sk); +void udp_destruct_common(struct sock *sk); void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len); int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb); void udp_skb_destructor(struct sock *sk, struct sk_buff *skb); diff --git a/include/net/udplite.h b/include/net/udplite.h index 0143b373602e..299c14ce2bb9 100644 --- a/include/net/udplite.h +++ b/include/net/udplite.h @@ -25,14 +25,6 @@ static __inline__ int udplite_getfrag(void *from, char *to, int offset, return copy_from_iter_full(to, len, &msg->msg_iter) ? 0 : -EFAULT; } -/* Designate sk as UDP-Lite socket */ -static inline int udplite_sk_init(struct sock *sk) -{ - udp_init_sock(sk); - udp_sk(sk)->pcflag = UDPLITE_BIT; - return 0; -} - /* * Checksumming routines */ diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 733a1cddde30..d7d8e0922376 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -986,6 +986,148 @@ enum vfio_device_mig_state { VFIO_DEVICE_STATE_RUNNING_P2P = 5, }; +/* + * Upon VFIO_DEVICE_FEATURE_SET, allow the device to be moved into a low power + * state with the platform-based power management. Device use of lower power + * states depends on factors managed by the runtime power management core, + * including system level support and coordinating support among dependent + * devices. Enabling device low power entry does not guarantee lower power + * usage by the device, nor is a mechanism provided through this feature to + * know the current power state of the device. If any device access happens + * (either from the host or through the vfio uAPI) when the device is in the + * low power state, then the host will move the device out of the low power + * state as necessary prior to the access. Once the access is completed, the + * device may re-enter the low power state. For single shot low power support + * with wake-up notification, see + * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP below. Access to mmap'd + * device regions is disabled on LOW_POWER_ENTRY and may only be resumed after + * calling LOW_POWER_EXIT. + */ +#define VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY 3 + +/* + * This device feature has the same behavior as + * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY with the exception that the user + * provides an eventfd for wake-up notification. When the device moves out of + * the low power state for the wake-up, the host will not allow the device to + * re-enter a low power state without a subsequent user call to one of the low + * power entry device feature IOCTLs. Access to mmap'd device regions is + * disabled on LOW_POWER_ENTRY_WITH_WAKEUP and may only be resumed after the + * low power exit. The low power exit can happen either through LOW_POWER_EXIT + * or through any other access (where the wake-up notification has been + * generated). The access to mmap'd device regions will not trigger low power + * exit. + * + * The notification through the provided eventfd will be generated only when + * the device has entered and is resumed from a low power state after + * calling this device feature IOCTL. A device that has not entered low power + * state, as managed through the runtime power management core, will not + * generate a notification through the provided eventfd on access. 
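As a concrete illustration of the flow described in this comment (continued just below), userspace might arm the wakeup variant roughly as follows. A hedged sketch using the existing struct vfio_device_feature header (argsz/flags/data[]) of the VFIO_DEVICE_FEATURE ioctl, error handling trimmed:

    #include <string.h>
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    /* returns the wakeup eventfd on success, -1 on failure */
    static int enter_low_power(int device_fd)
    {
            struct vfio_device_low_power_entry_with_wakeup entry = {
                    .wakeup_eventfd = eventfd(0, 0),
            };
            char buf[sizeof(struct vfio_device_feature) + sizeof(entry)];
            struct vfio_device_feature *feature = (void *)buf;

            feature->argsz = sizeof(buf);
            feature->flags = VFIO_DEVICE_FEATURE_SET |
                             VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP;
            memcpy(feature->data, &entry, sizeof(entry));

            if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feature) < 0)
                    return -1;

            /* a read() completing on the eventfd later means the device
             * resumed from low power and will stay out of it until one
             * of the low power entry features is called again
             */
            return entry.wakeup_eventfd;
    }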
Calling the + * LOW_POWER_EXIT feature is optional in the case where notification has been + * signaled on the provided eventfd that a resume from low power has occurred. + */ +struct vfio_device_low_power_entry_with_wakeup { + __s32 wakeup_eventfd; + __u32 reserved; +}; + +#define VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP 4 + +/* + * Upon VFIO_DEVICE_FEATURE_SET, disallow use of device low power states as + * previously enabled via VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY or + * VFIO_DEVICE_FEATURE_LOW_POWER_ENTRY_WITH_WAKEUP device features. + * This device feature IOCTL may itself generate a wakeup eventfd notification + * in the latter case if the device had previously entered a low power state. + */ +#define VFIO_DEVICE_FEATURE_LOW_POWER_EXIT 5 + +/* + * Upon VFIO_DEVICE_FEATURE_SET start/stop device DMA logging. + * VFIO_DEVICE_FEATURE_PROBE can be used to detect if the device supports + * DMA logging. + * + * DMA logging allows a device to internally record what DMAs the device is + * initiating and report them back to userspace. It is part of the VFIO + * migration infrastructure that allows implementing dirty page tracking + * during the pre-copy phase of live migration. Only DMA WRITEs are logged, + * and this API is not connected to VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE. + * + * When DMA logging is started a range of IOVAs to monitor is provided and the + * device can optimize its logging to cover only the IOVA range given. Each + * DMA that the device initiates inside the range will be logged by the device + * for later retrieval. + * + * page_size is an input that hints what tracking granularity the device + * should try to achieve. If the device cannot do the hinted page size then + * it's the driver's choice which page size to pick based on its support. + * On output the device will return the page size it selected. + * + * ranges is a pointer to an array of + * struct vfio_device_feature_dma_logging_range. + * + * The core kernel code guarantees to support at minimum a num_ranges that + * fits into a single kernel page. User space can try higher values but should + * give up if the above can't be achieved due to driver limitations. + * + * A single call to start device DMA logging can be issued and a matching stop + * should follow at the end. Another start is not allowed in the meantime. + */ +struct vfio_device_feature_dma_logging_control { + __aligned_u64 page_size; + __u32 num_ranges; + __u32 __reserved; + __aligned_u64 ranges; +}; + +struct vfio_device_feature_dma_logging_range { + __aligned_u64 iova; + __aligned_u64 length; +}; + +#define VFIO_DEVICE_FEATURE_DMA_LOGGING_START 6 + +/* + * Upon VFIO_DEVICE_FEATURE_SET stop device DMA logging that was started + * by VFIO_DEVICE_FEATURE_DMA_LOGGING_START + */ +#define VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP 7 + +/* + * Upon VFIO_DEVICE_FEATURE_GET read back and clear the device DMA log + * + * Query the device's DMA log for written pages within the given IOVA range. + * During querying the log is cleared for the IOVA range. + * + * bitmap is a pointer to an array of u64s that will hold the output bitmap + * with 1 bit reporting a page_size unit of IOVA. The mapping of IOVA to bits + * is given by: + * bitmap[(addr - iova)/page_size] & (1ULL << (addr % 64)) + * + * The input page_size can be any power of two value and does not have to + * match the value given to VFIO_DEVICE_FEATURE_DMA_LOGGING_START.
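Stepping back to the LOGGING_START control defined above before finishing the report semantics: from userspace, starting tracking might look roughly like this. A hedged sketch reusing the common struct vfio_device_feature header (argsz/flags/data[]) of the VFIO_DEVICE_FEATURE ioctl:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    static int start_dma_logging(int device_fd, uint64_t iova, uint64_t len)
    {
            struct vfio_device_feature_dma_logging_range range = {
                    .iova = iova,
                    .length = len,
            };
            struct vfio_device_feature_dma_logging_control ctrl = {
                    .page_size = 4096,      /* a hint, see above */
                    .num_ranges = 1,
                    .ranges = (uintptr_t)&range,
            };
            char buf[sizeof(struct vfio_device_feature) + sizeof(ctrl)];
            struct vfio_device_feature *feature = (void *)buf;

            feature->argsz = sizeof(buf);
            feature->flags = VFIO_DEVICE_FEATURE_SET |
                             VFIO_DEVICE_FEATURE_DMA_LOGGING_START;
            memcpy(feature->data, &ctrl, sizeof(ctrl));

            if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feature) < 0)
                    return -1;

            /* the page size the driver actually selected is written back
             * into the copy embedded in 'buf'
             */
            memcpy(&ctrl, feature->data, sizeof(ctrl));
            return 0;
    }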
The driver + * will format its internal logging to match the reporting page size, possibly + * by replicating bits if the internal page size is lower than requested. + * + * The LOGGING_REPORT will only set bits in the bitmap and never clear or + * perform any initialization of the user provided bitmap. + * + * If any error is returned userspace should assume that the dirty log is + * corrupted. Error recovery is to consider all memory dirty and try to + * restart the dirty tracking, or to abort/restart the whole migration. + * + * If DMA logging is not enabled, an error will be returned. + * + */ +struct vfio_device_feature_dma_logging_report { + __aligned_u64 iova; + __aligned_u64 length; + __aligned_u64 page_size; + __aligned_u64 bitmap; +}; + +#define VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT 8 + /* -------- API for Type1 VFIO IOMMU -------- */ /** diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index dae0f350c678..a34f4271a2e9 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h @@ -219,6 +219,7 @@ static inline void xen_preemptible_hcall_end(void) { } void xen_grant_setup_dma_ops(struct device *dev); bool xen_is_grant_dma_device(struct device *dev); bool xen_virtio_mem_acc(struct virtio_device *dev); +bool xen_virtio_restricted_mem_acc(struct virtio_device *dev); #else static inline void xen_grant_setup_dma_ops(struct device *dev) { @@ -234,6 +235,11 @@ static inline bool xen_virtio_mem_acc(struct virtio_device *dev) { return false; } + +static inline bool xen_virtio_restricted_mem_acc(struct virtio_device *dev) +{ + return false; +} #endif /* CONFIG_XEN_GRANT_DMA_OPS */ #endif /* INCLUDE_XEN_OPS_H */ diff --git a/init/Kconfig b/init/Kconfig index d6c4302b1c08..694f7c160c9c 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1267,6 +1267,7 @@ endif # NAMESPACES config CHECKPOINT_RESTORE bool "Checkpoint/restore support" + depends on PROC_FS select PROC_CHILDREN select KCMP default n diff --git a/init/do_mounts.c b/init/do_mounts.c index 7058e14ad5f7..811e94daf0a8 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -296,7 +296,7 @@ EXPORT_SYMBOL_GPL(name_to_dev_t); static int __init root_dev_setup(char *line) { - strlcpy(saved_root_name, line, sizeof(saved_root_name)); + strscpy(saved_root_name, line, sizeof(saved_root_name)); return 1; } @@ -343,7 +343,7 @@ static int __init split_fs_names(char *page, size_t size, char *names) int count = 1; char *p = page; - strlcpy(p, root_fs_names, size); + strscpy(p, root_fs_names, size); while (*p++) { if (p[-1] == ',') { p[-1] = '\0'; diff --git a/init/initramfs.c b/init/initramfs.c index 18229cfe8906..2f5bfb7d7652 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -482,7 +482,7 @@ static long __init flush_buffer(void *bufv, unsigned long len) return origLen; } -static unsigned long my_inptr; /* index of next byte to be processed in inbuf */ +static unsigned long my_inptr __initdata; /* index of next byte to be processed in inbuf */ #include <linux/decompress/generic.h> diff --git a/init/main.c b/init/main.c index 61d735ba2ffb..aa21add5f7c5 100644 --- a/init/main.c +++ b/init/main.c @@ -424,7 +424,7 @@ static void __init setup_boot_config(void) if (!data) data = xbc_get_embedded_bootconfig(&size); - strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE); + strscpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE); err = parse_args("bootconfig", tmp_cmdline, NULL, 0, 0, 0, NULL, bootconfig_params); @@ -764,7 +764,7 @@ void __init parse_early_param(void) return; /* All fall through to do_early_param. 
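The strlcpy() to strscpy() conversions in the init/ hunks here are behavior-preserving for these callers but close a classic trap; the difference is easiest to see on a too-small buffer. A small illustration (the values in the comments assume the shown inputs):

    #include <linux/errno.h>
    #include <linux/string.h>

    static void copy_demo(void)
    {
            char dst[8];
            size_t n;
            ssize_t r;

            /* strlcpy() returns strlen(src), 12 here, so truncation is
             * only detectable by comparing against sizeof(dst), and the
             * whole source string is always traversed
             */
            n = strlcpy(dst, "hello world!", sizeof(dst));  /* n == 12 */

            /* strscpy() is bounded on both buffers and reports
             * truncation directly in its return value
             */
            r = strscpy(dst, "hello world!", sizeof(dst));  /* r == -E2BIG */
            r = strscpy(dst, "hello", sizeof(dst));         /* r == 5 */
            (void)n;
            (void)r;
    }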
*/ - strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE); + strscpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE); parse_early_options(tmp_cmdline); done = 1; } @@ -1244,7 +1244,7 @@ __setup("initcall_blacklist=", initcall_blacklist); static __init_or_module void trace_initcall_start_cb(void *data, initcall_t fn) { - ktime_t *calltime = (ktime_t *)data; + ktime_t *calltime = data; printk(KERN_DEBUG "calling %pS @ %i\n", fn, task_pid_nr(current)); *calltime = ktime_get(); @@ -1253,7 +1253,7 @@ trace_initcall_start_cb(void *data, initcall_t fn) static __init_or_module void trace_initcall_finish_cb(void *data, initcall_t fn, int ret) { - ktime_t rettime, *calltime = (ktime_t *)data; + ktime_t rettime, *calltime = data; rettime = ktime_get(); printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs\n", diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 9cf314b3f079..467a194b8a2e 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -986,8 +986,7 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name) out_unlock: inode_unlock(d_inode(mnt->mnt_root)); - if (inode) - iput(inode); + iput(inode); mnt_drop_write(mnt); out_name: putname(name); diff --git a/ipc/msg.c b/ipc/msg.c index a0d05775af2c..e4e0990e08f7 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -39,6 +39,7 @@ #include <linux/nsproxy.h> #include <linux/ipc_namespace.h> #include <linux/rhashtable.h> +#include <linux/percpu_counter.h> #include <asm/current.h> #include <linux/uaccess.h> @@ -285,10 +286,10 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) rcu_read_unlock(); list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) { - atomic_dec(&ns->msg_hdrs); + percpu_counter_sub_local(&ns->percpu_msg_hdrs, 1); free_msg(msg); } - atomic_sub(msq->q_cbytes, &ns->msg_bytes); + percpu_counter_sub_local(&ns->percpu_msg_bytes, msq->q_cbytes); ipc_update_pid(&msq->q_lspid, NULL); ipc_update_pid(&msq->q_lrpid, NULL); ipc_rcu_putref(&msq->q_perm, msg_rcu_free); @@ -495,17 +496,22 @@ static int msgctl_info(struct ipc_namespace *ns, int msqid, msginfo->msgssz = MSGSSZ; msginfo->msgseg = MSGSEG; down_read(&msg_ids(ns).rwsem); - if (cmd == MSG_INFO) { + if (cmd == MSG_INFO) msginfo->msgpool = msg_ids(ns).in_use; - msginfo->msgmap = atomic_read(&ns->msg_hdrs); - msginfo->msgtql = atomic_read(&ns->msg_bytes); + max_idx = ipc_get_maxidx(&msg_ids(ns)); + up_read(&msg_ids(ns).rwsem); + if (cmd == MSG_INFO) { + msginfo->msgmap = min_t(int, + percpu_counter_sum(&ns->percpu_msg_hdrs), + INT_MAX); + msginfo->msgtql = min_t(int, + percpu_counter_sum(&ns->percpu_msg_bytes), + INT_MAX); } else { msginfo->msgmap = MSGMAP; msginfo->msgpool = MSGPOOL; msginfo->msgtql = MSGTQL; } - max_idx = ipc_get_maxidx(&msg_ids(ns)); - up_read(&msg_ids(ns).rwsem); return (max_idx < 0) ? 
0 : max_idx; } @@ -935,8 +941,8 @@ static long do_msgsnd(int msqid, long mtype, void __user *mtext, list_add_tail(&msg->m_list, &msq->q_messages); msq->q_cbytes += msgsz; msq->q_qnum++; - atomic_add(msgsz, &ns->msg_bytes); - atomic_inc(&ns->msg_hdrs); + percpu_counter_add_local(&ns->percpu_msg_bytes, msgsz); + percpu_counter_add_local(&ns->percpu_msg_hdrs, 1); } err = 0; @@ -1159,8 +1165,8 @@ static long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, in msq->q_rtime = ktime_get_real_seconds(); ipc_update_pid(&msq->q_lrpid, task_tgid(current)); msq->q_cbytes -= msg->m_ts; - atomic_sub(msg->m_ts, &ns->msg_bytes); - atomic_dec(&ns->msg_hdrs); + percpu_counter_sub_local(&ns->percpu_msg_bytes, msg->m_ts); + percpu_counter_sub_local(&ns->percpu_msg_hdrs, 1); ss_wakeup(msq, &wake_q, false); goto out_unlock0; @@ -1297,20 +1303,34 @@ COMPAT_SYSCALL_DEFINE5(msgrcv, int, msqid, compat_uptr_t, msgp, } #endif -void msg_init_ns(struct ipc_namespace *ns) +int msg_init_ns(struct ipc_namespace *ns) { + int ret; + ns->msg_ctlmax = MSGMAX; ns->msg_ctlmnb = MSGMNB; ns->msg_ctlmni = MSGMNI; - atomic_set(&ns->msg_bytes, 0); - atomic_set(&ns->msg_hdrs, 0); + ret = percpu_counter_init(&ns->percpu_msg_bytes, 0, GFP_KERNEL); + if (ret) + goto fail_msg_bytes; + ret = percpu_counter_init(&ns->percpu_msg_hdrs, 0, GFP_KERNEL); + if (ret) + goto fail_msg_hdrs; ipc_init_ids(&ns->ids[IPC_MSG_IDS]); + return 0; + +fail_msg_hdrs: + percpu_counter_destroy(&ns->percpu_msg_bytes); +fail_msg_bytes: + return ret; } #ifdef CONFIG_IPC_NS void msg_exit_ns(struct ipc_namespace *ns) { + percpu_counter_destroy(&ns->percpu_msg_bytes); + percpu_counter_destroy(&ns->percpu_msg_hdrs); free_ipcs(ns, &msg_ids(ns), freeque); idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr); rhashtable_destroy(&ns->ids[IPC_MSG_IDS].key_ht); diff --git a/ipc/namespace.c b/ipc/namespace.c index e1fcaedba4fa..8316ea585733 100644 --- a/ipc/namespace.c +++ b/ipc/namespace.c @@ -66,8 +66,11 @@ static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns, if (!setup_ipc_sysctls(ns)) goto fail_mq; + err = msg_init_ns(ns); + if (err) + goto fail_put; + sem_init_ns(ns); - msg_init_ns(ns); shm_init_ns(ns); return ns; diff --git a/ipc/util.c b/ipc/util.c index a2208d0f26b2..05cb9de66735 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -782,28 +782,37 @@ struct pid_namespace *ipc_seq_pid_ns(struct seq_file *s) return iter->pid_ns; } -/* - * This routine locks the ipc structure found at least at position pos. +/** + * sysvipc_find_ipc - Find and lock the ipc structure based on seq pos + * @ids: ipc identifier set + * @pos: expected position + * + * The function finds an ipc structure, based on the sequence file + * position @pos. If there is no ipc structure at position @pos, then + * the successor is selected. + * If a structure is found, then it is locked (both rcu_read_lock() and + * ipc_lock_object()) and @pos is set to the position needed to locate + * the found ipc structure. + * If nothing is found (i.e. EOF), @pos is not modified. + * + * The function returns the found ipc structure, or NULL at EOF. 
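The rewrite that follows leans on idr_get_next(), which returns the entry at the given index or, failing that, the next allocated one, updating the index through its pointer argument; that is what lets the comment above promise that "the successor is selected". A small demonstration under the assumption that ids 3 and 7 are populated:

    #include <linux/idr.h>

    static void walk_demo(struct idr *idr)
    {
            void *p;
            int idx;

            idx = 0;                        /* ask for index >= 0 */
            p = idr_get_next(idr, &idx);    /* entry 3, idx becomes 3 */

            idx = 4;                        /* 4..6 are empty */
            p = idr_get_next(idr, &idx);    /* entry 7, idx becomes 7 */

            idx = 8;
            p = idr_get_next(idr, &idx);    /* NULL (EOF), idx untouched */
            (void)p;
    }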
*/ -static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos, - loff_t *new_pos) +static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t *pos) { - struct kern_ipc_perm *ipc = NULL; - int max_idx = ipc_get_maxidx(ids); + int tmpidx; + struct kern_ipc_perm *ipc; - if (max_idx == -1 || pos > max_idx) - goto out; + /* convert from position to idr index -> "-1" */ + tmpidx = *pos - 1; - for (; pos <= max_idx; pos++) { - ipc = idr_find(&ids->ipcs_idr, pos); - if (ipc != NULL) { - rcu_read_lock(); - ipc_lock_object(ipc); - break; - } + ipc = idr_get_next(&ids->ipcs_idr, &tmpidx); + if (ipc != NULL) { + rcu_read_lock(); + ipc_lock_object(ipc); + + /* convert from idr index to position -> "+1" */ + *pos = tmpidx + 1; } -out: - *new_pos = pos + 1; return ipc; } @@ -817,11 +826,13 @@ static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos) if (ipc && ipc != SEQ_START_TOKEN) ipc_unlock(ipc); - return sysvipc_find_ipc(&iter->ns->ids[iface->ids], *pos, pos); + /* Next -> search for *pos+1 */ + (*pos)++; + return sysvipc_find_ipc(&iter->ns->ids[iface->ids], pos); } /* - * File positions: pos 0 -> header, pos n -> ipc id = n - 1. + * File positions: pos 0 -> header, pos n -> ipc idx = n - 1. * SeqFile iterator: iterator value locked ipc pointer or SEQ_TOKEN_START. */ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos) @@ -846,8 +857,8 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos) if (*pos == 0) return SEQ_START_TOKEN; - /* Find the (pos-1)th ipc */ - return sysvipc_find_ipc(ids, *pos - 1, pos); + /* Otherwise return the correct ipc structure */ + return sysvipc_find_ipc(ids, pos); } static void sysvipc_proc_stop(struct seq_file *s, void *it) diff --git a/ipc/util.h b/ipc/util.h index 2dd7ce0416d8..b2906e366539 100644 --- a/ipc/util.h +++ b/ipc/util.h @@ -64,7 +64,7 @@ static inline void mq_put_mnt(struct ipc_namespace *ns) { } #ifdef CONFIG_SYSVIPC void sem_init_ns(struct ipc_namespace *ns); -void msg_init_ns(struct ipc_namespace *ns); +int msg_init_ns(struct ipc_namespace *ns); void shm_init_ns(struct ipc_namespace *ns); void sem_exit_ns(struct ipc_namespace *ns); @@ -72,7 +72,7 @@ void msg_exit_ns(struct ipc_namespace *ns); void shm_exit_ns(struct ipc_namespace *ns); #else static inline void sem_init_ns(struct ipc_namespace *ns) { } -static inline void msg_init_ns(struct ipc_namespace *ns) { } +static inline int msg_init_ns(struct ipc_namespace *ns) { return 0; } static inline void shm_init_ns(struct ipc_namespace *ns) { } static inline void sem_exit_ns(struct ipc_namespace *ns) { } diff --git a/kernel/exit.c b/kernel/exit.c index 98c859ffa728..35e0a31a0315 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -184,6 +184,10 @@ void put_task_struct_rcu_user(struct task_struct *task) call_rcu(&task->rcu, delayed_put_task_struct); } +void __weak release_thread(struct task_struct *dead_task) +{ +} + void release_task(struct task_struct *p) { struct task_struct *leader; diff --git a/kernel/fail_function.c b/kernel/fail_function.c index 60dc825ecc2b..a7ccd2930c5f 100644 --- a/kernel/fail_function.c +++ b/kernel/fail_function.c @@ -247,15 +247,11 @@ static ssize_t fei_write(struct file *file, const char __user *buffer, /* cut off if it is too long */ if (count > KSYM_NAME_LEN) count = KSYM_NAME_LEN; - buf = kmalloc(count + 1, GFP_KERNEL); - if (!buf) - return -ENOMEM; - if (copy_from_user(buf, buffer, count)) { - ret = -EFAULT; - goto out_free; - } - buf[count] = '\0'; + buf = memdup_user_nul(buffer, count); + if 
(IS_ERR(buf)) + return PTR_ERR(buf); + sym = strstrip(buf); mutex_lock(&fei_lock); @@ -298,17 +294,15 @@ static ssize_t fei_write(struct file *file, const char __user *buffer, } ret = register_kprobe(&attr->kp); - if (!ret) - fei_debugfs_add_attr(attr); - if (ret < 0) - fei_attr_remove(attr); - else { - list_add_tail(&attr->list, &fei_attr_list); - ret = count; + if (ret) { + fei_attr_free(attr); + goto out; } + fei_debugfs_add_attr(attr); + list_add_tail(&attr->list, &fei_attr_list); + ret = count; out: mutex_unlock(&fei_lock); -out_free: kfree(buf); return ret; } diff --git a/kernel/fork.c b/kernel/fork.c index 5f986a32d0e6..08969f5aa38d 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -97,7 +97,6 @@ #include <linux/scs.h> #include <linux/io_uring.h> #include <linux/bpf.h> -#include <linux/sched/mm.h> #include <asm/pgalloc.h> #include <linux/uaccess.h> diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 5db0230aa6b5..a91f9001103c 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -705,6 +705,30 @@ int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq) } EXPORT_SYMBOL_GPL(generic_handle_domain_irq); + /** + * generic_handle_domain_irq_safe - Invoke the handler for a HW irq belonging + * to a domain from any context. + * @domain: The domain where to perform the lookup + * @hwirq: The HW irq number to convert to a logical one + * + * Returns: 0 on success, a negative value on error. + * + * This function can be called from any context (IRQ or process + * context). If the interrupt is marked as 'enforce IRQ-context only' then + * the function must be invoked from hard interrupt context. + */ +int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq) +{ + unsigned long flags; + int ret; + + local_irq_save(flags); + ret = handle_irq_desc(irq_resolve_mapping(domain, hwirq)); + local_irq_restore(flags); + return ret; +} +EXPORT_SYMBOL_GPL(generic_handle_domain_irq_safe); + /** * generic_handle_domain_nmi - Invoke the handler for a HW nmi belonging * to a domain. diff --git a/kernel/kexec.c b/kernel/kexec.c index b5e40f069768..cb8e6e6f983c 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -93,13 +93,10 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments, /* * Because we write directly to the reserved memory region when loading - * crash kernels we need a mutex here to prevent multiple crash kernels - * from attempting to load simultaneously, and to prevent a crash kernel - * from loading over the top of a in use crash kernel. - * - * KISS: always take the mutex. + * crash kernels we need serialization here to prevent multiple crash + * kernels from attempting to load simultaneously. */ - if (!mutex_trylock(&kexec_mutex)) + if (!kexec_trylock()) return -EBUSY; if (flags & KEXEC_ON_CRASH) { @@ -165,7 +162,7 @@ out: kimage_free(image); out_unlock: - mutex_unlock(&kexec_mutex); + kexec_unlock(); return ret; } diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index acd029b307e4..ca2743f9c634 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -46,7 +46,7 @@ #include <crypto/hash.h> #include "kexec_internal.h" -DEFINE_MUTEX(kexec_mutex); +atomic_t __kexec_lock = ATOMIC_INIT(0); /* Per cpu memory for storing cpu states in case of system crash.
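The generic_handle_domain_irq_safe() helper added in the irqdesc.c hunk above exists for demultiplexing drivers that discover child interrupts from process context, for instance an irqchip whose status register sits behind a sleeping bus such as I2C. A hedged usage sketch; struct my_chip and its read_status callback are hypothetical:

    #include <linux/bitops.h>
    #include <linux/interrupt.h>
    #include <linux/irqdesc.h>
    #include <linux/irqdomain.h>

    struct my_chip {
            struct irq_domain *domain;
            unsigned int nr_irqs;
            unsigned long (*read_status)(struct my_chip *chip); /* may sleep */
    };

    static irqreturn_t demux_thread_fn(int irq, void *data)
    {
            struct my_chip *chip = data;
            unsigned long status = chip->read_status(chip);
            int hwirq;

            for_each_set_bit(hwirq, &status, chip->nr_irqs)
                    /* fine from this threaded handler, unlike plain
                     * generic_handle_domain_irq()
                     */
                    generic_handle_domain_irq_safe(chip->domain, hwirq);

            return IRQ_HANDLED;
    }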
*/ note_buf_t __percpu *crash_notes; @@ -809,7 +809,7 @@ static int kimage_load_normal_segment(struct kimage *image, if (result < 0) goto out; - ptr = kmap(page); + ptr = kmap_local_page(page); /* Start with a clear page */ clear_page(ptr); ptr += maddr & ~PAGE_MASK; @@ -822,7 +822,7 @@ static int kimage_load_normal_segment(struct kimage *image, memcpy(ptr, kbuf, uchunk); else result = copy_from_user(ptr, buf, uchunk); - kunmap(page); + kunmap_local(ptr); if (result) { result = -EFAULT; goto out; @@ -873,7 +873,7 @@ static int kimage_load_crash_segment(struct kimage *image, goto out; } arch_kexec_post_alloc_pages(page_address(page), 1, 0); - ptr = kmap(page); + ptr = kmap_local_page(page); ptr += maddr & ~PAGE_MASK; mchunk = min_t(size_t, mbytes, PAGE_SIZE - (maddr & ~PAGE_MASK)); @@ -889,7 +889,7 @@ static int kimage_load_crash_segment(struct kimage *image, else result = copy_from_user(ptr, buf, uchunk); kexec_flush_icache_page(page); - kunmap(page); + kunmap_local(ptr); arch_kexec_pre_free_pages(page_address(page), 1); if (result) { result = -EFAULT; @@ -959,7 +959,7 @@ late_initcall(kexec_core_sysctl_init); */ void __noclone __crash_kexec(struct pt_regs *regs) { - /* Take the kexec_mutex here to prevent sys_kexec_load + /* Take the kexec_lock here to prevent sys_kexec_load * running on one cpu from replacing the crash kernel * we are using after a panic on a different cpu. * @@ -967,7 +967,7 @@ void __noclone __crash_kexec(struct pt_regs *regs) * of memory the xchg(&kexec_crash_image) would be * sufficient. But since I reuse the memory... */ - if (mutex_trylock(&kexec_mutex)) { + if (kexec_trylock()) { if (kexec_crash_image) { struct pt_regs fixed_regs; @@ -976,7 +976,7 @@ void __noclone __crash_kexec(struct pt_regs *regs) machine_crash_shutdown(&fixed_regs); machine_kexec(kexec_crash_image); } - mutex_unlock(&kexec_mutex); + kexec_unlock(); } } STACK_FRAME_NON_STANDARD(__crash_kexec); @@ -1004,14 +1004,17 @@ void crash_kexec(struct pt_regs *regs) } } -size_t crash_get_memory_size(void) +ssize_t crash_get_memory_size(void) { - size_t size = 0; + ssize_t size = 0; + + if (!kexec_trylock()) + return -EBUSY; - mutex_lock(&kexec_mutex); if (crashk_res.end != crashk_res.start) size = resource_size(&crashk_res); - mutex_unlock(&kexec_mutex); + + kexec_unlock(); return size; } @@ -1022,7 +1025,8 @@ int crash_shrink_memory(unsigned long new_size) unsigned long old_size; struct resource *ram_res; - mutex_lock(&kexec_mutex); + if (!kexec_trylock()) + return -EBUSY; if (kexec_crash_image) { ret = -ENOENT; @@ -1060,7 +1064,7 @@ int crash_shrink_memory(unsigned long new_size) insert_resource(&iomem_resource, ram_res); unlock: - mutex_unlock(&kexec_mutex); + kexec_unlock(); return ret; } @@ -1132,7 +1136,7 @@ int kernel_kexec(void) { int error = 0; - if (!mutex_trylock(&kexec_mutex)) + if (!kexec_trylock()) return -EBUSY; if (!kexec_image) { error = -EINVAL; @@ -1208,6 +1212,6 @@ int kernel_kexec(void) #endif Unlock: - mutex_unlock(&kexec_mutex); + kexec_unlock(); return error; } diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index 1d546dc97c50..45637511e0de 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -339,7 +339,7 @@ SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd, image = NULL; - if (!mutex_trylock(&kexec_mutex)) + if (!kexec_trylock()) return -EBUSY; dest_image = &kexec_image; @@ -411,7 +411,7 @@ out: if ((flags & KEXEC_FILE_ON_CRASH) && kexec_crash_image) arch_kexec_protect_crashkres(); - mutex_unlock(&kexec_mutex); + kexec_unlock(); kimage_free(image); 
return ret; } diff --git a/kernel/kexec_internal.h b/kernel/kexec_internal.h index 48aaf2ac0d0d..74da1409cd14 100644 --- a/kernel/kexec_internal.h +++ b/kernel/kexec_internal.h @@ -13,7 +13,20 @@ void kimage_terminate(struct kimage *image); int kimage_is_destination_range(struct kimage *image, unsigned long start, unsigned long end); -extern struct mutex kexec_mutex; +/* + * Whatever is used to serialize accesses to the kexec_crash_image needs to be + * NMI safe, as __crash_kexec() can happen during nmi_panic(), so here we use a + * "simple" atomic variable that is acquired with a cmpxchg(). + */ +extern atomic_t __kexec_lock; +static inline bool kexec_trylock(void) +{ + return atomic_cmpxchg_acquire(&__kexec_lock, 0, 1) == 0; +} +static inline void kexec_unlock(void) +{ + atomic_set_release(&__kexec_lock, 0); +} #ifdef CONFIG_KEXEC_FILE #include <linux/purgatory.h> diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c index b1292a57c2a5..65dba9076f31 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c @@ -105,7 +105,12 @@ KERNEL_ATTR_RO(kexec_crash_loaded); static ssize_t kexec_crash_size_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - return sprintf(buf, "%zu\n", crash_get_memory_size()); + ssize_t size = crash_get_memory_size(); + + if (size < 0) + return size; + + return sprintf(buf, "%zd\n", size); } static ssize_t kexec_crash_size_store(struct kobject *kobj, struct kobj_attribute *attr, diff --git a/kernel/latencytop.c b/kernel/latencytop.c index 76166df011a4..781249098cb6 100644 --- a/kernel/latencytop.c +++ b/kernel/latencytop.c @@ -112,7 +112,7 @@ static void __sched account_global_scheduler_latency(struct task_struct *tsk, struct latency_record *lat) { - int firstnonnull = MAXLR + 1; + int firstnonnull = MAXLR; int i; /* skip kernel threads for now */ @@ -150,7 +150,7 @@ account_global_scheduler_latency(struct task_struct *tsk, } i = firstnonnull; - if (i >= MAXLR - 1) + if (i >= MAXLR) return; /* Allocted a new one: */ diff --git a/kernel/profile.c b/kernel/profile.c index 7ea01ba30e75..8a77769bc4b4 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -59,43 +59,39 @@ int profile_setup(char *str) static const char schedstr[] = "schedule"; static const char sleepstr[] = "sleep"; static const char kvmstr[] = "kvm"; + const char *select = NULL; int par; if (!strncmp(str, sleepstr, strlen(sleepstr))) { #ifdef CONFIG_SCHEDSTATS force_schedstat_enabled(); prof_on = SLEEP_PROFILING; - if (str[strlen(sleepstr)] == ',') - str += strlen(sleepstr) + 1; - if (get_option(&str, &par)) - prof_shift = clamp(par, 0, BITS_PER_LONG - 1); - pr_info("kernel sleep profiling enabled (shift: %u)\n", - prof_shift); + select = sleepstr; #else pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n"); #endif /* CONFIG_SCHEDSTATS */ } else if (!strncmp(str, schedstr, strlen(schedstr))) { prof_on = SCHED_PROFILING; - if (str[strlen(schedstr)] == ',') - str += strlen(schedstr) + 1; - if (get_option(&str, &par)) - prof_shift = clamp(par, 0, BITS_PER_LONG - 1); - pr_info("kernel schedule profiling enabled (shift: %u)\n", - prof_shift); + select = schedstr; } else if (!strncmp(str, kvmstr, strlen(kvmstr))) { prof_on = KVM_PROFILING; - if (str[strlen(kvmstr)] == ',') - str += strlen(kvmstr) + 1; - if (get_option(&str, &par)) - prof_shift = clamp(par, 0, BITS_PER_LONG - 1); - pr_info("kernel KVM profiling enabled (shift: %u)\n", - prof_shift); + select = kvmstr; } else if (get_option(&str, &par)) { prof_shift = clamp(par, 0, BITS_PER_LONG - 1); prof_on = CPU_PROFILING; pr_info("kernel 
profiling enabled (shift: %u)\n", prof_shift); } + + if (select) { + if (str[strlen(select)] == ',') + str += strlen(select) + 1; + if (get_option(&str, &par)) + prof_shift = clamp(par, 0, BITS_PER_LONG - 1); + pr_info("kernel %s profiling enabled (shift: %u)\n", + select, prof_shift); + } + return 1; } __setup("profile=", profile_setup); diff --git a/kernel/relay.c b/kernel/relay.c index 6a611e779e95..d7edc934c56d 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -60,10 +60,7 @@ static const struct vm_operations_struct relay_file_mmap_ops = { */ static struct page **relay_alloc_page_array(unsigned int n_pages) { - const size_t pa_size = n_pages * sizeof(struct page *); - if (pa_size > PAGE_SIZE) - return vzalloc(pa_size); - return kzalloc(pa_size, GFP_KERNEL); + return kvcalloc(n_pages, sizeof(struct page *), GFP_KERNEL); } /* diff --git a/kernel/smpboot.c b/kernel/smpboot.c index b9f54544e749..2c7396da470c 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c @@ -433,7 +433,7 @@ bool cpu_wait_death(unsigned int cpu, int seconds) /* The outgoing CPU will normally get done quite quickly. */ if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD) - goto update_state; + goto update_state_early; udelay(5); /* But if the outgoing CPU dawdles, wait increasingly long times. */ @@ -444,16 +444,17 @@ bool cpu_wait_death(unsigned int cpu, int seconds) break; sleep_jf = DIV_ROUND_UP(sleep_jf * 11, 10); } -update_state: +update_state_early: oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu)); +update_state: if (oldstate == CPU_DEAD) { /* Outgoing CPU died normally, update state. */ smp_mb(); /* atomic_read() before update. */ atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD); } else { /* Outgoing CPU still hasn't died, set state accordingly. */ - if (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu), - oldstate, CPU_BROKEN) != oldstate) + if (!atomic_try_cmpxchg(&per_cpu(cpu_hotplug_state, cpu), + &oldstate, CPU_BROKEN)) goto update_state; ret = false; } @@ -475,14 +476,14 @@ bool cpu_report_death(void) int newstate; int cpu = smp_processor_id(); + oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu)); do { - oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu)); if (oldstate != CPU_BROKEN) newstate = CPU_DEAD; else newstate = CPU_DEAD_FROZEN; - } while (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu), - oldstate, newstate) != oldstate); + } while (!atomic_try_cmpxchg(&per_cpu(cpu_hotplug_state, cpu), + &oldstate, newstate)); return newstate == CPU_DEAD; } diff --git a/kernel/task_work.c b/kernel/task_work.c index dff75bcde151..065e1ef8fc8d 100644 --- a/kernel/task_work.c +++ b/kernel/task_work.c @@ -47,12 +47,12 @@ int task_work_add(struct task_struct *task, struct callback_head *work, /* record the work call stack in order to print it in KASAN reports */ kasan_record_aux_stack(work); + head = READ_ONCE(task->task_works); do { - head = READ_ONCE(task->task_works); if (unlikely(head == &work_exited)) return -ESRCH; work->next = head; - } while (cmpxchg(&task->task_works, head, work) != head); + } while (!try_cmpxchg(&task->task_works, &head, work)); switch (notify) { case TWA_NONE: @@ -100,10 +100,12 @@ task_work_cancel_match(struct task_struct *task, * we raced with task_work_run(), *pprev == NULL/exited. 
*/ raw_spin_lock_irqsave(&task->pi_lock, flags); - while ((work = READ_ONCE(*pprev))) { - if (!match(work, data)) + work = READ_ONCE(*pprev); + while (work) { + if (!match(work, data)) { pprev = &work->next; - else if (cmpxchg(pprev, work, work->next) == work) + work = READ_ONCE(*pprev); + } else if (try_cmpxchg(pprev, &work, work->next)) break; } raw_spin_unlock_irqrestore(&task->pi_lock, flags); @@ -151,16 +153,16 @@ void task_work_run(void) * work->func() can do task_work_add(), do not set * work_exited unless the list is empty. */ + work = READ_ONCE(task->task_works); do { head = NULL; - work = READ_ONCE(task->task_works); if (!work) { if (task->flags & PF_EXITING) head = &work_exited; else break; } - } while (cmpxchg(&task->task_works, work, head) != work); + } while (!try_cmpxchg(&task->task_works, &work, head)); if (!work) break; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 02e18c436439..fbf2543111c0 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2028,7 +2028,6 @@ static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, static void print_ip_ins(const char *fmt, const unsigned char *p) { char ins[MCOUNT_INSN_SIZE]; - int i; if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) { printk(KERN_CONT "%s[FAULT] %px\n", fmt, p); @@ -2036,9 +2035,7 @@ static void print_ip_ins(const char *fmt, const unsigned char *p) } printk(KERN_CONT "%s", fmt); - - for (i = 0; i < MCOUNT_INSN_SIZE; i++) - printk(KERN_CONT "%s%02x", i ? ":" : "", ins[i]); + pr_cont("%*phC", MCOUNT_INSN_SIZE, ins); } enum ftrace_bug_type ftrace_bug_type; diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index c3f354cfc5ba..199759c73519 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -885,7 +885,7 @@ size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu) } /** - * ring_buffer_nr_pages_dirty - get the number of used pages in the ring buffer + * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer * @buffer: The ring_buffer to get the number of pages from * @cpu: The cpu of the ring_buffer to get the number of pages from * @@ -5305,7 +5305,7 @@ void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); /** - * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer + * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer * @buffer: The ring buffer to reset a per cpu buffer of * @cpu: The CPU buffer to be reset */ @@ -5375,7 +5375,7 @@ void ring_buffer_reset(struct trace_buffer *buffer) EXPORT_SYMBOL_GPL(ring_buffer_reset); /** - * rind_buffer_empty - is the ring buffer empty? + * ring_buffer_empty - is the ring buffer empty? 
* @buffer: The ring buffer to test */ bool ring_buffer_empty(struct trace_buffer *buffer) diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c index c08bde9871ec..5dd0617e5df6 100644 --- a/kernel/trace/trace_eprobe.c +++ b/kernel/trace/trace_eprobe.c @@ -16,6 +16,7 @@ #include "trace_dynevent.h" #include "trace_probe.h" #include "trace_probe_tmpl.h" +#include "trace_probe_kernel.h" #define EPROBE_EVENT_SYSTEM "eprobes" @@ -456,29 +457,14 @@ NOKPROBE_SYMBOL(process_fetch_insn) static nokprobe_inline int fetch_store_strlen_user(unsigned long addr) { - const void __user *uaddr = (__force const void __user *)addr; - - return strnlen_user_nofault(uaddr, MAX_STRING_SIZE); + return kern_fetch_store_strlen_user(addr); } /* Return the length of string -- including null terminal byte */ static nokprobe_inline int fetch_store_strlen(unsigned long addr) { - int ret, len = 0; - u8 c; - -#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE - if (addr < TASK_SIZE) - return fetch_store_strlen_user(addr); -#endif - - do { - ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1); - len++; - } while (c && ret == 0 && len < MAX_STRING_SIZE); - - return (ret < 0) ? ret : len; + return kern_fetch_store_strlen(addr); } /* @@ -488,21 +474,7 @@ fetch_store_strlen(unsigned long addr) static nokprobe_inline int fetch_store_string_user(unsigned long addr, void *dest, void *base) { - const void __user *uaddr = (__force const void __user *)addr; - int maxlen = get_loc_len(*(u32 *)dest); - void *__dest; - long ret; - - if (unlikely(!maxlen)) - return -ENOMEM; - - __dest = get_loc_data(dest, base); - - ret = strncpy_from_user_nofault(__dest, uaddr, maxlen); - if (ret >= 0) - *(u32 *)dest = make_data_loc(ret, __dest - base); - - return ret; + return kern_fetch_store_string_user(addr, dest, base); } /* @@ -512,29 +484,7 @@ fetch_store_string_user(unsigned long addr, void *dest, void *base) static nokprobe_inline int fetch_store_string(unsigned long addr, void *dest, void *base) { - int maxlen = get_loc_len(*(u32 *)dest); - void *__dest; - long ret; - -#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE - if ((unsigned long)addr < TASK_SIZE) - return fetch_store_string_user(addr, dest, base); -#endif - - if (unlikely(!maxlen)) - return -ENOMEM; - - __dest = get_loc_data(dest, base); - - /* - * Try to get string again, since the string can be changed while - * probing. 
- */ - ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen); - if (ret >= 0) - *(u32 *)dest = make_data_loc(ret, __dest - base); - - return ret; + return kern_fetch_store_string(addr, dest, base); } static nokprobe_inline int diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index 5e8c07aef071..e310052dc83c 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -17,6 +17,8 @@ /* for gfp flag names */ #include <linux/trace_events.h> #include <trace/events/mmflags.h> +#include "trace_probe.h" +#include "trace_probe_kernel.h" #include "trace_synth.h" @@ -409,6 +411,7 @@ static unsigned int trace_string(struct synth_trace_event *entry, { unsigned int len = 0; char *str_field; + int ret; if (is_dynamic) { u32 data_offset; @@ -417,19 +420,27 @@ static unsigned int trace_string(struct synth_trace_event *entry, data_offset += event->n_u64 * sizeof(u64); data_offset += data_size; - str_field = (char *)entry + data_offset; - - len = strlen(str_val) + 1; - strscpy(str_field, str_val, len); + len = kern_fetch_store_strlen((unsigned long)str_val); data_offset |= len << 16; *(u32 *)&entry->fields[*n_u64] = data_offset; + ret = kern_fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry); + (*n_u64)++; } else { str_field = (char *)&entry->fields[*n_u64]; - strscpy(str_field, str_val, STR_VAR_LEN_MAX); +#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE + if ((unsigned long)str_val < TASK_SIZE) + ret = strncpy_from_user_nofault(str_field, str_val, STR_VAR_LEN_MAX); + else +#endif + ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX); + + if (ret < 0) + strcpy(str_field, FAULT_STRING); + (*n_u64) += STR_VAR_LEN_MAX / sizeof(u64); } @@ -462,7 +473,7 @@ static notrace void trace_event_raw_event_synth(void *__data, val_idx = var_ref_idx[field_pos]; str_val = (char *)(long)var_ref_vals[val_idx]; - len = strlen(str_val) + 1; + len = kern_fetch_store_strlen((unsigned long)str_val); fields_size += len; } diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 23f7f0ec4f4c..5a75b039e586 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -20,6 +20,7 @@ #include "trace_kprobe_selftest.h" #include "trace_probe.h" #include "trace_probe_tmpl.h" +#include "trace_probe_kernel.h" #define KPROBE_EVENT_SYSTEM "kprobes" #define KRETPROBE_MAXACTIVE_MAX 4096 @@ -1223,29 +1224,14 @@ static const struct file_operations kprobe_profile_ops = { static nokprobe_inline int fetch_store_strlen_user(unsigned long addr) { - const void __user *uaddr = (__force const void __user *)addr; - - return strnlen_user_nofault(uaddr, MAX_STRING_SIZE); + return kern_fetch_store_strlen_user(addr); } /* Return the length of string -- including null terminal byte */ static nokprobe_inline int fetch_store_strlen(unsigned long addr) { - int ret, len = 0; - u8 c; - -#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE - if (addr < TASK_SIZE) - return fetch_store_strlen_user(addr); -#endif - - do { - ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1); - len++; - } while (c && ret == 0 && len < MAX_STRING_SIZE); - - return (ret < 0) ? 
ret : len; + return kern_fetch_store_strlen(addr); } /* @@ -1255,21 +1241,7 @@ fetch_store_strlen(unsigned long addr) static nokprobe_inline int fetch_store_string_user(unsigned long addr, void *dest, void *base) { - const void __user *uaddr = (__force const void __user *)addr; - int maxlen = get_loc_len(*(u32 *)dest); - void *__dest; - long ret; - - if (unlikely(!maxlen)) - return -ENOMEM; - - __dest = get_loc_data(dest, base); - - ret = strncpy_from_user_nofault(__dest, uaddr, maxlen); - if (ret >= 0) - *(u32 *)dest = make_data_loc(ret, __dest - base); - - return ret; + return kern_fetch_store_string_user(addr, dest, base); } /* @@ -1279,29 +1251,7 @@ fetch_store_string_user(unsigned long addr, void *dest, void *base) static nokprobe_inline int fetch_store_string(unsigned long addr, void *dest, void *base) { - int maxlen = get_loc_len(*(u32 *)dest); - void *__dest; - long ret; - -#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE - if ((unsigned long)addr < TASK_SIZE) - return fetch_store_string_user(addr, dest, base); -#endif - - if (unlikely(!maxlen)) - return -ENOMEM; - - __dest = get_loc_data(dest, base); - - /* - * Try to get string again, since the string can be changed while - * probing. - */ - ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen); - if (ret >= 0) - *(u32 *)dest = make_data_loc(ret, __dest - base); - - return ret; + return kern_fetch_store_string(addr, dest, base); } static nokprobe_inline int diff --git a/kernel/trace/trace_probe_kernel.h b/kernel/trace/trace_probe_kernel.h new file mode 100644 index 000000000000..77dbd9ff9782 --- /dev/null +++ b/kernel/trace/trace_probe_kernel.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __TRACE_PROBE_KERNEL_H_ +#define __TRACE_PROBE_KERNEL_H_ + +#define FAULT_STRING "(fault)" + +/* + * This depends on trace_probe.h, but can not include it due to + * the way trace_probe_tmpl.h is used by trace_kprobe.c and trace_eprobe.c. + * Which means that any other user must include trace_probe.h before including + * this file. + */ +/* Return the length of string -- including null terminal byte */ +static nokprobe_inline int +kern_fetch_store_strlen_user(unsigned long addr) +{ + const void __user *uaddr = (__force const void __user *)addr; + int ret; + + ret = strnlen_user_nofault(uaddr, MAX_STRING_SIZE); + /* + * strnlen_user_nofault returns zero on fault, insert the + * FAULT_STRING when that occurs. + */ + if (ret <= 0) + return strlen(FAULT_STRING) + 1; + return ret; +} + +/* Return the length of string -- including null terminal byte */ +static nokprobe_inline int +kern_fetch_store_strlen(unsigned long addr) +{ + int ret, len = 0; + u8 c; + +#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE + if (addr < TASK_SIZE) + return kern_fetch_store_strlen_user(addr); +#endif + + do { + ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1); + len++; + } while (c && ret == 0 && len < MAX_STRING_SIZE); + + /* For faults, return enough to hold the FAULT_STRING */ + return (ret < 0) ? strlen(FAULT_STRING) + 1 : len; +} + +static nokprobe_inline void set_data_loc(int ret, void *dest, void *__dest, void *base, int len) +{ + if (ret >= 0) { + *(u32 *)dest = make_data_loc(ret, __dest - base); + } else { + strscpy(__dest, FAULT_STRING, len); + ret = strlen(__dest) + 1; + } +} + +/* + * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf + * with max length and relative data location. 
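The "max length and relative data location" packed into *(u32 *)dest by these helpers follows the usual ftrace __data_loc convention: length in the upper 16 bits, buffer offset in the lower 16. A standalone re-statement of that encoding, with hypothetical helper names (the kernel versions are make_data_loc()/get_loc_len() in trace_probe.h):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pack_data_loc(uint32_t len, uint32_t offs)
    {
            return (len << 16) | (offs & 0xffff);   /* len:16 | offset:16 */
    }

    int main(void)
    {
            uint32_t loc = pack_data_loc(8, 40);    /* 8-byte string at offset 40 */

            printf("len=%u offs=%u\n", loc >> 16, loc & 0xffff);
            return 0;
    }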
+ */ +static nokprobe_inline int +kern_fetch_store_string_user(unsigned long addr, void *dest, void *base) +{ + const void __user *uaddr = (__force const void __user *)addr; + int maxlen = get_loc_len(*(u32 *)dest); + void *__dest; + long ret; + + if (unlikely(!maxlen)) + return -ENOMEM; + + __dest = get_loc_data(dest, base); + + ret = strncpy_from_user_nofault(__dest, uaddr, maxlen); + set_data_loc(ret, dest, __dest, base, maxlen); + + return ret; +} + +/* + * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max + * length and relative data location. + */ +static nokprobe_inline int +kern_fetch_store_string(unsigned long addr, void *dest, void *base) +{ + int maxlen = get_loc_len(*(u32 *)dest); + void *__dest; + long ret; + +#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE + if ((unsigned long)addr < TASK_SIZE) + return kern_fetch_store_string_user(addr, dest, base); +#endif + + if (unlikely(!maxlen)) + return -ENOMEM; + + __dest = get_loc_data(dest, base); + + /* + * Try to get string again, since the string can be changed while + * probing. + */ + ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen); + set_data_loc(ret, dest, __dest, base, maxlen); + + return ret; +} + +#endif /* __TRACE_PROBE_KERNEL_H_ */ diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c index de16bcf14b03..064072c16e3d 100644 --- a/kernel/utsname_sysctl.c +++ b/kernel/utsname_sysctl.c @@ -76,6 +76,13 @@ static DEFINE_CTL_TABLE_POLL(domainname_poll); static struct ctl_table uts_kern_table[] = { { + .procname = "arch", + .data = init_uts_ns.name.machine, + .maxlen = sizeof(init_uts_ns.name.machine), + .mode = 0444, + .proc_handler = proc_do_uts_string, + }, + { .procname = "ostype", .data = init_uts_ns.name.sysname, .maxlen = sizeof(init_uts_ns.name.sysname), diff --git a/lib/cmdline.c b/lib/cmdline.c index 5546bf588780..90ed997d9570 100644 --- a/lib/cmdline.c +++ b/lib/cmdline.c @@ -260,7 +260,7 @@ char *next_arg(char *args, char **param, char **val) args[i-1] = '\0'; } } - if (quoted && args[i-1] == '"') + if (quoted && i > 0 && args[i-1] == '"') args[i-1] = '\0'; if (args[i]) { diff --git a/lib/earlycpio.c b/lib/earlycpio.c index 7921193f0424..d2c37d64fd0c 100644 --- a/lib/earlycpio.c +++ b/lib/earlycpio.c @@ -126,7 +126,7 @@ struct cpio_data find_cpio_data(const char *path, void *data, "File %s exceeding MAX_CPIO_FILE_NAME [%d]\n", p, MAX_CPIO_FILE_NAME); } - strlcpy(cd.name, p + mypathsize, MAX_CPIO_FILE_NAME); + strscpy(cd.name, p + mypathsize, MAX_CPIO_FILE_NAME); cd.data = (void *)dptr; cd.size = ch[C_FILESIZE]; diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c index 13d0bd8b07a9..4df0335d0d06 100644 --- a/lib/kunit/kunit-test.c +++ b/lib/kunit/kunit-test.c @@ -161,6 +161,13 @@ static void kunit_resource_test_alloc_resource(struct kunit *test) kunit_put_resource(res); } +static inline bool kunit_resource_instance_match(struct kunit *test, + struct kunit_resource *res, + void *match_data) +{ + return res->data == match_data; +} + /* * Note: tests below use kunit_alloc_and_get_resource(), so as a consequence * they have a reference to the associated resource that they must release diff --git a/lib/kunit/string-stream.c b/lib/kunit/string-stream.c index 141789ca8949..f5ae79c37400 100644 --- a/lib/kunit/string-stream.c +++ b/lib/kunit/string-stream.c @@ -12,62 +12,29 @@ #include "string-stream.h" -struct string_stream_fragment_alloc_context { - struct kunit *test; - int len; - gfp_t gfp; -}; -static int string_stream_fragment_init(struct kunit_resource *res, - 
void *context) +static struct string_stream_fragment *alloc_string_stream_fragment( + struct kunit *test, int len, gfp_t gfp) { - struct string_stream_fragment_alloc_context *ctx = context; struct string_stream_fragment *frag; - frag = kunit_kzalloc(ctx->test, sizeof(*frag), ctx->gfp); + frag = kunit_kzalloc(test, sizeof(*frag), gfp); if (!frag) - return -ENOMEM; + return ERR_PTR(-ENOMEM); - frag->test = ctx->test; - frag->fragment = kunit_kmalloc(ctx->test, ctx->len, ctx->gfp); + frag->fragment = kunit_kmalloc(test, len, gfp); if (!frag->fragment) - return -ENOMEM; + return ERR_PTR(-ENOMEM); - res->data = frag; - - return 0; + return frag; } -static void string_stream_fragment_free(struct kunit_resource *res) +static void string_stream_fragment_destroy(struct kunit *test, + struct string_stream_fragment *frag) { - struct string_stream_fragment *frag = res->data; - list_del(&frag->node); - kunit_kfree(frag->test, frag->fragment); - kunit_kfree(frag->test, frag); -} - -static struct string_stream_fragment *alloc_string_stream_fragment( - struct kunit *test, int len, gfp_t gfp) -{ - struct string_stream_fragment_alloc_context context = { - .test = test, - .len = len, - .gfp = gfp - }; - - return kunit_alloc_resource(test, - string_stream_fragment_init, - string_stream_fragment_free, - gfp, - &context); -} - -static int string_stream_fragment_destroy(struct string_stream_fragment *frag) -{ - return kunit_destroy_resource(frag->test, - kunit_resource_instance_match, - frag); + kunit_kfree(test, frag->fragment); + kunit_kfree(test, frag); } int string_stream_vadd(struct string_stream *stream, @@ -122,7 +89,7 @@ static void string_stream_clear(struct string_stream *stream) frag_container_safe, &stream->fragments, node) { - string_stream_fragment_destroy(frag_container); + string_stream_fragment_destroy(stream->test, frag_container); } stream->length = 0; spin_unlock(&stream->lock); @@ -169,48 +136,23 @@ struct string_stream_alloc_context { gfp_t gfp; }; -static int string_stream_init(struct kunit_resource *res, void *context) +struct string_stream *alloc_string_stream(struct kunit *test, gfp_t gfp) { struct string_stream *stream; - struct string_stream_alloc_context *ctx = context; - stream = kunit_kzalloc(ctx->test, sizeof(*stream), ctx->gfp); + stream = kunit_kzalloc(test, sizeof(*stream), gfp); if (!stream) - return -ENOMEM; + return ERR_PTR(-ENOMEM); - res->data = stream; - stream->gfp = ctx->gfp; - stream->test = ctx->test; + stream->gfp = gfp; + stream->test = test; INIT_LIST_HEAD(&stream->fragments); spin_lock_init(&stream->lock); - return 0; + return stream; } -static void string_stream_free(struct kunit_resource *res) +void string_stream_destroy(struct string_stream *stream) { - struct string_stream *stream = res->data; - string_stream_clear(stream); } - -struct string_stream *alloc_string_stream(struct kunit *test, gfp_t gfp) -{ - struct string_stream_alloc_context context = { - .test = test, - .gfp = gfp - }; - - return kunit_alloc_resource(test, - string_stream_init, - string_stream_free, - gfp, - &context); -} - -int string_stream_destroy(struct string_stream *stream) -{ - return kunit_destroy_resource(stream->test, - kunit_resource_instance_match, - stream); -} diff --git a/lib/kunit/string-stream.h b/lib/kunit/string-stream.h index 43f9508a55b4..b669f9a75a94 100644 --- a/lib/kunit/string-stream.h +++ b/lib/kunit/string-stream.h @@ -14,7 +14,6 @@ #include <linux/stdarg.h> struct string_stream_fragment { - struct kunit *test; struct list_head node; char *fragment; }; @@ -46,6 +45,6 
@@ int string_stream_append(struct string_stream *stream, bool string_stream_is_empty(struct string_stream *stream); -int string_stream_destroy(struct string_stream *stream); +void string_stream_destroy(struct string_stream *stream); #endif /* _KUNIT_STRING_STREAM_H */ diff --git a/lib/kunit/test.c b/lib/kunit/test.c index 1e54373309a4..90640a43cf62 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -258,7 +258,7 @@ static void kunit_print_string_stream(struct kunit *test, static void kunit_fail(struct kunit *test, const struct kunit_loc *loc, enum kunit_assert_type type, const struct kunit_assert *assert, - const struct va_format *message) + assert_format_t assert_format, const struct va_format *message) { struct string_stream *stream; @@ -274,11 +274,11 @@ static void kunit_fail(struct kunit *test, const struct kunit_loc *loc, } kunit_assert_prologue(loc, type, stream); - assert->format(assert, message, stream); + assert_format(assert, message, stream); kunit_print_string_stream(test, stream); - WARN_ON(string_stream_destroy(stream)); + string_stream_destroy(stream); } static void __noreturn kunit_abort(struct kunit *test) @@ -298,6 +298,7 @@ void kunit_do_failed_assertion(struct kunit *test, const struct kunit_loc *loc, enum kunit_assert_type type, const struct kunit_assert *assert, + assert_format_t assert_format, const char *fmt, ...) { va_list args; @@ -307,7 +308,7 @@ void kunit_do_failed_assertion(struct kunit *test, message.fmt = fmt; message.va = &args; - kunit_fail(test, loc, type, assert, &message); + kunit_fail(test, loc, type, assert, assert_format, &message); va_end(args); @@ -713,21 +714,20 @@ void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp) } EXPORT_SYMBOL_GPL(kunit_kmalloc_array); -void kunit_kfree(struct kunit *test, const void *ptr) +static inline bool kunit_kfree_match(struct kunit *test, + struct kunit_resource *res, void *match_data) { - struct kunit_resource *res; - - res = kunit_find_resource(test, kunit_resource_instance_match, - (void *)ptr); - - /* - * Removing the resource from the list of resources drops the - * reference count to 1; the final put will trigger the free. - */ - kunit_remove_resource(test, res); + /* Only match resources allocated with kunit_kmalloc() and friends. 
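The kunit_kfree() rewrite here leans on the generic match-callback API: kunit_destroy_resource() walks the test's resource list, calls the predicate on each entry, and removes and frees the first match. A sketch of the same pattern with a hypothetical predicate and caller (assumes the KUnit resource API from <kunit/test.h>):

    static bool ptr_match(struct kunit *test, struct kunit_resource *res,
                          void *match_data)
    {
            return res->data == match_data;         /* predicate decides the match */
    }

    static void release_by_pointer(struct kunit *test, void *ptr)
    {
            /* returns 0 on success, an error when nothing matched */
            if (kunit_destroy_resource(test, ptr_match, ptr))
                    kunit_info(test, "%px was not a tracked resource", ptr);
    }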
*/ + return res->free == kunit_kmalloc_array_free && res->data == match_data; +} - kunit_put_resource(res); +void kunit_kfree(struct kunit *test, const void *ptr) +{ + if (!ptr) + return; + if (kunit_destroy_resource(test, kunit_kfree_match, (void *)ptr)) + KUNIT_FAIL(test, "kunit_kfree: %px already freed or not allocated by kunit", ptr); } EXPORT_SYMBOL_GPL(kunit_kfree); diff --git a/lib/llist.c b/lib/llist.c index 611ce4881a87..7d78b736e8af 100644 --- a/lib/llist.c +++ b/lib/llist.c @@ -30,7 +30,7 @@ bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, do { new_last->next = first = READ_ONCE(head->first); - } while (cmpxchg(&head->first, first, new_first) != first); + } while (!try_cmpxchg(&head->first, &first, new_first)); return !first; } @@ -52,18 +52,14 @@ EXPORT_SYMBOL_GPL(llist_add_batch); */ struct llist_node *llist_del_first(struct llist_head *head) { - struct llist_node *entry, *old_entry, *next; + struct llist_node *entry, *next; entry = smp_load_acquire(&head->first); - for (;;) { + do { if (entry == NULL) return NULL; - old_entry = entry; next = READ_ONCE(entry->next); - entry = cmpxchg(&head->first, old_entry, next); - if (entry == old_entry) - break; - } + } while (!try_cmpxchg(&head->first, &entry, next)); return entry; } diff --git a/mm/damon/core.c b/mm/damon/core.c index 4de8c7c52979..8e1ab38d0f1f 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -335,6 +335,7 @@ struct damon_target *damon_new_target(void) t->pid = NULL; t->nr_regions = 0; INIT_LIST_HEAD(&t->regions_list); + INIT_LIST_HEAD(&t->list); return t; } diff --git a/mm/gup.c b/mm/gup.c --- a/mm/gup.c +++ b/mm/gup.c @@ -537,6 +537,18 @@ static struct page *follow_page_pte(struct vm_area_struct *vma, if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == (FOLL_PIN | FOLL_GET))) return ERR_PTR(-EINVAL); + + /* + * Considering PTE level hugetlb, like continuous-PTE hugetlb on + * ARM64 architecture. + */ + if (is_vm_hugetlb_page(vma)) { + page = follow_huge_pmd_pte(vma, address, flags); + if (page) + return page; + return no_page_table(vma, flags); + } + retry: if (unlikely(pmd_bad(*pmd))) return no_page_table(vma, flags); @@ -669,7 +681,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma, if (pmd_none(pmdval)) return no_page_table(vma, flags); if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) { - page = follow_huge_pmd(mm, address, pmd, flags); + page = follow_huge_pmd_pte(vma, address, flags); if (page) return page; return no_page_table(vma, flags); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 0ad53ad98e74..57b7b0b5d9eb 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -7150,12 +7150,13 @@ follow_huge_pd(struct vm_area_struct *vma, } struct page * __weak -follow_huge_pmd(struct mm_struct *mm, unsigned long address, - pmd_t *pmd, int flags) +follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, int flags) { + struct hstate *h = hstate_vma(vma); + struct mm_struct *mm = vma->vm_mm; struct page *page = NULL; spinlock_t *ptl; - pte_t pte; + pte_t *ptep, pte; /* * FOLL_PIN is not supported for follow_page(). Ordinary GUP goes via @@ -7165,17 +7166,15 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, return NULL; retry: - ptl = pmd_lockptr(mm, pmd); - spin_lock(ptl); - /* - * make sure that the address range covered by this pmd is not - * unmapped from other threads.
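The follow_huge_pmd_pte() rework continuing below derives the target subpage as pte_page(pte) + ((address & ~huge_page_mask(h)) >> PAGE_SHIFT). A worked user-space example of that arithmetic, assuming a 2 MiB huge page and a 4 KiB PAGE_SIZE:

    #include <stdio.h>

    int main(void)
    {
            unsigned long huge_mask = ~((2UL << 20) - 1);   /* 2 MiB huge page mask */
            unsigned long addr      = 0x40001234UL;
            unsigned long subpage   = (addr & ~huge_mask) >> 12;  /* PAGE_SHIFT = 12 */

            /* offset 0x1234 inside the huge page falls in 4 KiB subpage 1 */
            printf("subpage index = %lu\n", subpage);
            return 0;
    }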
- */ - if (!pmd_huge(*pmd)) - goto out; - pte = huge_ptep_get((pte_t *)pmd); + ptep = huge_pte_offset(mm, address, huge_page_size(h)); + if (!ptep) + return NULL; + + ptl = huge_pte_lock(h, mm, ptep); + pte = huge_ptep_get(ptep); if (pte_present(pte)) { - page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); + page = pte_page(pte) + + ((address & ~huge_page_mask(h)) >> PAGE_SHIFT); /* * try_grab_page() should always succeed here, because: a) we * hold the pmd (ptl) lock, and b) we've just checked that the @@ -7191,7 +7190,7 @@ retry: } else { if (is_hugetlb_entry_migration(pte)) { spin_unlock(ptl); - __migration_entry_wait_huge((pte_t *)pmd, ptl); + __migration_entry_wait_huge(ptep, ptl); goto retry; } /* diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index d3bb656308b4..dfa237fbd5a3 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -728,7 +728,6 @@ static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor, it->iter.bi_size = cursor->resid; BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter)); - cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter); } static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor, @@ -754,10 +753,8 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor, cursor->resid -= bytes; bio_advance_iter(it->bio, &it->iter, bytes); - if (!cursor->resid) { - BUG_ON(!cursor->last_piece); + if (!cursor->resid) return false; /* no more data */ - } if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done && page == bio_iter_page(it->bio, it->iter))) @@ -770,9 +767,7 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor, it->iter.bi_size = cursor->resid; } - BUG_ON(cursor->last_piece); BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter)); - cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter); return true; } #endif /* CONFIG_BLOCK */ @@ -788,8 +783,6 @@ static void ceph_msg_data_bvecs_cursor_init(struct ceph_msg_data_cursor *cursor, cursor->bvec_iter.bi_size = cursor->resid; BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter)); - cursor->last_piece = - cursor->resid == bvec_iter_len(bvecs, cursor->bvec_iter); } static struct page *ceph_msg_data_bvecs_next(struct ceph_msg_data_cursor *cursor, @@ -815,19 +808,14 @@ static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor, cursor->resid -= bytes; bvec_iter_advance(bvecs, &cursor->bvec_iter, bytes); - if (!cursor->resid) { - BUG_ON(!cursor->last_piece); + if (!cursor->resid) return false; /* no more data */ - } if (!bytes || (cursor->bvec_iter.bi_bvec_done && page == bvec_iter_page(bvecs, cursor->bvec_iter))) return false; /* more bytes to process in this segment */ - BUG_ON(cursor->last_piece); BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter)); - cursor->last_piece = - cursor->resid == bvec_iter_len(bvecs, cursor->bvec_iter); return true; } @@ -853,7 +841,6 @@ static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor, BUG_ON(page_count > (int)USHRT_MAX); cursor->page_count = (unsigned short)page_count; BUG_ON(length > SIZE_MAX - cursor->page_offset); - cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE; } static struct page * @@ -868,11 +855,7 @@ ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor, BUG_ON(cursor->page_offset >= PAGE_SIZE); *page_offset = cursor->page_offset; - if (cursor->last_piece) - *length = cursor->resid; - else - *length = PAGE_SIZE - *page_offset; - + *length 
= min_t(size_t, cursor->resid, PAGE_SIZE - *page_offset); return data->pages[cursor->page_index]; } @@ -897,8 +880,6 @@ static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor, BUG_ON(cursor->page_index >= cursor->page_count); cursor->page_index++; - cursor->last_piece = cursor->resid <= PAGE_SIZE; - return true; } @@ -928,7 +909,6 @@ ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor, cursor->resid = min(length, pagelist->length); cursor->page = page; cursor->offset = 0; - cursor->last_piece = cursor->resid <= PAGE_SIZE; } static struct page * @@ -948,11 +928,7 @@ ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor, /* offset of first page in pagelist is always 0 */ *page_offset = cursor->offset & ~PAGE_MASK; - if (cursor->last_piece) - *length = cursor->resid; - else - *length = PAGE_SIZE - *page_offset; - + *length = min_t(size_t, cursor->resid, PAGE_SIZE - *page_offset); return cursor->page; } @@ -985,8 +961,6 @@ static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor, BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head)); cursor->page = list_next_entry(cursor->page, lru); - cursor->last_piece = cursor->resid <= PAGE_SIZE; - return true; } @@ -1044,8 +1018,7 @@ void ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor, * Indicate whether this is the last piece in this data item. */ struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor, - size_t *page_offset, size_t *length, - bool *last_piece) + size_t *page_offset, size_t *length) { struct page *page; @@ -1074,8 +1047,6 @@ struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor, BUG_ON(*page_offset + *length > PAGE_SIZE); BUG_ON(!*length); BUG_ON(*length > cursor->resid); - if (last_piece) - *last_piece = cursor->last_piece; return page; } @@ -1112,7 +1083,6 @@ void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, size_t bytes) cursor->total_resid -= bytes; if (!cursor->resid && cursor->total_resid) { - WARN_ON(!cursor->last_piece); cursor->data++; __ceph_msg_data_cursor_init(cursor); new_piece = true; diff --git a/net/ceph/messenger_v1.c b/net/ceph/messenger_v1.c index 6b014eca3a13..3ddbde87e4d6 100644 --- a/net/ceph/messenger_v1.c +++ b/net/ceph/messenger_v1.c @@ -495,7 +495,7 @@ static int write_partial_message_data(struct ceph_connection *con) continue; } - page = ceph_msg_data_next(cursor, &page_offset, &length, NULL); + page = ceph_msg_data_next(cursor, &page_offset, &length); if (length == cursor->total_resid) more = MSG_MORE; ret = ceph_tcp_sendpage(con->sock, page, page_offset, length, @@ -1008,7 +1008,7 @@ static int read_partial_msg_data(struct ceph_connection *con) continue; } - page = ceph_msg_data_next(cursor, &page_offset, &length, NULL); + page = ceph_msg_data_next(cursor, &page_offset, &length); ret = ceph_tcp_recvpage(con->sock, page, page_offset, length); if (ret <= 0) { if (do_datacrc) @@ -1050,7 +1050,7 @@ static int read_partial_msg_data_bounce(struct ceph_connection *con) continue; } - page = ceph_msg_data_next(cursor, &off, &len, NULL); + page = ceph_msg_data_next(cursor, &off, &len); ret = ceph_tcp_recvpage(con->sock, con->bounce_page, 0, len); if (ret <= 0) { con->in_data_crc = crc; diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c index c6e5bfc717d5..cc8ff81a50b7 100644 --- a/net/ceph/messenger_v2.c +++ b/net/ceph/messenger_v2.c @@ -862,7 +862,7 @@ static void get_bvec_at(struct ceph_msg_data_cursor *cursor, ceph_msg_data_advance(cursor, 0); /* get a piece of data, cursor 
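With the last_piece flag gone, the ceph cursors above compute each piece length directly: whatever is left in the data item, capped at the end of the current page. A standalone restatement of that min_t() expression (PAGE_SIZE assumed to be 4 KiB for the sketch):

    #include <stddef.h>

    #define PAGE_SIZE 4096UL        /* assumed for the sketch */

    static size_t piece_len(size_t resid, size_t page_offset)
    {
            size_t room = PAGE_SIZE - page_offset;  /* bytes left in this page */

            return resid < room ? resid : room;     /* min(resid, room) */
    }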
isn't advanced */ - page = ceph_msg_data_next(cursor, &off, &len, NULL); + page = ceph_msg_data_next(cursor, &off, &len); bv->bv_page = page; bv->bv_offset = off; diff --git a/net/core/sock.c b/net/core/sock.c index eeb6cbac6f49..a3ba0358c77c 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -3610,7 +3610,8 @@ int sock_common_getsockopt(struct socket *sock, int level, int optname, { struct sock *sk = sock->sk; - return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); + /* IPV6_ADDRFORM can change sk->sk_prot under us. */ + return READ_ONCE(sk->sk_prot)->getsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL(sock_common_getsockopt); @@ -3636,7 +3637,8 @@ int sock_common_setsockopt(struct socket *sock, int level, int optname, { struct sock *sk = sock->sk; - return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); + /* IPV6_ADDRFORM can change sk->sk_prot under us. */ + return READ_ONCE(sk->sk_prot)->setsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL(sock_common_setsockopt); diff --git a/net/dsa/port.c b/net/dsa/port.c index e4a0513816bb..208168276995 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -1681,7 +1681,7 @@ int dsa_port_phylink_create(struct dsa_port *dp) pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn), mode, &dsa_port_phylink_mac_ops); if (IS_ERR(pl)) { - pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl)); + pr_err("error creating PHYLINK: %ld\n", PTR_ERR(pl)); return PTR_ERR(pl); } diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index cbd0e2ac4ffe..6e55fae4c686 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -251,9 +251,6 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) return -EOPNOTSUPP; } - if (!size) - return -EINVAL; - lock_sock(sk); if (!sk->sk_bound_dev_if) dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154); @@ -275,6 +272,10 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) err = -EMSGSIZE; goto out_dev; } + if (!size) { + err = 0; + goto out_dev; + } hlen = LL_RESERVED_SPACE(dev); tlen = dev->needed_tailroom; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index e2c219382345..3dd02396517d 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -558,22 +558,27 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; + const struct proto *prot; int err; if (addr_len < sizeof(uaddr->sa_family)) return -EINVAL; + + /* IPV6_ADDRFORM can change sk->sk_prot under us. */ + prot = READ_ONCE(sk->sk_prot); + if (uaddr->sa_family == AF_UNSPEC) - return sk->sk_prot->disconnect(sk, flags); + return prot->disconnect(sk, flags); if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) { - err = sk->sk_prot->pre_connect(sk, uaddr, addr_len); + err = prot->pre_connect(sk, uaddr, addr_len); if (err) return err; } if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk)) return -EAGAIN; - return sk->sk_prot->connect(sk, uaddr, addr_len); + return prot->connect(sk, uaddr, addr_len); } EXPORT_SYMBOL(inet_dgram_connect); @@ -734,10 +739,11 @@ EXPORT_SYMBOL(inet_stream_connect); int inet_accept(struct socket *sock, struct socket *newsock, int flags, bool kern) { - struct sock *sk1 = sock->sk; + struct sock *sk1 = sock->sk, *sk2; int err = -EINVAL; - struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err, kern); + /* IPV6_ADDRFORM can change sk->sk_prot under us. 
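The sock.c and af_inet.c hunks here all follow one rule: because IPV6_ADDRFORM can swap sk->sk_prot from another thread, a lockless path loads the pointer exactly once with READ_ONCE() and then works only with the local copy, while the writer publishes with WRITE_ONCE(). An illustrative kernel-style sketch with a hypothetical ops struct, not taken from the patch:

    struct my_ops {
            int (*getsockopt)(void);
    };

    static struct my_ops *active_ops;

    static int reader_path(void)
    {
            struct my_ops *ops = READ_ONCE(active_ops);     /* single racy load */

            return ops->getsockopt();       /* every use goes through the copy */
    }

    static void writer_path(struct my_ops *new_ops)
    {
            WRITE_ONCE(active_ops, new_ops);        /* paired with READ_ONCE() */
    }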
*/ + sk2 = READ_ONCE(sk1->sk_prot)->accept(sk1, flags, &err, kern); if (!sk2) goto do_err; @@ -825,12 +831,15 @@ ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) { struct sock *sk = sock->sk; + const struct proto *prot; if (unlikely(inet_send_prepare(sk))) return -EAGAIN; - if (sk->sk_prot->sendpage) - return sk->sk_prot->sendpage(sk, page, offset, size, flags); + /* IPV6_ADDRFORM can change sk->sk_prot under us. */ + prot = READ_ONCE(sk->sk_prot); + if (prot->sendpage) + return prot->sendpage(sk, page, offset, size, flags); return sock_no_sendpage(sock, page, offset, size, flags); } EXPORT_SYMBOL(inet_sendpage); diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 2dc97583d279..e9a7f70a54df 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -888,13 +888,13 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi, return 1; } + /* cannot match on nexthop object attributes */ + if (fi->nh) + return 1; + if (cfg->fc_oif || cfg->fc_gw_family) { struct fib_nh *nh; - /* cannot match on nexthop object attributes */ - if (fi->nh) - return 1; - nh = fib_info_nh(fi, 0); if (cfg->fc_encap) { if (fib_encap_match(net, cfg->fc_encap_type, diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 71d3bb0abf6c..66fc940f9521 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -268,8 +268,21 @@ restart_rcu: rcu_read_lock(); restart: sk_nulls_for_each_rcu(sk, node, &head->chain) { - if (sk->sk_state != TCP_TIME_WAIT) + if (sk->sk_state != TCP_TIME_WAIT) { + /* A kernel listener socket might not hold refcnt for net, + * so reqsk_timer_handler() could be fired after net is + * freed. Userspace listener and reqsk never exist here. + */ + if (unlikely(sk->sk_state == TCP_NEW_SYN_RECV && + hashinfo->pernet)) { + struct request_sock *req = inet_reqsk(sk); + + inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req); + } + continue; + } + tw = inet_twsk(sk); if ((tw->tw_family != family) || refcount_read(&twsk_net(tw)->ns.count)) diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c index 8183bbcabb4a..ff85db52b2e5 100644 --- a/net/ipv4/netfilter/ipt_rpfilter.c +++ b/net/ipv4/netfilter/ipt_rpfilter.c @@ -77,7 +77,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? 
skb->mark : 0; flow.flowi4_tos = iph->tos & IPTOS_RT_MASK; flow.flowi4_scope = RT_SCOPE_UNIVERSE; - flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par)); + flow.flowi4_l3mdev = l3mdev_master_ifindex_rcu(xt_in(par)); return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert; } diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c index 7ade04ff972d..e886147eed11 100644 --- a/net/ipv4/netfilter/nft_fib_ipv4.c +++ b/net/ipv4/netfilter/nft_fib_ipv4.c @@ -84,7 +84,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs, oif = NULL; if (priv->flags & NFTA_FIB_F_IIF) - fl4.flowi4_oif = l3mdev_master_ifindex_rcu(oif); + fl4.flowi4_l3mdev = l3mdev_master_ifindex_rcu(oif); if (nft_hook(pkt) == NF_INET_PRE_ROUTING && nft_fib_is_loopback(pkt->skb, nft_in(pkt))) { diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 517042caf6dc..bde333b24837 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -617,21 +617,9 @@ int ping_getfrag(void *from, char *to, { struct pingfakehdr *pfh = from; - if (offset == 0) { - fraglen -= sizeof(struct icmphdr); - if (fraglen < 0) - BUG(); - if (!csum_and_copy_from_iter_full(to + sizeof(struct icmphdr), - fraglen, &pfh->wcheck, - &pfh->msg->msg_iter)) - return -EFAULT; - } else if (offset < sizeof(struct icmphdr)) { - BUG(); - } else { - if (!csum_and_copy_from_iter_full(to, fraglen, &pfh->wcheck, - &pfh->msg->msg_iter)) - return -EFAULT; - } + if (!csum_and_copy_from_iter_full(to, fraglen, &pfh->wcheck, + &pfh->msg->msg_iter)) + return -EFAULT; #if IS_ENABLED(CONFIG_IPV6) /* For IPv6, checksum each skb as we go along, as expected by @@ -639,7 +627,7 @@ int ping_getfrag(void *from, char *to, * wcheck, it will be finalized in ping_v4_push_pending_frames. */ if (pfh->family == AF_INET6) { - skb->csum = pfh->wcheck; + skb->csum = csum_block_add(skb->csum, pfh->wcheck, odd); skb->ip_summed = CHECKSUM_NONE; pfh->wcheck = 0; } @@ -842,7 +830,8 @@ back_from_confirm: pfh.family = AF_INET; err = ip_append_data(sk, &fl4, ping_getfrag, &pfh, len, - 0, &ipc, &rt, msg->msg_flags); + sizeof(struct icmphdr), &ipc, &rt, + msg->msg_flags); if (err) ip_flush_pending_frames(sk); else diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 0c51abeee172..f8232811a5be 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3796,8 +3796,9 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, const struct inet_connection_sock *icsk = inet_csk(sk); if (level != SOL_TCP) - return icsk->icsk_af_ops->setsockopt(sk, level, optname, - optval, optlen); + /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */ + return READ_ONCE(icsk->icsk_af_ops)->setsockopt(sk, level, optname, + optval, optlen); return do_tcp_setsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL(tcp_setsockopt); @@ -4396,8 +4397,9 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, struct inet_connection_sock *icsk = inet_csk(sk); if (level != SOL_TCP) - return icsk->icsk_af_ops->getsockopt(sk, level, optname, - optval, optlen); + /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */ + return READ_ONCE(icsk->icsk_af_ops)->getsockopt(sk, level, optname, + optval, optlen); return do_tcp_getsockopt(sk, level, optname, USER_SOCKPTR(optval), USER_SOCKPTR(optlen)); } diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c index ddc7ba0554bd..112f28f93693 100644 --- a/net/ipv4/tcp_cdg.c +++ b/net/ipv4/tcp_cdg.c @@ -375,6 +375,7 @@ static void tcp_cdg_init(struct sock 
*sk) struct cdg *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); + ca->gradients = NULL; /* We silently fall back to window = 1 if allocation fails. */ if (window > 1) ca->gradients = kcalloc(window, sizeof(ca->gradients[0]), @@ -388,6 +389,7 @@ static void tcp_cdg_release(struct sock *sk) struct cdg *ca = inet_csk_ca(sk); kfree(ca->gradients); + ca->gradients = NULL; } static struct tcp_congestion_ops tcp_cdg __read_mostly = { diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 79f30f026d89..c375f603a16c 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -353,13 +353,14 @@ void tcp_twsk_purge(struct list_head *net_exit_list, int family) struct net *net; list_for_each_entry(net, net_exit_list, exit_list) { - /* The last refcount is decremented in tcp_sk_exit_batch() */ - if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1) - continue; - if (net->ipv4.tcp_death_row.hashinfo->pernet) { + /* Even if tw_refcount == 1, we must clean up kernel reqsk */ inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family); } else if (!purged_once) { + /* The last refcount is decremented in tcp_sk_exit_batch() */ + if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1) + continue; + inet_twsk_purge(&tcp_hashinfo, family); purged_once = true; } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index d63118ce5900..8126f67d18b3 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1598,7 +1598,7 @@ drop: } EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb); -void udp_destruct_sock(struct sock *sk) +void udp_destruct_common(struct sock *sk) { /* reclaim completely the forward allocated memory */ struct udp_sock *up = udp_sk(sk); @@ -1611,10 +1611,14 @@ void udp_destruct_sock(struct sock *sk) kfree_skb(skb); } udp_rmem_release(sk, total, 0, true); +} +EXPORT_SYMBOL_GPL(udp_destruct_common); +static void udp_destruct_sock(struct sock *sk) +{ + udp_destruct_common(sk); inet_sock_destruct(sk); } -EXPORT_SYMBOL_GPL(udp_destruct_sock); int udp_init_sock(struct sock *sk) { @@ -1622,7 +1626,6 @@ int udp_init_sock(struct sock *sk) sk->sk_destruct = udp_destruct_sock; return 0; } -EXPORT_SYMBOL_GPL(udp_init_sock); void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len) { diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index 6e08a76ae1e7..e0c9cc39b81e 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c @@ -17,6 +17,14 @@ struct udp_table udplite_table __read_mostly; EXPORT_SYMBOL(udplite_table); +/* Designate sk as UDP-Lite socket */ +static int udplite_sk_init(struct sock *sk) +{ + udp_init_sock(sk); + udp_sk(sk)->pcflag = UDPLITE_BIT; + return 0; +} + static int udplite_rcv(struct sk_buff *skb) { return __udp4_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index d40b7d60e00e..024191004982 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -109,6 +109,12 @@ static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk) return (struct ipv6_pinfo *)(((u8 *)sk) + offset); } +void inet6_sock_destruct(struct sock *sk) +{ + inet6_cleanup_sock(sk); + inet_sock_destruct(sk); +} + static int inet6_create(struct net *net, struct socket *sock, int protocol, int kern) { @@ -201,7 +207,7 @@ lookup_protocol: inet->hdrincl = 1; } - sk->sk_destruct = inet_sock_destruct; + sk->sk_destruct = inet6_sock_destruct; sk->sk_family = PF_INET6; sk->sk_protocol = protocol; @@ -510,6 +516,12 @@ void inet6_destroy_sock(struct sock *sk) } EXPORT_SYMBOL_GPL(inet6_destroy_sock); +void 
inet6_cleanup_sock(struct sock *sk) +{ + inet6_destroy_sock(sk); +} +EXPORT_SYMBOL_GPL(inet6_cleanup_sock); + /* * This does both peername and sockname. */ diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 2d2f4dd9e5df..532f4478c884 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -419,15 +419,18 @@ int do_ipv6_setsockopt(struct sock *sk, int level, int optname, rtnl_lock(); sockopt_lock_sock(sk); + /* Another thread has converted the socket into IPv4 with + * IPV6_ADDRFORM concurrently. + */ + if (unlikely(sk->sk_family != AF_INET6)) + goto unlock; + switch (optname) { case IPV6_ADDRFORM: if (optlen < sizeof(int)) goto e_inval; if (val == PF_INET) { - struct ipv6_txoptions *opt; - struct sk_buff *pktopt; - if (sk->sk_type == SOCK_RAW) break; @@ -458,7 +461,6 @@ int do_ipv6_setsockopt(struct sock *sk, int level, int optname, break; } - fl6_free_socklist(sk); __ipv6_sock_mc_close(sk); __ipv6_sock_ac_close(sk); @@ -475,9 +477,10 @@ int do_ipv6_setsockopt(struct sock *sk, int level, int optname, sock_prot_inuse_add(net, sk->sk_prot, -1); sock_prot_inuse_add(net, &tcp_prot, 1); - /* Paired with READ_ONCE(sk->sk_prot) in net/ipv6/af_inet6.c */ + /* Paired with READ_ONCE(sk->sk_prot) in inet6_stream_ops */ WRITE_ONCE(sk->sk_prot, &tcp_prot); - icsk->icsk_af_ops = &ipv4_specific; + /* Paired with READ_ONCE() in tcp_(get|set)sockopt() */ + WRITE_ONCE(icsk->icsk_af_ops, &ipv4_specific); sk->sk_socket->ops = &inet_stream_ops; sk->sk_family = PF_INET; tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); @@ -490,19 +493,19 @@ int do_ipv6_setsockopt(struct sock *sk, int level, int optname, sock_prot_inuse_add(net, sk->sk_prot, -1); sock_prot_inuse_add(net, prot, 1); - /* Paired with READ_ONCE(sk->sk_prot) in net/ipv6/af_inet6.c */ + /* Paired with READ_ONCE(sk->sk_prot) in inet6_dgram_ops */ WRITE_ONCE(sk->sk_prot, prot); sk->sk_socket->ops = &inet_dgram_ops; sk->sk_family = PF_INET; } - opt = xchg((__force struct ipv6_txoptions **)&np->opt, - NULL); - if (opt) { - atomic_sub(opt->tot_len, &sk->sk_omem_alloc); - txopt_put(opt); - } - pktopt = xchg(&np->pktoptions, NULL); - kfree_skb(pktopt); + + /* Disable all options not to allocate memory anymore, + * but there is still a race. See the lockless path + * in udpv6_sendmsg() and ipv6_local_rxpmtu(). + */ + np->rxopt.all = 0; + + inet6_cleanup_sock(sk); /* * ... and add it to the refcnt debug socks count @@ -994,6 +997,7 @@ done: break; } +unlock: sockopt_release_sock(sk); if (needs_rtnl) rtnl_unlock(); diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c index d800801a5dd2..69d86b040a6a 100644 --- a/net/ipv6/netfilter/ip6t_rpfilter.c +++ b/net/ipv6/netfilter/ip6t_rpfilter.c @@ -37,6 +37,7 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb, bool ret = false; struct flowi6 fl6 = { .flowi6_iif = LOOPBACK_IFINDEX, + .flowi6_l3mdev = l3mdev_master_ifindex_rcu(dev), .flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK, .flowi6_proto = iph->nexthdr, .daddr = iph->saddr, @@ -55,9 +56,7 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb, if (rpfilter_addr_linklocal(&iph->saddr)) { lookup_flags |= RT6_LOOKUP_F_IFACE; fl6.flowi6_oif = dev->ifindex; - /* Set flowi6_oif for vrf devices to lookup route in l3mdev domain. 
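The rpfilter and nft_fib changes on both address families make the same swap: the VRF master index moves out of flowi*_oif and into the dedicated flowi*_l3mdev key, which leaves oif free for the real ingress interface. Condensed from the IPv6 hunk above, the initializer becomes:

    struct flowi6 fl6 = {
            .flowi6_iif    = LOOPBACK_IFINDEX,
            /* VRF handled via its own key instead of overloading oif */
            .flowi6_l3mdev = l3mdev_master_ifindex_rcu(dev),
    };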
*/ - } else if (netif_is_l3_master(dev) || netif_is_l3_slave(dev) || - (flags & XT_RPFILTER_LOOSE) == 0) + } else if ((flags & XT_RPFILTER_LOOSE) == 0) fl6.flowi6_oif = dev->ifindex; rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags); @@ -72,9 +71,7 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb, goto out; } - if (rt->rt6i_idev->dev == dev || - l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == dev->ifindex || - (flags & XT_RPFILTER_LOOSE)) + if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE)) ret = true; out: ip6_rt_put(rt); diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c index 1d7e520d9966..91faac610e03 100644 --- a/net/ipv6/netfilter/nft_fib_ipv6.c +++ b/net/ipv6/netfilter/nft_fib_ipv6.c @@ -41,9 +41,8 @@ static int nft_fib6_flowi_init(struct flowi6 *fl6, const struct nft_fib *priv, if (ipv6_addr_type(&fl6->daddr) & IPV6_ADDR_LINKLOCAL) { lookup_flags |= RT6_LOOKUP_F_IFACE; fl6->flowi6_oif = get_ifindex(dev ? dev : pkt->skb->dev); - } else if ((priv->flags & NFTA_FIB_F_IIF) && - (netif_is_l3_master(dev) || netif_is_l3_slave(dev))) { - fl6->flowi6_oif = dev->ifindex; + } else if (priv->flags & NFTA_FIB_F_IIF) { + fl6->flowi6_l3mdev = l3mdev_master_ifindex_rcu(dev); } if (ipv6_addr_type(&fl6->saddr) & IPV6_ADDR_UNICAST) diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index 5f2ef8493714..86c26e48d065 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -179,7 +179,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) lock_sock(sk); err = ip6_append_data(sk, ping_getfrag, &pfh, len, - 0, &ipc6, &fl6, rt, + sizeof(struct icmp6hdr), &ipc6, &fl6, rt, MSG_DONTWAIT); if (err) { diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index a8adda623da1..2a3f9296df1e 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -238,7 +238,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, sin.sin_port = usin->sin6_port; sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3]; - icsk->icsk_af_ops = &ipv6_mapped; + /* Paired with READ_ONCE() in tcp_(get|set)sockopt() */ + WRITE_ONCE(icsk->icsk_af_ops, &ipv6_mapped); if (sk_is_mptcp(sk)) mptcpv6_handle_mapped(sk, true); sk->sk_backlog_rcv = tcp_v4_do_rcv; @@ -250,7 +251,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, if (err) { icsk->icsk_ext_hdr_len = exthdrlen; - icsk->icsk_af_ops = &ipv6_specific; + /* Paired with READ_ONCE() in tcp_(get|set)sockopt() */ + WRITE_ONCE(icsk->icsk_af_ops, &ipv6_specific); if (sk_is_mptcp(sk)) mptcpv6_handle_mapped(sk, false); sk->sk_backlog_rcv = tcp_v6_do_rcv; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 91e795bb9ade..8d09f0ea5b8c 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -56,6 +56,19 @@ #include <trace/events/skb.h> #include "udp_impl.h" +static void udpv6_destruct_sock(struct sock *sk) +{ + udp_destruct_common(sk); + inet6_sock_destruct(sk); +} + +int udpv6_init_sock(struct sock *sk) +{ + skb_queue_head_init(&udp_sk(sk)->reader_queue); + sk->sk_destruct = udpv6_destruct_sock; + return 0; +} + static u32 udp6_ehashfn(const struct net *net, const struct in6_addr *laddr, const u16 lport, @@ -1733,7 +1746,7 @@ struct proto udpv6_prot = { .connect = ip6_datagram_connect, .disconnect = udp_disconnect, .ioctl = udp_ioctl, - .init = udp_init_sock, + .init = udpv6_init_sock, .destroy = udpv6_destroy_sock, .setsockopt = udpv6_setsockopt, .getsockopt = udpv6_getsockopt, diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h index 4251e49d32a0..0590f566379d 
100644 --- a/net/ipv6/udp_impl.h +++ b/net/ipv6/udp_impl.h @@ -12,6 +12,7 @@ int __udp6_lib_rcv(struct sk_buff *, struct udp_table *, int); int __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32, struct udp_table *); +int udpv6_init_sock(struct sock *sk); int udp_v6_get_port(struct sock *sk, unsigned short snum); void udp_v6_rehash(struct sock *sk); diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index b70725856259..67eaf3ca14ce 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c @@ -12,6 +12,13 @@ #include <linux/proc_fs.h> #include "udp_impl.h" +static int udplitev6_sk_init(struct sock *sk) +{ + udpv6_init_sock(sk); + udp_sk(sk)->pcflag = UDPLITE_BIT; + return 0; +} + static int udplitev6_rcv(struct sk_buff *skb) { return __udp6_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE); @@ -38,7 +45,7 @@ struct proto udplitev6_prot = { .connect = ip6_datagram_connect, .disconnect = udp_disconnect, .ioctl = udp_ioctl, - .init = udplite_sk_init, + .init = udplitev6_sk_init, .destroy = udpv6_destroy_sock, .setsockopt = udpv6_setsockopt, .getsockopt = udpv6_getsockopt, diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 1215c863e1c4..27725464ec08 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -1838,10 +1838,10 @@ static int kcm_release(struct socket *sock) kcm = kcm_sk(sk); mux = kcm->mux; + lock_sock(sk); sock_orphan(sk); kfree_skb(kcm->seq_skb); - lock_sock(sk); /* Purge queue under lock to avoid race condition with tx_work trying * to act when queue is nonempty. If tx_work runs after this point * it will just return. diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 4e1d4c339f2d..a842f2e1c230 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1709,6 +1709,14 @@ struct ieee802_11_elems { /* whether a parse error occurred while retrieving these elements */ bool parse_error; + + /* + * scratch buffer that can be used for various element parsing related + * tasks, e.g., element de-fragmentation etc. + */ + size_t scratch_len; + u8 *scratch_pos; + u8 scratch[]; }; static inline struct ieee80211_local *hw_to_local( diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 572254366a0f..dd9ac1f7d2ea 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -243,7 +243,7 @@ static int ieee80211_can_powered_addr_change(struct ieee80211_sub_if_data *sdata */ break; default: - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; } unlock: @@ -461,7 +461,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do /* * Stop TX on this interface first. 
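struct ieee802_11_elems above gains a trailing u8 scratch[] flexible array member, so the parser can take the struct and its working buffer in a single allocation and carve sub-buffers from scratch_pos. A generic user-space sketch of that single-allocation pattern:

    #include <stdlib.h>

    struct parsed {
            size_t scratch_len;
            unsigned char *scratch_pos;
            unsigned char scratch[];        /* flexible array member */
    };

    static struct parsed *parsed_alloc(size_t scratch_len)
    {
            /* one allocation covers the struct and its scratch area */
            struct parsed *p = calloc(1, sizeof(*p) + scratch_len);

            if (!p)
                    return NULL;
            p->scratch_len = scratch_len;
            p->scratch_pos = p->scratch;    /* sub-buffers are carved from here */
            return p;
    }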
*/ - if (sdata->dev) + if (!local->ops->wake_tx_queue && sdata->dev) netif_tx_stop_all_queues(sdata->dev); ieee80211_roc_purge(local, sdata); @@ -1412,8 +1412,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) sdata->vif.type != NL80211_IFTYPE_STATION); } - set_bit(SDATA_STATE_RUNNING, &sdata->state); - switch (sdata->vif.type) { case NL80211_IFTYPE_P2P_DEVICE: rcu_assign_pointer(local->p2p_sdata, sdata); @@ -1472,6 +1470,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } + set_bit(SDATA_STATE_RUNNING, &sdata->state); + return 0; err_del_interface: drv_remove_interface(local, sdata); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 54b8d5065bbd..d8484cd870de 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -4409,8 +4409,11 @@ ieee80211_verify_peer_he_mcs_support(struct ieee80211_sub_if_data *sdata, he_cap_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ies->data, ies->len); + if (!he_cap_elem) + return false; + /* invalid HE IE */ - if (!he_cap_elem || he_cap_elem->datalen < 1 + sizeof(*he_cap)) { + if (he_cap_elem->datalen < 1 + sizeof(*he_cap)) { sdata_info(sdata, "Invalid HE elem, Disable HE\n"); return false; @@ -4676,8 +4679,6 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, } if (!elems->vht_cap_elem) { - sdata_info(sdata, - "bad VHT capabilities, disabling VHT\n"); *conn_flags |= IEEE80211_CONN_DISABLE_VHT; vht_oper = NULL; } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index bd215fe3c796..f99416d2e144 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1978,10 +1978,11 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) if (mmie_keyidx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS || mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS + - NUM_DEFAULT_BEACON_KEYS) { - cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, - skb->data, - skb->len); + NUM_DEFAULT_BEACON_KEYS) { + if (rx->sdata->dev) + cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, + skb->data, + skb->len); return RX_DROP_MONITOR; /* unexpected BIP keyidx */ } @@ -2131,7 +2132,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) /* either the frame has been decrypted or will be dropped */ status->flag |= RX_FLAG_DECRYPTED; - if (unlikely(ieee80211_is_beacon(fc) && result == RX_DROP_UNUSABLE)) + if (unlikely(ieee80211_is_beacon(fc) && result == RX_DROP_UNUSABLE && + rx->sdata->dev)) cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, skb->data, skb->len); @@ -4352,6 +4354,7 @@ void ieee80211_check_fast_rx(struct sta_info *sta) .vif_type = sdata->vif.type, .control_port_protocol = sdata->control_port_protocol, }, *old, *new = NULL; + u32 offload_flags; bool set_offload = false; bool assign = false; bool offload; @@ -4467,10 +4470,10 @@ void ieee80211_check_fast_rx(struct sta_info *sta) if (assign) new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL); - offload = assign && - (sdata->vif.offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED); + offload_flags = get_bss_sdata(sdata)->vif.offload_flags; + offload = offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED; - if (offload) + if (assign && offload) set_offload = !test_and_set_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD); else set_offload = test_and_clear_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD); @@ -4708,7 +4711,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, if (!(status->rx_flags & IEEE80211_RX_AMSDU)) { if (!pskb_may_pull(skb, snap_offs + sizeof(*payload))) - goto drop; + return false; payload = 
(void *)(skb->data + snap_offs); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 27c964be102e..a364148149f9 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2319,6 +2319,10 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, u16 len_rthdr; int hdrlen; + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + if (unlikely(!ieee80211_sdata_running(sdata))) + goto fail; + memset(info, 0, sizeof(*info)); info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_CTL_INJECTED; @@ -2378,8 +2382,6 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, * This is necessary, for example, for old hostapd versions that * don't use nl80211-based management TX/RX. */ - sdata = IEEE80211_DEV_TO_SUB_IF(dev); - list_for_each_entry_rcu(tmp_sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(tmp_sdata)) continue; @@ -4169,7 +4171,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, struct sk_buff *next; int len = skb->len; - if (unlikely(skb->len < ETH_HLEN)) { + if (unlikely(!ieee80211_sdata_running(sdata) || skb->len < ETH_HLEN)) { kfree_skb(skb); return; } @@ -4566,7 +4568,7 @@ netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb, struct ieee80211_key *key; struct sta_info *sta; - if (unlikely(skb->len < ETH_HLEN)) { + if (unlikely(!ieee80211_sdata_running(sdata) || skb->len < ETH_HLEN)) { kfree_skb(skb); return NETDEV_TX_OK; } diff --git a/net/mac80211/util.c b/net/mac80211/util.c index bf7461c41bef..b512cb37aafb 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1445,6 +1445,8 @@ static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len, for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, len) { if (elem->datalen < 2) continue; + if (elem->data[0] < 1 || elem->data[0] > 8) + continue; for_each_element(sub, elem->data + 1, elem->datalen - 1) { u8 new_bssid[ETH_ALEN]; @@ -1504,24 +1506,26 @@ ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params) const struct element *non_inherit = NULL; u8 *nontransmitted_profile; int nontransmitted_profile_len = 0; + size_t scratch_len = params->len; - elems = kzalloc(sizeof(*elems), GFP_ATOMIC); + elems = kzalloc(sizeof(*elems) + scratch_len, GFP_ATOMIC); if (!elems) return NULL; elems->ie_start = params->start; elems->total_len = params->len; - - nontransmitted_profile = kmalloc(params->len, GFP_ATOMIC); - if (nontransmitted_profile) { - nontransmitted_profile_len = - ieee802_11_find_bssid_profile(params->start, params->len, - elems, params->bss, - nontransmitted_profile); - non_inherit = - cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, - nontransmitted_profile, - nontransmitted_profile_len); - } + elems->scratch_len = scratch_len; + elems->scratch_pos = elems->scratch; + + nontransmitted_profile = elems->scratch_pos; + nontransmitted_profile_len = + ieee802_11_find_bssid_profile(params->start, params->len, + elems, params->bss, + nontransmitted_profile); + elems->scratch_pos += nontransmitted_profile_len; + elems->scratch_len -= nontransmitted_profile_len; + non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, + nontransmitted_profile, + nontransmitted_profile_len); elems->crc = _ieee802_11_parse_elems_full(params, elems, non_inherit); @@ -1555,8 +1559,6 @@ ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params) offsetofend(struct ieee80211_bssid_index, dtim_count)) elems->dtim_count = elems->bssid_index->dtim_count; - kfree(nontransmitted_profile); - return elems; } @@ -2046,7 +2048,7 @@ static int 
ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata, if (he_cap) { enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif); - __le16 cap = ieee80211_get_he_6ghz_capa(sband, iftype); + __le16 cap = ieee80211_get_he_6ghz_capa(sband6, iftype); pos = ieee80211_write_he_6ghz_cap(pos, cap, end); } diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c index c2fc2a7b2528..b6b5e496fa40 100644 --- a/net/mctp/af_mctp.c +++ b/net/mctp/af_mctp.c @@ -295,11 +295,12 @@ __must_hold(&net->mctp.keys_lock) mctp_dev_release_key(key->dev, key); spin_unlock_irqrestore(&key->lock, flags); - hlist_del(&key->hlist); - hlist_del(&key->sklist); - - /* unref for the lists */ - mctp_key_unref(key); + if (!hlist_unhashed(&key->hlist)) { + hlist_del_init(&key->hlist); + hlist_del_init(&key->sklist); + /* unref for the lists */ + mctp_key_unref(key); + } kfree_skb(skb); } @@ -373,9 +374,17 @@ static int mctp_ioctl_alloctag(struct mctp_sock *msk, unsigned long arg) ctl.tag = tag | MCTP_TAG_OWNER | MCTP_TAG_PREALLOC; if (copy_to_user((void __user *)arg, &ctl, sizeof(ctl))) { - spin_lock_irqsave(&key->lock, flags); - __mctp_key_remove(key, net, flags, MCTP_TRACE_KEY_DROPPED); + unsigned long fl2; + /* Unwind our key allocation: the keys list lock needs to be + * taken before the individual key locks, and we need a valid + * flags value (fl2) to pass to __mctp_key_remove, hence the + * second spin_lock_irqsave() rather than a plain spin_lock(). + */ + spin_lock_irqsave(&net->mctp.keys_lock, flags); + spin_lock_irqsave(&key->lock, fl2); + __mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_DROPPED); mctp_key_unref(key); + spin_unlock_irqrestore(&net->mctp.keys_lock, flags); return -EFAULT; } diff --git a/net/mctp/route.c b/net/mctp/route.c index 3b24b8d18b5b..2155f15a074c 100644 --- a/net/mctp/route.c +++ b/net/mctp/route.c @@ -228,12 +228,12 @@ __releases(&key->lock) if (!key->manual_alloc) { spin_lock_irqsave(&net->mctp.keys_lock, flags); - hlist_del(&key->hlist); - hlist_del(&key->sklist); + if (!hlist_unhashed(&key->hlist)) { + hlist_del_init(&key->hlist); + hlist_del_init(&key->sklist); + mctp_key_unref(key); + } spin_unlock_irqrestore(&net->mctp.keys_lock, flags); - - /* unref for the lists */ - mctp_key_unref(key); } /* and one for the local reference */ diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index cb255d8ed99a..c7b10234cf7c 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -1015,7 +1015,8 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key, * connections which we will commit, we may need to attach * the helper here. 
*/ - if (info->commit && info->helper && !nfct_help(ct)) { + if (!nf_ct_is_confirmed(ct) && info->commit && + info->helper && !nfct_help(ct)) { int err = __nf_ct_try_assign_helper(ct, info->ct, GFP_ATOMIC); if (err) diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 435d866fcfa0..570389f6cdd7 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -2043,14 +2043,12 @@ start_error: static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl) { - struct taprio_sched *q = qdisc_priv(sch); - struct net_device *dev = qdisc_dev(sch); - unsigned int ntx = cl - 1; + struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); - if (ntx >= dev->num_tx_queues) + if (!dev_queue) return NULL; - return q->qdiscs[ntx]; + return dev_queue->qdisc_sleeping; } static unsigned long taprio_find(struct Qdisc *sch, u32 classid) diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index c284efa3d1ef..993acf38af87 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -345,7 +345,7 @@ static int rpc_alloc_clid(struct rpc_clnt *clnt) { int clid; - clid = ida_simple_get(&rpc_clids, 0, 0, GFP_KERNEL); + clid = ida_alloc(&rpc_clids, GFP_KERNEL); if (clid < 0) return clid; clnt->cl_clid = clid; @@ -354,7 +354,7 @@ static int rpc_alloc_clid(struct rpc_clnt *clnt) static void rpc_free_clid(struct rpc_clnt *clnt) { - ida_simple_remove(&rpc_clids, clnt->cl_clid); + ida_free(&rpc_clids, clnt->cl_clid); } static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, @@ -873,6 +873,57 @@ void rpc_killall_tasks(struct rpc_clnt *clnt) } EXPORT_SYMBOL_GPL(rpc_killall_tasks); +/** + * rpc_cancel_tasks - try to cancel a set of RPC tasks + * @clnt: Pointer to RPC client + * @error: RPC task error value to set + * @fnmatch: Pointer to selector function + * @data: User data + * + * Uses @fnmatch to define a set of RPC tasks that are to be cancelled. + * The argument @error must be a negative error value. + */ +unsigned long rpc_cancel_tasks(struct rpc_clnt *clnt, int error, + bool (*fnmatch)(const struct rpc_task *, + const void *), + const void *data) +{ + struct rpc_task *task; + unsigned long count = 0; + + if (list_empty(&clnt->cl_tasks)) + return 0; + /* + * Spin lock all_tasks to prevent changes... + */ + spin_lock(&clnt->cl_lock); + list_for_each_entry(task, &clnt->cl_tasks, tk_task) { + if (!RPC_IS_ACTIVATED(task)) + continue; + if (!fnmatch(task, data)) + continue; + rpc_task_try_cancel(task, error); + count++; + } + spin_unlock(&clnt->cl_lock); + return count; +} +EXPORT_SYMBOL_GPL(rpc_cancel_tasks); + +static int rpc_clnt_disconnect_xprt(struct rpc_clnt *clnt, + struct rpc_xprt *xprt, void *dummy) +{ + if (xprt_connected(xprt)) + xprt_force_disconnect(xprt); + return 0; +} + +void rpc_clnt_disconnect(struct rpc_clnt *clnt) +{ + rpc_clnt_iterate_for_each_xprt(clnt, rpc_clnt_disconnect_xprt, NULL); +} +EXPORT_SYMBOL_GPL(rpc_clnt_disconnect); + /* * Properly shut down an RPC client, terminating all outstanding * requests. 
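The rpc_cancel_tasks() kernel-doc above defines a selector-based cancellation API: the caller supplies a predicate and gets back the number of matching active tasks. A minimal caller sketch — the predicate name, the helper name, and the -EIO status are illustrative only, not taken from the patch:

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

/* Illustrative predicate: select every active task on the client. */
static bool example_match_all(const struct rpc_task *task, const void *data)
{
	return true;
}

static void example_cancel_all(struct rpc_clnt *clnt)
{
	/* Returns the number of matching active tasks asked to exit. */
	unsigned long n = rpc_cancel_tasks(clnt, -EIO, example_match_all, NULL);

	pr_info("cancelled %lu RPC tasks\n", n);
}

A real caller would pair this with the new rpc_clnt_disconnect() when tearing down all traffic on a client, rather than matching everything unconditionally.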
@@ -1642,7 +1693,7 @@ static void __rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status) { trace_rpc_call_rpcerror(task, tk_status, rpc_status); - task->tk_rpc_status = rpc_status; + rpc_task_set_rpc_status(task, rpc_status); rpc_exit(task, tk_status); } @@ -2435,10 +2486,8 @@ rpc_check_timeout(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; - if (RPC_SIGNALLED(task)) { - rpc_call_rpcerror(task, -ERESTARTSYS); + if (RPC_SIGNALLED(task)) return; - } if (xprt_adjust_timeout(task->tk_rqstp) == 0) return; diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 46cbf151a50b..be587a308e05 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -65,6 +65,13 @@ gfp_t rpc_task_gfp_mask(void) } EXPORT_SYMBOL_GPL(rpc_task_gfp_mask); +bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status) +{ + if (cmpxchg(&task->tk_rpc_status, 0, rpc_status) == 0) + return true; + return false; +} + unsigned long rpc_task_timeout(const struct rpc_task *task) { @@ -853,12 +860,25 @@ void rpc_signal_task(struct rpc_task *task) if (!RPC_IS_ACTIVATED(task)) return; + if (!rpc_task_set_rpc_status(task, -ERESTARTSYS)) + return; trace_rpc_task_signalled(task, task->tk_action); set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate); smp_mb__after_atomic(); queue = READ_ONCE(task->tk_waitqueue); if (queue) - rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS); + rpc_wake_up_queued_task(queue, task); +} + +void rpc_task_try_cancel(struct rpc_task *task, int error) +{ + struct rpc_wait_queue *queue; + + if (!rpc_task_set_rpc_status(task, error)) + return; + queue = READ_ONCE(task->tk_waitqueue); + if (queue) + rpc_wake_up_queued_task(queue, task); } void rpc_exit(struct rpc_task *task, int status) @@ -905,10 +925,16 @@ static void __rpc_execute(struct rpc_task *task) * Perform the next FSM step or a pending callback. * * tk_action may be NULL if the task has been killed. - * In particular, note that rpc_killall_tasks may - * do this at any time, so beware when dereferencing. */ do_action = task->tk_action; + /* Tasks with an RPC error status should exit */ + if (do_action != rpc_exit_task && + (status = READ_ONCE(task->tk_rpc_status)) != 0) { + task->tk_status = status; + if (do_action != NULL) + do_action = rpc_exit_task; + } + /* Callbacks override all actions */ if (task->tk_callback) { do_action = task->tk_callback; task->tk_callback = NULL; @@ -931,14 +957,6 @@ static void __rpc_execute(struct rpc_task *task) } /* - * Signalled tasks should exit rather than sleep. - */ - if (RPC_SIGNALLED(task)) { - task->tk_rpc_status = -ERESTARTSYS; - rpc_exit(task, -ERESTARTSYS); - } - - /* * The queue->lock protects against races with * rpc_make_runnable(). * @@ -953,6 +971,12 @@ static void __rpc_execute(struct rpc_task *task) spin_unlock(&queue->lock); continue; } + /* Wake up any task that has an exit status */ + if (READ_ONCE(task->tk_rpc_status) != 0) { + rpc_wake_up_task_queue_locked(queue, task); + spin_unlock(&queue->lock); + continue; + } rpc_clear_running(task); spin_unlock(&queue->lock); if (task_is_async) @@ -970,10 +994,7 @@ static void __rpc_execute(struct rpc_task *task) * clean up after sleeping on some queue, we don't * break the loop here, but go around once more. 
*/ - trace_rpc_task_signalled(task, task->tk_action); - set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate); - task->tk_rpc_status = -ERESTARTSYS; - rpc_exit(task, -ERESTARTSYS); + rpc_signal_task(task); } trace_rpc_task_sync_wake(task, task->tk_action); } diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index f8fae7815649..71dc26373444 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -1788,7 +1788,7 @@ static int xprt_alloc_id(struct rpc_xprt *xprt) { int id; - id = ida_simple_get(&rpc_xprt_ids, 0, 0, GFP_KERNEL); + id = ida_alloc(&rpc_xprt_ids, GFP_KERNEL); if (id < 0) return id; @@ -1798,7 +1798,7 @@ static int xprt_alloc_id(struct rpc_xprt *xprt) static void xprt_free_id(struct rpc_xprt *xprt) { - ida_simple_remove(&rpc_xprt_ids, xprt->id); + ida_free(&rpc_xprt_ids, xprt->id); } struct rpc_xprt *xprt_alloc(struct net *net, size_t size, @@ -1822,10 +1822,7 @@ struct rpc_xprt *xprt_alloc(struct net *net, size_t size, goto out_free; list_add(&req->rq_list, &xprt->free); } - if (max_alloc > num_prealloc) - xprt->max_reqs = max_alloc; - else - xprt->max_reqs = num_prealloc; + xprt->max_reqs = max_t(unsigned int, max_alloc, num_prealloc); xprt->min_reqs = num_prealloc; xprt->num_reqs = num_prealloc; diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c index 685db598acbe..701250b305db 100644 --- a/net/sunrpc/xprtmultipath.c +++ b/net/sunrpc/xprtmultipath.c @@ -103,7 +103,7 @@ static int xprt_switch_alloc_id(struct rpc_xprt_switch *xps, gfp_t gfp_flags) { int id; - id = ida_simple_get(&rpc_xprtswitch_ids, 0, 0, gfp_flags); + id = ida_alloc(&rpc_xprtswitch_ids, gfp_flags); if (id < 0) return id; @@ -113,7 +113,7 @@ static int xprt_switch_alloc_id(struct rpc_xprt_switch *xps, gfp_t gfp_flags) static void xprt_switch_free_id(struct rpc_xprt_switch *xps) { - ida_simple_remove(&rpc_xprtswitch_ids, xps->xps_id); + ida_free(&rpc_xprtswitch_ids, xps->xps_id); } /** diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c index faba7136dd9a..e4d84a13c566 100644 --- a/net/sunrpc/xprtrdma/backchannel.c +++ b/net/sunrpc/xprtrdma/backchannel.c @@ -189,7 +189,7 @@ create_req: return NULL; size = min_t(size_t, r_xprt->rx_ep->re_inline_recv, PAGE_SIZE); - req = rpcrdma_req_create(r_xprt, size, GFP_KERNEL); + req = rpcrdma_req_create(r_xprt, size); if (!req) return NULL; if (rpcrdma_req_setup(r_xprt, req)) { diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index de0bdb6b729f..ffbf99894970 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -124,16 +124,16 @@ int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr) unsigned int depth = ep->re_max_fr_depth; struct scatterlist *sg; struct ib_mr *frmr; - int rc; + + sg = kcalloc_node(depth, sizeof(*sg), XPRTRDMA_GFP_FLAGS, + ibdev_to_node(ep->re_id->device)); + if (!sg) + return -ENOMEM; frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth); if (IS_ERR(frmr)) goto out_mr_err; - sg = kmalloc_array(depth, sizeof(*sg), GFP_KERNEL); - if (!sg) - goto out_list_err; - mr->mr_xprt = r_xprt; mr->mr_ibmr = frmr; mr->mr_device = NULL; @@ -146,13 +146,9 @@ int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr) return 0; out_mr_err: - rc = PTR_ERR(frmr); - trace_xprtrdma_frwr_alloc(mr, rc); - return rc; - -out_list_err: - ib_dereg_mr(frmr); - return -ENOMEM; + kfree(sg); + trace_xprtrdma_frwr_alloc(mr, PTR_ERR(frmr)); + return PTR_ERR(frmr); } /** diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c 
b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index 85c8cdda98b1..aa2227a7e552 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -119,12 +119,12 @@ xprt_rdma_bc_allocate(struct rpc_task *task) return -EINVAL; } - page = alloc_page(RPCRDMA_DEF_GFP); + page = alloc_page(GFP_NOIO | __GFP_NOWARN); if (!page) return -ENOMEM; rqst->rq_buffer = page_address(page); - rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, RPCRDMA_DEF_GFP); + rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, GFP_NOIO | __GFP_NOWARN); if (!rqst->rq_rbuffer) { put_page(page); return -ENOMEM; diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index bcb37b51adf6..10bb2b929c6d 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -494,8 +494,7 @@ xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task) xprt_reconnect_backoff(xprt, RPCRDMA_INIT_REEST_TO); } trace_xprtrdma_op_connect(r_xprt, delay); - queue_delayed_work(xprtiod_workqueue, &r_xprt->rx_connect_worker, - delay); + queue_delayed_work(system_long_wq, &r_xprt->rx_connect_worker, delay); } /** diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 2fbe9aaeec34..44b87e4274b4 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -76,8 +76,7 @@ static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt); static void rpcrdma_ep_get(struct rpcrdma_ep *ep); static int rpcrdma_ep_put(struct rpcrdma_ep *ep); static struct rpcrdma_regbuf * -rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction, - gfp_t flags); +rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction); static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb); static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb); @@ -373,7 +372,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt) struct rpcrdma_ep *ep; int rc; - ep = kzalloc(sizeof(*ep), GFP_KERNEL); + ep = kzalloc(sizeof(*ep), XPRTRDMA_GFP_FLAGS); if (!ep) return -ENOTCONN; ep->re_xprt = &r_xprt->rx_xprt; @@ -606,7 +605,7 @@ static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ep *ep) struct rpcrdma_sendctx *sc; sc = kzalloc(struct_size(sc, sc_sges, ep->re_attr.cap.max_send_sge), - GFP_KERNEL); + XPRTRDMA_GFP_FLAGS); if (!sc) return NULL; @@ -629,7 +628,7 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt) * Sends are posted. */ i = r_xprt->rx_ep->re_max_requests + RPCRDMA_MAX_BC_REQUESTS; - buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL); + buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), XPRTRDMA_GFP_FLAGS); if (!buf->rb_sc_ctxs) return -ENOMEM; @@ -740,13 +739,16 @@ rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt) { struct rpcrdma_buffer *buf = &r_xprt->rx_buf; struct rpcrdma_ep *ep = r_xprt->rx_ep; + struct ib_device *device = ep->re_id->device; unsigned int count; + /* Try to allocate enough to perform one full-sized I/O */ for (count = 0; count < ep->re_max_rdma_segs; count++) { struct rpcrdma_mr *mr; int rc; - mr = kzalloc(sizeof(*mr), GFP_KERNEL); + mr = kzalloc_node(sizeof(*mr), XPRTRDMA_GFP_FLAGS, + ibdev_to_node(device)); if (!mr) break; @@ -791,38 +793,33 @@ void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt) /* If there is no underlying connection, it's no use * to wake the refresh worker. */ - if (ep->re_connect_status == 1) { - /* The work is scheduled on a WQ_MEM_RECLAIM - * workqueue in order to prevent MR allocation - * from recursing into NFS during direct reclaim. 
- */ - queue_work(xprtiod_workqueue, &buf->rb_refresh_worker); - } + if (ep->re_connect_status != 1) + return; + queue_work(system_highpri_wq, &buf->rb_refresh_worker); } /** * rpcrdma_req_create - Allocate an rpcrdma_req object * @r_xprt: controlling r_xprt * @size: initial size, in bytes, of send and receive buffers - * @flags: GFP flags passed to memory allocators * * Returns an allocated and fully initialized rpcrdma_req or NULL. */ -struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size, - gfp_t flags) +struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, + size_t size) { struct rpcrdma_buffer *buffer = &r_xprt->rx_buf; struct rpcrdma_req *req; - req = kzalloc(sizeof(*req), flags); + req = kzalloc(sizeof(*req), XPRTRDMA_GFP_FLAGS); if (req == NULL) goto out1; - req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, flags); + req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE); if (!req->rl_sendbuf) goto out2; - req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE, flags); + req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE); if (!req->rl_recvbuf) goto out3; @@ -858,7 +855,7 @@ int rpcrdma_req_setup(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) r_xprt->rx_ep->re_max_rdma_segs * rpcrdma_readchunk_maxsz; maxhdrsize *= sizeof(__be32); rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize), - DMA_TO_DEVICE, GFP_KERNEL); + DMA_TO_DEVICE); if (!rb) goto out; @@ -929,12 +926,12 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, struct rpcrdma_buffer *buf = &r_xprt->rx_buf; struct rpcrdma_rep *rep; - rep = kzalloc(sizeof(*rep), GFP_KERNEL); + rep = kzalloc(sizeof(*rep), XPRTRDMA_GFP_FLAGS); if (rep == NULL) goto out; rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep->re_inline_recv, - DMA_FROM_DEVICE, GFP_KERNEL); + DMA_FROM_DEVICE); if (!rep->rr_rdmabuf) goto out_free; @@ -1064,8 +1061,8 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt) for (i = 0; i < r_xprt->rx_xprt.max_reqs; i++) { struct rpcrdma_req *req; - req = rpcrdma_req_create(r_xprt, RPCRDMA_V1_DEF_INLINE_SIZE * 2, - GFP_KERNEL); + req = rpcrdma_req_create(r_xprt, + RPCRDMA_V1_DEF_INLINE_SIZE * 2); if (!req) goto out; list_add(&req->rl_list, &buf->rb_send_bufs); @@ -1235,15 +1232,14 @@ void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req) * or Replies they may be registered externally via frwr_map. */ static struct rpcrdma_regbuf * -rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction, - gfp_t flags) +rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction) { struct rpcrdma_regbuf *rb; - rb = kmalloc(sizeof(*rb), flags); + rb = kmalloc(sizeof(*rb), XPRTRDMA_GFP_FLAGS); if (!rb) return NULL; - rb->rg_data = kmalloc(size, flags); + rb->rg_data = kmalloc(size, XPRTRDMA_GFP_FLAGS); if (!rb->rg_data) { kfree(rb); return NULL; diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index c79f92eeda76..5e5ff6784ef5 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -149,7 +149,11 @@ static inline void *rdmab_data(const struct rpcrdma_regbuf *rb) return rb->rg_data; } -#define RPCRDMA_DEF_GFP (GFP_NOIO | __GFP_NOWARN) +/* Do not use emergency memory reserves, and fail quickly if memory + * cannot be allocated easily. These flags may be used wherever there + * is robust logic to handle a failure to allocate. 
+ */ +#define XPRTRDMA_GFP_FLAGS (__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) /* To ensure a transport can always make forward progress, * the number of RDMA segments allowed in header chunk lists @@ -467,8 +471,8 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp); /* * Buffer calls - xprtrdma/verbs.c */ -struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size, - gfp_t flags); +struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, + size_t size); int rpcrdma_req_setup(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req); void rpcrdma_req_destroy(struct rpcrdma_req *req); int rpcrdma_buffer_create(struct rpcrdma_xprt *); diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index e976007f4fd0..f34d5427b66c 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -261,7 +261,7 @@ static void xs_format_common_peer_addresses(struct rpc_xprt *xprt) switch (sap->sa_family) { case AF_LOCAL: sun = xs_addr_un(xprt); - strlcpy(buf, sun->sun_path, sizeof(buf)); + strscpy(buf, sun->sun_path, sizeof(buf)); xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL); break; @@ -1978,8 +1978,7 @@ static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task) * we'll need to figure out how to pass a namespace to * connect. */ - task->tk_rpc_status = -ENOTCONN; - rpc_exit(task, -ENOTCONN); + rpc_task_set_rpc_status(task, -ENOTCONN); goto out_wake; } ret = xs_local_setup_socket(transport); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 8ff8b1c040f0..597c52236514 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -13265,7 +13265,9 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev, wake_mask_size); if (tok) { cfg->tokens_size = tokens_size; - memcpy(&cfg->payload_tok, tok, sizeof(*tok) + tokens_size); + cfg->payload_tok = *tok; + memcpy(cfg->payload_tok.token_stream, tok->token_stream, + tokens_size); } trig->tcp = cfg; diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 5382fc2003db..806a5f1330ff 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -143,18 +143,12 @@ static inline void bss_ref_get(struct cfg80211_registered_device *rdev, lockdep_assert_held(&rdev->bss_lock); bss->refcount++; - if (bss->pub.hidden_beacon_bss) { - bss = container_of(bss->pub.hidden_beacon_bss, - struct cfg80211_internal_bss, - pub); - bss->refcount++; - } - if (bss->pub.transmitted_bss) { - bss = container_of(bss->pub.transmitted_bss, - struct cfg80211_internal_bss, - pub); - bss->refcount++; - } + + if (bss->pub.hidden_beacon_bss) + bss_from_pub(bss->pub.hidden_beacon_bss)->refcount++; + + if (bss->pub.transmitted_bss) + bss_from_pub(bss->pub.transmitted_bss)->refcount++; } static inline void bss_ref_put(struct cfg80211_registered_device *rdev, @@ -304,7 +298,8 @@ static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen, tmp_old = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen); tmp_old = (tmp_old) ? 
tmp_old + tmp_old[1] + 2 : ie; - while (tmp_old + tmp_old[1] + 2 - ie <= ielen) { + while (tmp_old + 2 - ie <= ielen && + tmp_old + tmp_old[1] + 2 - ie <= ielen) { if (tmp_old[0] == 0) { tmp_old++; continue; @@ -364,7 +359,8 @@ static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen, * copied to new ie, skip ssid, capability, bssid-index ie */ tmp_new = sub_copy; - while (tmp_new + tmp_new[1] + 2 - sub_copy <= subie_len) { + while (tmp_new + 2 - sub_copy <= subie_len && + tmp_new + tmp_new[1] + 2 - sub_copy <= subie_len) { if (!(tmp_new[0] == WLAN_EID_NON_TX_BSSID_CAP || tmp_new[0] == WLAN_EID_SSID)) { memcpy(pos, tmp_new, tmp_new[1] + 2); @@ -427,6 +423,15 @@ cfg80211_add_nontrans_list(struct cfg80211_bss *trans_bss, rcu_read_unlock(); + /* + * This is a bit weird - it's not on the list, but already on another + * one! The only way that could happen is if there's some BSSID/SSID + * shared by multiple APs in their multi-BSSID profiles, potentially + * with hidden SSID mixed in ... ignore it. + */ + if (!list_empty(&nontrans_bss->nontrans_list)) + return -EINVAL; + /* add to the list */ list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list); return 0; @@ -1602,6 +1607,23 @@ struct cfg80211_non_tx_bss { u8 bssid_index; }; +static void cfg80211_update_hidden_bsses(struct cfg80211_internal_bss *known, + const struct cfg80211_bss_ies *new_ies, + const struct cfg80211_bss_ies *old_ies) +{ + struct cfg80211_internal_bss *bss; + + /* Assign beacon IEs to all sub entries */ + list_for_each_entry(bss, &known->hidden_list, hidden_list) { + const struct cfg80211_bss_ies *ies; + + ies = rcu_access_pointer(bss->pub.beacon_ies); + WARN_ON(ies != old_ies); + + rcu_assign_pointer(bss->pub.beacon_ies, new_ies); + } +} + static bool cfg80211_update_known_bss(struct cfg80211_registered_device *rdev, struct cfg80211_internal_bss *known, @@ -1625,7 +1647,6 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev, kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head); } else if (rcu_access_pointer(new->pub.beacon_ies)) { const struct cfg80211_bss_ies *old; - struct cfg80211_internal_bss *bss; if (known->pub.hidden_beacon_bss && !list_empty(&known->hidden_list)) { @@ -1653,16 +1674,7 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev, if (old == rcu_access_pointer(known->pub.ies)) rcu_assign_pointer(known->pub.ies, new->pub.beacon_ies); - /* Assign beacon IEs to all sub entries */ - list_for_each_entry(bss, &known->hidden_list, hidden_list) { - const struct cfg80211_bss_ies *ies; - - ies = rcu_access_pointer(bss->pub.beacon_ies); - WARN_ON(ies != old); - - rcu_assign_pointer(bss->pub.beacon_ies, - new->pub.beacon_ies); - } + cfg80211_update_hidden_bsses(known, new->pub.beacon_ies, old); if (old) kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head); @@ -1739,6 +1751,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, new->refcount = 1; INIT_LIST_HEAD(&new->hidden_list); INIT_LIST_HEAD(&new->pub.nontrans_list); + /* we'll set this later if it was non-NULL */ + new->pub.transmitted_bss = NULL; if (rcu_access_pointer(tmp->pub.proberesp_ies)) { hidden = rb_find_bss(rdev, tmp, BSS_CMP_HIDE_ZLEN); @@ -2021,10 +2035,15 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, spin_lock_bh(&rdev->bss_lock); if (cfg80211_add_nontrans_list(non_tx_data->tx_bss, &res->pub)) { - if (__cfg80211_unlink_bss(rdev, res)) + if (__cfg80211_unlink_bss(rdev, res)) { rdev->bss_generation++; + res = NULL; + } } spin_unlock_bh(&rdev->bss_lock); + + if (!res) + return NULL; } 
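The paired bounds checks added to the IE walks above — header first, then the full element — are a general TLV-iteration pattern. A standalone sketch with hypothetical names (ie_next, pos, end are not from the patch), assuming id/len/data elements as in 802.11 IEs:

#include <stddef.h>
#include <stdint.h>

/* Return the start of the next element, or NULL when the current one
 * would run past the buffer: verify the 2-byte header fits before
 * reading len, then verify the whole element fits before consuming it. */
static const uint8_t *ie_next(const uint8_t *pos, const uint8_t *end)
{
	if (end - pos < 2)		/* id + len must fit */
		return NULL;
	if (end - pos < 2 + pos[1])	/* id + len + data must fit */
		return NULL;
	return pos + 2 + pos[1];
}

Checking only the combined condition, as the old loops did, lets a crafted len byte push the pointer arithmetic past the buffer before the comparison happens; splitting the check closes that.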
trace_cfg80211_return_bss(&res->pub); @@ -2143,6 +2162,8 @@ static void cfg80211_parse_mbssid_data(struct wiphy *wiphy, for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, ie, ielen) { if (elem->datalen < 4) continue; + if (elem->data[0] < 1 || (int)elem->data[0] > 8) + continue; for_each_element(sub, elem->data + 1, elem->datalen - 1) { u8 profile_len; @@ -2279,7 +2300,7 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy, size_t new_ie_len; struct cfg80211_bss_ies *new_ies; const struct cfg80211_bss_ies *old; - u8 cpy_len; + size_t cpy_len; lockdep_assert_held(&wiphy_to_rdev(wiphy)->bss_lock); @@ -2346,6 +2367,8 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy, } else { old = rcu_access_pointer(nontrans_bss->beacon_ies); rcu_assign_pointer(nontrans_bss->beacon_ies, new_ies); + cfg80211_update_hidden_bsses(bss_from_pub(nontrans_bss), + new_ies, old); rcu_assign_pointer(nontrans_bss->ies, new_ies); if (old) kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head); diff --git a/net/wireless/util.c b/net/wireless/util.c index 01493568a21d..1f285b515028 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -559,7 +559,7 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, return -1; hdrlen = ieee80211_hdrlen(hdr->frame_control) + data_offset; - if (skb->len < hdrlen + 8) + if (skb->len < hdrlen) return -1; /* convert IEEE 802.11 header + possible LLC headers into Ethernet @@ -574,8 +574,9 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, memcpy(tmp.h_dest, ieee80211_get_DA(hdr), ETH_ALEN); memcpy(tmp.h_source, ieee80211_get_SA(hdr), ETH_ALEN); - if (iftype == NL80211_IFTYPE_MESH_POINT) - skb_copy_bits(skb, hdrlen, &mesh_flags, 1); + if (iftype == NL80211_IFTYPE_MESH_POINT && + skb_copy_bits(skb, hdrlen, &mesh_flags, 1) < 0) + return -1; mesh_flags &= MESH_FLAGS_AE; @@ -595,11 +596,12 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, if (iftype == NL80211_IFTYPE_MESH_POINT) { if (mesh_flags == MESH_FLAGS_AE_A4) return -1; - if (mesh_flags == MESH_FLAGS_AE_A5_A6) { - skb_copy_bits(skb, hdrlen + - offsetof(struct ieee80211s_hdr, eaddr1), - tmp.h_dest, 2 * ETH_ALEN); - } + if (mesh_flags == MESH_FLAGS_AE_A5_A6 && + skb_copy_bits(skb, hdrlen + + offsetof(struct ieee80211s_hdr, eaddr1), + tmp.h_dest, 2 * ETH_ALEN) < 0) + return -1; + hdrlen += __ieee80211_get_mesh_hdrlen(mesh_flags); } break; @@ -613,10 +615,11 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, if (iftype == NL80211_IFTYPE_MESH_POINT) { if (mesh_flags == MESH_FLAGS_AE_A5_A6) return -1; - if (mesh_flags == MESH_FLAGS_AE_A4) - skb_copy_bits(skb, hdrlen + - offsetof(struct ieee80211s_hdr, eaddr1), - tmp.h_source, ETH_ALEN); + if (mesh_flags == MESH_FLAGS_AE_A4 && + skb_copy_bits(skb, hdrlen + + offsetof(struct ieee80211s_hdr, eaddr1), + tmp.h_source, ETH_ALEN) < 0) + return -1; hdrlen += __ieee80211_get_mesh_hdrlen(mesh_flags); } break; @@ -628,16 +631,15 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, break; } - skb_copy_bits(skb, hdrlen, &payload, sizeof(payload)); - tmp.h_proto = payload.proto; - - if (likely((!is_amsdu && ether_addr_equal(payload.hdr, rfc1042_header) && - tmp.h_proto != htons(ETH_P_AARP) && - tmp.h_proto != htons(ETH_P_IPX)) || - ether_addr_equal(payload.hdr, bridge_tunnel_header))) { + if (likely(skb_copy_bits(skb, hdrlen, &payload, sizeof(payload)) == 0 && + ((!is_amsdu && ether_addr_equal(payload.hdr, rfc1042_header) && + payload.proto != 
htons(ETH_P_AARP) && + payload.proto != htons(ETH_P_IPX)) || + ether_addr_equal(payload.hdr, bridge_tunnel_header)))) { /* remove RFC1042 or Bridge-Tunnel encapsulation and * replace EtherType */ hdrlen += ETH_ALEN + 2; + tmp.h_proto = payload.proto; skb_postpull_rcsum(skb, &payload, ETH_ALEN + 2); } else { tmp.h_proto = htons(skb->len - hdrlen); diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 76a80a41615b..fe8765c4075d 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -468,6 +468,7 @@ void wireless_send_event(struct net_device * dev, struct __compat_iw_event *compat_event; struct compat_iw_point compat_wrqu; struct sk_buff *compskb; + int ptr_len; #endif /* @@ -582,6 +583,9 @@ void wireless_send_event(struct net_device * dev, nlmsg_end(skb, nlh); #ifdef CONFIG_COMPAT hdr_len = compat_event_type_size[descr->header_type]; + + /* ptr_len is remaining size in event header apart from LCP */ + ptr_len = hdr_len - IW_EV_COMPAT_LCP_LEN; event_len = hdr_len + extra_len; compskb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); @@ -612,16 +616,15 @@ void wireless_send_event(struct net_device * dev, if (descr->header_type == IW_HEADER_TYPE_POINT) { compat_wrqu.length = wrqu->data.length; compat_wrqu.flags = wrqu->data.flags; - memcpy(&compat_event->pointer, - ((char *) &compat_wrqu) + IW_EV_COMPAT_POINT_OFF, - hdr_len - IW_EV_COMPAT_LCP_LEN); + memcpy(compat_event->ptr_bytes, + ((char *)&compat_wrqu) + IW_EV_COMPAT_POINT_OFF, + ptr_len); if (extra_len) - memcpy(((char *) compat_event) + hdr_len, - extra, extra_len); + memcpy(&compat_event->ptr_bytes[ptr_len], + extra, extra_len); } else { /* extra_len must be zero, so no if (extra) needed */ - memcpy(&compat_event->pointer, wrqu, - hdr_len - IW_EV_COMPAT_LCP_LEN); + memcpy(compat_event->ptr_bytes, wrqu, ptr_len); } nlmsg_end(compskb, nlh); diff --git a/samples/vfio-mdev/mbochs.c b/samples/vfio-mdev/mbochs.c index 344c2901a82b..117a8d799f71 100644 --- a/samples/vfio-mdev/mbochs.c +++ b/samples/vfio-mdev/mbochs.c @@ -21,7 +21,6 @@ */ #include <linux/init.h> #include <linux/module.h> -#include <linux/device.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/vmalloc.h> @@ -100,35 +99,44 @@ MODULE_PARM_DESC(mem, "megabytes available to " MBOCHS_NAME " devices"); #define MBOCHS_TYPE_2 "medium" #define MBOCHS_TYPE_3 "large" -static const struct mbochs_type { - const char *name; +static struct mbochs_type { + struct mdev_type type; u32 mbytes; u32 max_x; u32 max_y; } mbochs_types[] = { { - .name = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_1, + .type.sysfs_name = MBOCHS_TYPE_1, + .type.pretty_name = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_1, .mbytes = 4, .max_x = 800, .max_y = 600, }, { - .name = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_2, + .type.sysfs_name = MBOCHS_TYPE_2, + .type.pretty_name = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_2, .mbytes = 16, .max_x = 1920, .max_y = 1440, }, { - .name = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_3, + .type.sysfs_name = MBOCHS_TYPE_3, + .type.pretty_name = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_3, .mbytes = 64, .max_x = 0, .max_y = 0, }, }; +static struct mdev_type *mbochs_mdev_types[] = { + &mbochs_types[0].type, + &mbochs_types[1].type, + &mbochs_types[2].type, +}; static dev_t mbochs_devt; static struct class *mbochs_class; static struct cdev mbochs_cdev; static struct device mbochs_dev; +static struct mdev_parent mbochs_parent; static atomic_t mbochs_avail_mbytes; static const struct vfio_device_ops mbochs_dev_ops; @@ -505,13 +513,14 @@ static int mbochs_reset(struct mdev_state *mdev_state) 
return 0; } -static int mbochs_probe(struct mdev_device *mdev) +static int mbochs_init_dev(struct vfio_device *vdev) { + struct mdev_state *mdev_state = + container_of(vdev, struct mdev_state, vdev); + struct mdev_device *mdev = to_mdev_device(vdev->dev); + struct mbochs_type *type = + container_of(mdev->type, struct mbochs_type, type); int avail_mbytes = atomic_read(&mbochs_avail_mbytes); - const struct mbochs_type *type = - &mbochs_types[mdev_get_type_group_id(mdev)]; - struct device *dev = mdev_dev(mdev); - struct mdev_state *mdev_state; int ret = -ENOMEM; do { @@ -520,14 +529,9 @@ static int mbochs_probe(struct mdev_device *mdev) } while (!atomic_try_cmpxchg(&mbochs_avail_mbytes, &avail_mbytes, avail_mbytes - type->mbytes)); - mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL); - if (mdev_state == NULL) - goto err_avail; - vfio_init_group_dev(&mdev_state->vdev, &mdev->dev, &mbochs_dev_ops); - mdev_state->vconfig = kzalloc(MBOCHS_CONFIG_SPACE_SIZE, GFP_KERNEL); - if (mdev_state->vconfig == NULL) - goto err_mem; + if (!mdev_state->vconfig) + goto err_avail; mdev_state->memsize = type->mbytes * 1024 * 1024; mdev_state->pagecount = mdev_state->memsize >> PAGE_SHIFT; @@ -535,10 +539,7 @@ static int mbochs_probe(struct mdev_device *mdev) sizeof(struct page *), GFP_KERNEL); if (!mdev_state->pages) - goto err_mem; - - dev_info(dev, "%s: %s, %d MB, %ld pages\n", __func__, - type->name, type->mbytes, mdev_state->pagecount); + goto err_vconfig; mutex_init(&mdev_state->ops_lock); mdev_state->mdev = mdev; @@ -553,19 +554,47 @@ static int mbochs_probe(struct mdev_device *mdev) mbochs_create_config_space(mdev_state); mbochs_reset(mdev_state); + dev_info(vdev->dev, "%s: %s, %d MB, %ld pages\n", __func__, + type->type.pretty_name, type->mbytes, mdev_state->pagecount); + return 0; + +err_vconfig: + kfree(mdev_state->vconfig); +err_avail: + atomic_add(type->mbytes, &mbochs_avail_mbytes); + return ret; +} + +static int mbochs_probe(struct mdev_device *mdev) +{ + struct mdev_state *mdev_state; + int ret = -ENOMEM; + + mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev, + &mbochs_dev_ops); + if (IS_ERR(mdev_state)) + return PTR_ERR(mdev_state); + ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev); if (ret) - goto err_mem; + goto err_put_vdev; dev_set_drvdata(&mdev->dev, mdev_state); return 0; -err_mem: - vfio_uninit_group_dev(&mdev_state->vdev); + +err_put_vdev: + vfio_put_device(&mdev_state->vdev); + return ret; +} + +static void mbochs_release_dev(struct vfio_device *vdev) +{ + struct mdev_state *mdev_state = + container_of(vdev, struct mdev_state, vdev); + + atomic_add(mdev_state->type->mbytes, &mbochs_avail_mbytes); kfree(mdev_state->pages); kfree(mdev_state->vconfig); - kfree(mdev_state); -err_avail: - atomic_add(type->mbytes, &mbochs_avail_mbytes); - return ret; + vfio_free_device(vdev); } static void mbochs_remove(struct mdev_device *mdev) @@ -573,11 +602,7 @@ static void mbochs_remove(struct mdev_device *mdev) struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev); vfio_unregister_group_dev(&mdev_state->vdev); - vfio_uninit_group_dev(&mdev_state->vdev); - atomic_add(mdev_state->type->mbytes, &mbochs_avail_mbytes); - kfree(mdev_state->pages); - kfree(mdev_state->vconfig); - kfree(mdev_state); + vfio_put_device(&mdev_state->vdev); } static ssize_t mbochs_read(struct vfio_device *vdev, char __user *buf, @@ -1325,78 +1350,27 @@ static const struct attribute_group *mdev_dev_groups[] = { NULL, }; -static ssize_t name_show(struct mdev_type *mtype, - struct mdev_type_attribute 
*attr, char *buf) +static ssize_t mbochs_show_description(struct mdev_type *mtype, char *buf) { - const struct mbochs_type *type = - &mbochs_types[mtype_get_type_group_id(mtype)]; - - return sprintf(buf, "%s\n", type->name); -} -static MDEV_TYPE_ATTR_RO(name); - -static ssize_t description_show(struct mdev_type *mtype, - struct mdev_type_attribute *attr, char *buf) -{ - const struct mbochs_type *type = - &mbochs_types[mtype_get_type_group_id(mtype)]; + struct mbochs_type *type = + container_of(mtype, struct mbochs_type, type); return sprintf(buf, "virtual display, %d MB video memory\n", type ? type->mbytes : 0); } -static MDEV_TYPE_ATTR_RO(description); -static ssize_t available_instances_show(struct mdev_type *mtype, - struct mdev_type_attribute *attr, - char *buf) +static unsigned int mbochs_get_available(struct mdev_type *mtype) { - const struct mbochs_type *type = - &mbochs_types[mtype_get_type_group_id(mtype)]; - int count = atomic_read(&mbochs_avail_mbytes) / type->mbytes; - - return sprintf(buf, "%d\n", count); -} -static MDEV_TYPE_ATTR_RO(available_instances); + struct mbochs_type *type = + container_of(mtype, struct mbochs_type, type); -static ssize_t device_api_show(struct mdev_type *mtype, - struct mdev_type_attribute *attr, char *buf) -{ - return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING); + return atomic_read(&mbochs_avail_mbytes) / type->mbytes; } -static MDEV_TYPE_ATTR_RO(device_api); - -static struct attribute *mdev_types_attrs[] = { - &mdev_type_attr_name.attr, - &mdev_type_attr_description.attr, - &mdev_type_attr_device_api.attr, - &mdev_type_attr_available_instances.attr, - NULL, -}; - -static struct attribute_group mdev_type_group1 = { - .name = MBOCHS_TYPE_1, - .attrs = mdev_types_attrs, -}; - -static struct attribute_group mdev_type_group2 = { - .name = MBOCHS_TYPE_2, - .attrs = mdev_types_attrs, -}; - -static struct attribute_group mdev_type_group3 = { - .name = MBOCHS_TYPE_3, - .attrs = mdev_types_attrs, -}; - -static struct attribute_group *mdev_type_groups[] = { - &mdev_type_group1, - &mdev_type_group2, - &mdev_type_group3, - NULL, -}; static const struct vfio_device_ops mbochs_dev_ops = { .close_device = mbochs_close_device, + .init = mbochs_init_dev, + .release = mbochs_release_dev, .read = mbochs_read, .write = mbochs_write, .ioctl = mbochs_ioctl, @@ -1404,6 +1378,7 @@ static const struct vfio_device_ops mbochs_dev_ops = { }; static struct mdev_driver mbochs_driver = { + .device_api = VFIO_DEVICE_API_PCI_STRING, .driver = { .name = "mbochs", .owner = THIS_MODULE, @@ -1412,7 +1387,8 @@ static struct mdev_driver mbochs_driver = { }, .probe = mbochs_probe, .remove = mbochs_remove, - .supported_type_groups = mdev_type_groups, + .get_available = mbochs_get_available, + .show_description = mbochs_show_description, }; static const struct file_operations vd_fops = { @@ -1457,7 +1433,9 @@ static int __init mbochs_dev_init(void) if (ret) goto err_class; - ret = mdev_register_device(&mbochs_dev, &mbochs_driver); + ret = mdev_register_parent(&mbochs_parent, &mbochs_dev, &mbochs_driver, + mbochs_mdev_types, + ARRAY_SIZE(mbochs_mdev_types)); if (ret) goto err_device; @@ -1478,7 +1456,7 @@ err_cdev: static void __exit mbochs_dev_exit(void) { mbochs_dev.bus = NULL; - mdev_unregister_device(&mbochs_dev); + mdev_unregister_parent(&mbochs_parent); device_unregister(&mbochs_dev); mdev_unregister_driver(&mbochs_driver); diff --git a/samples/vfio-mdev/mdpy.c b/samples/vfio-mdev/mdpy.c index e8c46eb2e246..946e8cfde6fd 100644 --- a/samples/vfio-mdev/mdpy.c +++ 
b/samples/vfio-mdev/mdpy.c @@ -17,7 +17,6 @@ */ #include <linux/init.h> #include <linux/module.h> -#include <linux/device.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/vmalloc.h> @@ -43,36 +42,34 @@ MODULE_LICENSE("GPL v2"); -static int max_devices = 4; -module_param_named(count, max_devices, int, 0444); -MODULE_PARM_DESC(count, "number of " MDPY_NAME " devices"); - - #define MDPY_TYPE_1 "vga" #define MDPY_TYPE_2 "xga" #define MDPY_TYPE_3 "hd" -static const struct mdpy_type { - const char *name; +static struct mdpy_type { + struct mdev_type type; u32 format; u32 bytepp; u32 width; u32 height; } mdpy_types[] = { { - .name = MDPY_CLASS_NAME "-" MDPY_TYPE_1, + .type.sysfs_name = MDPY_TYPE_1, + .type.pretty_name = MDPY_CLASS_NAME "-" MDPY_TYPE_1, .format = DRM_FORMAT_XRGB8888, .bytepp = 4, .width = 640, .height = 480, }, { - .name = MDPY_CLASS_NAME "-" MDPY_TYPE_2, + .type.sysfs_name = MDPY_TYPE_2, + .type.pretty_name = MDPY_CLASS_NAME "-" MDPY_TYPE_2, .format = DRM_FORMAT_XRGB8888, .bytepp = 4, .width = 1024, .height = 768, }, { - .name = MDPY_CLASS_NAME "-" MDPY_TYPE_3, + .type.sysfs_name = MDPY_TYPE_3, + .type.pretty_name = MDPY_CLASS_NAME "-" MDPY_TYPE_3, .format = DRM_FORMAT_XRGB8888, .bytepp = 4, .width = 1920, @@ -80,11 +77,17 @@ static const struct mdpy_type { }, }; +static struct mdev_type *mdpy_mdev_types[] = { + &mdpy_types[0].type, + &mdpy_types[1].type, + &mdpy_types[2].type, +}; + static dev_t mdpy_devt; static struct class *mdpy_class; static struct cdev mdpy_cdev; static struct device mdpy_dev; -static u32 mdpy_count; +static struct mdev_parent mdpy_parent; static const struct vfio_device_ops mdpy_dev_ops; /* State of each mdev device */ @@ -216,61 +219,71 @@ static int mdpy_reset(struct mdev_state *mdev_state) return 0; } -static int mdpy_probe(struct mdev_device *mdev) +static int mdpy_init_dev(struct vfio_device *vdev) { + struct mdev_state *mdev_state = + container_of(vdev, struct mdev_state, vdev); + struct mdev_device *mdev = to_mdev_device(vdev->dev); const struct mdpy_type *type = - &mdpy_types[mdev_get_type_group_id(mdev)]; - struct device *dev = mdev_dev(mdev); - struct mdev_state *mdev_state; + container_of(mdev->type, struct mdpy_type, type); u32 fbsize; - int ret; - - if (mdpy_count >= max_devices) - return -ENOMEM; - - mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL); - if (mdev_state == NULL) - return -ENOMEM; - vfio_init_group_dev(&mdev_state->vdev, &mdev->dev, &mdpy_dev_ops); + int ret = -ENOMEM; mdev_state->vconfig = kzalloc(MDPY_CONFIG_SPACE_SIZE, GFP_KERNEL); - if (mdev_state->vconfig == NULL) { - ret = -ENOMEM; - goto err_state; - } + if (!mdev_state->vconfig) + return ret; fbsize = roundup_pow_of_two(type->width * type->height * type->bytepp); mdev_state->memblk = vmalloc_user(fbsize); - if (!mdev_state->memblk) { - ret = -ENOMEM; - goto err_vconfig; - } - dev_info(dev, "%s: %s (%dx%d)\n", __func__, type->name, type->width, - type->height); + if (!mdev_state->memblk) + goto out_vconfig; mutex_init(&mdev_state->ops_lock); mdev_state->mdev = mdev; - mdev_state->type = type; + mdev_state->type = type; mdev_state->memsize = fbsize; mdpy_create_config_space(mdev_state); mdpy_reset(mdev_state); - mdpy_count++; + dev_info(vdev->dev, "%s: %s (%dx%d)\n", __func__, type->type.pretty_name, + type->width, type->height); + return 0; + +out_vconfig: + kfree(mdev_state->vconfig); + return ret; +} + +static int mdpy_probe(struct mdev_device *mdev) +{ + struct mdev_state *mdev_state; + int ret; + + mdev_state = 
vfio_alloc_device(mdev_state, vdev, &mdev->dev, + &mdpy_dev_ops); + if (IS_ERR(mdev_state)) + return PTR_ERR(mdev_state); ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev); if (ret) - goto err_mem; + goto err_put_vdev; dev_set_drvdata(&mdev->dev, mdev_state); return 0; -err_mem: + +err_put_vdev: + vfio_put_device(&mdev_state->vdev); + return ret; +} + +static void mdpy_release_dev(struct vfio_device *vdev) +{ + struct mdev_state *mdev_state = + container_of(vdev, struct mdev_state, vdev); + vfree(mdev_state->memblk); -err_vconfig: kfree(mdev_state->vconfig); -err_state: - vfio_uninit_group_dev(&mdev_state->vdev); - kfree(mdev_state); - return ret; + vfio_free_device(vdev); } static void mdpy_remove(struct mdev_device *mdev) @@ -280,12 +293,7 @@ static void mdpy_remove(struct mdev_device *mdev) dev_info(&mdev->dev, "%s\n", __func__); vfio_unregister_group_dev(&mdev_state->vdev); - vfree(mdev_state->memblk); - kfree(mdev_state->vconfig); - vfio_uninit_group_dev(&mdev_state->vdev); - kfree(mdev_state); - - mdpy_count--; + vfio_put_device(&mdev_state->vdev); } static ssize_t mdpy_read(struct vfio_device *vdev, char __user *buf, @@ -641,73 +649,17 @@ static const struct attribute_group *mdev_dev_groups[] = { NULL, }; -static ssize_t name_show(struct mdev_type *mtype, - struct mdev_type_attribute *attr, char *buf) -{ - const struct mdpy_type *type = - &mdpy_types[mtype_get_type_group_id(mtype)]; - - return sprintf(buf, "%s\n", type->name); -} -static MDEV_TYPE_ATTR_RO(name); - -static ssize_t description_show(struct mdev_type *mtype, - struct mdev_type_attribute *attr, char *buf) +static ssize_t mdpy_show_description(struct mdev_type *mtype, char *buf) { - const struct mdpy_type *type = - &mdpy_types[mtype_get_type_group_id(mtype)]; + struct mdpy_type *type = container_of(mtype, struct mdpy_type, type); return sprintf(buf, "virtual display, %dx%d framebuffer\n", type->width, type->height); } -static MDEV_TYPE_ATTR_RO(description); - -static ssize_t available_instances_show(struct mdev_type *mtype, - struct mdev_type_attribute *attr, - char *buf) -{ - return sprintf(buf, "%d\n", max_devices - mdpy_count); -} -static MDEV_TYPE_ATTR_RO(available_instances); - -static ssize_t device_api_show(struct mdev_type *mtype, - struct mdev_type_attribute *attr, char *buf) -{ - return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING); -} -static MDEV_TYPE_ATTR_RO(device_api); - -static struct attribute *mdev_types_attrs[] = { - &mdev_type_attr_name.attr, - &mdev_type_attr_description.attr, - &mdev_type_attr_device_api.attr, - &mdev_type_attr_available_instances.attr, - NULL, -}; - -static struct attribute_group mdev_type_group1 = { - .name = MDPY_TYPE_1, - .attrs = mdev_types_attrs, -}; - -static struct attribute_group mdev_type_group2 = { - .name = MDPY_TYPE_2, - .attrs = mdev_types_attrs, -}; - -static struct attribute_group mdev_type_group3 = { - .name = MDPY_TYPE_3, - .attrs = mdev_types_attrs, -}; - -static struct attribute_group *mdev_type_groups[] = { - &mdev_type_group1, - &mdev_type_group2, - &mdev_type_group3, - NULL, -}; static const struct vfio_device_ops mdpy_dev_ops = { + .init = mdpy_init_dev, + .release = mdpy_release_dev, .read = mdpy_read, .write = mdpy_write, .ioctl = mdpy_ioctl, @@ -715,6 +667,8 @@ static const struct vfio_device_ops mdpy_dev_ops = { }; static struct mdev_driver mdpy_driver = { + .device_api = VFIO_DEVICE_API_PCI_STRING, + .max_instances = 4, .driver = { .name = "mdpy", .owner = THIS_MODULE, @@ -723,7 +677,7 @@ static struct mdev_driver mdpy_driver = { }, .probe = 
mdpy_probe, .remove = mdpy_remove, - .supported_type_groups = mdev_type_groups, + .show_description = mdpy_show_description, }; static const struct file_operations vd_fops = { @@ -766,7 +720,9 @@ static int __init mdpy_dev_init(void) if (ret) goto err_class; - ret = mdev_register_device(&mdpy_dev, &mdpy_driver); + ret = mdev_register_parent(&mdpy_parent, &mdpy_dev, &mdpy_driver, + mdpy_mdev_types, + ARRAY_SIZE(mdpy_mdev_types)); if (ret) goto err_device; @@ -787,7 +743,7 @@ err_cdev: static void __exit mdpy_dev_exit(void) { mdpy_dev.bus = NULL; - mdev_unregister_device(&mdpy_dev); + mdev_unregister_parent(&mdpy_parent); device_unregister(&mdpy_dev); mdev_unregister_driver(&mdpy_driver); @@ -797,5 +753,8 @@ static void __exit mdpy_dev_exit(void) mdpy_class = NULL; } +module_param_named(count, mdpy_driver.max_instances, int, 0444); +MODULE_PARM_DESC(count, "number of " MDPY_NAME " devices"); + module_init(mdpy_dev_init) module_exit(mdpy_dev_exit) diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c index f42a59ed2e3f..e72085fc1376 100644 --- a/samples/vfio-mdev/mtty.c +++ b/samples/vfio-mdev/mtty.c @@ -12,7 +12,6 @@ #include <linux/init.h> #include <linux/module.h> -#include <linux/device.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/poll.h> @@ -20,7 +19,6 @@ #include <linux/cdev.h> #include <linux/sched.h> #include <linux/wait.h> -#include <linux/uuid.h> #include <linux/vfio.h> #include <linux/iommu.h> #include <linux/sysfs.h> @@ -74,6 +72,7 @@ static struct mtty_dev { struct cdev vd_cdev; struct idr vd_idr; struct device dev; + struct mdev_parent parent; } mtty_dev; struct mdev_region_info { @@ -144,6 +143,21 @@ struct mdev_state { int nr_ports; }; +static struct mtty_type { + struct mdev_type type; + int nr_ports; +} mtty_types[2] = { + { .nr_ports = 1, .type.sysfs_name = "1", + .type.pretty_name = "Single port serial" }, + { .nr_ports = 2, .type.sysfs_name = "2", + .type.pretty_name = "Dual port serial" }, +}; + +static struct mdev_type *mtty_mdev_types[] = { + &mtty_types[0].type, + &mtty_types[1].type, +}; + static atomic_t mdev_avail_ports = ATOMIC_INIT(MAX_MTTYS); static const struct file_operations vd_fops = { @@ -703,71 +717,82 @@ accessfailed: return ret; } -static int mtty_probe(struct mdev_device *mdev) +static int mtty_init_dev(struct vfio_device *vdev) { - struct mdev_state *mdev_state; - int nr_ports = mdev_get_type_group_id(mdev) + 1; + struct mdev_state *mdev_state = + container_of(vdev, struct mdev_state, vdev); + struct mdev_device *mdev = to_mdev_device(vdev->dev); + struct mtty_type *type = + container_of(mdev->type, struct mtty_type, type); int avail_ports = atomic_read(&mdev_avail_ports); int ret; do { - if (avail_ports < nr_ports) + if (avail_ports < type->nr_ports) return -ENOSPC; } while (!atomic_try_cmpxchg(&mdev_avail_ports, - &avail_ports, avail_ports - nr_ports)); + &avail_ports, + avail_ports - type->nr_ports)); - mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL); - if (mdev_state == NULL) { - ret = -ENOMEM; - goto err_nr_ports; - } - - vfio_init_group_dev(&mdev_state->vdev, &mdev->dev, &mtty_dev_ops); - - mdev_state->nr_ports = nr_ports; + mdev_state->nr_ports = type->nr_ports; mdev_state->irq_index = -1; mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE; mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE; mutex_init(&mdev_state->rxtx_lock); - mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL); - if (mdev_state->vconfig == NULL) { + mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL); + if 
(!mdev_state->vconfig) { ret = -ENOMEM; - goto err_state; + goto err_nr_ports; } mutex_init(&mdev_state->ops_lock); mdev_state->mdev = mdev; - mtty_create_config_space(mdev_state); + return 0; + +err_nr_ports: + atomic_add(type->nr_ports, &mdev_avail_ports); + return ret; +} + +static int mtty_probe(struct mdev_device *mdev) +{ + struct mdev_state *mdev_state; + int ret; + + mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev, + &mtty_dev_ops); + if (IS_ERR(mdev_state)) + return PTR_ERR(mdev_state); ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev); if (ret) - goto err_vconfig; + goto err_put_vdev; dev_set_drvdata(&mdev->dev, mdev_state); return 0; -err_vconfig: - kfree(mdev_state->vconfig); -err_state: - vfio_uninit_group_dev(&mdev_state->vdev); - kfree(mdev_state); -err_nr_ports: - atomic_add(nr_ports, &mdev_avail_ports); +err_put_vdev: + vfio_put_device(&mdev_state->vdev); return ret; } +static void mtty_release_dev(struct vfio_device *vdev) +{ + struct mdev_state *mdev_state = + container_of(vdev, struct mdev_state, vdev); + + atomic_add(mdev_state->nr_ports, &mdev_avail_ports); + kfree(mdev_state->vconfig); + vfio_free_device(vdev); +} + static void mtty_remove(struct mdev_device *mdev) { struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev); - int nr_ports = mdev_state->nr_ports; vfio_unregister_group_dev(&mdev_state->vdev); - - kfree(mdev_state->vconfig); - vfio_uninit_group_dev(&mdev_state->vdev); - kfree(mdev_state); - atomic_add(nr_ports, &mdev_avail_ports); + vfio_put_device(&mdev_state->vdev); } static int mtty_reset(struct mdev_state *mdev_state) @@ -1231,68 +1256,24 @@ static const struct attribute_group *mdev_dev_groups[] = { NULL, }; -static ssize_t name_show(struct mdev_type *mtype, - struct mdev_type_attribute *attr, char *buf) -{ - static const char *name_str[2] = { "Single port serial", - "Dual port serial" }; - - return sysfs_emit(buf, "%s\n", - name_str[mtype_get_type_group_id(mtype)]); -} - -static MDEV_TYPE_ATTR_RO(name); - -static ssize_t available_instances_show(struct mdev_type *mtype, - struct mdev_type_attribute *attr, - char *buf) +static unsigned int mtty_get_available(struct mdev_type *mtype) { - unsigned int ports = mtype_get_type_group_id(mtype) + 1; + struct mtty_type *type = container_of(mtype, struct mtty_type, type); - return sprintf(buf, "%d\n", atomic_read(&mdev_avail_ports) / ports); + return atomic_read(&mdev_avail_ports) / type->nr_ports; } -static MDEV_TYPE_ATTR_RO(available_instances); - -static ssize_t device_api_show(struct mdev_type *mtype, - struct mdev_type_attribute *attr, char *buf) -{ - return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING); -} - -static MDEV_TYPE_ATTR_RO(device_api); - -static struct attribute *mdev_types_attrs[] = { - &mdev_type_attr_name.attr, - &mdev_type_attr_device_api.attr, - &mdev_type_attr_available_instances.attr, - NULL, -}; - -static struct attribute_group mdev_type_group1 = { - .name = "1", - .attrs = mdev_types_attrs, -}; - -static struct attribute_group mdev_type_group2 = { - .name = "2", - .attrs = mdev_types_attrs, -}; - -static struct attribute_group *mdev_type_groups[] = { - &mdev_type_group1, - &mdev_type_group2, - NULL, -}; - static const struct vfio_device_ops mtty_dev_ops = { .name = "vfio-mtty", + .init = mtty_init_dev, + .release = mtty_release_dev, .read = mtty_read, .write = mtty_write, .ioctl = mtty_ioctl, }; static struct mdev_driver mtty_driver = { + .device_api = VFIO_DEVICE_API_PCI_STRING, .driver = { .name = "mtty", .owner = THIS_MODULE, @@ -1301,7 +1282,7 @@ static 
struct mdev_driver mtty_driver = { }, .probe = mtty_probe, .remove = mtty_remove, - .supported_type_groups = mdev_type_groups, + .get_available = mtty_get_available, }; static void mtty_device_release(struct device *dev) @@ -1352,7 +1333,9 @@ static int __init mtty_dev_init(void) if (ret) goto err_class; - ret = mdev_register_device(&mtty_dev.dev, &mtty_driver); + ret = mdev_register_parent(&mtty_dev.parent, &mtty_dev.dev, + &mtty_driver, mtty_mdev_types, + ARRAY_SIZE(mtty_mdev_types)); if (ret) goto err_device; return 0; @@ -1372,7 +1355,7 @@ err_cdev: static void __exit mtty_dev_exit(void) { mtty_dev.dev.bus = NULL; - mdev_unregister_device(&mtty_dev.dev); + mdev_unregister_parent(&mtty_dev.parent); device_unregister(&mtty_dev.dev); idr_destroy(&mtty_dev.vd_idr); diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index c8a616a9d034..1e5e66ae5a52 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -576,10 +576,14 @@ our $typeKernelTypedefs = qr{(?x: (?:__)?(?:u|s|be|le)(?:8|16|32|64)| atomic_t )}; +our $typeStdioTypedefs = qr{(?x: + FILE +)}; our $typeTypedefs = qr{(?x: $typeC99Typedefs\b| $typeOtherOSTypedefs\b| - $typeKernelTypedefs\b + $typeKernelTypedefs\b| + $typeStdioTypedefs\b )}; our $zero_initializer = qr{(?:(?:0[xX])?0+$Int_type?|NULL|false)\b}; @@ -807,6 +811,8 @@ our %deprecated_apis = ( "rcu_barrier_sched" => "rcu_barrier", "get_state_synchronize_sched" => "get_state_synchronize_rcu", "cond_synchronize_sched" => "cond_synchronize_rcu", + "kmap" => "kmap_local_page", + "kmap_atomic" => "kmap_local_page", ); #Create a search pattern for all these strings to speed up a loop below @@ -3140,6 +3146,50 @@ sub process { } } +# Check that the Fixes: tag style is correct + if (!$in_header_lines && + $line =~ /^\s*fixes:?\s*(?:commit\s*)?[0-9a-f]{5,}\b/i) { + my $orig_commit = ""; + my $id = "0123456789ab"; + my $title = "commit title"; + my $tag_case = 1; + my $tag_space = 1; + my $id_length = 1; + my $id_case = 1; + my $title_has_quotes = 0; + + if ($line =~ /(\s*fixes:?)\s+([0-9a-f]{5,})\s+($balanced_parens)/i) { + my $tag = $1; + $orig_commit = $2; + $title = $3; + + $tag_case = 0 if $tag eq "Fixes:"; + $tag_space = 0 if ($line =~ /^fixes:? [0-9a-f]{5,} ($balanced_parens)/i); + + $id_length = 0 if ($orig_commit =~ /^[0-9a-f]{12}$/i); + $id_case = 0 if ($orig_commit !~ /[A-F]/); + + # Always strip leading/trailing parens, then double quotes if present + $title = substr($title, 1, -1); + if ($title =~ /^".*"$/) { + $title = substr($title, 1, -1); + $title_has_quotes = 1; + } + } + + my ($cid, $ctitle) = git_commit_info($orig_commit, $id, + $title); + + if ($ctitle ne $title || $tag_case || $tag_space || + $id_length || $id_case || !$title_has_quotes) { + if (WARN("BAD_FIXES_TAG", + "Please use correct Fixes: style 'Fixes: <12 chars of sha1> (\"<title line>\")' - ie: 'Fixes: $cid (\"$ctitle\")'\n" .
$herecurr) && + $fix) { + $fixed[$fixlinenr] = "Fixes: $cid (\"$ctitle\")"; + } + } + } + # Check email subject for common tools that don't need to be mentioned if ($in_header_lines && $line =~ /^Subject:.*\b(?:checkpatch|sparse|smatch)\b[^:]/i) { diff --git a/scripts/decodecode b/scripts/decodecode index c711a196511c..b28fd2686561 100755 --- a/scripts/decodecode +++ b/scripts/decodecode @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # SPDX-License-Identifier: GPL-2.0 # Disassemble the Code: line in Linux oopses # usage: decodecode < oops.file @@ -8,6 +8,8 @@ # AFLAGS=--32 decodecode < 386.oops # PC=hex - the PC (program counter) the oops points to +faultlinenum=1 + cleanup() { rm -f $T $T.s $T.o $T.oo $T.aa $T.dis exit 1 @@ -102,28 +104,125 @@ disas() { grep -v "/tmp\|Disassembly\|\.text\|^$" > $t.dis 2>&1 } +# Match the maximum number of opcode bytes from @op_bytes contained within +# @opline +# +# Params: +# @op_bytes: The string of bytes from the Code: line +# @opline: The disassembled line coming from objdump +# +# Returns: +# The max number of opcode bytes from the beginning of @op_bytes which match +# the opcode bytes in the objdump line. +get_substr_opcode_bytes_num() +{ + local op_bytes=$1 + local opline=$2 + + local retval=0 + substr="" + + for opc in $op_bytes; + do + substr+="$opc" + + # return if opcode bytes do not match @opline anymore + if ! echo $opline | grep -q "$substr"; + then + break + fi + + # add trailing space + substr+=" " + retval=$((retval+1)) + done + + return $retval +} + +# Return the line number in objdump output to where the IP marker in the Code: +# line points to +# +# Params: +# @all_code: code in bytes without the marker +# @dis_file: disassembled file +# @ip_byte: The byte to which the IP points to +get_faultlinenum() +{ + local all_code="$1" + local dis_file="$2" + + # num bytes including IP byte + local num_bytes_ip=$(( $3 + 1 * $width )) + + # Add the two header lines (we're counting from 1). + local retval=3 + + # remove marker + all_code=$(echo $all_code | sed -e 's/[<>()]//g') + + while read line + do + get_substr_opcode_bytes_num "$all_code" "$line" + ate_opcodes=$? + + if ! (( $ate_opcodes )); then + continue + fi + + num_bytes_ip=$((num_bytes_ip - ($ate_opcodes * $width) )) + if (( $num_bytes_ip <= 0 )); then + break + fi + + # Delete matched opcode bytes from all_code. For that, compute + # how many chars those opcodes are represented by and include + # trailing space. + # + # a byte is 2 chars, ate_opcodes is also the number of trailing + # spaces + del_chars=$(( ($ate_opcodes * $width * 2) + $ate_opcodes )) + + all_code=$(echo $all_code | sed -e "s!^.\{$del_chars\}!!") + + let "retval+=1" + + done < $dis_file + + return $retval +} + marker=`expr index "$code" "\<"` if [ $marker -eq 0 ]; then marker=`expr index "$code" "\("` fi - touch $T.oo if [ $marker -ne 0 ]; then - # 2 opcode bytes and a single space - pc_sub=$(( $marker / 3 )) + # How many bytes to subtract from the program counter + # in order to get to the beginning virtual address of the + # Code: + pc_sub=$(( (($marker - 1) / (2 * $width + 1)) * $width )) echo All code >> $T.oo echo ======== >> $T.oo beforemark=`echo "$code"` echo -n " .$type 0x" > $T.s + echo $beforemark | sed -e 's/ /,0x/g; s/[<>()]//g' >> $T.s + disas $T $pc_sub + cat $T.dis >> $T.oo - rm -f $T.o $T.s $T.dis -# and fix code at-and-after marker + get_faultlinenum "$code" "$T.dis" $pc_sub + faultlinenum=$? 
+ + # and fix code at-and-after marker code=`echo "$code" | cut -c$((${marker} + 1))-` + + rm -f $T.o $T.s $T.dis fi + echo Code starting with the faulting instruction > $T.aa echo =========================================== >> $T.aa code=`echo $code | sed -e 's/\r//;s/ [<(]/ /;s/[>)] / /;s/ /,0x/g; s/[>)]$//'` @@ -132,15 +231,6 @@ echo $code >> $T.s disas $T 0 cat $T.dis >> $T.aa -# (lines of whole $T.oo) - (lines of $T.aa, i.e. "Code starting") + 3, -# i.e. the title + the "===..=" line (sed is counting from 1, 0 address is -# special) -faultlinenum=$(( $(wc -l $T.oo | cut -d" " -f1) - \ - $(wc -l $T.aa | cut -d" " -f1) + 3)) - -faultline=`cat $T.dis | head -1 | cut -d":" -f2-` -faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'` - cat $T.oo | sed -e "${faultlinenum}s/^\([^:]*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/" echo cat $T.aa diff --git a/tools/testing/kunit/qemu_configs/riscv.py b/tools/testing/kunit/qemu_configs/riscv.py index 6207be146d26..12a1d525978a 100644 --- a/tools/testing/kunit/qemu_configs/riscv.py +++ b/tools/testing/kunit/qemu_configs/riscv.py @@ -3,17 +3,13 @@ import os import os.path import sys -GITHUB_OPENSBI_URL = 'https://github.com/qemu/qemu/raw/master/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin' -OPENSBI_FILE = os.path.basename(GITHUB_OPENSBI_URL) +OPENSBI_FILE = 'opensbi-riscv64-generic-fw_dynamic.bin' +OPENSBI_PATH = '/usr/share/qemu/' + OPENSBI_FILE -if not os.path.isfile(OPENSBI_FILE): - print('\n\nOpenSBI file is not in the current working directory.\n' - 'Would you like me to download it for you from:\n' + GITHUB_OPENSBI_URL + ' ?\n') - response = input('yes/[no]: ') - if response.strip() == 'yes': - os.system('wget ' + GITHUB_OPENSBI_URL) - else: - sys.exit() +if not os.path.isfile(OPENSBI_PATH): + print('\n\nOpenSBI bios was not found in "' + OPENSBI_PATH + '".\n' + 'Please ensure that qemu-system-riscv is installed, or edit the path in "qemu_configs/riscv.py"\n') + sys.exit() QEMU_ARCH = QemuArchParams(linux_arch='riscv', kconfig=''' @@ -29,4 +25,4 @@ CONFIG_SERIAL_EARLYCON_RISCV_SBI=y''', extra_qemu_params=[ '-machine', 'virt', '-cpu', 'rv64', - '-bios', 'opensbi-riscv64-generic-fw_dynamic.bin']) + '-bios', OPENSBI_PATH]) diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc index 3145b0f1835c..8d26d5505808 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc @@ -85,7 +85,7 @@ run_enable_disable() { echo $check_disable > $EVENT_ENABLE done sleep $SLEEP_TIME - echo " make sure it's still works" + echo " make sure it still works" test_event_enabled $check_enable_star reset_ftrace_filter diff --git a/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh b/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh index 46a97f318f58..74ee5067a8ce 100755 --- a/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh +++ b/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh @@ -134,6 +134,16 @@ offline_memory_expect_fail() return 0 } +online_all_offline_memory() +{ + for memory in `hotpluggable_offline_memory`; do + if ! 
online_memory_expect_success $memory; then + echo "$FUNCNAME $memory: unexpected fail" >&2 + retval=1 + fi + done +} + error=-12 priority=0 # Run with default of ratio=2 for Kselftest run @@ -197,8 +207,11 @@ echo -e "\t trying to offline $target out of $hotpluggable_num memory block(s):" for memory in `hotpluggable_online_memory`; do if [ "$target" -gt 0 ]; then echo "online->offline memory$memory" - if offline_memory_expect_success $memory; then + if offline_memory_expect_success $memory &>/dev/null; then target=$(($target - 1)) + echo "-> Success" + else + echo "-> Failure" fi fi done @@ -257,7 +270,7 @@ prerequisite_extra echo 0 > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_OFFLINE/error for memory in `hotpluggable_online_memory`; do if [ $((RANDOM % 100)) -lt $ratio ]; then - offline_memory_expect_success $memory + offline_memory_expect_success $memory &>/dev/null fi done @@ -266,16 +279,16 @@ done # echo $error > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_ONLINE/error for memory in `hotpluggable_offline_memory`; do - online_memory_expect_fail $memory + if ! online_memory_expect_fail $memory; then + retval=1 + fi done # # Online all hot-pluggable memory # echo 0 > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_ONLINE/error -for memory in `hotpluggable_offline_memory`; do - online_memory_expect_success $memory -done +online_all_offline_memory # # Test memory hot-remove error handling (online => offline) @@ -283,11 +296,18 @@ done echo $error > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_OFFLINE/error for memory in `hotpluggable_online_memory`; do if [ $((RANDOM % 100)) -lt $ratio ]; then - offline_memory_expect_fail $memory + if ! offline_memory_expect_fail $memory; then + retval=1 + fi fi done echo 0 > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_OFFLINE/error /sbin/modprobe -q -r memory-notifier-error-inject +# +# Restore memory before exit +# +online_all_offline_memory + exit $retval diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh index d5a0dd548989..ee5e98204d3d 100755 --- a/tools/testing/selftests/net/fib_nexthops.sh +++ b/tools/testing/selftests/net/fib_nexthops.sh @@ -1223,6 +1223,11 @@ ipv4_fcnal() log_test $rc 0 "Delete nexthop route warning" run_cmd "$IP route delete 172.16.101.1/32 nhid 12" run_cmd "$IP nexthop del id 12" + + run_cmd "$IP nexthop add id 21 via 172.16.1.6 dev veth1" + run_cmd "$IP ro add 172.16.101.0/24 nhid 21" + run_cmd "$IP ro del 172.16.101.0/24 nexthop via 172.16.1.7 dev veth1 nexthop via 172.16.1.8 dev veth1" + log_test $? 
2 "Delete multipath route with only nh id based entry" } ipv4_grp_fcnal() diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile index 600e3a19d5e2..4504ee07be08 100644 --- a/tools/testing/selftests/netfilter/Makefile +++ b/tools/testing/selftests/netfilter/Makefile @@ -6,7 +6,7 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \ nft_concat_range.sh nft_conntrack_helper.sh \ nft_queue.sh nft_meta.sh nf_nat_edemux.sh \ ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \ - conntrack_vrf.sh nft_synproxy.sh + conntrack_vrf.sh nft_synproxy.sh rpath.sh CFLAGS += $(shell pkg-config --cflags libmnl 2>/dev/null || echo "-I/usr/include/libmnl") LDLIBS = -lmnl diff --git a/tools/testing/selftests/netfilter/nft_fib.sh b/tools/testing/selftests/netfilter/nft_fib.sh index fd76b69635a4..dff476e45e77 100755 --- a/tools/testing/selftests/netfilter/nft_fib.sh +++ b/tools/testing/selftests/netfilter/nft_fib.sh @@ -188,6 +188,7 @@ test_ping() { ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null +ip netns exec ${nsrouter} sysctl net.ipv4.conf.all.rp_filter=0 > /dev/null ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.rp_filter=0 > /dev/null sleep 3 diff --git a/tools/testing/selftests/netfilter/rpath.sh b/tools/testing/selftests/netfilter/rpath.sh new file mode 100755 index 000000000000..2d8da7bd8ab7 --- /dev/null +++ b/tools/testing/selftests/netfilter/rpath.sh @@ -0,0 +1,147 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# return code to signal skipped test +ksft_skip=4 + +# search for legacy iptables (it uses the xtables extensions +if iptables-legacy --version >/dev/null 2>&1; then + iptables='iptables-legacy' +elif iptables --version >/dev/null 2>&1; then + iptables='iptables' +else + iptables='' +fi + +if ip6tables-legacy --version >/dev/null 2>&1; then + ip6tables='ip6tables-legacy' +elif ! 
ip6tables --version >/dev/null 2>&1; then + ip6tables='ip6tables' +else + ip6tables='' +fi + +if nft --version >/dev/null 2>&1; then + nft='nft' +else + nft='' +fi + +if [ -z "$iptables$ip6tables$nft" ]; then + echo "SKIP: Test needs iptables, ip6tables or nft" + exit $ksft_skip +fi + +sfx=$(mktemp -u "XXXXXXXX") +ns1="ns1-$sfx" +ns2="ns2-$sfx" +trap "ip netns del $ns1; ip netns del $ns2" EXIT + +# create two netns, disable rp_filter in ns2 and +# keep IPv6 address when moving into VRF +ip netns add "$ns1" +ip netns add "$ns2" +ip netns exec "$ns2" sysctl -q net.ipv4.conf.all.rp_filter=0 +ip netns exec "$ns2" sysctl -q net.ipv4.conf.default.rp_filter=0 +ip netns exec "$ns2" sysctl -q net.ipv6.conf.all.keep_addr_on_down=1 + +# a standard connection between the netns, should not trigger rp filter +ip -net "$ns1" link add v0 type veth peer name v0 netns "$ns2" +ip -net "$ns1" link set v0 up; ip -net "$ns2" link set v0 up +ip -net "$ns1" a a 192.168.23.2/24 dev v0 +ip -net "$ns2" a a 192.168.23.1/24 dev v0 +ip -net "$ns1" a a fec0:23::2/64 dev v0 nodad +ip -net "$ns2" a a fec0:23::1/64 dev v0 nodad + +# rp filter testing: ns1 sends packets via v0 which ns2 would route back via d0 +ip -net "$ns2" link add d0 type dummy +ip -net "$ns2" link set d0 up +ip -net "$ns1" a a 192.168.42.2/24 dev v0 +ip -net "$ns2" a a 192.168.42.1/24 dev d0 +ip -net "$ns1" a a fec0:42::2/64 dev v0 nodad +ip -net "$ns2" a a fec0:42::1/64 dev d0 nodad + +# firewall matches to test +ip netns exec "$ns2" "$iptables" -t raw -A PREROUTING -s 192.168.0.0/16 -m rpfilter +ip netns exec "$ns2" "$ip6tables" -t raw -A PREROUTING -s fec0::/16 -m rpfilter +ip netns exec "$ns2" nft -f - <<EOF +table inet t { + chain c { + type filter hook prerouting priority raw; + ip saddr 192.168.0.0/16 fib saddr . iif oif exists counter + ip6 saddr fec0::/16 fib saddr . iif oif exists counter + } +} +EOF + +die() { + echo "FAIL: $*" + #ip netns exec "$ns2" "$iptables" -t raw -vS + #ip netns exec "$ns2" "$ip6tables" -t raw -vS + #ip netns exec "$ns2" nft list ruleset + exit 1 +} + +# check rule counters, return true if rule did not match +ipt_zero_rule() { # (command) + [ -n "$1" ] || return 0 + ip netns exec "$ns2" "$1" -t raw -vS | grep -q -- "-m rpfilter -c 0 0" +} +nft_zero_rule() { # (family) + [ -n "$nft" ] || return 0 + ip netns exec "$ns2" "$nft" list chain inet t c | \ + grep -q "$1 saddr .* counter packets 0 bytes 0" +} + +netns_ping() { # (netns, args...) 
+ local netns="$1" + shift + ip netns exec "$netns" ping -q -c 1 -W 1 "$@" >/dev/null +} + +testrun() { + # clear counters first + [ -n "$iptables" ] && ip netns exec "$ns2" "$iptables" -t raw -Z + [ -n "$ip6tables" ] && ip netns exec "$ns2" "$ip6tables" -t raw -Z + if [ -n "$nft" ]; then + ( + echo "delete table inet t"; + ip netns exec "$ns2" nft -s list table inet t; + ) | ip netns exec "$ns2" nft -f - + fi + + # test 1: martian traffic should fail rpfilter matches + netns_ping "$ns1" -I v0 192.168.42.1 && \ + die "martian ping 192.168.42.1 succeeded" + netns_ping "$ns1" -I v0 fec0:42::1 && \ + die "martian ping fec0:42::1 succeeded" + + ipt_zero_rule "$iptables" || die "iptables matched martian" + ipt_zero_rule "$ip6tables" || die "ip6tables matched martian" + nft_zero_rule ip || die "nft IPv4 matched martian" + nft_zero_rule ip6 || die "nft IPv6 matched martian" + + # test 2: rpfilter match should pass for regular traffic + netns_ping "$ns1" 192.168.23.1 || \ + die "regular ping 192.168.23.1 failed" + netns_ping "$ns1" fec0:23::1 || \ + die "regular ping fec0:23::1 failed" + + ipt_zero_rule "$iptables" && die "iptables match not effective" + ipt_zero_rule "$ip6tables" && die "ip6tables match not effective" + nft_zero_rule ip && die "nft IPv4 match not effective" + nft_zero_rule ip6 && die "nft IPv6 match not effective" + +} + +testrun + +# repeat test with vrf device in $ns2 +ip -net "$ns2" link add vrf0 type vrf table 10 +ip -net "$ns2" link set vrf0 up +ip -net "$ns2" link set v0 master vrf0 + +testrun + +echo "PASS: netfilter reverse path match works as intended" +exit 0 diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore index c4e6a34f9657..a156ac5dd2c6 100644 --- a/tools/testing/selftests/proc/.gitignore +++ b/tools/testing/selftests/proc/.gitignore @@ -5,6 +5,7 @@ /proc-fsconfig-hidepid /proc-loadavg-001 /proc-multiple-procfs +/proc-empty-vm /proc-pid-vm /proc-self-map-files-001 /proc-self-map-files-002 diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile index 219fc6113847..cd95369254c0 100644 --- a/tools/testing/selftests/proc/Makefile +++ b/tools/testing/selftests/proc/Makefile @@ -8,6 +8,7 @@ TEST_GEN_PROGS += fd-001-lookup TEST_GEN_PROGS += fd-002-posix-eq TEST_GEN_PROGS += fd-003-kthread TEST_GEN_PROGS += proc-loadavg-001 +TEST_GEN_PROGS += proc-empty-vm TEST_GEN_PROGS += proc-pid-vm TEST_GEN_PROGS += proc-self-map-files-001 TEST_GEN_PROGS += proc-self-map-files-002 diff --git a/tools/testing/selftests/proc/proc-empty-vm.c b/tools/testing/selftests/proc/proc-empty-vm.c new file mode 100644 index 000000000000..d95b1cb43d9d --- /dev/null +++ b/tools/testing/selftests/proc/proc-empty-vm.c @@ -0,0 +1,386 @@ +/* + * Copyright (c) 2022 Alexey Dobriyan <[email protected]> + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ +/* + * Create a process without mappings by unmapping everything at once and + * holding it with ptrace(2). See what happens to + * + * /proc/${pid}/maps + * /proc/${pid}/numa_maps + * /proc/${pid}/smaps + * /proc/${pid}/smaps_rollup + */ +#undef NDEBUG +#include <assert.h> +#include <errno.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <fcntl.h> +#include <sys/mman.h> +#include <sys/ptrace.h> +#include <sys/resource.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <unistd.h> + +/* + * 0: vsyscall VMA doesn't exist vsyscall=none + * 1: vsyscall VMA is --xp vsyscall=xonly + * 2: vsyscall VMA is r-xp vsyscall=emulate + */ +static int g_vsyscall; +static const char *g_proc_pid_maps_vsyscall; +static const char *g_proc_pid_smaps_vsyscall; + +static const char proc_pid_maps_vsyscall_0[] = ""; +static const char proc_pid_maps_vsyscall_1[] = +"ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall]\n"; +static const char proc_pid_maps_vsyscall_2[] = +"ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]\n"; + +static const char proc_pid_smaps_vsyscall_0[] = ""; + +static const char proc_pid_smaps_vsyscall_1[] = +"ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall]\n" +"Size: 4 kB\n" +"KernelPageSize: 4 kB\n" +"MMUPageSize: 4 kB\n" +"Rss: 0 kB\n" +"Pss: 0 kB\n" +"Pss_Dirty: 0 kB\n" +"Shared_Clean: 0 kB\n" +"Shared_Dirty: 0 kB\n" +"Private_Clean: 0 kB\n" +"Private_Dirty: 0 kB\n" +"Referenced: 0 kB\n" +"Anonymous: 0 kB\n" +"LazyFree: 0 kB\n" +"AnonHugePages: 0 kB\n" +"ShmemPmdMapped: 0 kB\n" +"FilePmdMapped: 0 kB\n" +"Shared_Hugetlb: 0 kB\n" +"Private_Hugetlb: 0 kB\n" +"Swap: 0 kB\n" +"SwapPss: 0 kB\n" +"Locked: 0 kB\n" +"THPeligible: 0\n" +/* + * "ProtectionKey:" field is conditional. It is possible to check it as well, + * but I don't have such machine. + */ +; + +static const char proc_pid_smaps_vsyscall_2[] = +"ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]\n" +"Size: 4 kB\n" +"KernelPageSize: 4 kB\n" +"MMUPageSize: 4 kB\n" +"Rss: 0 kB\n" +"Pss: 0 kB\n" +"Pss_Dirty: 0 kB\n" +"Shared_Clean: 0 kB\n" +"Shared_Dirty: 0 kB\n" +"Private_Clean: 0 kB\n" +"Private_Dirty: 0 kB\n" +"Referenced: 0 kB\n" +"Anonymous: 0 kB\n" +"LazyFree: 0 kB\n" +"AnonHugePages: 0 kB\n" +"ShmemPmdMapped: 0 kB\n" +"FilePmdMapped: 0 kB\n" +"Shared_Hugetlb: 0 kB\n" +"Private_Hugetlb: 0 kB\n" +"Swap: 0 kB\n" +"SwapPss: 0 kB\n" +"Locked: 0 kB\n" +"THPeligible: 0\n" +/* + * "ProtectionKey:" field is conditional. It is possible to check it as well, + * but I'm too tired. + */ +; + +static void sigaction_SIGSEGV(int _, siginfo_t *__, void *___) +{ + _exit(EXIT_FAILURE); +} + +static void sigaction_SIGSEGV_vsyscall(int _, siginfo_t *__, void *___) +{ + _exit(g_vsyscall); +} + +/* + * vsyscall page can't be unmapped, probe it directly. + */ +static void vsyscall(void) +{ + pid_t pid; + int wstatus; + + pid = fork(); + if (pid < 0) { + fprintf(stderr, "fork, errno %d\n", errno); + exit(1); + } + if (pid == 0) { + setrlimit(RLIMIT_CORE, &(struct rlimit){}); + + /* Hide "segfault at ffffffffff600000" messages.
*/ + struct sigaction act = {}; + act.sa_flags = SA_SIGINFO; + act.sa_sigaction = sigaction_SIGSEGV_vsyscall; + sigaction(SIGSEGV, &act, NULL); + + g_vsyscall = 0; + /* gettimeofday(NULL, NULL); */ + asm volatile ( + "call %P0" + : + : "i" (0xffffffffff600000), "D" (NULL), "S" (NULL) + : "rax", "rcx", "r11" + ); + + g_vsyscall = 1; + *(volatile int *)0xffffffffff600000UL; + + g_vsyscall = 2; + exit(g_vsyscall); + } + waitpid(pid, &wstatus, 0); + if (WIFEXITED(wstatus)) { + g_vsyscall = WEXITSTATUS(wstatus); + } else { + fprintf(stderr, "error: vsyscall wstatus %08x\n", wstatus); + exit(1); + } +} + +static int test_proc_pid_maps(pid_t pid) +{ + char buf[4096]; + snprintf(buf, sizeof(buf), "/proc/%u/maps", pid); + int fd = open(buf, O_RDONLY); + if (fd == -1) { + perror("open /proc/${pid}/maps"); + return EXIT_FAILURE; + } else { + ssize_t rv = read(fd, buf, sizeof(buf)); + close(fd); + if (g_vsyscall == 0) { + assert(rv == 0); + } else { + size_t len = strlen(g_proc_pid_maps_vsyscall); + assert(rv == len); + assert(memcmp(buf, g_proc_pid_maps_vsyscall, len) == 0); + } + return EXIT_SUCCESS; + } +} + +static int test_proc_pid_numa_maps(pid_t pid) +{ + char buf[4096]; + snprintf(buf, sizeof(buf), "/proc/%u/numa_maps", pid); + int fd = open(buf, O_RDONLY); + if (fd == -1) { + if (errno == ENOENT) { + /* + * /proc/${pid}/numa_maps is under CONFIG_NUMA, + * it doesn't necessarily exist. + */ + return EXIT_SUCCESS; + } + perror("open /proc/${pid}/numa_maps"); + return EXIT_FAILURE; + } else { + ssize_t rv = read(fd, buf, sizeof(buf)); + close(fd); + assert(rv == 0); + return EXIT_SUCCESS; + } +} + +static int test_proc_pid_smaps(pid_t pid) +{ + char buf[4096]; + snprintf(buf, sizeof(buf), "/proc/%u/smaps", pid); + int fd = open(buf, O_RDONLY); + if (fd == -1) { + if (errno == ENOENT) { + /* + * /proc/${pid}/smaps is under CONFIG_PROC_PAGE_MONITOR, + * it doesn't necessarily exist. + */ + return EXIT_SUCCESS; + } + perror("open /proc/${pid}/smaps"); + return EXIT_FAILURE; + } else { + ssize_t rv = read(fd, buf, sizeof(buf)); + close(fd); + if (g_vsyscall == 0) { + assert(rv == 0); + } else { + size_t len = strlen(g_proc_pid_maps_vsyscall); + /* TODO "ProtectionKey:" */ + assert(rv > len); + assert(memcmp(buf, g_proc_pid_maps_vsyscall, len) == 0); + } + return EXIT_SUCCESS; + } +} + +static const char g_smaps_rollup[] = +"00000000-00000000 ---p 00000000 00:00 0 [rollup]\n" +"Rss: 0 kB\n" +"Pss: 0 kB\n" +"Pss_Dirty: 0 kB\n" +"Pss_Anon: 0 kB\n" +"Pss_File: 0 kB\n" +"Pss_Shmem: 0 kB\n" +"Shared_Clean: 0 kB\n" +"Shared_Dirty: 0 kB\n" +"Private_Clean: 0 kB\n" +"Private_Dirty: 0 kB\n" +"Referenced: 0 kB\n" +"Anonymous: 0 kB\n" +"LazyFree: 0 kB\n" +"AnonHugePages: 0 kB\n" +"ShmemPmdMapped: 0 kB\n" +"FilePmdMapped: 0 kB\n" +"Shared_Hugetlb: 0 kB\n" +"Private_Hugetlb: 0 kB\n" +"Swap: 0 kB\n" +"SwapPss: 0 kB\n" +"Locked: 0 kB\n" +; + +static int test_proc_pid_smaps_rollup(pid_t pid) +{ + char buf[4096]; + snprintf(buf, sizeof(buf), "/proc/%u/smaps_rollup", pid); + int fd = open(buf, O_RDONLY); + if (fd == -1) { + if (errno == ENOENT) { + /* + * /proc/${pid}/smaps_rollup is under CONFIG_PROC_PAGE_MONITOR, + * it doesn't necessarily exist. 
+ */ + return EXIT_SUCCESS; + } + perror("open /proc/${pid}/smaps_rollup"); + return EXIT_FAILURE; + } else { + ssize_t rv = read(fd, buf, sizeof(buf)); + close(fd); + assert(rv == sizeof(g_smaps_rollup) - 1); + assert(memcmp(buf, g_smaps_rollup, sizeof(g_smaps_rollup) - 1) == 0); + return EXIT_SUCCESS; + } +} + +int main(void) +{ + int rv = EXIT_SUCCESS; + + vsyscall(); + + switch (g_vsyscall) { + case 0: + g_proc_pid_maps_vsyscall = proc_pid_maps_vsyscall_0; + g_proc_pid_smaps_vsyscall = proc_pid_smaps_vsyscall_0; + break; + case 1: + g_proc_pid_maps_vsyscall = proc_pid_maps_vsyscall_1; + g_proc_pid_smaps_vsyscall = proc_pid_smaps_vsyscall_1; + break; + case 2: + g_proc_pid_maps_vsyscall = proc_pid_maps_vsyscall_2; + g_proc_pid_smaps_vsyscall = proc_pid_smaps_vsyscall_2; + break; + default: + abort(); + } + + pid_t pid = fork(); + if (pid == -1) { + perror("fork"); + return EXIT_FAILURE; + } else if (pid == 0) { + rv = ptrace(PTRACE_TRACEME, 0, NULL, NULL); + if (rv != 0) { + if (errno == EPERM) { + fprintf(stderr, +"Did you know? ptrace(PTRACE_TRACEME) doesn't work under strace.\n" + ); + kill(getppid(), SIGTERM); + return EXIT_FAILURE; + } + perror("ptrace PTRACE_TRACEME"); + return EXIT_FAILURE; + } + + /* + * Hide "segfault at ..." messages. Signal handler won't run. + */ + struct sigaction act = {}; + act.sa_flags = SA_SIGINFO; + act.sa_sigaction = sigaction_SIGSEGV; + sigaction(SIGSEGV, &act, NULL); + +#ifdef __amd64__ + munmap(NULL, ((size_t)1 << 47) - 4096); +#else +#error "implement 'unmap everything'" +#endif + return EXIT_FAILURE; + } else { + /* + * TODO find reliable way to signal parent that munmap(2) completed. + * Child can't do it directly because it effectively doesn't exist + * anymore. Looking at child's VM files isn't 100% reliable either: + * due to a bug they may not become empty or empty-like. + */ + sleep(1); + + if (rv == EXIT_SUCCESS) { + rv = test_proc_pid_maps(pid); + } + if (rv == EXIT_SUCCESS) { + rv = test_proc_pid_numa_maps(pid); + } + if (rv == EXIT_SUCCESS) { + rv = test_proc_pid_smaps(pid); + } + if (rv == EXIT_SUCCESS) { + rv = test_proc_pid_smaps_rollup(pid); + } + /* + * TODO test /proc/${pid}/statm, task_statm() + * ->start_code, ->end_code aren't updated by munmap(). + * Output can be "0 0 0 2 0 0 0\n" where "2" can be anything. + */ + + /* Cut the rope. 
*/ + int wstatus; + waitpid(pid, &wstatus, 0); + assert(WIFSTOPPED(wstatus)); + assert(WSTOPSIG(wstatus) == SIGSEGV); + } + + return rv; +} diff --git a/tools/testing/selftests/proc/proc-pid-vm.c b/tools/testing/selftests/proc/proc-pid-vm.c index e5962f4794f5..69551bfa215c 100644 --- a/tools/testing/selftests/proc/proc-pid-vm.c +++ b/tools/testing/selftests/proc/proc-pid-vm.c @@ -213,22 +213,22 @@ static int make_exe(const uint8_t *payload, size_t len) /* * 0: vsyscall VMA doesn't exist vsyscall=none - * 1: vsyscall VMA is r-xp vsyscall=emulate - * 2: vsyscall VMA is --xp vsyscall=xonly + * 1: vsyscall VMA is --xp vsyscall=xonly + * 2: vsyscall VMA is r-xp vsyscall=emulate */ -static int g_vsyscall; +static volatile int g_vsyscall; static const char *str_vsyscall; static const char str_vsyscall_0[] = ""; static const char str_vsyscall_1[] = -"ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]\n"; -static const char str_vsyscall_2[] = "ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall]\n"; +static const char str_vsyscall_2[] = +"ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]\n"; #ifdef __x86_64__ static void sigaction_SIGSEGV(int _, siginfo_t *__, void *___) { - _exit(1); + _exit(g_vsyscall); } /* @@ -255,6 +255,7 @@ static void vsyscall(void) act.sa_sigaction = sigaction_SIGSEGV; (void)sigaction(SIGSEGV, &act, NULL); + g_vsyscall = 0; /* gettimeofday(NULL, NULL); */ asm volatile ( "call %P0" @@ -262,45 +263,20 @@ static void vsyscall(void) : "i" (0xffffffffff600000), "D" (NULL), "S" (NULL) : "rax", "rcx", "r11" ); - exit(0); - } - waitpid(pid, &wstatus, 0); - if (WIFEXITED(wstatus) && WEXITSTATUS(wstatus) == 0) { - /* vsyscall page exists and is executable. */ - } else { - /* vsyscall page doesn't exist. */ - g_vsyscall = 0; - return; - } - - pid = fork(); - if (pid < 0) { - fprintf(stderr, "fork, errno %d\n", errno); - exit(1); - } - if (pid == 0) { - struct rlimit rlim = {0, 0}; - (void)setrlimit(RLIMIT_CORE, &rlim); - - /* Hide "segfault at ffffffffff600000" messages. */ - struct sigaction act; - memset(&act, 0, sizeof(struct sigaction)); - act.sa_flags = SA_SIGINFO; - act.sa_sigaction = sigaction_SIGSEGV; - (void)sigaction(SIGSEGV, &act, NULL); + g_vsyscall = 1; *(volatile int *)0xffffffffff600000UL; - exit(0); + + g_vsyscall = 2; + exit(g_vsyscall); } waitpid(pid, &wstatus, 0); - if (WIFEXITED(wstatus) && WEXITSTATUS(wstatus) == 0) { - /* vsyscall page is readable and executable. */ - g_vsyscall = 1; - return; + if (WIFEXITED(wstatus)) { + g_vsyscall = WEXITSTATUS(wstatus); + } else { + fprintf(stderr, "error: wstatus %08x\n", wstatus); + exit(1); } - - /* vsyscall page is executable but unreadable. 
*/ - g_vsyscall = 2; } int main(void) diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c index dc838e26a5b9..ee01e40e8bc6 100644 --- a/usr/gen_init_cpio.c +++ b/usr/gen_init_cpio.c @@ -326,7 +326,7 @@ static int cpio_mkfile(const char *name, const char *location, char s[256]; struct stat buf; unsigned long size; - int file = -1; + int file; int retval; int rc = -1; int namesize; diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c index ce1b01d02c51..495ceabffe88 100644 --- a/virt/kvm/vfio.c +++ b/virt/kvm/vfio.c @@ -24,6 +24,9 @@ struct kvm_vfio_group { struct list_head node; struct file *file; +#ifdef CONFIG_SPAPR_TCE_IOMMU + struct iommu_group *iommu_group; +#endif }; struct kvm_vfio { @@ -61,6 +64,23 @@ static bool kvm_vfio_file_enforced_coherent(struct file *file) return ret; } +static bool kvm_vfio_file_is_group(struct file *file) +{ + bool (*fn)(struct file *file); + bool ret; + + fn = symbol_get(vfio_file_is_group); + if (!fn) + return false; + + ret = fn(file); + + symbol_put(vfio_file_is_group); + + return ret; +} + +#ifdef CONFIG_SPAPR_TCE_IOMMU static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file) { struct iommu_group *(*fn)(struct file *file); @@ -77,16 +97,15 @@ static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file) return ret; } -#ifdef CONFIG_SPAPR_TCE_IOMMU static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm, struct kvm_vfio_group *kvg) { - struct iommu_group *grp = kvm_vfio_file_iommu_group(kvg->file); - - if (WARN_ON_ONCE(!grp)) + if (WARN_ON_ONCE(!kvg->iommu_group)) return; - kvm_spapr_tce_release_iommu_group(kvm, grp); + kvm_spapr_tce_release_iommu_group(kvm, kvg->iommu_group); + iommu_group_put(kvg->iommu_group); + kvg->iommu_group = NULL; } #endif @@ -136,7 +155,7 @@ static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd) return -EBADF; /* Ensure the FD is a vfio group FD.*/ - if (!kvm_vfio_file_iommu_group(filp)) { + if (!kvm_vfio_file_is_group(filp)) { ret = -EINVAL; goto err_fput; } @@ -236,19 +255,19 @@ static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev, mutex_lock(&kv->lock); list_for_each_entry(kvg, &kv->group_list, node) { - struct iommu_group *grp; - if (kvg->file != f.file) continue; - grp = kvm_vfio_file_iommu_group(kvg->file); - if (WARN_ON_ONCE(!grp)) { - ret = -EIO; - goto err_fdput; + if (!kvg->iommu_group) { + kvg->iommu_group = kvm_vfio_file_iommu_group(kvg->file); + if (WARN_ON_ONCE(!kvg->iommu_group)) { + ret = -EIO; + goto err_fdput; + } } ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd, - grp); + kvg->iommu_group); break; }
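A few illustrative sketches follow for the changes above. First, the BAD_FIXES_TAG check added to scripts/checkpatch.pl: the shape it enforces is 'Fixes: <12 chars of sha1> ("<title line>")'. Below is a minimal, standalone C sketch of that shape test using POSIX regular expressions. It is an illustration only, not part of checkpatch: the Perl hunk additionally queries git via git_commit_info() to verify the commit id and title, which this sketch does not attempt, and fixes_tag_ok() is a hypothetical helper name.

/*
 * Hedged sketch: accept only 'Fixes: <12 lowercase hex> ("<title>")'.
 * Compile: cc -o fixes-check fixes-check.c
 */
#include <regex.h>
#include <stdio.h>

static int fixes_tag_ok(const char *line)
{
	/* 12 lowercase hex digits, one space, title wrapped in ("...") */
	static const char pat[] = "^Fixes: [0-9a-f]{12} \\(\"[^\"]+\"\\)$";
	regex_t re;
	int rc;

	if (regcomp(&re, pat, REG_EXTENDED | REG_NOSUB))
		return 0;
	rc = regexec(&re, line, 0, NULL, 0);
	regfree(&re);
	return rc == 0;
}

int main(void)
{
	const char *good = "Fixes: 0123456789ab (\"commit title\")";
	const char *bad = "fixes: 0123456789ABCDEF commit title";

	printf("%s -> %s\n", good, fixes_tag_ok(good) ? "ok" : "BAD_FIXES_TAG");
	printf("%s -> %s\n", bad, fixes_tag_ok(bad) ? "ok" : "BAD_FIXES_TAG");
	return 0;
}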
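Second, the scripts/decodecode rework rests on one piece of arithmetic: a Code: line is a run of byte tokens, each 2 * width hex digits followed by a space, so the number of bytes preceding the faulting instruction is ((marker - 1) / (2 * width + 1)) * width, where marker is the 1-based position of the '<' IP marker. Here is a small C sketch of just that computation, assuming an x86-style Code: line with width 1; pc_sub() mirrors the shell arithmetic, and decodecode itself remains the bash implementation.

/*
 * Hedged sketch of the decodecode offset math. Assumes width == 1
 * (x86), i.e. tokens like "31 c0 <0f> 0b" where <..> marks the IP.
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

static unsigned int pc_sub(unsigned int marker, unsigned int width)
{
	/* bytes preceding the IP byte; each token is 2*width chars + space */
	return ((marker - 1) / (2 * width + 1)) * width;
}

int main(void)
{
	const char *code = "31 c0 <0f> 0b";
	/* `expr index` in the script is 1-based, so mirror that here */
	unsigned int marker = (unsigned int)(strchr(code, '<') - code) + 1;

	assert(marker == 7);
	assert(pc_sub(marker, 1) == 2); /* "31" and "c0" precede the IP */
	printf("pc_sub = %u\n", pc_sub(marker, 1));
	return 0;
}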
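Finally, the memory-hotplug selftest helpers (hotpluggable_online_memory, the new online_all_offline_memory, and friends) all walk memory blocks under /sys/devices/system/memory, where each memoryN/state file reads "online" or "offline". The following is a rough, read-only userspace sketch of that enumeration, assuming only this standard sysfs layout; the selftest itself additionally filters on the removable attribute and writes to state to flip blocks.

/*
 * Hedged sketch: list each memory block and its online/offline state.
 * Read-only; prints nothing on kernels without memory block dirs.
 */
#include <glob.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	glob_t g;
	size_t i;

	if (glob("/sys/devices/system/memory/memory*/state", 0, NULL, &g) != 0)
		return 0; /* no memory hotplug support: nothing to report */

	for (i = 0; i < g.gl_pathc; i++) {
		char state[16] = "";
		FILE *f = fopen(g.gl_pathv[i], "r");

		if (!f)
			continue;
		if (fgets(state, sizeof(state), f))
			state[strcspn(state, "\n")] = '\0';
		fclose(f);
		printf("%s: %s\n", g.gl_pathv[i], state);
	}
	globfree(&g);
	return 0;
}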